github.com/FreeRTOS/FreeRTOS-Kernel.git
author    Sebastian Brosch <9060129+Techcore123@users.noreply.github.com>  2023-09-20 13:17:42 +0300
committer GitHub <noreply@github.com>  2023-09-20 13:17:42 +0300
commit    83861f5b1def1e4a3e1ea75562446f06e5b4359e (patch)
tree      2f030beb72c7712a4e4425e37373a4ff46ad0742 /tasks.c
parent    15e0364968aff9c6c3cc7b4893e8ec0311047ce2 (diff)
Add Trace Hook Macros to all API calls (#786)
This pull request adds out-of-the-box support for different tracing tools. New trace hook macros have been added for all public API functions, one for each function entry and one for each exit. There are no functional changes, as the macros are empty by default. For more information, see the following forum post: https://forums.freertos.org/t/add-tracing-functionality-for-all-api-calls/18007.
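The pattern is uniform: each public function gains a traceENTER_<function>() hook that fires with the call arguments on entry and a traceRETURN_<function>() hook that fires with the return value (if any) just before the function returns, and every hook compiles away to nothing unless the application defines it. As a rough, hypothetical sketch (not part of this commit), a tracing tool or application could map a few of these hooks onto its own logging routine from FreeRTOSConfig.h; vExampleTraceLog() is an assumed, user-supplied helper, and the macro signatures are taken from the diff below:

#include <stdint.h>

/* Assumed user-supplied logging routine (hypothetical, not provided by the kernel). */
void vExampleTraceLog( const char * pcEvent, uint32_t ulArg );

/* Map a few of the new hooks onto the logger.  Hooks left undefined keep
 * their default empty definitions, so untraced events add no overhead. */
#define traceENTER_vTaskDelay( xTicksToDelay ) \
    vExampleTraceLog( "ENTER vTaskDelay", ( uint32_t ) ( xTicksToDelay ) )

#define traceRETURN_vTaskDelay() \
    vExampleTraceLog( "RETURN vTaskDelay", 0U )

#define traceENTER_xTaskCreate( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ) \
    vExampleTraceLog( "ENTER xTaskCreate", ( uint32_t ) ( usStackDepth ) )

#define traceRETURN_xTaskCreate( xReturn ) \
    vExampleTraceLog( "RETURN xTaskCreate", ( uint32_t ) ( xReturn ) )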
Diffstat (limited to 'tasks.c')
-rw-r--r--  tasks.c | 353
1 file changed, 353 insertions, 0 deletions
diff --git a/tasks.c b/tasks.c
index 99878a3a3..863535c89 100644
--- a/tasks.c
+++ b/tasks.c
@@ -1204,6 +1204,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
TCB_t * pxNewTCB;
TaskHandle_t xReturn;
+ traceENTER_xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
+
configASSERT( puxStackBuffer != NULL );
configASSERT( pxTaskBuffer != NULL );
@@ -1250,6 +1252,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
xReturn = NULL;
}
+ traceRETURN_xTaskCreateStatic( xReturn );
+
return xReturn;
}
@@ -1273,6 +1277,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
TCB_t * pxNewTCB;
BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+ traceENTER_xTaskCreateRestrictedStatic( pxTaskDefinition, pxCreatedTask );
+
configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
@@ -1314,6 +1320,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
xReturn = pdPASS;
}
+ traceRETURN_xTaskCreateRestrictedStatic( xReturn );
+
return xReturn;
}
@@ -1337,6 +1345,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
TCB_t * pxNewTCB;
BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+ traceENTER_xTaskCreateRestricted( pxTaskDefinition, pxCreatedTask );
+
configASSERT( pxTaskDefinition->puxStackBuffer );
if( pxTaskDefinition->puxStackBuffer != NULL )
@@ -1379,6 +1389,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
}
}
+ traceRETURN_xTaskCreateRestricted( xReturn );
+
return xReturn;
}
@@ -1410,6 +1422,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
TCB_t * pxNewTCB;
BaseType_t xReturn;
+ traceENTER_xTaskCreate( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
+
/* If the stack grows down then allocate the stack then the TCB so the stack
* does not grow into the TCB. Likewise if the stack grows up then allocate
* the TCB then the stack. */
@@ -1497,6 +1511,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
}
+ traceRETURN_xTaskCreate( xReturn );
+
return xReturn;
}
@@ -1904,6 +1920,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * pxTCB;
+ traceENTER_vTaskDelete( xTaskToDelete );
+
taskENTER_CRITICAL();
{
/* If null is passed in here then it is the calling task that is
@@ -2034,6 +2052,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
taskEXIT_CRITICAL();
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
+
+ traceRETURN_vTaskDelete();
}
#endif /* INCLUDE_vTaskDelete */
@@ -2047,6 +2067,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
TickType_t xTimeToWake;
BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
+ traceENTER_xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );
+
configASSERT( pxPreviousWakeTime );
configASSERT( ( xTimeIncrement > 0U ) );
@@ -2125,6 +2147,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
mtCOVERAGE_TEST_MARKER();
}
+ traceRETURN_xTaskDelayUntil( xShouldDelay );
+
return xShouldDelay;
}
@@ -2137,6 +2161,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
BaseType_t xAlreadyYielded = pdFALSE;
+ traceENTER_vTaskDelay( xTicksToDelay );
+
/* A delay time of zero just forces a reschedule. */
if( xTicksToDelay > ( TickType_t ) 0U )
{
@@ -2176,6 +2202,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskDelay();
}
#endif /* INCLUDE_vTaskDelay */
@@ -2192,6 +2220,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
List_t const * pxOverflowedDelayedList;
const TCB_t * const pxTCB = xTask;
+ traceENTER_eTaskGetState( xTask );
+
configASSERT( pxTCB );
#if ( configNUMBER_OF_CORES == 1 )
@@ -2303,6 +2333,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
+ traceRETURN_eTaskGetState( eReturn );
+
return eReturn;
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
@@ -2316,6 +2348,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
TCB_t const * pxTCB;
UBaseType_t uxReturn;
+ traceENTER_uxTaskPriorityGet( xTask );
+
taskENTER_CRITICAL();
{
/* If null is passed in here then it is the priority of the task
@@ -2325,6 +2359,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
taskEXIT_CRITICAL();
+ traceRETURN_uxTaskPriorityGet( uxReturn );
+
return uxReturn;
}
@@ -2339,6 +2375,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
UBaseType_t uxReturn;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_uxTaskPriorityGetFromISR( xTask );
+
/* RTOS ports that support interrupt nesting have the concept of a
* maximum system call (or maximum API call) interrupt priority.
* Interrupts that are above the maximum system call priority are keep
@@ -2366,6 +2404,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ traceRETURN_uxTaskPriorityGetFromISR( uxReturn );
+
return uxReturn;
}
@@ -2381,6 +2421,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
BaseType_t xYieldRequired = pdFALSE;
+ traceENTER_vTaskPrioritySet( xTask, uxNewPriority );
+
#if ( configNUMBER_OF_CORES > 1 )
BaseType_t xYieldForTask = pdFALSE;
#endif
@@ -2577,6 +2619,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskPrioritySet();
}
#endif /* INCLUDE_vTaskPrioritySet */
@@ -2594,6 +2638,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
UBaseType_t uxPrevNotAllowedCores;
#endif
+ traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );
+
taskENTER_CRITICAL();
{
pxTCB = prvGetTCBFromHandle( xTask );
@@ -2639,6 +2685,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskCoreAffinitySet();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
/*-----------------------------------------------------------*/
@@ -2649,6 +2697,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
const TCB_t * pxTCB;
UBaseType_t uxCoreAffinityMask;
+ traceENTER_vTaskCoreAffinityGet( xTask );
+
taskENTER_CRITICAL();
{
pxTCB = prvGetTCBFromHandle( xTask );
@@ -2656,6 +2706,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
taskEXIT_CRITICAL();
+ traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
+
return uxCoreAffinityMask;
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
@@ -2668,6 +2720,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * pxTCB;
+ traceENTER_vTaskPreemptionDisable( xTask );
+
taskENTER_CRITICAL();
{
pxTCB = prvGetTCBFromHandle( xTask );
@@ -2675,6 +2729,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
pxTCB->xPreemptionDisable = pdTRUE;
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskPreemptionDisable();
}
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
@@ -2687,6 +2743,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
TCB_t * pxTCB;
BaseType_t xCoreID;
+ traceENTER_vTaskPreemptionEnable( xTask );
+
taskENTER_CRITICAL();
{
pxTCB = prvGetTCBFromHandle( xTask );
@@ -2703,6 +2761,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskPreemptionEnable();
}
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
@@ -2718,6 +2778,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
BaseType_t xTaskRunningOnCore;
#endif
+ traceENTER_vTaskSuspend( xTaskToSuspend );
+
taskENTER_CRITICAL();
{
/* If null is passed in here then it is the running task that is
@@ -2867,6 +2929,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
taskEXIT_CRITICAL();
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
+
+ traceRETURN_vTaskSuspend();
}
#endif /* INCLUDE_vTaskSuspend */
@@ -2924,6 +2988,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * const pxTCB = xTaskToResume;
+ traceENTER_vTaskResume( xTaskToResume );
+
/* It does not make sense to resume the calling task. */
configASSERT( xTaskToResume );
@@ -2969,6 +3035,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskResume();
}
#endif /* INCLUDE_vTaskSuspend */
@@ -2983,6 +3051,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
TCB_t * const pxTCB = xTaskToResume;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_xTaskResumeFromISR( xTaskToResume );
+
configASSERT( xTaskToResume );
/* RTOS ports that support interrupt nesting have the concept of a
@@ -3061,6 +3131,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ traceRETURN_xTaskResumeFromISR( xYieldRequired );
+
return xYieldRequired;
}
@@ -3244,6 +3316,8 @@ void vTaskStartScheduler( void )
{
BaseType_t xReturn;
+ traceENTER_vTaskStartScheduler();
+
#if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
{
/* Sanity check that the UBaseType_t must have greater than or equal to
@@ -3333,22 +3407,30 @@ void vTaskStartScheduler( void )
/* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
* from getting optimized out as it is no longer used by the kernel. */
( void ) uxTopUsedPriority;
+
+ traceRETURN_vTaskStartScheduler();
}
/*-----------------------------------------------------------*/
void vTaskEndScheduler( void )
{
+ traceENTER_vTaskEndScheduler();
+
/* Stop the scheduler interrupts and call the portable scheduler end
* routine so the original ISRs can be restored if necessary. The port
* layer must ensure interrupts enable bit is left in the correct state. */
portDISABLE_INTERRUPTS();
xSchedulerRunning = pdFALSE;
vPortEndScheduler();
+
+ traceRETURN_vTaskEndScheduler();
}
/*----------------------------------------------------------*/
void vTaskSuspendAll( void )
{
+ traceENTER_vTaskSuspendAll();
+
#if ( configNUMBER_OF_CORES == 1 )
{
/* A critical section is not required as the variable is of type
@@ -3424,6 +3506,8 @@ void vTaskSuspendAll( void )
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
+
+ traceRETURN_vTaskSuspendAll();
}
/*----------------------------------------------------------*/
@@ -3496,6 +3580,8 @@ BaseType_t xTaskResumeAll( void )
TCB_t * pxTCB = NULL;
BaseType_t xAlreadyYielded = pdFALSE;
+ traceENTER_xTaskResumeAll();
+
#if ( configNUMBER_OF_CORES > 1 )
if( xSchedulerRunning != pdFALSE )
#endif
@@ -3630,6 +3716,8 @@ BaseType_t xTaskResumeAll( void )
taskEXIT_CRITICAL();
}
+ traceRETURN_xTaskResumeAll( xAlreadyYielded );
+
return xAlreadyYielded;
}
/*-----------------------------------------------------------*/
@@ -3638,6 +3726,8 @@ TickType_t xTaskGetTickCount( void )
{
TickType_t xTicks;
+ traceENTER_xTaskGetTickCount();
+
/* Critical section required if running on a 16 bit processor. */
portTICK_TYPE_ENTER_CRITICAL();
{
@@ -3645,6 +3735,8 @@ TickType_t xTaskGetTickCount( void )
}
portTICK_TYPE_EXIT_CRITICAL();
+ traceRETURN_xTaskGetTickCount( xTicks );
+
return xTicks;
}
/*-----------------------------------------------------------*/
@@ -3654,6 +3746,8 @@ TickType_t xTaskGetTickCountFromISR( void )
TickType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_xTaskGetTickCountFromISR();
+
/* RTOS ports that support interrupt nesting have the concept of a maximum
* system call (or maximum API call) interrupt priority. Interrupts that are
* above the maximum system call priority are kept permanently enabled, even
@@ -3676,14 +3770,20 @@ TickType_t xTaskGetTickCountFromISR( void )
}
portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+ traceRETURN_xTaskGetTickCountFromISR( xReturn );
+
return xReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxTaskGetNumberOfTasks( void )
{
+ traceENTER_uxTaskGetNumberOfTasks();
+
/* A critical section is not required because the variables are of type
* BaseType_t. */
+ traceRETURN_uxTaskGetNumberOfTasks( uxCurrentNumberOfTasks );
+
return uxCurrentNumberOfTasks;
}
/*-----------------------------------------------------------*/
@@ -3692,10 +3792,15 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{
TCB_t * pxTCB;
+ traceENTER_pcTaskGetName( xTaskToQuery );
+
/* If null is passed in here then the name of the calling task is being
* queried. */
pxTCB = prvGetTCBFromHandle( xTaskToQuery );
configASSERT( pxTCB );
+
+ traceRETURN_pcTaskGetName( &( pxTCB->pcTaskName[ 0 ] ) );
+
return &( pxTCB->pcTaskName[ 0 ] );
}
/*-----------------------------------------------------------*/
@@ -3844,6 +3949,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
UBaseType_t uxQueue = configMAX_PRIORITIES;
TCB_t * pxTCB;
+ traceENTER_xTaskGetHandle( pcNameToQuery );
+
/* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
@@ -3895,6 +4002,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
( void ) xTaskResumeAll();
+ traceRETURN_xTaskGetHandle( pxTCB );
+
return pxTCB;
}
@@ -3910,6 +4019,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
BaseType_t xReturn;
TCB_t * pxTCB;
+ traceENTER_xTaskGetStaticBuffers( xTask, ppuxStackBuffer, ppxTaskBuffer );
+
configASSERT( ppuxStackBuffer != NULL );
configASSERT( ppxTaskBuffer != NULL );
@@ -3942,6 +4053,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
+ traceRETURN_xTaskGetStaticBuffers( xReturn );
+
return xReturn;
}
@@ -3956,6 +4069,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
+ traceENTER_uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );
+
vTaskSuspendAll();
{
/* Is there a space in the array for each task in the system? */
@@ -4017,6 +4132,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
( void ) xTaskResumeAll();
+ traceRETURN_uxTaskGetSystemState( uxTask );
+
return uxTask;
}
@@ -4029,9 +4146,14 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
* Consider to add another function to return the idle task handles. */
TaskHandle_t xTaskGetIdleTaskHandle( void )
{
+ traceENTER_xTaskGetIdleTaskHandle();
+
/* If xTaskGetIdleTaskHandle() is called before the scheduler has been
* started, then xIdleTaskHandles will be NULL. */
configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) );
+
+ traceRETURN_xTaskGetIdleTaskHandle( xIdleTaskHandles[ 0 ] );
+
return xIdleTaskHandles[ 0 ];
}
@@ -4046,6 +4168,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
void vTaskStepTick( TickType_t xTicksToJump )
{
+ traceENTER_vTaskStepTick( xTicksToJump );
+
/* Correct the tick count value after a period during which the tick
* was suppressed. Note this does *not* call the tick hook function for
* each stepped tick. */
@@ -4073,7 +4197,9 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
xTickCount += xTicksToJump;
+
traceINCREASE_TICK_COUNT( xTicksToJump );
+ traceRETURN_vTaskStepTick();
}
#endif /* configUSE_TICKLESS_IDLE */
@@ -4083,6 +4209,8 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
BaseType_t xYieldOccurred;
+ traceENTER_xTaskCatchUpTicks( xTicksToCatchUp );
+
/* Must not be called with the scheduler suspended as the implementation
* relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
@@ -4099,6 +4227,8 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
taskEXIT_CRITICAL();
xYieldOccurred = xTaskResumeAll();
+ traceRETURN_xTaskCatchUpTicks( xYieldOccurred );
+
return xYieldOccurred;
}
/*----------------------------------------------------------*/
@@ -4110,6 +4240,8 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
TCB_t * pxTCB = xTask;
BaseType_t xReturn;
+ traceENTER_xTaskAbortDelay( xTask );
+
configASSERT( pxTCB );
vTaskSuspendAll();
@@ -4189,6 +4321,8 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
}
( void ) xTaskResumeAll();
+ traceRETURN_xTaskAbortDelay( xReturn );
+
return xReturn;
}
@@ -4201,6 +4335,8 @@ BaseType_t xTaskIncrementTick( void )
TickType_t xItemValue;
BaseType_t xSwitchRequired = pdFALSE;
+ traceENTER_xTaskIncrementTick();
+
#if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
#endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
@@ -4438,6 +4574,8 @@ BaseType_t xTaskIncrementTick( void )
#endif
}
+ traceRETURN_xTaskIncrementTick( xSwitchRequired );
+
return xSwitchRequired;
}
/*-----------------------------------------------------------*/
@@ -4449,6 +4587,8 @@ BaseType_t xTaskIncrementTick( void )
{
TCB_t * xTCB;
+ traceENTER_vTaskSetApplicationTaskTag( xTask, pxHookFunction );
+
/* If xTask is NULL then it is the task hook of the calling task that is
* getting set. */
if( xTask == NULL )
@@ -4467,6 +4607,8 @@ BaseType_t xTaskIncrementTick( void )
xTCB->pxTaskTag = pxHookFunction;
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskSetApplicationTaskTag();
}
#endif /* configUSE_APPLICATION_TASK_TAG */
@@ -4479,6 +4621,8 @@ BaseType_t xTaskIncrementTick( void )
TCB_t * pxTCB;
TaskHookFunction_t xReturn;
+ traceENTER_xTaskGetApplicationTaskTag( xTask );
+
/* If xTask is NULL then set the calling task's hook. */
pxTCB = prvGetTCBFromHandle( xTask );
@@ -4490,6 +4634,8 @@ BaseType_t xTaskIncrementTick( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_xTaskGetApplicationTaskTag( xReturn );
+
return xReturn;
}
@@ -4504,6 +4650,8 @@ BaseType_t xTaskIncrementTick( void )
TaskHookFunction_t xReturn;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_xTaskGetApplicationTaskTagFromISR( xTask );
+
/* If xTask is NULL then set the calling task's hook. */
pxTCB = prvGetTCBFromHandle( xTask );
@@ -4515,6 +4663,8 @@ BaseType_t xTaskIncrementTick( void )
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn );
+
return xReturn;
}
@@ -4529,6 +4679,8 @@ BaseType_t xTaskIncrementTick( void )
TCB_t * xTCB;
BaseType_t xReturn;
+ traceENTER_xTaskCallApplicationTaskHook( xTask, pvParameter );
+
/* If xTask is NULL then we are calling our own task hook. */
if( xTask == NULL )
{
@@ -4548,6 +4700,8 @@ BaseType_t xTaskIncrementTick( void )
xReturn = pdFAIL;
}
+ traceRETURN_xTaskCallApplicationTaskHook( xReturn );
+
return xReturn;
}
@@ -4557,6 +4711,8 @@ BaseType_t xTaskIncrementTick( void )
#if ( configNUMBER_OF_CORES == 1 )
void vTaskSwitchContext( void )
{
+ traceENTER_vTaskSwitchContext();
+
if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
{
/* The scheduler is currently suspended - do not allow a context
@@ -4626,10 +4782,14 @@ BaseType_t xTaskIncrementTick( void )
}
#endif
}
+
+ traceRETURN_vTaskSwitchContext();
}
#else /* if ( configNUMBER_OF_CORES == 1 ) */
void vTaskSwitchContext( BaseType_t xCoreID )
{
+ traceENTER_vTaskSwitchContext();
+
/* Acquire both locks:
* - The ISR lock protects the ready list from simultaneous access by
* both other ISRs and tasks.
@@ -4717,6 +4877,8 @@ BaseType_t xTaskIncrementTick( void )
}
portRELEASE_ISR_LOCK();
portRELEASE_TASK_LOCK();
+
+ traceRETURN_vTaskSwitchContext();
}
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
@@ -4724,6 +4886,8 @@ BaseType_t xTaskIncrementTick( void )
void vTaskPlaceOnEventList( List_t * const pxEventList,
const TickType_t xTicksToWait )
{
+ traceENTER_vTaskPlaceOnEventList( pxEventList, xTicksToWait );
+
configASSERT( pxEventList );
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
@@ -4743,6 +4907,8 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+
+ traceRETURN_vTaskPlaceOnEventList();
}
/*-----------------------------------------------------------*/
@@ -4750,6 +4916,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
const TickType_t xItemValue,
const TickType_t xTicksToWait )
{
+ traceENTER_vTaskPlaceOnUnorderedEventList( pxEventList, xItemValue, xTicksToWait );
+
configASSERT( pxEventList );
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
@@ -4769,6 +4937,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+
+ traceRETURN_vTaskPlaceOnUnorderedEventList();
}
/*-----------------------------------------------------------*/
@@ -4778,6 +4948,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
TickType_t xTicksToWait,
const BaseType_t xWaitIndefinitely )
{
+ traceENTER_vTaskPlaceOnEventListRestricted( pxEventList, xTicksToWait, xWaitIndefinitely );
+
configASSERT( pxEventList );
/* This function should not be called by application code hence the
@@ -4802,6 +4974,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
+
+ traceRETURN_vTaskPlaceOnEventListRestricted();
}
#endif /* configUSE_TIMERS */
@@ -4812,6 +4986,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
TCB_t * pxUnblockedTCB;
BaseType_t xReturn;
+ traceENTER_xTaskRemoveFromEventList( pxEventList );
+
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. */
@@ -4890,6 +5066,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
+ traceRETURN_xTaskRemoveFromEventList( xReturn );
return xReturn;
}
/*-----------------------------------------------------------*/
@@ -4899,6 +5076,8 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
{
TCB_t * pxUnblockedTCB;
+ traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue );
+
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
* the event flags implementation. */
configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
@@ -4956,11 +5135,15 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
#endif
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
+
+ traceRETURN_vTaskRemoveFromUnorderedEventList();
}
/*-----------------------------------------------------------*/
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{
+ traceENTER_vTaskSetTimeOutState( pxTimeOut );
+
configASSERT( pxTimeOut );
taskENTER_CRITICAL();
{
@@ -4968,14 +5151,20 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
pxTimeOut->xTimeOnEntering = xTickCount;
}
taskEXIT_CRITICAL();
+
+ traceRETURN_vTaskSetTimeOutState();
}
/*-----------------------------------------------------------*/
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
+ traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );
+
/* For internal use only as it does not use a critical section. */
pxTimeOut->xOverflowCount = xNumOfOverflows;
pxTimeOut->xTimeOnEntering = xTickCount;
+
+ traceRETURN_vTaskInternalSetTimeOutState();
}
/*-----------------------------------------------------------*/
@@ -4984,6 +5173,8 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
{
BaseType_t xReturn;
+ traceENTER_xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );
+
configASSERT( pxTimeOut );
configASSERT( pxTicksToWait );
@@ -5040,14 +5231,20 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
}
taskEXIT_CRITICAL();
+ traceRETURN_xTaskCheckForTimeOut( xReturn );
+
return xReturn;
}
/*-----------------------------------------------------------*/
void vTaskMissedYield( void )
{
+ traceENTER_vTaskMissedYield();
+
/* Must be called from within a critical section. */
xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
+
+ traceRETURN_vTaskMissedYield();
}
/*-----------------------------------------------------------*/
@@ -5058,6 +5255,8 @@ void vTaskMissedYield( void )
UBaseType_t uxReturn;
TCB_t const * pxTCB;
+ traceENTER_uxTaskGetTaskNumber( xTask );
+
if( xTask != NULL )
{
pxTCB = xTask;
@@ -5068,6 +5267,8 @@ void vTaskMissedYield( void )
uxReturn = 0U;
}
+ traceRETURN_uxTaskGetTaskNumber( uxReturn );
+
return uxReturn;
}
@@ -5081,11 +5282,15 @@ void vTaskMissedYield( void )
{
TCB_t * pxTCB;
+ traceENTER_vTaskSetTaskNumber( xTask, uxHandle );
+
if( xTask != NULL )
{
pxTCB = xTask;
pxTCB->uxTaskNumber = uxHandle;
}
+
+ traceRETURN_vTaskSetTaskNumber();
}
#endif /* configUSE_TRACE_FACILITY */
@@ -5320,6 +5525,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
const UBaseType_t uxNonApplicationTasks = 1;
#endif /* INCLUDE_vTaskSuspend */
+ traceENTER_eTaskConfirmSleepModeStatus();
+
eSleepModeStatus eReturn = eStandardSleep;
/* This function must be called from a critical section. */
@@ -5356,6 +5563,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER();
}
+ traceRETURN_eTaskConfirmSleepModeStatus( eReturn );
+
return eReturn;
}
@@ -5370,6 +5579,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
TCB_t * pxTCB;
+ traceENTER_vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );
+
if( ( xIndex >= 0 ) &&
( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
{
@@ -5377,6 +5588,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
configASSERT( pxTCB != NULL );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
}
+
+ traceRETURN_vTaskSetThreadLocalStoragePointer();
}
#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
@@ -5390,6 +5603,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
void * pvReturn = NULL;
TCB_t * pxTCB;
+ traceENTER_pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );
+
if( ( xIndex >= 0 ) &&
( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
{
@@ -5401,6 +5616,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
pvReturn = NULL;
}
+ traceRETURN_pvTaskGetThreadLocalStoragePointer( pvReturn );
+
return pvReturn;
}
@@ -5414,11 +5631,15 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
TCB_t * pxTCB;
+ traceENTER_vTaskAllocateMPURegions( xTaskToModify, pxRegions );
+
/* If null is passed in here then we are modifying the MPU settings of
* the calling task. */
pxTCB = prvGetTCBFromHandle( xTaskToModify );
vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 );
+
+ traceRETURN_vTaskAllocateMPURegions();
}
#endif /* portUSING_MPU_WRAPPERS */
@@ -5535,6 +5756,8 @@ static void prvCheckTasksWaitingTermination( void )
{
TCB_t * pxTCB;
+ traceENTER_vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );
+
/* xTask is NULL then get the state of the calling task. */
pxTCB = prvGetTCBFromHandle( xTask );
@@ -5642,6 +5865,8 @@ static void prvCheckTasksWaitingTermination( void )
{
pxTaskStatus->usStackHighWaterMark = 0;
}
+
+ traceRETURN_vTaskGetInfo();
}
#endif /* configUSE_TRACE_FACILITY */
@@ -5716,6 +5941,8 @@ static void prvCheckTasksWaitingTermination( void )
uint8_t * pucEndOfStack;
configSTACK_DEPTH_TYPE uxReturn;
+ traceENTER_uxTaskGetStackHighWaterMark2( xTask );
+
/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
* the same except for their return type. Using configSTACK_DEPTH_TYPE
* allows the user to determine the return type. It gets around the
@@ -5737,6 +5964,8 @@ static void prvCheckTasksWaitingTermination( void )
uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
+ traceRETURN_uxTaskGetStackHighWaterMark2( uxReturn );
+
return uxReturn;
}
@@ -5751,6 +5980,8 @@ static void prvCheckTasksWaitingTermination( void )
uint8_t * pucEndOfStack;
UBaseType_t uxReturn;
+ traceENTER_uxTaskGetStackHighWaterMark( xTask );
+
pxTCB = prvGetTCBFromHandle( xTask );
#if portSTACK_GROWTH < 0
@@ -5765,6 +5996,8 @@ static void prvCheckTasksWaitingTermination( void )
uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
+ traceRETURN_uxTaskGetStackHighWaterMark( uxReturn );
+
return uxReturn;
}
@@ -5854,11 +6087,15 @@ static void prvResetNextTaskUnblockTime( void )
{
TaskHandle_t xReturn;
+ traceENTER_xTaskGetCurrentTaskHandle();
+
/* A critical section is not required as this is not called from
* an interrupt and the current TCB will always be the same for any
* individual execution thread. */
xReturn = pxCurrentTCB;
+ traceRETURN_xTaskGetCurrentTaskHandle( xReturn );
+
return xReturn;
}
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -5867,12 +6104,16 @@ static void prvResetNextTaskUnblockTime( void )
TaskHandle_t xReturn;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_xTaskGetCurrentTaskHandle();
+
uxSavedInterruptStatus = portSET_INTERRUPT_MASK();
{
xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
}
portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus );
+ traceRETURN_xTaskGetCurrentTaskHandle( xReturn );
+
return xReturn;
}
@@ -5880,11 +6121,15 @@ static void prvResetNextTaskUnblockTime( void )
{
TaskHandle_t xReturn = NULL;
+ traceENTER_xTaskGetCurrentTaskHandleCPU( xCoreID );
+
if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
{
xReturn = pxCurrentTCBs[ xCoreID ];
}
+ traceRETURN_xTaskGetCurrentTaskHandleCPU( xReturn );
+
return xReturn;
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -5898,6 +6143,8 @@ static void prvResetNextTaskUnblockTime( void )
{
BaseType_t xReturn;
+ traceENTER_xTaskGetSchedulerState();
+
if( xSchedulerRunning == pdFALSE )
{
xReturn = taskSCHEDULER_NOT_STARTED;
@@ -5922,6 +6169,8 @@ static void prvResetNextTaskUnblockTime( void )
#endif
}
+ traceRETURN_xTaskGetSchedulerState( xReturn );
+
return xReturn;
}
@@ -5935,6 +6184,8 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxMutexHolderTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;
+ traceENTER_xTaskPriorityInherit( pxMutexHolder );
+
/* If the mutex was given back by an interrupt while the queue was
* locked then the mutex holder might now be NULL. _RB_ Is this still
* needed as interrupts can no longer use mutexes? */
@@ -6022,6 +6273,8 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER();
}
+ traceRETURN_xTaskPriorityInherit( xReturn );
+
return xReturn;
}
@@ -6035,6 +6288,8 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;
+ traceENTER_xTaskPriorityDisinherit( pxMutexHolder );
+
if( pxMutexHolder != NULL )
{
/* A task can only have an inherited priority if it holds the mutex.
@@ -6112,6 +6367,8 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER();
}
+ traceRETURN_xTaskPriorityDisinherit( xReturn );
+
return xReturn;
}
@@ -6127,6 +6384,8 @@ static void prvResetNextTaskUnblockTime( void )
UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
+ traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );
+
if( pxMutexHolder != NULL )
{
/* If pxMutexHolder is not NULL then the holder must hold at least
@@ -6229,6 +6488,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskPriorityDisinheritAfterTimeout();
}
#endif /* configUSE_MUTEXES */
@@ -6242,6 +6503,8 @@ static void prvResetNextTaskUnblockTime( void )
*/
void vTaskYieldWithinAPI( void )
{
+ traceENTER_vTaskYieldWithinAPI();
+
if( portGET_CRITICAL_NESTING_COUNT() == 0U )
{
portYIELD();
@@ -6250,6 +6513,8 @@ static void prvResetNextTaskUnblockTime( void )
{
xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
}
+
+ traceRETURN_vTaskYieldWithinAPI();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
@@ -6259,6 +6524,8 @@ static void prvResetNextTaskUnblockTime( void )
void vTaskEnterCritical( void )
{
+ traceENTER_vTaskEnterCritical();
+
portDISABLE_INTERRUPTS();
if( xSchedulerRunning != pdFALSE )
@@ -6280,6 +6547,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
@@ -6289,6 +6558,8 @@ static void prvResetNextTaskUnblockTime( void )
void vTaskEnterCritical( void )
{
+ traceENTER_vTaskEnterCritical();
+
portDISABLE_INTERRUPTS();
if( xSchedulerRunning != pdFALSE )
@@ -6325,6 +6596,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
@@ -6337,6 +6610,8 @@ static void prvResetNextTaskUnblockTime( void )
{
UBaseType_t uxSavedInterruptStatus = 0;
+ traceENTER_vTaskEnterCriticalFromISR();
+
if( xSchedulerRunning != pdFALSE )
{
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
@@ -6353,6 +6628,8 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER();
}
+ traceRETURN_vTaskEnterCriticalFromISR( uxSavedInterruptStatus );
+
return uxSavedInterruptStatus;
}
@@ -6363,6 +6640,8 @@ static void prvResetNextTaskUnblockTime( void )
void vTaskExitCritical( void )
{
+ traceENTER_vTaskExitCritical();
+
if( xSchedulerRunning != pdFALSE )
{
/* If pxCurrentTCB->uxCriticalNesting is zero then this function
@@ -6395,6 +6674,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskExitCritical();
}
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
@@ -6404,6 +6685,8 @@ static void prvResetNextTaskUnblockTime( void )
void vTaskExitCritical( void )
{
+ traceENTER_vTaskExitCritical();
+
if( xSchedulerRunning != pdFALSE )
{
/* If critical nesting count is zero then this function
@@ -6452,6 +6735,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskExitCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
@@ -6461,6 +6746,8 @@ static void prvResetNextTaskUnblockTime( void )
void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
{
+ traceENTER_vTaskExitCriticalFromISR( uxSavedInterruptStatus );
+
if( xSchedulerRunning != pdFALSE )
{
/* If critical nesting count is zero then this function
@@ -6490,6 +6777,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskExitCriticalFromISR();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
@@ -6530,6 +6819,8 @@ static void prvResetNextTaskUnblockTime( void )
UBaseType_t uxArraySize, x;
char cStatus;
+ traceENTER_vTaskList( pcWriteBuffer );
+
/*
* PLEASE NOTE:
*
@@ -6623,6 +6914,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskList();
}
#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
@@ -6636,6 +6929,8 @@ static void prvResetNextTaskUnblockTime( void )
UBaseType_t uxArraySize, x;
configRUN_TIME_COUNTER_TYPE ulTotalTime, ulStatsAsPercentage;
+ traceENTER_vTaskGetRunTimeStats( pcWriteBuffer );
+
/*
* PLEASE NOTE:
*
@@ -6744,6 +7039,8 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
+
+ traceRETURN_vTaskGetRunTimeStats();
}
#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
@@ -6753,12 +7050,16 @@ TickType_t uxTaskResetEventItemValue( void )
{
TickType_t uxReturn;
+ traceENTER_uxTaskResetEventItemValue();
+
uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
/* Reset the event list item to its normal value - so it can be used with
* queues and semaphores. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+ traceRETURN_uxTaskResetEventItemValue( uxReturn );
+
return uxReturn;
}
/*-----------------------------------------------------------*/
@@ -6769,6 +7070,8 @@ TickType_t uxTaskResetEventItemValue( void )
{
TCB_t * pxTCB;
+ traceENTER_pvTaskIncrementMutexHeldCount();
+
pxTCB = pxCurrentTCB;
/* If xSemaphoreCreateMutex() is called before any tasks have been created
@@ -6778,6 +7081,8 @@ TickType_t uxTaskResetEventItemValue( void )
( pxTCB->uxMutexesHeld )++;
}
+ traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB );
+
return pxTCB;
}
@@ -6792,6 +7097,8 @@ TickType_t uxTaskResetEventItemValue( void )
{
uint32_t ulReturn;
+ traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
+
configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
@@ -6858,6 +7165,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_ulTaskGenericNotifyTake( ulReturn );
+
return ulReturn;
}
@@ -6874,6 +7183,8 @@ TickType_t uxTaskResetEventItemValue( void )
{
BaseType_t xReturn;
+ traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
+
configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
@@ -6952,6 +7263,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_xTaskGenericNotifyWait( xReturn );
+
return xReturn;
}
@@ -6970,6 +7283,8 @@ TickType_t uxTaskResetEventItemValue( void )
BaseType_t xReturn = pdPASS;
uint8_t ucOriginalNotifyState;
+ traceENTER_xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );
+
configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
configASSERT( xTaskToNotify );
pxTCB = xTaskToNotify;
@@ -7068,6 +7383,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_xTaskGenericNotify( xReturn );
+
return xReturn;
}
@@ -7088,6 +7405,8 @@ TickType_t uxTaskResetEventItemValue( void )
BaseType_t xReturn = pdPASS;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_xTaskGenericNotifyFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );
+
configASSERT( xTaskToNotify );
configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -7227,6 +7546,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ traceRETURN_xTaskGenericNotifyFromISR( xReturn );
+
return xReturn;
}
@@ -7243,6 +7564,8 @@ TickType_t uxTaskResetEventItemValue( void )
uint8_t ucOriginalNotifyState;
UBaseType_t uxSavedInterruptStatus;
+ traceENTER_vTaskGenericNotifyGiveFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken );
+
configASSERT( xTaskToNotify );
configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -7337,6 +7660,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+
+ traceRETURN_vTaskGenericNotifyGiveFromISR();
}
#endif /* configUSE_TASK_NOTIFICATIONS */
@@ -7350,6 +7675,8 @@ TickType_t uxTaskResetEventItemValue( void )
TCB_t * pxTCB;
BaseType_t xReturn;
+ traceENTER_xTaskGenericNotifyStateClear( xTask, uxIndexToClear );
+
configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
/* If null is passed in here then it is the calling task that is having
@@ -7370,6 +7697,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_xTaskGenericNotifyStateClear( xReturn );
+
return xReturn;
}
@@ -7385,6 +7714,8 @@ TickType_t uxTaskResetEventItemValue( void )
TCB_t * pxTCB;
uint32_t ulReturn;
+ traceENTER_ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );
+
configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
/* If null is passed in here then it is the calling task that is having
@@ -7400,6 +7731,8 @@ TickType_t uxTaskResetEventItemValue( void )
}
taskEXIT_CRITICAL();
+ traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );
+
return ulReturn;
}
@@ -7412,8 +7745,12 @@ TickType_t uxTaskResetEventItemValue( void )
{
TCB_t * pxTCB;
+ traceENTER_ulTaskGetRunTimeCounter( xTask );
+
pxTCB = prvGetTCBFromHandle( xTask );
+ traceRETURN_ulTaskGetRunTimeCounter( pxTCB->ulRunTimeCounter );
+
return pxTCB->ulRunTimeCounter;
}
@@ -7427,6 +7764,8 @@ TickType_t uxTaskResetEventItemValue( void )
TCB_t * pxTCB;
configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
+ traceENTER_ulTaskGetRunTimePercent( xTask );
+
ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
/* For percentage calculations. */
@@ -7443,6 +7782,8 @@ TickType_t uxTaskResetEventItemValue( void )
ulReturn = 0;
}
+ traceRETURN_ulTaskGetRunTimePercent( ulReturn );
+
return ulReturn;
}
@@ -7456,11 +7797,15 @@ TickType_t uxTaskResetEventItemValue( void )
configRUN_TIME_COUNTER_TYPE ulReturn = 0;
BaseType_t i;
+ traceENTER_ulTaskGetIdleRunTimeCounter();
+
for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
{
ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter;
}
+ traceRETURN_ulTaskGetIdleRunTimeCounter( ulReturn );
+
return ulReturn;
}
@@ -7475,6 +7820,8 @@ TickType_t uxTaskResetEventItemValue( void )
configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
BaseType_t i;
+ traceENTER_ulTaskGetIdleRunTimePercent();
+
ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES;
/* For percentage calculations. */
@@ -7495,6 +7842,8 @@ TickType_t uxTaskResetEventItemValue( void )
ulReturn = 0;
}
+ traceRETURN_ulTaskGetIdleRunTimePercent( ulReturn );
+
return ulReturn;
}
@@ -7624,8 +7973,12 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
{
TCB_t * pxTCB;
+ traceENTER_xTaskGetMPUSettings( xTask );
+
pxTCB = prvGetTCBFromHandle( xTask );
+ traceRETURN_xTaskGetMPUSettings( &( pxTCB->xMPUSettings ) );
+
return &( pxTCB->xMPUSettings );
}