author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-10-12 08:26:39 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-10-25 08:01:02 +0200
commit     0221da5f56353c9b238ef51d5a24802ba67b8c56 (patch)
tree       4823b8d298eb678c20db4078c2ec8e1e13fa52e5 /cpukit/score/src/threadgetcputimeused.c
parent     score: Optimize default idle task stack allocator (diff)
download   rtems-0221da5f56353c9b238ef51d5a24802ba67b8c56.tar.bz2
rtems: Fix rate monotonic statistics
The rate monotonic period statistics were affected by rtems_cpu_usage_reset(). The logic to detect and work around a CPU usage reset was broken. Thread_Control::cpu_time_used is changed to contain the processor time used throughout the entire lifetime of the thread. The new member Thread_Control::cpu_time_used_at_last_reset is added to contain the processor time used at the time of the last reset through rtems_cpu_usage_reset(). This decouples the resets of the CPU usage and the rate monotonic period statistics.

Update #4528.
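With this decoupling, a CPU usage report derives the time consumed since the last reset by subtracting the snapshot from the lifetime total. A minimal sketch of such a consumer follows, assuming the existing _Timestamp_Subtract() score helper and the two Thread_Control members described above; the _Example_* name is hypothetical and not part of this patch:

#include <rtems/score/threadimpl.h>
#include <rtems/score/timestamp.h>

/*
 * Hypothetical helper, not part of the patch: returns the processor time
 * consumed since the last rtems_cpu_usage_reset() by subtracting the
 * snapshot member from the lifetime total.
 */
static Timestamp_Control _Example_CPU_usage_since_reset(
  Thread_Control *the_thread
)
{
  Timestamp_Control lifetime;
  Timestamp_Control since_reset;

  /* Lifetime total; updated on the fly if the thread is scheduled */
  lifetime = _Thread_Get_CPU_time_used( the_thread );

  /* since_reset = lifetime - cpu_time_used_at_last_reset */
  _Timestamp_Subtract(
    &the_thread->cpu_time_used_at_last_reset,
    &lifetime,
    &since_reset
  );

  return since_reset;
}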
Diffstat (limited to 'cpukit/score/src/threadgetcputimeused.c')
-rw-r--r--  cpukit/score/src/threadgetcputimeused.c  |  32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/cpukit/score/src/threadgetcputimeused.c b/cpukit/score/src/threadgetcputimeused.c
index 7406da0bf3..f23f606411 100644
--- a/cpukit/score/src/threadgetcputimeused.c
+++ b/cpukit/score/src/threadgetcputimeused.c
@@ -4,7 +4,7 @@
* @ingroup RTEMSScoreThread
*
* @brief This source file contains the implementation of
- * _Thread_Get_CPU_time_used().
+ * _Thread_Get_CPU_time_used() and _Thread_Get_CPU_time_used_locked().
*/
/*
@@ -37,25 +37,39 @@ static bool _Thread_Is_scheduled( const Thread_Control *the_thread )
#endif
}
-void _Thread_Get_CPU_time_used(
- Thread_Control *the_thread,
- Timestamp_Control *cpu_time_used
+Timestamp_Control _Thread_Get_CPU_time_used_locked(
+ Thread_Control *the_thread
)
{
+ _Assert( _Thread_State_is_owner( the_thread ) );
+ _Assert(
+ _ISR_lock_Is_owner(
+ &_Scheduler_Get_context( _Thread_Scheduler_get_home( the_thread ) )->Lock
+ )
+ );
+
+ if ( _Thread_Is_scheduled( the_thread ) ) {
+ _Thread_Update_CPU_time_used( the_thread, _Thread_Get_CPU( the_thread ) );
+ }
+
+ return the_thread->cpu_time_used;
+}
+
+Timestamp_Control _Thread_Get_CPU_time_used( Thread_Control *the_thread )
+{
const Scheduler_Control *scheduler;
ISR_lock_Context state_lock_context;
ISR_lock_Context scheduler_lock_context;
+ Timestamp_Control cpu_time_used;
_Thread_State_acquire( the_thread, &state_lock_context );
scheduler = _Thread_Scheduler_get_home( the_thread );
_Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
- if ( _Thread_Is_scheduled( the_thread ) ) {
- _Thread_Update_CPU_time_used( the_thread, _Thread_Get_CPU( the_thread ) );
- }
-
- *cpu_time_used = the_thread->cpu_time_used;
+ cpu_time_used = _Thread_Get_CPU_time_used_locked( the_thread );
_Scheduler_Release_critical( scheduler, &scheduler_lock_context );
_Thread_State_release( the_thread, &state_lock_context );
+
+ return cpu_time_used;
}
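The non-locked wrapper above shows the locking sequence that satisfies the _Assert() checks in _Thread_Get_CPU_time_used_locked(). A hedged usage sketch for a caller that samples the value while holding the thread state lock and the home scheduler lock itself (the _Example_* name is hypothetical):

#include <rtems/score/threadimpl.h>
#include <rtems/score/schedulerimpl.h>

/*
 * Hypothetical caller, not part of the patch: acquires the thread state
 * lock and the home scheduler lock, samples the lifetime CPU time with
 * the locked variant, and releases the locks in reverse order.
 */
static Timestamp_Control _Example_Sample_CPU_time( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         state_lock_context;
  ISR_lock_Context         scheduler_lock_context;
  Timestamp_Control        cpu_time_used;

  _Thread_State_acquire( the_thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  /* Both locks are held, so the _Assert() checks pass */
  cpu_time_used = _Thread_Get_CPU_time_used_locked( the_thread );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( the_thread, &state_lock_context );

  return cpu_time_used;
}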