blob: abfd4db384335a5b4ab5a2b395fe204d1be45dda (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
/**
* @file
*
* @brief CPU Usage Reset
* @ingroup libmisc_cpuuse CPU Usage
*/
/*
* COPYRIGHT (c) 1989-2009
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/cpuuse.h>
#include <rtems/score/percpu.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/watchdogimpl.h>
#include "cpuuseimpl.h"
/*
 * Zero the accumulated CPU time of one thread.
 *
 * Invoked via rtems_iterate_over_all_threads().  The thread state lock
 * serializes against state changes of the thread; the scheduler lock of
 * the thread's scheduler protects the cpu_time_used value itself.
 */
static void CPU_usage_Per_thread_handler(
  Thread_Control *the_thread
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         thread_lock_context;
  ISR_lock_Context         sched_lock_context;

  _Thread_State_acquire( the_thread, &thread_lock_context );
  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &sched_lock_context );

  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );

  _Scheduler_Release_critical( scheduler, &sched_lock_context );
  _Thread_State_release( the_thread, &thread_lock_context );
}
/*
* rtems_cpu_usage_reset
*/
/*
 * Reset the CPU usage statistics: record the current uptime as the new
 * reference point, stamp it into every per-processor control, and zero
 * the accumulated CPU time of every thread in the system.
 */
void rtems_cpu_usage_reset( void )
{
  uint32_t processor_count;
  uint32_t processor;

  /* All subsequent usage reports are relative to this uptime. */
  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

  processor_count = rtems_get_processor_count();

  for ( processor = 0 ; processor < processor_count ; ++processor ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );

    per_cpu->cpu_usage_timestamp = CPU_usage_Uptime_at_last_reset;
  }

  /* Zero the per-thread accumulated CPU time for every thread. */
  rtems_iterate_over_all_threads( CPU_usage_Per_thread_handler );
}
|