haiku/headers/private/system/system_profiler_defs.h

/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef _SYSTEM_SYSTEM_PROFILER_DEFS_H
#define _SYSTEM_SYSTEM_PROFILER_DEFS_H

#include <image.h>

struct system_profiler_parameters {
	// general
	area_id		buffer_area;	// area the events will be written to
	uint32		flags;			// flags selecting the events to receive

	// scheduling
	size_t		locking_lookup_size;
		// size of the lookup table used for caching the locking
		// primitive infos

	// sampling
	bigtime_t	interval;		// interval at which to take samples
	uint32		stack_depth;	// maximum stack depth to sample
};

// event flags
enum {
	B_SYSTEM_PROFILER_TEAM_EVENTS		= 0x01,
	B_SYSTEM_PROFILER_THREAD_EVENTS		= 0x02,
	B_SYSTEM_PROFILER_IMAGE_EVENTS		= 0x04,
	B_SYSTEM_PROFILER_SAMPLING_EVENTS	= 0x08,
	B_SYSTEM_PROFILER_SCHEDULING_EVENTS	= 0x10
};
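
/* Illustrative sketch (not part of this header): filling in
 * system_profiler_parameters and starting a profiling run. Assumes the
 * private _kern_system_profiler_start() syscall from <syscalls.h>; as
 * private API its name and semantics may differ between Haiku versions,
 * so treat this as a sketch only.
 */
#if 0
#include <OS.h>

#include <syscalls.h>
#include <system_profiler_defs.h>

static area_id
start_profiling(size_t bufferSize)
{
	// create the area the kernel will write events into
	void* buffer;
	area_id area = create_area("profiling buffer", &buffer,
		B_ANY_ADDRESS, bufferSize, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA);
	if (area < 0)
		return area;

	struct system_profiler_parameters parameters;
	parameters.buffer_area = area;
	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS
		| B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	parameters.locking_lookup_size = 4096;	// used for scheduling events only
	parameters.interval = 1000;				// sample every 1000 µs
	parameters.stack_depth = 5;				// up to 5 return addresses per sample

	status_t error = _kern_system_profiler_start(&parameters);
	if (error != B_OK) {
		delete_area(area);
		return error;
	}

	return area;
}
#endif	// sketch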

// events
enum {
	// reserved for the user application
	B_SYSTEM_PROFILER_USER_EVENT = 0,

	// ring buffer wrap-around marker
	B_SYSTEM_PROFILER_BUFFER_END,

	// team
	B_SYSTEM_PROFILER_TEAM_ADDED,
	B_SYSTEM_PROFILER_TEAM_REMOVED,
	B_SYSTEM_PROFILER_TEAM_EXEC,

	// thread
	B_SYSTEM_PROFILER_THREAD_ADDED,
	B_SYSTEM_PROFILER_THREAD_REMOVED,

	// image
	B_SYSTEM_PROFILER_IMAGE_ADDED,
	B_SYSTEM_PROFILER_IMAGE_REMOVED,

	// profiling samples
	B_SYSTEM_PROFILER_SAMPLES,

	// scheduling
	B_SYSTEM_PROFILER_THREAD_SCHEDULED,
	B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE,
	B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE,
	B_SYSTEM_PROFILER_WAIT_OBJECT_INFO
};

struct system_profiler_buffer_header {
	size_t	start;
	size_t	size;
};

struct system_profiler_event_header {
	uint8	event;
	uint8	cpu;	// only for B_SYSTEM_PROFILER_SAMPLES
	uint16	size;	// size of the event structure excluding the header
};
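
/* Illustrative sketch: iterating the events in the buffer area. Assumes
 * the area starts with a system_profiler_buffer_header followed by the
 * ring buffer proper, and that the header's size field accounts for any
 * dead space at the wrap point marked by B_SYSTEM_PROFILER_BUFFER_END.
 * handle_event() is a hypothetical consumer callback.
 */
#if 0
static void
process_buffer(void* bufferArea, size_t areaSize)
{
	struct system_profiler_buffer_header* bufferHeader
		= (struct system_profiler_buffer_header*)bufferArea;
	uint8* events = (uint8*)(bufferHeader + 1);
	size_t capacity = areaSize - sizeof(*bufferHeader);

	size_t offset = bufferHeader->start;
	size_t remaining = bufferHeader->size;

	while (remaining > 0) {
		struct system_profiler_event_header* header
			= (struct system_profiler_event_header*)(events + offset);

		if (header->event == B_SYSTEM_PROFILER_BUFFER_END) {
			// wrap-around marker: continue reading at offset 0
			remaining -= capacity - offset;
			offset = 0;
			continue;
		}

		// the event payload starts right after the header
		handle_event(header->event, header + 1, header->size);

		offset += sizeof(*header) + header->size;
		remaining -= sizeof(*header) + header->size;
	}
}
#endif	// sketch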

// B_SYSTEM_PROFILER_TEAM_ADDED
struct system_profiler_team_added {
	team_id	team;
	uint16	args_offset;
	char	name[1];
};
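
/* Illustrative sketch: decoding a B_SYSTEM_PROFILER_TEAM_ADDED event.
 * Assumes name[] holds the NUL-terminated team name followed, at
 * args_offset, by the argument string.
 */
#if 0
#include <stdio.h>

static void
handle_team_added(const struct system_profiler_team_added* event)
{
	const char* name = event->name;
	const char* args = event->name + event->args_offset;
	printf("team %" B_PRId32 " added: %s (args: %s)\n", event->team,
		name, args);
}
#endif	// sketch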

// B_SYSTEM_PROFILER_TEAM_REMOVED
struct system_profiler_team_removed {
	team_id	team;
};

// B_SYSTEM_PROFILER_TEAM_EXEC
struct system_profiler_team_exec {
	team_id	team;
	char	thread_name[B_OS_NAME_LENGTH];
	char	args[1];
};

// B_SYSTEM_PROFILER_THREAD_ADDED
struct system_profiler_thread_added {
	team_id		team;
	thread_id	thread;
	char		name[B_OS_NAME_LENGTH];
};

// B_SYSTEM_PROFILER_THREAD_REMOVED
struct system_profiler_thread_removed {
	team_id		team;
	thread_id	thread;
};

// B_SYSTEM_PROFILER_IMAGE_ADDED
struct system_profiler_image_added {
	team_id		team;
	image_info	info;
};

// B_SYSTEM_PROFILER_IMAGE_REMOVED
struct system_profiler_image_removed {
	team_id		team;
	image_id	image;
};

// B_SYSTEM_PROFILER_SAMPLES
struct system_profiler_samples {
	thread_id	thread;
	addr_t		samples[0];
};
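
/* Illustrative sketch: the number of addresses in a
 * B_SYSTEM_PROFILER_SAMPLES event is not stored explicitly; it follows
 * from the event header's size field, assuming that field covers the
 * struct plus the trailing samples array.
 */
#if 0
#include <stdio.h>

static void
handle_samples(const struct system_profiler_event_header* header)
{
	const struct system_profiler_samples* event
		= (const struct system_profiler_samples*)(header + 1);
	size_t count = (header->size - sizeof(*event)) / sizeof(addr_t);

	for (size_t i = 0; i < count; i++)
		printf("  %#" B_PRIxADDR "\n", event->samples[i]);
}
#endif	// sketch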

// base structure for the following three
struct system_profiler_thread_scheduling_event {
	bigtime_t	time;
	thread_id	thread;
};

// B_SYSTEM_PROFILER_THREAD_SCHEDULED
struct system_profiler_thread_scheduled {
	bigtime_t	time;
	thread_id	thread;

	thread_id	previous_thread;
	uint16		previous_thread_state;
	uint16		previous_thread_wait_object_type;
	addr_t		previous_thread_wait_object;
};

// B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE
struct system_profiler_thread_enqueued_in_run_queue {
	bigtime_t	time;
	thread_id	thread;

	uint8		priority;
};

// B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE
struct system_profiler_thread_removed_from_run_queue {
	bigtime_t	time;
	thread_id	thread;
};

// B_SYSTEM_PROFILER_WAIT_OBJECT_INFO
struct system_profiler_wait_object_info {
	uint32	type;
	addr_t	object;
	addr_t	referenced_object;
	char	name[1];
};
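
/* Illustrative sketch: B_SYSTEM_PROFILER_WAIT_OBJECT_INFO events are
 * sent lazily, once per wait object, so a consumer typically caches
 * them keyed by (type, object) and resolves the
 * previous_thread_wait_object of later thread-scheduled events against
 * that cache. Fixed-size array and linear lookup are for brevity only.
 */
#if 0
#include <string.h>

struct wait_object_entry {
	uint32	type;
	addr_t	object;
	char	name[B_OS_NAME_LENGTH];
};

static struct wait_object_entry sWaitObjects[256];
static int sWaitObjectCount = 0;

static void
cache_wait_object(const struct system_profiler_wait_object_info* info)
{
	if (sWaitObjectCount == 256)
		return;

	struct wait_object_entry* entry = &sWaitObjects[sWaitObjectCount++];
	entry->type = info->type;
	entry->object = info->object;
	strlcpy(entry->name, info->name, sizeof(entry->name));
}

static const char*
wait_object_name(uint32 type, addr_t object)
{
	for (int i = 0; i < sWaitObjectCount; i++) {
		if (sWaitObjects[i].type == type && sWaitObjects[i].object == object)
			return sWaitObjects[i].name;
	}
	return "<unknown>";
}
#endif	// sketch
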
#endif /* _SYSTEM_SYSTEM_PROFILER_DEFS_H */