path: root/freebsd/sys/sys/proc.h
Diffstat (limited to 'freebsd/sys/sys/proc.h')
-rw-r--r--  freebsd/sys/sys/proc.h | 284
1 file changed, 212 insertions(+), 72 deletions(-)
diff --git a/freebsd/sys/sys/proc.h b/freebsd/sys/sys/proc.h
index e866552c..4a695ef9 100644
--- a/freebsd/sys/sys/proc.h
+++ b/freebsd/sys/sys/proc.h
@@ -63,6 +63,7 @@
#endif
#include <sys/ucontext.h>
#include <sys/ucred.h>
+#include <sys/_vm_domain.h>
#include <machine/proc.h> /* Machine-dependent proc substruct. */
/*
@@ -148,6 +149,8 @@ struct pargs {
* q - td_contested lock
* r - p_peers lock
* t - thread lock
+ * u - process stat lock
+ * w - process timer lock
* x - created at fork, only changes during single threading in exec
* y - created at first aio, doesn't change until exit or exec at which
* point we are single-threaded and only curthread changes it
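
The new "u" and "w" classes map to the per-process spin mutexes introduced later in this patch (p_statmtx and p_itimmtx) and their PROC_STATLOCK()/PROC_ITIMLOCK() wrappers. As a minimal sketch of what the annotation implies, assuming those macros, a hypothetical reader of a (u)-protected field would look like this (not code from the patch):

static uint64_t
read_proc_runtime(struct proc *p)
{
	uint64_t runtime;

	PROC_STATLOCK(p);			/* "u" - process stat lock */
	runtime = p->p_rux.rux_runtime;		/* p_rux is annotated (cu) below */
	PROC_STATUNLOCK(p);
	return (runtime);
}
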
@@ -158,6 +161,8 @@ struct pargs {
* for write access.
*/
struct cpuset;
+struct filecaps;
+struct filemon;
struct kaioinfo;
struct kaudit_record;
struct kdtrace_proc;
@@ -170,6 +175,7 @@ struct procdesc;
struct racct;
struct sbuf;
struct sleepqueue;
+struct syscall_args;
struct td_sched;
struct thread;
struct trapframe;
@@ -183,14 +189,14 @@ struct turnstile;
* userland asks for rusage info. Backwards compatibility prevents putting
* this directly in the user-visible rusage struct.
*
- * Locking for p_rux: (cj) means (j) for p_rux and (c) for p_crux.
+ * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux.
* Locking for td_rux: (t) for all fields.
*/
struct rusage_ext {
- uint64_t rux_runtime; /* (cj) Real time. */
- uint64_t rux_uticks; /* (cj) Statclock hits in user mode. */
- uint64_t rux_sticks; /* (cj) Statclock hits in sys mode. */
- uint64_t rux_iticks; /* (cj) Statclock hits in intr mode. */
+ uint64_t rux_runtime; /* (cu) Real time. */
+ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */
+ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */
+ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */
uint64_t rux_uu; /* (c) Previous user time in usec. */
uint64_t rux_su; /* (c) Previous sys time in usec. */
uint64_t rux_tu; /* (c) Previous total time in usec. */
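
For context, these statclock tick counters are what the kernel uses to apportion the accumulated runtime between user, system and interrupt time when rusage is requested. The sketch below only illustrates that split; it is a simplification of calcru() in kern_resource.c, which additionally uses rux_uu/rux_su/rux_tu to keep the reported values monotonic:

static void
split_runtime(const struct rusage_ext *rux, uint64_t tu_usec,
    uint64_t *uu, uint64_t *su)
{
	uint64_t ticks;

	ticks = rux->rux_uticks + rux->rux_sticks + rux->rux_iticks;
	if (ticks == 0)
		ticks = 1;
	*uu = tu_usec * rux->rux_uticks / ticks;	/* user time, usec */
	*su = tu_usec * rux->rux_sticks / ticks;	/* system time, usec */
}
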
@@ -235,7 +241,9 @@ struct thread {
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
#ifndef __rtems__
struct turnstile *td_turnstile; /* (k) Associated turnstile. */
+ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
+ struct vm_domain_policy td_vm_dom_policy; /* (c) current numa domain policy */
lwpid_t td_tid; /* (b) Thread ID. */
sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
#define td_siglist td_sigqueue.sq_signals
@@ -255,11 +263,9 @@ struct thread {
void *td_wchan; /* (t) Sleep address. */
const char *td_wmesg; /* (t) Reason for sleep. */
#ifndef __rtems__
- u_char td_lastcpu; /* (t) Last cpu we were on. */
- u_char td_oncpu; /* (t) Which cpu we are on. */
volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
- short td_locks; /* (k) Count of non-spin locks. */
+ short td_locks; /* (k) Debug: count of non-spin locks */
short td_rw_rlocks; /* (k) Count of rwlock read locks. */
short td_lk_slocks; /* (k) Count of lockmgr shared locks. */
short td_stopsched; /* (k) Scheduler stopped. */
@@ -272,10 +278,12 @@ struct thread {
#endif /* __rtems__ */
struct ucred *td_ucred; /* (k) Reference to credentials. */
#ifndef __rtems__
+ struct plimit *td_limit; /* (k) Resource limits. */
u_int td_estcpu; /* (t) estimated cpu utilization */
int td_slptick; /* (t) Time at sleep. */
int td_blktick; /* (t) Time spent blocked. */
int td_swvoltick; /* (t) Time at last SW_VOL switch. */
+ int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */
u_int td_cow; /* (*) Number of copy-on-write faults */
struct rusage td_ru; /* (t) rusage information. */
struct rusage_ext td_rux; /* (t) Internal rusage information. */
@@ -287,7 +295,6 @@ struct thread {
u_int td_uticks; /* (t) Statclock hits in user mode. */
int td_intrval; /* (t) Return value for sleepq. */
sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */
- sigset_t td_sigmask; /* (c) Current signal mask. */
volatile u_int td_generation; /* (k) For detection of preemption */
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
int td_xsig; /* (c) Signal for ptrace */
@@ -301,20 +308,31 @@ struct thread {
struct osd td_osd; /* (k) Object specific data. */
struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
pid_t td_dbg_forked; /* (c) Child pid for debugger. */
-#define td_endzero td_rqindex
+ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */
+ int td_no_sleeping; /* (k) Sleeping disabled count. */
+ int td_dom_rr_idx; /* (k) RR Numa domain selection. */
+ void *td_su; /* (k) FFS SU private */
+ sbintime_t td_sleeptimo; /* (t) Sleep timeout. */
+#define td_endzero td_sigmask
-/* Copied during fork1() or thread_sched_upcall(). */
+/* Copied during fork1() or create_thread(). */
#define td_startcopy td_endzero
+ sigset_t td_sigmask; /* (c) Current signal mask. */
u_char td_rqindex; /* (t) Run queue index. */
u_char td_base_pri; /* (t) Thread base kernel priority. */
u_char td_priority; /* (t) Thread active priority. */
u_char td_pri_class; /* (t) Scheduling class. */
u_char td_user_pri; /* (t) User pri from estcpu and nice. */
u_char td_base_user_pri; /* (t) Base user pri */
+ u_int td_dbg_sc_code; /* (c) Syscall code to debugger. */
+ u_int td_dbg_sc_narg; /* (c) Syscall arg count to debugger.*/
+ uintptr_t td_rb_list; /* (k) Robust list head. */
+ uintptr_t td_rbp_list; /* (k) Robust priv list head. */
+ uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */
#define td_endcopy td_pcb
/*
- * Fields that must be manually set in fork1() or thread_sched_upcall()
+ * Fields that must be manually set in fork1() or create_thread()
* or already have been set in the allocator, constructor, etc.
*/
struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
@@ -325,9 +343,16 @@ struct thread {
TDS_RUNQ,
TDS_RUNNING
} td_state; /* (t) thread state */
-#endif /* __rtems__ */
+ union {
+ register_t tdu_retval[2];
+ off_t tdu_off;
+ } td_uretoff; /* (k) Syscall aux returns. */
+#else /* __rtems__ */
register_t td_retval[2]; /* (k) Syscall aux returns. */
+#endif /* __rtems__ */
#ifndef __rtems__
+#define td_retval td_uretoff.tdu_retval
+ u_int td_cowgen; /* (k) Generation of COW pointers. */
struct callout td_slpcallout; /* (h) Callout for sleep. */
struct trapframe *td_frame; /* (k) */
struct vm_object *td_kstack_obj;/* (a) Kstack object. */
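
The td_uretoff union (and the td_retval alias defined above) exists so that a system call can hand back a full 64-bit off_t even on platforms where register_t is 32-bit. A hedged sketch of the two usage patterns, with hypothetical handlers that are not part of the patch:

static int
example_return_int(struct thread *td)
{

	td->td_retval[0] = 42;			/* stored in tdu_retval[0] */
	return (0);
}

static int
example_return_off_t(struct thread *td, off_t newoff)
{

	td->td_uretoff.tdu_off = newoff;	/* full 64-bit return value */
	return (0);
}
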
@@ -335,7 +360,6 @@ struct thread {
int td_kstack_pages; /* (a) Size of the kstack. */
volatile u_int td_critnest; /* (k*) Critical section nest level. */
struct mdthread td_md; /* (k) Any machine-dependent fields. */
- struct td_sched *td_sched; /* (*) Scheduler-specific data. */
struct kaudit_record *td_ar; /* (k) Active audit record, if any. */
struct lpohead td_lprof[2]; /* (a) lock profiling objects. */
struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
@@ -346,11 +370,17 @@ struct thread {
struct proc *td_rfppwait_p; /* (k) The vforked child */
struct vm_page **td_ma; /* (k) uio pages held */
int td_ma_cnt; /* (k) size of *td_ma */
- struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
- u_int td_vp_reserv; /* (k) Count of reserved vnodes. */
+ void *td_emuldata; /* Emulator state data */
+ int td_lastcpu; /* (t) Last cpu we were on. */
+ int td_oncpu; /* (t) Which cpu we are on. */
#endif /* __rtems__ */
};
+struct thread0_storage {
+ struct thread t0st_thread;
+ uint64_t t0st_sched[10];
+};
+
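
Dropping the td_sched pointer (removed further down) means the scheduler-private data now lives immediately after struct thread in the same allocation; thread0_storage reserves that space statically for thread0, and td_get_sched() at the end of this patch simply returns the address just past the thread. A hypothetical compile-time check in scheduler code (where struct td_sched is a complete type) could verify the reservation, for example:

/* Hypothetical assertion, assuming a complete definition of struct td_sched. */
_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
    sizeof(struct thread0_storage),
    "increase the size of thread0_storage.t0st_sched");
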
struct mtx *thread_lock_block(struct thread *);
void thread_lock_unblock(struct thread *, struct mtx *);
void thread_lock_set(struct thread *, struct mtx *);
@@ -372,12 +402,15 @@ do { \
KASSERT((__m == &blocked_lock || __m == (lock)), \
("Thread %p lock %p does not match %p", td, __m, (lock))); \
} while (0)
+
+#define TD_LOCKS_INC(td) ((td)->td_locks++)
+#define TD_LOCKS_DEC(td) ((td)->td_locks--)
#else
#define THREAD_LOCKPTR_ASSERT(td, lock)
-#endif
-#define CRITICAL_ASSERT(td) \
- KASSERT((td)->td_critnest >= 1, ("Not in critical section"));
+#define TD_LOCKS_INC(td)
+#define TD_LOCKS_DEC(td)
+#endif
/*
* Flags kept in td_flags:
@@ -392,19 +425,19 @@ do { \
#define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */
#define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */
#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
-#define TDF_UNUSED09 0x00000200 /* --available-- */
+#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
-#define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */
+#define TDF_UNUSED12 0x00001000 /* --available-- */
#define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */
#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
#define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */
#define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
#define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */
#define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */
-#define TDF_UNUSED19 0x00080000 /* --available-- */
+#define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
-#define TDF_UNUSED21 0x00200000 /* --available-- */
+#define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */
#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
#define TDF_UNUSED23 0x00800000 /* --available-- */
#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
@@ -427,6 +460,10 @@ do { \
#define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child
only) */
#define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */
+#define TDB_BORN 0x00000200 /* New LWP indicator for ptrace() */
+#define TDB_EXIT 0x00000400 /* Exiting LWP indicator for ptrace() */
+#define TDB_VFORK 0x00000800 /* vfork indicator for ptrace() */
+#define TDB_FSTP 0x00001000 /* The thread is PT_ATTACH leader */
/*
* "Private" flags kept in td_pflags:
@@ -438,9 +475,9 @@ do { \
#define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */
#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
-#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
+#define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */
#define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */
-#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
+#define TDP_UNUSED9 0x00000100 /* --available-- */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
#define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */
@@ -461,7 +498,7 @@ do { \
#define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */
#define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */
#define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */
-#define TDP_DEVMEMIO 0x20000000 /* Accessing memory for /dev/mem */
+#define TDP_FORKING 0x20000000 /* Thread is being created through fork() */
#define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */
/*
@@ -520,6 +557,11 @@ do { \
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
+#define TD_SBDRY_INTR(td) \
+ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0)
+#define TD_SBDRY_ERRNO(td) \
+ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART)
+
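
TD_SBDRY_INTR() and TD_SBDRY_ERRNO() pair with the TDF_SEINTR/TDF_SERESTART flags added above: while a thread sleeps in a stop-deferred (TDF_SBDRY) section, a stop attempt records which errno the interrupted operation should report. A minimal sketch of a consumer, assuming the real ones live in the sleepqueue/signal code:

static int
example_sleep_result(struct thread *td)
{

	if (TD_SBDRY_INTR(td))
		return (TD_SBDRY_ERRNO(td));	/* EINTR or ERESTART */
	return (0);
}
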
/*
* Process structure.
*/
@@ -532,7 +574,7 @@ struct proc {
struct filedesc *p_fd; /* (b) Open files. */
struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
- struct plimit *p_limit; /* (c) Process limits. */
+ struct plimit *p_limit; /* (c) Resource limits. */
struct callout p_limco; /* (c) Limit callout handle */
struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
@@ -549,7 +591,15 @@ struct proc {
struct proc *p_pptr; /* (c + e) Pointer to parent process. */
LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */
LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */
+ struct proc *p_reaper; /* (e) My reaper. */
+ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants
+ (if I am reaper). */
+ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of
+ the same reaper. */
struct mtx p_mtx; /* (n) Lock for this struct. */
+ struct mtx p_statmtx; /* Lock for the stats */
+ struct mtx p_itimmtx; /* Lock for the virt/prof timers */
+ struct mtx p_profmtx; /* Lock for the profiling */
struct ksiginfo *p_ksi; /* Locked by parent proc lock */
sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */
#define p_siglist p_sigqueue.sq_signals
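
The p_reaper/p_reaplist/p_reapsibling fields (together with p_reapsubtree, P_TREE_REAPER and reaper_abandon_children() later in this patch) implement process reapers: orphaned descendants are re-parented to the nearest reaper instead of init. From userland this is driven through procctl(2); a hedged example of acquiring reaper status for the calling process, assuming the PROC_REAP_ACQUIRE command:

#include <sys/procctl.h>
#include <unistd.h>

int
become_reaper(void)
{

	return (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL));
}
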
@@ -557,12 +607,12 @@ struct proc {
/* The following fields are all zeroed upon creation in fork. */
#define p_startzero p_oppid
pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */
- int p_pad_dbg_child;
struct vmspace *p_vmspace; /* (b) Address space. */
u_int p_swtick; /* (c) Tick when swapped in or out. */
+ u_int p_cowgen; /* (c) Generation of COW pointers. */
struct itimerval p_realtimer; /* (c) Alarm timer. */
struct rusage p_ru; /* (a) Exit information. */
- struct rusage_ext p_rux; /* (cj) Internal resource usage. */
+ struct rusage_ext p_rux; /* (cu) Internal resource usage. */
struct rusage_ext p_crux; /* (c) Internal child resource usage. */
int p_profthreads; /* (c) Num threads in addupc_task. */
volatile int p_exitthreads; /* (j) Number of threads exiting */
@@ -579,6 +629,7 @@ struct proc {
u_int p_stype; /* (c) Stop event type. */
char p_step; /* (c) Process is stopped. */
u_char p_pfsflags; /* (c) Procfs flags. */
+ u_int p_ptevents; /* (c) ptrace() event mask. */
struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */
struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */
struct thread *p_singlethread;/* (c + j) If single threading this is it */
@@ -588,6 +639,9 @@ struct proc {
int p_pendingcnt; /* how many signals are pending */
struct itimers *p_itimers; /* (c) POSIX interval timers. */
struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */
+ u_int p_treeflag; /* (e) P_TREE flags */
+ int p_pendingexits; /* (c) Count of pending thread exits. */
+ struct filemon *p_filemon; /* (c) filemon-specific data. */
/* End area that is zeroed on creation. */
#define p_endzero p_magic
@@ -596,18 +650,21 @@ struct proc {
u_int p_magic; /* (b) Magic number. */
int p_osrel; /* (x) osreldate for the
binary (from ELF note, if any) */
- char p_comm[MAXCOMLEN + 1]; /* (b) Process name. */
- struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */
+ char p_comm[MAXCOMLEN + 1]; /* (x) Process name. */
struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */
struct pargs *p_args; /* (c) Process arguments. */
rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */
signed char p_nice; /* (c) Process "nice" value. */
int p_fibnum; /* in this routing domain XXX MRT */
+ pid_t p_reapsubtree; /* (e) Pid of the direct child of the
+ reaper which spawned
+ our subtree. */
+ u_int p_xexit; /* (c) Exit code. */
+ u_int p_xsig; /* (c) Stop/kill sig. */
/* End area that is copied on creation. */
-#define p_endcopy p_xstat
-
- u_short p_xstat; /* (c) Exit status; also stop sig. */
- struct knlist p_klist; /* (c) Knotes attached to this proc. */
+#define p_endcopy p_xsig
+ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */
+ struct knlist *p_klist; /* (c) Knotes attached to this proc. */
int p_numthreads; /* (c) Number of threads. */
struct mdproc p_md; /* Any machine-dependent fields. */
struct callout p_itcallout; /* (h + c) Interval timer callout. */
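
p_xexit and p_xsig replace the old packed p_xstat (removed just above), keeping the exit code and the stop/kill signal in separate fields. For compatibility the two can still be recombined into a traditional wait(2) status word; a hedged sketch using the W_EXITCODE() macro from <sys/wait.h>:

static int
example_wait_status(struct proc *p)
{

	return (W_EXITCODE(p->p_xexit, p->p_xsig));
}
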
@@ -616,7 +673,6 @@ struct proc {
struct proc *p_leader; /* (b) */
void *p_emuldata; /* (c) Emulator state data. */
struct label *p_label; /* (*) Proc (not subject) MAC label. */
- struct p_sched *p_sched; /* (*) Scheduler-specific data. */
STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
@@ -625,6 +681,8 @@ struct proc {
after fork. */
uint64_t p_prev_runtime; /* (c) Resource usage accounting. */
struct racct *p_racct; /* (b) Resource accounting. */
+ int p_throttled; /* (c) Flag for racct pcpu throttling */
+ struct vm_domain_policy p_vm_dom_policy; /* (c) process default VM domain, or -1 */
/*
* An orphan is the child that has been re-parented to the
* debugger as a result of attaching to it. Need to keep
@@ -633,24 +691,37 @@ struct proc {
*/
LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */
LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */
- u_char p_throttled; /* (c) Flag for racct pcpu throttling */
#endif /* __rtems__ */
};
#define p_session p_pgrp->pg_session
#define p_pgid p_pgrp->pg_id
-#define NOCPU 0xff /* For when we aren't on a CPU. */
+#define NOCPU (-1) /* For when we aren't on a CPU. */
+#define NOCPU_OLD (255)
+#define MAXCPU_OLD (254)
#define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock)
#define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock)
#define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type))
+#define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx)
+#define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx)
+#define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type))
+
+#define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx)
+#define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx)
+#define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type))
+
+#define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx)
+#define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx)
+#define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type))
+
/* These flags are kept in p_flag. */
#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */
#define P_CONTROLT 0x00002 /* Has a controlling terminal. */
-#define P_KTHREAD 0x00004 /* Kernel thread (*). */
-#define P_FOLLOWFORK 0x00008 /* Attach parent debugger to children. */
+#define P_KPROC 0x00004 /* Kernel process. */
+#define P_UNUSED3 0x00008 /* --available-- */
#define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */
#define P_PROFIL 0x00020 /* Has started profiling. */
#define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. */
@@ -672,7 +743,7 @@ struct proc {
#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
#define P_HWPMC 0x800000 /* Process is using HWPMCs */
#define P_JAILED 0x1000000 /* Process is in jail. */
-#define P_ORPHAN 0x2000000 /* Orphaned. */
+#define P_TOTAL_STOP 0x2000000 /* Stopped in stop_all_proc. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */
#define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
#define P_INMEM 0x10000000 /* Loaded into memory. */
@@ -686,6 +757,16 @@ struct proc {
/* These flags are kept in p_flag2. */
#define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */
+#define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */
+#define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */
+#define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */
+#define P2_PTRACE_FSTP 0x00000010 /* SIGSTOP from PT_ATTACH not yet handled. */
+
+/* Flags protected by proctree_lock, kept in p_treeflags. */
+#define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */
+#define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan
+ list */
+#define P_TREE_REAPER 0x00000004 /* Reaper of subtree */
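
P2_NOTRACE and P2_NOTRACE_EXEC back the procctl(2) trace-control facility: a process marked this way refuses ptrace(2) attach and core dumps. A hedged userland example of setting it, assuming the PROC_TRACE_CTL command and its PROC_TRACE_CTL_DISABLE argument:

#include <sys/procctl.h>
#include <unistd.h>

int
disable_tracing(void)
{
	int arg = PROC_TRACE_CTL_DISABLE;

	return (procctl(P_PID, getpid(), PROC_TRACE_CTL, &arg));
}
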
/*
* These were process status values (p_stat), now they are only used in
@@ -707,7 +788,7 @@ struct proc {
#define SW_TYPE_MASK 0xff /* First 8 bits are switch type */
#define SWT_NONE 0 /* Unspecified switch. */
#define SWT_PREEMPT 1 /* Switching due to preemption. */
-#define SWT_OWEPREEMPT 2 /* Switching due to opepreempt. */
+#define SWT_OWEPREEMPT 2 /* Switching due to owepreempt. */
#define SWT_TURNSTILE 3 /* Turnstile contention. */
#define SWT_SLEEPQ 4 /* Sleepq wait. */
#define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */
@@ -728,6 +809,7 @@ struct proc {
#define SINGLE_NO_EXIT 0
#define SINGLE_EXIT 1
#define SINGLE_BOUNDARY 2
+#define SINGLE_ALLPROC 3
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
@@ -755,6 +837,8 @@ extern pid_t pid_max;
#define STOPEVENT(p, e, v) do { \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \
+ "checking stopevent %d", (e)); \
if ((p)->p_stops & (e)) { \
PROC_LOCK(p); \
stopevent((p), (e), (v)); \
@@ -797,8 +881,21 @@ extern pid_t pid_max;
#define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx)
#define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type))
+/*
+ * Non-zero p_lock ensures that:
+ * - exit1() is not performed until p_lock reaches zero;
+ * - the process' thread stacks are not swapped out if they are currently
+ *   resident (P_INMEM).
+ *
+ * PHOLD() asserts that the process (unless it is the current process) is
+ * not exiting, increments p_lock and, if needed, swaps the thread stacks
+ * into memory.
+ * _PHOLD() is the same as PHOLD(), but the caller must already hold the
+ * process lock.
+ * _PHOLD_LITE() likewise requires the process lock but, unlike _PHOLD(),
+ * only guarantees that exit1() is not executed; faultin() is not called.
+ */
#ifndef __rtems__
-/* Hold process U-area in memory, normally for ptrace/procfs work. */
#define PHOLD(p) do { \
PROC_LOCK(p); \
_PHOLD(p); \
@@ -807,13 +904,19 @@ extern pid_t pid_max;
#define _PHOLD(p) do { \
PROC_LOCK_ASSERT((p), MA_OWNED); \
KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
- ("PHOLD of exiting process")); \
+ ("PHOLD of exiting process %p", p)); \
(p)->p_lock++; \
if (((p)->p_flag & P_INMEM) == 0) \
faultin((p)); \
} while (0)
-#define PROC_ASSERT_HELD(p) do { \
- KASSERT((p)->p_lock > 0, ("process not held")); \
+#define _PHOLD_LITE(p) do { \
+ PROC_LOCK_ASSERT((p), MA_OWNED); \
+ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
+ ("PHOLD of exiting process %p", p)); \
+ (p)->p_lock++; \
+} while (0)
+#define PROC_ASSERT_HELD(p) do { \
+ KASSERT((p)->p_lock > 0, ("process %p not held", p)); \
} while (0)
#define PRELE(p) do { \
@@ -828,8 +931,13 @@ extern pid_t pid_max;
if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \
wakeup(&(p)->p_lock); \
} while (0)
-#define PROC_ASSERT_NOT_HELD(p) do { \
- KASSERT((p)->p_lock == 0, ("process held")); \
+#define PROC_ASSERT_NOT_HELD(p) do { \
+ KASSERT((p)->p_lock == 0, ("process %p held", p)); \
+} while (0)
+
+#define PROC_UPDATE_COW(p) do { \
+ PROC_LOCK_ASSERT((p), MA_OWNED); \
+ (p)->p_cowgen++; \
} while (0)
#else /* __rtems__ */
#define PHOLD(x) do { } while (0)
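
PROC_UPDATE_COW() ties together several additions in this patch: p_cowgen/td_cowgen, the per-thread td_limit, and the thread_cow_*() helpers declared below. When a shared, copy-on-write per-process resource (credentials, limits) is replaced, the generation count is bumped; each thread later notices the mismatch and refreshes its cached pointers. A minimal sketch of both sides, with hypothetical helper names (the real trigger sits in the kernel/user boundary code):

static void
example_replace_limits(struct proc *p, struct plimit *newlim)
{

	PROC_LOCK(p);
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);		/* p_cowgen++: cached td_limit is stale */
	PROC_UNLOCK(p);
}

static void
example_refresh_thread(struct thread *td, struct proc *p)
{

	if (td->td_cowgen != p->p_cowgen)
		thread_cow_update(td);	/* re-reference td_ucred/td_limit */
}
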
@@ -840,17 +948,11 @@ extern pid_t pid_max;
#define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP)
/* Control whether or not it is safe for curthread to sleep. */
-#define THREAD_NO_SLEEPING() do { \
- KASSERT(!(curthread->td_pflags & TDP_NOSLEEPING), \
- ("nested no sleeping")); \
- curthread->td_pflags |= TDP_NOSLEEPING; \
-} while (0)
+#define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++)
-#define THREAD_SLEEPING_OK() do { \
- KASSERT((curthread->td_pflags & TDP_NOSLEEPING), \
- ("nested sleeping ok")); \
- curthread->td_pflags &= ~TDP_NOSLEEPING; \
-} while (0)
+#define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--)
+
+#define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0)
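
Replacing the TDP_NOSLEEPING flag with the td_no_sleeping counter makes no-sleep sections nestable instead of asserting on re-entry. A small illustration (hypothetical caller, not from the patch):

static void
example_nested_nosleep(void)
{

	THREAD_NO_SLEEPING();			/* outer no-sleep section */
	THREAD_NO_SLEEPING();			/* nesting is now allowed */
	KASSERT(!THREAD_CAN_SLEEP(), ("sleep unexpectedly permitted"));
	THREAD_SLEEPING_OK();
	THREAD_SLEEPING_OK();			/* back to THREAD_CAN_SLEEP() */
}
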
#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
@@ -865,10 +967,12 @@ extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;
extern struct sx allproc_lock;
+extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0; /* Process slot for swapper. */
-extern struct thread thread0; /* Primary thread in proc0. */
+extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
+#define thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0; /* VM space for proc0. */
extern int hogticks; /* Limit on kernel cpu hogs. */
extern int lastpid;
@@ -890,6 +994,16 @@ struct proc *pfind_locked(pid_t pid);
struct pgrp *pgfind(pid_t); /* Find process group by id. */
struct proc *zpfind(pid_t); /* Find zombie process by id. */
+struct fork_req {
+ int fr_flags;
+ int fr_pages;
+ int *fr_pidp;
+ struct proc **fr_procp;
+ int *fr_pd_fd;
+ int fr_pd_flags;
+ struct filecaps *fr_pd_fcaps;
+};
+
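
struct fork_req collects what used to be the positional arguments of fork1() (compare the prototype change further down); unused members are simply left zeroed. A hedged sketch of a caller, with a hypothetical wrapper name:

static int
example_do_fork(struct thread *td, int flags, struct proc **procp)
{
	struct fork_req fr;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = flags;		/* e.g. RFFDG | RFPROC */
	fr.fr_procp = procp;		/* optionally return the new process */
	return (fork1(td, &fr));
}
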
/*
* pget() flags.
*/
@@ -907,13 +1021,22 @@ int pget(pid_t pid, int flags, struct proc **pp);
void ast(struct trapframe *framep);
struct thread *choosethread(void);
+#ifndef __rtems__
+int cr_cansee(struct ucred *u1, struct ucred *u2);
+int cr_canseesocket(struct ucred *cred, struct socket *so);
+#else /* __rtems__ */
+#define cr_cansee(u1, u2) 0
+#define cr_canseesocket(cred, so) 0
+#endif /* __rtems__ */
+int cr_canseeothergids(struct ucred *u1, struct ucred *u2);
+int cr_canseeotheruids(struct ucred *u1, struct ucred *u2);
int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
struct session *sess);
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
void faultin(struct proc *p);
void fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
-int fork1(struct thread *, int, int, struct proc **, int *, int);
+int fork1(struct thread *, struct fork_req *);
void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
@@ -924,6 +1047,7 @@ void kick_proc0(void);
#else /* __rtems__ */
#define kick_proc0()
#endif /* __rtems__ */
+void killjobc(void);
int leavepgrp(struct proc *p);
int maybe_preempt(struct thread *td);
void maybe_yield(void);
@@ -942,11 +1066,14 @@ int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
void procinit(void);
void proc_linkup0(struct proc *p, struct thread *td);
void proc_linkup(struct proc *p, struct thread *td);
+struct proc *proc_realparent(struct proc *child);
void proc_reap(struct thread *td, struct proc *p, int *status, int options);
void proc_reparent(struct proc *child, struct proc *newparent);
+void proc_set_traced(struct proc *p, bool stop);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
void pstats_free(struct pstats *ps);
+void reaper_abandon_children(struct proc *p, bool exiting);
#ifndef __rtems__
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
@@ -960,7 +1087,6 @@ int setrunnable(struct thread *);
void setsugid(struct proc *p);
int should_yield(void);
int sigonstack(size_t sp);
-void sleepinit(void);
void stopevent(struct proc *, u_int, u_int);
struct thread *tdfind(lwpid_t, pid_t);
void threadinit(void);
@@ -968,22 +1094,21 @@ void tidhash_add(struct thread *);
void tidhash_remove(struct thread *);
void cpu_idle(int);
int cpu_idle_wakeup(int);
-extern void (*cpu_idle_hook)(void); /* Hook to machdep CPU idler. */
+extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */
void cpu_switch(struct thread *, struct thread *, struct mtx *);
void cpu_throw(struct thread *, struct thread *) __dead2;
void unsleep(struct thread *);
void userret(struct thread *, struct trapframe *);
void cpu_exit(struct thread *);
-void exit1(struct thread *, int) __dead2;
-struct syscall_args;
+void exit1(struct thread *, int, int) __dead2;
+void cpu_copy_thread(struct thread *td, struct thread *td0);
int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa);
void cpu_fork(struct thread *, struct proc *, struct thread *, int);
-void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
+void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
void cpu_set_syscall_retval(struct thread *, int);
-void cpu_set_upcall(struct thread *td, struct thread *td0);
#ifndef __rtems__
-void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *,
+void cpu_set_upcall(struct thread *, void (*)(void *), void *,
stack_t *);
#endif /* __rtems__ */
int cpu_set_user_tls(struct thread *, void *tls_base);
@@ -995,27 +1120,35 @@ void cpu_thread_swapin(struct thread *);
void cpu_thread_swapout(struct thread *);
struct thread *thread_alloc(int pages);
int thread_alloc_stack(struct thread *, int pages);
+void thread_cow_get_proc(struct thread *newtd, struct proc *p);
+void thread_cow_get(struct thread *newtd, struct thread *td);
+void thread_cow_free(struct thread *td);
+void thread_cow_update(struct thread *td);
+int thread_create(struct thread *td, struct rtprio *rtp,
+ int (*initialize_thread)(struct thread *, void *), void *thunk);
void thread_exit(void) __dead2;
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap(void);
-int thread_single(int how);
-void thread_single_end(void);
+int thread_single(struct proc *p, int how);
+void thread_single_end(struct proc *p, int how);
void thread_stash(struct thread *td);
void thread_stopped(struct proc *p);
void childproc_stopped(struct proc *child, int reason);
void childproc_continued(struct proc *child);
void childproc_exited(struct proc *child);
int thread_suspend_check(int how);
-void thread_suspend_switch(struct thread *);
+bool thread_suspend_check_needed(void);
+void thread_suspend_switch(struct thread *, struct proc *p);
void thread_suspend_one(struct thread *td);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
-int thread_unsuspend_one(struct thread *td);
-void thread_unthread(struct thread *td);
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
+void stop_all_proc(void);
+void resume_all_proc(void);
+
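
stop_all_proc() and resume_all_proc() are the consumers of the new SINGLE_ALLPROC mode and the P_TOTAL_STOP flag defined earlier. A minimal sketch of how a caller would bracket work that needs every other process quiesced (hypothetical helper):

static void
example_run_quiesced(void (*work)(void))
{

	stop_all_proc();
	work();
	resume_all_proc();
}
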
#ifndef __rtems__
static __inline int
curthread_pflags_set(int flags)
@@ -1035,6 +1168,13 @@ curthread_pflags_restore(int save)
curthread->td_pflags &= save;
}
+
+static __inline __pure2 struct td_sched *
+td_get_sched(struct thread *td)
+{
+
+ return ((struct td_sched *)&td[1]);
+}
#endif /* __rtems__ */
#endif /* _KERNEL */