Diffstat (limited to 'freebsd/sys/sys/pcpu.h')
-rw-r--r--	freebsd/sys/sys/pcpu.h	56
1 file changed, 37 insertions(+), 19 deletions(-)
diff --git a/freebsd/sys/sys/pcpu.h b/freebsd/sys/sys/pcpu.h
index ec3f9f94..2d3f3411 100644
--- a/freebsd/sys/sys/pcpu.h
+++ b/freebsd/sys/sys/pcpu.h
@@ -38,7 +38,11 @@
#endif
#include <sys/_cpuset.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/_sx.h>
#include <sys/queue.h>
+#include <sys/_rmlock.h>
#include <sys/vmmeter.h>
#include <rtems/bsd/sys/resource.h>
#include <machine/pcpu.h>
@@ -141,15 +145,6 @@ extern uintptr_t dpcpu_off[];
#endif /* _KERNEL */
-/*
- * XXXUPS remove as soon as we have per cpu variable
- * linker sets and can define rm_queue in _rm_lock.h
- */
-struct rm_queue {
- struct rm_queue* volatile rmq_next;
- struct rm_queue* volatile rmq_prev;
-};
-
/*
* This structure maps out the global data that needs to be kept on a
* per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
@@ -157,6 +152,7 @@ struct rm_queue {
* defined in the PCPU_MD_FIELDS macro defined in <machine/pcpu.h>.
*/
struct pcpu {
+#ifndef __rtems__
struct thread *pc_curthread; /* Current thread */
struct thread *pc_idlethread; /* Idle thread */
struct thread *pc_fpcurthread; /* Fp state owner */
@@ -173,17 +169,9 @@ struct pcpu {
long pc_cp_time[CPUSTATES]; /* statclock ticks */
struct device *pc_device;
void *pc_netisr; /* netisr SWI cookie */
- int pc_dnweight; /* vm_page_dontneed() */
+ int pc_unused1; /* unused field */
int pc_domain; /* Memory domain. */
-
- /*
- * Stuff for read mostly lock
- *
- * XXXUPS remove as soon as we have per cpu variable
- * linker sets.
- */
- struct rm_queue pc_rm_queue;
-
+ struct rm_queue pc_rm_queue; /* rmlock list of trackers */
uintptr_t pc_dynamic; /* Dynamic per-cpu data area */
/*
@@ -197,8 +185,19 @@ struct pcpu {
* if only to make kernel debugging easier.
*/
PCPU_MD_FIELDS;
+#else /* __rtems__ */
+ int pc_dummy;
+#endif /* __rtems__ */
} __aligned(CACHE_LINE_SIZE);
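/*
 * Illustrative sketch (not part of this header): struct pcpu members are
 * normally reached through the PCPU_GET/PCPU_SET/PCPU_PTR accessors from
 * <machine/pcpu.h>, named without the pc_ prefix.  The helper name below
 * is hypothetical and only shows the access pattern.
 */
#ifndef __rtems__
static inline struct thread *
example_pcpu_curthread(void)
{

	return (PCPU_GET(curthread));	/* reads pc_curthread on this CPU */
}
#endif /* __rtems__ */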
+#ifdef CTASSERT
+/*
+ * To minimize memory waste in per-cpu UMA zones, the size of struct pcpu
+ * should be a divisor of PAGE_SIZE.
+ */
+CTASSERT((PAGE_SIZE / sizeof(struct pcpu)) * sizeof(struct pcpu) == PAGE_SIZE);
+#endif
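/*
 * Illustrative sketch (not part of this header): the assertion above is
 * equivalent to requiring that sizeof(struct pcpu) divide PAGE_SIZE with
 * no remainder, so a page used by a per-cpu UMA zone splits into whole
 * items:
 */
#ifdef CTASSERT
CTASSERT(PAGE_SIZE % sizeof(struct pcpu) == 0);
#endif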
+
#ifdef _KERNEL
STAILQ_HEAD(cpuhead, pcpu);
@@ -213,6 +212,25 @@ extern struct pcpu *cpuid_to_pcpu[];
#endif
#define curvidata PCPU_GET(vidata)
+/* Accessor to elements allocated via UMA_ZONE_PCPU zone. */
+static inline void *
+zpcpu_get(void *base)
+{
+
+#ifndef __rtems__
+ return ((char *)(base) + sizeof(struct pcpu) * curcpu);
+#else /* __rtems__ */
+ return ((char *)(base) + sizeof(struct pcpu) * _SMP_Get_current_processor());
+#endif /* __rtems__ */
+}
+
+static inline void *
+zpcpu_get_cpu(void *base, int cpu)
+{
+
+ return ((char *)(base) + sizeof(struct pcpu) * cpu);
+}
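/*
 * Illustrative sketch (not part of this header): a UMA_ZONE_PCPU
 * allocation returns a base pointer whose per-CPU copies are spaced
 * sizeof(struct pcpu) bytes apart, which is exactly the stride that
 * zpcpu_get() applies.  A per-CPU counter drawn from such a zone could
 * be bumped as below; critical_enter()/critical_exit() (declared in
 * <sys/systm.h>) keep the thread on one CPU while the pointer is live.
 * The helper name is hypothetical.
 */
static inline void
example_zpcpu_counter_inc(uint64_t *base)	/* base from a pcpu zone */
{
	uint64_t *c;

	critical_enter();
	c = zpcpu_get(base);
	(*c)++;
	critical_exit();
}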
+
/*
* Machine dependent callouts. cpu_pcpu_init() is responsible for
* initializing machine dependent fields of struct pcpu, and