summaryrefslogtreecommitdiffstats
path: root/freebsd/sys/contrib
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-08-21 13:47:02 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2018-09-21 10:29:41 +0200
commitbcdce02d9bc8150e1d191ed5ca9da45b7604964a (patch)
tree3b2faf509db7672ee1fc98857736470be97e7ed8 /freebsd/sys/contrib
parentUpdate to FreeBSD head 2018-04-01 (diff)
downloadrtems-libbsd-bcdce02d9bc8150e1d191ed5ca9da45b7604964a.tar.bz2
Update to FreeBSD head 2018-06-01
Git mirror commit fb63610a69b0eb7f69a201ba05c4c1a7a2739cf9. Update #3472.
Diffstat (limited to 'freebsd/sys/contrib')
-rw-r--r--freebsd/sys/contrib/ck/include/ck_backoff.h57
-rw-r--r--freebsd/sys/contrib/ck/include/ck_cc.h173
-rw-r--r--freebsd/sys/contrib/ck/include/ck_epoch.h281
-rw-r--r--freebsd/sys/contrib/ck/include/ck_limits.h48
-rw-r--r--freebsd/sys/contrib/ck/include/ck_md.h136
-rw-r--r--freebsd/sys/contrib/ck/include/ck_pr.h1225
-rw-r--r--freebsd/sys/contrib/ck/include/ck_queue.h428
-rw-r--r--freebsd/sys/contrib/ck/include/ck_stack.h357
-rw-r--r--freebsd/sys/contrib/ck/include/ck_stdbool.h31
-rw-r--r--freebsd/sys/contrib/ck/include/ck_stddef.h31
-rw-r--r--freebsd/sys/contrib/ck/include/ck_stdint.h34
-rw-r--r--freebsd/sys/contrib/ck/include/ck_string.h31
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h167
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr.h227
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/arm/ck_f_pr.h162
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/arm/ck_pr.h563
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ck_cc.h141
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ck_f_pr.h105
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ck_pr.h297
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ppc/ck_f_pr.h79
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ppc/ck_pr.h327
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ppc64/ck_f_pr.h97
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/ppc64/ck_pr.h427
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_f_pr.h26
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h228
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/x86/ck_f_pr.h152
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/x86/ck_pr.h408
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/x86_64/ck_f_pr.h202
-rw-r--r--freebsd/sys/contrib/ck/include/gcc/x86_64/ck_pr.h606
-rw-r--r--freebsd/sys/contrib/ck/src/ck_epoch.c597
30 files changed, 7643 insertions, 0 deletions
diff --git a/freebsd/sys/contrib/ck/include/ck_backoff.h b/freebsd/sys/contrib/ck/include/ck_backoff.h
new file mode 100644
index 00000000..82a4f215
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_backoff.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BACKOFF_H
+#define CK_BACKOFF_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+
+#ifndef CK_BACKOFF_CEILING
+#define CK_BACKOFF_CEILING ((1 << 20) - 1)
+#endif
+
+#define CK_BACKOFF_INITIALIZER (1 << 9)
+
+typedef unsigned int ck_backoff_t;
+
+/*
+ * This is a exponential back-off implementation.
+ */
+CK_CC_INLINE static void
+ck_backoff_eb(unsigned int *c)
+{
+ unsigned int i, ceiling;
+
+ ceiling = *c;
+ for (i = 0; i < ceiling; i++)
+ ck_pr_barrier();
+
+ *c = ceiling <<= ceiling < CK_BACKOFF_CEILING;
+ return;
+}
+
+#endif /* CK_BACKOFF_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_cc.h b/freebsd/sys/contrib/ck/include/ck_cc.h
new file mode 100644
index 00000000..9a152a3c
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_cc.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_CC_H
+#define CK_CC_H
+
+#if defined(__GNUC__) || defined(__SUNPRO_C)
+#include "gcc/ck_cc.h"
+#endif
+
+#ifndef CK_CC_RESTRICT
+#define CK_CC_RESTRICT
+#endif
+
+#ifndef CK_CC_INLINE
+#define CK_CC_INLINE inline
+#endif
+
+#ifndef CK_CC_FORCE_INLINE
+#define CK_CC_FORCE_INLINE inline
+#endif
+
+#define CK_CC_DECONST_PTR(X) ((void *)(uintptr_t)(X))
+
+/*
+ * Container function.
+ * This relies on (compiler) implementation-defined behavior.
+ */
+#define CK_CC_CONTAINER(F, T, M, N) \
+ CK_CC_INLINE static T * \
+ N(F *p) \
+ { \
+ F *n = p; \
+ return (T *)(void *)(((char *)n) - ((size_t)&((T *)0)->M)); \
+ }
+
+#define CK_CC_PAD(x) union { char pad[x]; }
+
+#ifndef CK_CC_ALIASED
+#define CK_CC_ALIASED
+#endif
+
+#ifndef CK_CC_UNUSED
+#define CK_CC_UNUSED
+#endif
+
+#ifndef CK_CC_USED
+#define CK_CC_USED
+#endif
+
+#ifndef CK_CC_IMM
+#define CK_CC_IMM
+#endif
+
+#ifndef CK_CC_PACKED
+#define CK_CC_PACKED
+#endif
+
+#ifndef CK_CC_WEAKREF
+#define CK_CC_WEAKREF
+#endif
+
+#ifndef CK_CC_ALIGN
+#define CK_CC_ALIGN(X)
+#endif
+
+#ifndef CK_CC_CACHELINE
+#define CK_CC_CACHELINE
+#endif
+
+#ifndef CK_CC_LIKELY
+#define CK_CC_LIKELY(x) x
+#endif
+
+#ifndef CK_CC_UNLIKELY
+#define CK_CC_UNLIKELY(x) x
+#endif
+
+#ifndef CK_CC_TYPEOF
+#define CK_CC_TYPEOF(X, DEFAULT) (DEFAULT)
+#endif
+
+#define CK_F_CC_FFS_G(L, T) \
+CK_CC_INLINE static int \
+ck_cc_##L(T v) \
+{ \
+ unsigned int i; \
+ \
+ if (v == 0) \
+ return 0; \
+ \
+ for (i = 1; (v & 1) == 0; i++, v >>= 1); \
+ return i; \
+}
+
+#ifndef CK_F_CC_FFS
+#define CK_F_CC_FFS
+CK_F_CC_FFS_G(ffs, unsigned int)
+#endif /* CK_F_CC_FFS */
+
+#ifndef CK_F_CC_FFSL
+#define CK_F_CC_FFSL
+CK_F_CC_FFS_G(ffsl, unsigned long)
+#endif /* CK_F_CC_FFSL */
+
+#ifndef CK_F_CC_FFSLL
+#define CK_F_CC_FFSLL
+CK_F_CC_FFS_G(ffsll, unsigned long long)
+#endif /* CK_F_CC_FFSLL */
+
+#undef CK_F_CC_FFS_G
+
+#ifndef CK_F_CC_CTZ
+#define CK_F_CC_CTZ
+CK_CC_INLINE static int
+ck_cc_ctz(unsigned int x)
+{
+ unsigned int i;
+
+ if (x == 0)
+ return 0;
+
+ for (i = 0; (x & 1) == 0; i++, x >>= 1);
+ return i;
+}
+#endif
+
+#ifndef CK_F_CC_POPCOUNT
+#define CK_F_CC_POPCOUNT
+CK_CC_INLINE static int
+ck_cc_popcount(unsigned int x)
+{
+ unsigned int acc;
+
+ for (acc = 0; x != 0; x >>= 1)
+ acc += x & 1;
+
+ return acc;
+}
+#endif
+
+
+#ifdef __cplusplus
+#define CK_CPP_CAST(type, arg) static_cast<type>(arg)
+#else
+#define CK_CPP_CAST(type, arg) arg
+#endif
+
+#endif /* CK_CC_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_epoch.h b/freebsd/sys/contrib/ck/include/ck_epoch.h
new file mode 100644
index 00000000..58f3d28a
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_epoch.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_EPOCH_H
+#define CK_EPOCH_H
+
+/*
+ * The implementation here is inspired from the work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ */
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_EPOCH_LENGTH
+#define CK_EPOCH_LENGTH 4
+#endif
+
+/*
+ * This is used for sense detection with-respect to concurrent
+ * epoch sections.
+ */
+#define CK_EPOCH_SENSE (2)
+
+struct ck_epoch_entry;
+typedef struct ck_epoch_entry ck_epoch_entry_t;
+typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
+
+/*
+ * This should be embedded into objects you wish to be the target of
+ * ck_epoch_cb_t functions (with ck_epoch_call).
+ */
+struct ck_epoch_entry {
+ ck_epoch_cb_t *function;
+ ck_stack_entry_t stack_entry;
+};
+
+/*
+ * A section object may be passed to every begin-end pair to allow for
+ * forward progress guarantees with-in prolonged active sections.
+ */
+struct ck_epoch_section {
+ unsigned int bucket;
+};
+typedef struct ck_epoch_section ck_epoch_section_t;
+
+/*
+ * Return pointer to ck_epoch_entry container object.
+ */
+#define CK_EPOCH_CONTAINER(T, M, N) \
+ CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
+
+struct ck_epoch_ref {
+ unsigned int epoch;
+ unsigned int count;
+};
+
+struct ck_epoch_record {
+ ck_stack_entry_t record_next;
+ struct ck_epoch *global;
+ unsigned int state;
+ unsigned int epoch;
+ unsigned int active;
+ struct {
+ struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
+ } local CK_CC_CACHELINE;
+ unsigned int n_pending;
+ unsigned int n_peak;
+ unsigned int n_dispatch;
+ void *ct;
+ ck_stack_t pending[CK_EPOCH_LENGTH];
+} CK_CC_CACHELINE;
+typedef struct ck_epoch_record ck_epoch_record_t;
+
+struct ck_epoch {
+ unsigned int epoch;
+ unsigned int n_free;
+ ck_stack_t records;
+};
+typedef struct ck_epoch ck_epoch_t;
+
+/*
+ * Internal functions.
+ */
+void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
+bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);
+
+CK_CC_FORCE_INLINE static void *
+ck_epoch_record_ct(const ck_epoch_record_t *record)
+{
+
+ return ck_pr_load_ptr(&record->ct);
+}
+
+/*
+ * Marks the beginning of an epoch-protected section.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
+{
+ struct ck_epoch *epoch = record->global;
+
+ /*
+ * Only observe new epoch if thread is not recursing into a read
+ * section.
+ */
+ if (record->active == 0) {
+ unsigned int g_epoch;
+
+ /*
+ * It is possible for loads to be re-ordered before the store
+ * is committed into the caller's epoch and active fields.
+ * For this reason, store to load serialization is necessary.
+ */
+#if defined(CK_MD_TSO)
+ ck_pr_fas_uint(&record->active, 1);
+ ck_pr_fence_atomic_load();
+#else
+ ck_pr_store_uint(&record->active, 1);
+ ck_pr_fence_memory();
+#endif
+
+ /*
+ * This load is allowed to be re-ordered prior to setting
+ * active flag due to monotonic nature of the global epoch.
+ * However, stale values lead to measurable performance
+ * degradation in some torture tests so we disallow early load
+ * of global epoch.
+ */
+ g_epoch = ck_pr_load_uint(&epoch->epoch);
+ ck_pr_store_uint(&record->epoch, g_epoch);
+ } else {
+ ck_pr_store_uint(&record->active, record->active + 1);
+ }
+
+ if (section != NULL)
+ _ck_epoch_addref(record, section);
+
+ return;
+}
+
+/*
+ * Marks the end of an epoch-protected section. Returns true if no more
+ * sections exist for the caller.
+ */
+CK_CC_FORCE_INLINE static bool
+ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
+{
+
+ ck_pr_fence_release();
+ ck_pr_store_uint(&record->active, record->active - 1);
+
+ if (section != NULL)
+ return _ck_epoch_delref(record, section);
+
+ return record->active == 0;
+}
+
+/*
+ * Defers the execution of the function pointed to by the "cb"
+ * argument until an epoch counter loop. This allows for a
+ * non-blocking deferral.
+ *
+ * We can get away without a fence here due to the monotonic nature
+ * of the epoch counter. Worst case, this will result in some delays
+ * before object destruction.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_call(ck_epoch_record_t *record,
+ ck_epoch_entry_t *entry,
+ ck_epoch_cb_t *function)
+{
+ struct ck_epoch *epoch = record->global;
+ unsigned int e = ck_pr_load_uint(&epoch->epoch);
+ unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+ record->n_pending++;
+ entry->function = function;
+ ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
+ return;
+}
+
+/*
+ * Same as ck_epoch_call, but allows for records to be shared and is reentrant.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_call_strict(ck_epoch_record_t *record,
+ ck_epoch_entry_t *entry,
+ ck_epoch_cb_t *function)
+{
+ struct ck_epoch *epoch = record->global;
+ unsigned int e = ck_pr_load_uint(&epoch->epoch);
+ unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+ ck_pr_inc_uint(&record->n_pending);
+ entry->function = function;
+
+ /* Store fence is implied by push operation. */
+ ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
+ return;
+}
+
+/*
+ * This callback is used for synchronize_wait to allow for custom blocking
+ * behavior.
+ */
+typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
+ void *);
+
+/*
+ * Return latest epoch value. This operation provides load ordering.
+ */
+CK_CC_FORCE_INLINE static unsigned int
+ck_epoch_value(const ck_epoch_t *ep)
+{
+
+ ck_pr_fence_load();
+ return ck_pr_load_uint(&ep->epoch);
+}
+
+void ck_epoch_init(ck_epoch_t *);
+
+/*
+ * Attempts to recycle an unused epoch record. If one is successfully
+ * allocated, the record context pointer is also updated.
+ */
+ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);
+
+/*
+ * Registers an epoch record. An optional context pointer may be passed that
+ * is retrievable with ck_epoch_record_ct.
+ */
+void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);
+
+/*
+ * Marks a record as available for re-use by a subsequent recycle operation.
+ * Note that the record cannot be physically destroyed.
+ */
+void ck_epoch_unregister(ck_epoch_record_t *);
+
+bool ck_epoch_poll(ck_epoch_record_t *);
+bool ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred);
+void ck_epoch_synchronize(ck_epoch_record_t *);
+void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
+void ck_epoch_barrier(ck_epoch_record_t *);
+void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);
+
+/*
+ * Reclaim entries associated with a record. This is safe to call only on
+ * the caller's record or records that are using call_strict.
+ */
+void ck_epoch_reclaim(ck_epoch_record_t *);
+
+#endif /* CK_EPOCH_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_limits.h b/freebsd/sys/contrib/ck/include/ck_limits.h
new file mode 100644
index 00000000..c8749550
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_limits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+
+#ifndef UINT8_MAX
+#define UINT8_MAX ((u8)(~0U))
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX USHRT_MAX
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX UINT_MAX
+#endif
+#ifndef UINT64_MAX
+#define UINT64_MAX ULLONG_MAX
+#endif
+
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stdint.h>
+#include <sys/limits.h>
+#else
+#include <limits.h>
+#endif /* __linux__ && __KERNEL__ */
diff --git a/freebsd/sys/contrib/ck/include/ck_md.h b/freebsd/sys/contrib/ck/include/ck_md.h
new file mode 100644
index 00000000..3a69584e
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_md.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/sys/contrib/ck/include/ck_md.h 329388 2018-02-16 17:50:06Z cognet $
+ */
+
+/*
+ * This header file is meant for use of Concurrency Kit in the FreeBSD kernel.
+ */
+
+#ifndef CK_MD_H
+#define CK_MD_H
+
+#include <sys/param.h>
+
+#ifndef __rtems__
+#ifndef _KERNEL
+#error This header file is meant for the FreeBSD kernel.
+#endif /* _KERNEL */
+#endif /* __rtems__ */
+
+#ifndef CK_MD_CACHELINE
+/*
+ * FreeBSD's CACHE_LINE macro is a compile-time maximum cache-line size for an
+ * architecture, defined to be 128 bytes by default on x86*. Even in presence
+ * of adjacent sector prefetch, this doesn't make sense from a modeling
+ * perspective.
+ */
+#if defined(__amd64__) || defined(__i386__)
+#define CK_MD_CACHELINE (64)
+#else
+#define CK_MD_CACHELINE (CACHE_LINE_SIZE)
+#endif /* !__amd64__ && !__i386__ */
+#endif /* CK_MD_CACHELINE */
+
+#ifndef CK_MD_PAGESIZE
+#define CK_MD_PAGESIZE (PAGE_SIZE)
+#endif
+
+/*
+ * Once FreeBSD has a mechanism to detect RTM, this can be enabled and RTM
+ * facilities can be called. These facilities refer to TSX.
+ */
+#ifndef CK_MD_RTM_DISABLE
+#define CK_MD_RTM_DISABLE
+#endif /* CK_MD_RTM_DISABLE */
+
+/*
+ * Do not enable pointer-packing-related (VMA) optimizations in kernel-space.
+ */
+#ifndef CK_MD_POINTER_PACK_DISABLE
+#define CK_MD_POINTER_PACK_DISABLE
+#endif /* CK_MD_POINTER_PACK_DISABLE */
+
+/*
+ * The following would be used for pointer-packing tricks, disabled for the
+ * kernel.
+ */
+#ifndef CK_MD_VMA_BITS_UNKNOWN
+#define CK_MD_VMA_BITS_UNKNOWN
+#endif /* CK_MD_VMA_BITS_UNKNOWN */
+
+/*
+ * Do not enable double operations in kernel-space.
+ */
+#ifndef CK_PR_DISABLE_DOUBLE
+#define CK_PR_DISABLE_DOUBLE
+#endif /* CK_PR_DISABLE_DOUBLE */
+
+/*
+ * If building for a uni-processor target, then enable the uniprocessor
+ * feature flag. This, among other things, will remove the lock prefix.
+ */
+#ifndef SMP
+#define CK_MD_UMP
+#endif /* SMP */
+
+/*
+ * Disable the use of compiler builtin functions.
+ */
+#define CK_MD_CC_BUILTIN_DISABLE 1
+
+/*
+ * CK expects those, which are normally defined by the build system.
+ */
+#if defined(__i386__) && !defined(__x86__)
+#define __x86__
+/*
+ * If x86 becomes more relevant, we may want to consider importing in
+ * __mbk() to avoid potential issues around false sharing.
+ */
+#define CK_MD_TSO
+#define CK_MD_SSE_DISABLE 1
+#elif defined(__amd64__)
+#define CK_MD_TSO
+#elif defined(__sparc64__) && !defined(__sparcv9__)
+#define __sparcv9__
+#define CK_MD_TSO
+#elif defined(__powerpc64__) && !defined(__ppc64__)
+#define __ppc64__
+#elif defined(__powerpc__) && !defined(__ppc__)
+#define __ppc__
+#endif
+
+/* If no memory model has been defined, assume RMO. */
+#if !defined(CK_MD_RMO) && !defined(CK_MD_TSO) && !defined(CK_MD_PSO)
+#define CK_MD_RMO
+#endif
+
+#define CK_VERSION "0.7.0"
+#define CK_GIT_SHA "db5db44"
+
+#endif /* CK_MD_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_pr.h b/freebsd/sys/contrib/ck/include/ck_pr.h
new file mode 100644
index 00000000..7fa57a8e
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_pr.h
@@ -0,0 +1,1225 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_H
+#define CK_PR_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_USE_CC_BUILTINS
+#if defined(__x86_64__)
+#include "gcc/x86_64/ck_pr.h"
+#elif defined(__x86__)
+#include "gcc/x86/ck_pr.h"
+#elif defined(__sparcv9__)
+#include "gcc/sparcv9/ck_pr.h"
+#elif defined(__ppc64__)
+#include "gcc/ppc64/ck_pr.h"
+#elif defined(__s390x__)
+#include "gcc/s390x/ck_pr.h"
+#elif defined(__ppc__)
+#include "gcc/ppc/ck_pr.h"
+#elif defined(__arm__)
+#if __ARM_ARCH >= 6
+#include "gcc/arm/ck_pr.h"
+#else
+#include "gcc/arm/ck_pr_armv4.h"
+#endif
+#elif defined(__aarch64__)
+#include "gcc/aarch64/ck_pr.h"
+#elif !defined(__GNUC__)
+#error Your platform is unsupported
+#endif
+#endif /* !CK_USE_CC_BUILTINS */
+
+#if defined(__GNUC__)
+#include "gcc/ck_pr.h"
+#endif
+
+#define CK_PR_FENCE_EMIT(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_##T(void) \
+ { \
+ ck_pr_fence_strict_##T(); \
+ return; \
+ }
+#define CK_PR_FENCE_NOOP(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_##T(void) \
+ { \
+ ck_pr_barrier(); \
+ return; \
+ }
+
+/*
+ * None of the currently supported platforms allow for data-dependent
+ * load ordering.
+ */
+CK_PR_FENCE_NOOP(load_depends)
+#define ck_pr_fence_strict_load_depends ck_pr_fence_load_depends
+
+/*
+ * In memory models where atomic operations do not have serializing
+ * effects, atomic read-modify-write operations are modeled as stores.
+ */
+#if defined(CK_MD_RMO)
+/*
+ * Only stores to the same location have a global
+ * ordering.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_EMIT(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_EMIT(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_EMIT(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+CK_PR_FENCE_EMIT(acqrel)
+CK_PR_FENCE_EMIT(lock)
+CK_PR_FENCE_EMIT(unlock)
+#elif defined(CK_MD_PSO)
+/*
+ * Anything can be re-ordered with respect to stores.
+ * Otherwise, loads are executed in-order.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+CK_PR_FENCE_EMIT(acqrel)
+CK_PR_FENCE_EMIT(lock)
+CK_PR_FENCE_EMIT(unlock)
+#elif defined(CK_MD_TSO)
+/*
+ * Only loads are re-ordered and only with respect to
+ * prior stores. Atomic operations are serializing.
+ */
+CK_PR_FENCE_NOOP(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_NOOP(atomic_store)
+CK_PR_FENCE_NOOP(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_NOOP(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_NOOP(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_NOOP(acquire)
+CK_PR_FENCE_NOOP(release)
+CK_PR_FENCE_NOOP(acqrel)
+CK_PR_FENCE_NOOP(lock)
+CK_PR_FENCE_NOOP(unlock)
+#else
+#error "No memory model has been defined."
+#endif /* CK_MD_TSO */
+
+#undef CK_PR_FENCE_EMIT
+#undef CK_PR_FENCE_NOOP
+
+#ifndef CK_F_PR_RFO
+#define CK_F_PR_RFO
+CK_CC_INLINE static void
+ck_pr_rfo(const void *m)
+{
+
+ (void)m;
+ return;
+}
+#endif /* CK_F_PR_RFO */
+
+#define CK_PR_STORE_SAFE(DST, VAL, TYPE) \
+ ck_pr_md_store_##TYPE( \
+ ((void)sizeof(*(DST) = (VAL)), (DST)), \
+ (VAL))
+
+#define ck_pr_store_ptr(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), ptr)
+#define ck_pr_store_char(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), char)
+#ifndef CK_PR_DISABLE_DOUBLE
+#define ck_pr_store_double(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), double)
+#endif
+#define ck_pr_store_uint(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), uint)
+#define ck_pr_store_int(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), int)
+#define ck_pr_store_32(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 32)
+#define ck_pr_store_16(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 16)
+#define ck_pr_store_8(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 8)
+
+#define ck_pr_store_ptr_unsafe(DST, VAL) ck_pr_md_store_ptr((DST), (VAL))
+
+#ifdef CK_F_PR_LOAD_64
+#define ck_pr_store_64(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 64)
+#endif /* CK_F_PR_LOAD_64 */
+
+#define CK_PR_LOAD_PTR_SAFE(SRC) (CK_CC_TYPEOF(*(SRC), (void *)))ck_pr_md_load_ptr((SRC))
+#define ck_pr_load_ptr(SRC) CK_PR_LOAD_PTR_SAFE((SRC))
+
+#define CK_PR_LOAD_SAFE(SRC, TYPE) ck_pr_md_load_##TYPE((SRC))
+#define ck_pr_load_char(SRC) CK_PR_LOAD_SAFE((SRC), char)
+#ifndef CK_PR_DISABLE_DOUBLE
+#define ck_pr_load_double(SRC) CK_PR_LOAD_SAFE((SRC), double)
+#endif
+#define ck_pr_load_uint(SRC) CK_PR_LOAD_SAFE((SRC), uint)
+#define ck_pr_load_int(SRC) CK_PR_LOAD_SAFE((SRC), int)
+#define ck_pr_load_32(SRC) CK_PR_LOAD_SAFE((SRC), 32)
+#define ck_pr_load_16(SRC) CK_PR_LOAD_SAFE((SRC), 16)
+#define ck_pr_load_8(SRC) CK_PR_LOAD_SAFE((SRC), 8)
+
+#ifdef CK_F_PR_LOAD_64
+#define ck_pr_load_64(SRC) CK_PR_LOAD_SAFE((SRC), 64)
+#endif /* CK_F_PR_LOAD_64 */
+
+#define CK_PR_BIN(K, S, M, T, P, C) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T value) \
+ { \
+ T previous; \
+ C punt; \
+ punt = ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous P value), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return; \
+ }
+
+#define CK_PR_BIN_S(K, S, T, P) CK_PR_BIN(K, S, T, T, P, T)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_CHAR
+CK_PR_BIN_S(add, char, char, +)
+#endif /* CK_F_PR_ADD_CHAR */
+
+#ifndef CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_CHAR
+CK_PR_BIN_S(sub, char, char, -)
+#endif /* CK_F_PR_SUB_CHAR */
+
+#ifndef CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_CHAR
+CK_PR_BIN_S(and, char, char, &)
+#endif /* CK_F_PR_AND_CHAR */
+
+#ifndef CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_CHAR
+CK_PR_BIN_S(xor, char, char, ^)
+#endif /* CK_F_PR_XOR_CHAR */
+
+#ifndef CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_CHAR
+CK_PR_BIN_S(or, char, char, |)
+#endif /* CK_F_PR_OR_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_INT
+CK_PR_BIN_S(add, int, int, +)
+#endif /* CK_F_PR_ADD_INT */
+
+#ifndef CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_INT
+CK_PR_BIN_S(sub, int, int, -)
+#endif /* CK_F_PR_SUB_INT */
+
+#ifndef CK_F_PR_AND_INT
+#define CK_F_PR_AND_INT
+CK_PR_BIN_S(and, int, int, &)
+#endif /* CK_F_PR_AND_INT */
+
+#ifndef CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_INT
+CK_PR_BIN_S(xor, int, int, ^)
+#endif /* CK_F_PR_XOR_INT */
+
+#ifndef CK_F_PR_OR_INT
+#define CK_F_PR_OR_INT
+CK_PR_BIN_S(or, int, int, |)
+#endif /* CK_F_PR_OR_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_ADD_DOUBLE
+#define CK_F_PR_ADD_DOUBLE
+CK_PR_BIN_S(add, double, double, +)
+#endif /* CK_F_PR_ADD_DOUBLE */
+
+#ifndef CK_F_PR_SUB_DOUBLE
+#define CK_F_PR_SUB_DOUBLE
+CK_PR_BIN_S(sub, double, double, -)
+#endif /* CK_F_PR_SUB_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_ADD_UINT
+#define CK_F_PR_ADD_UINT
+CK_PR_BIN_S(add, uint, unsigned int, +)
+#endif /* CK_F_PR_ADD_UINT */
+
+#ifndef CK_F_PR_SUB_UINT
+#define CK_F_PR_SUB_UINT
+CK_PR_BIN_S(sub, uint, unsigned int, -)
+#endif /* CK_F_PR_SUB_UINT */
+
+#ifndef CK_F_PR_AND_UINT
+#define CK_F_PR_AND_UINT
+CK_PR_BIN_S(and, uint, unsigned int, &)
+#endif /* CK_F_PR_AND_UINT */
+
+#ifndef CK_F_PR_XOR_UINT
+#define CK_F_PR_XOR_UINT
+CK_PR_BIN_S(xor, uint, unsigned int, ^)
+#endif /* CK_F_PR_XOR_UINT */
+
+#ifndef CK_F_PR_OR_UINT
+#define CK_F_PR_OR_UINT
+CK_PR_BIN_S(or, uint, unsigned int, |)
+#endif /* CK_F_PR_OR_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_PTR
+CK_PR_BIN(add, ptr, void, uintptr_t, +, void *)
+#endif /* CK_F_PR_ADD_PTR */
+
+#ifndef CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_PTR
+CK_PR_BIN(sub, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_SUB_PTR */
+
+#ifndef CK_F_PR_AND_PTR
+#define CK_F_PR_AND_PTR
+CK_PR_BIN(and, ptr, void, uintptr_t, &, void *)
+#endif /* CK_F_PR_AND_PTR */
+
+#ifndef CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_PTR
+CK_PR_BIN(xor, ptr, void, uintptr_t, ^, void *)
+#endif /* CK_F_PR_XOR_PTR */
+
+#ifndef CK_F_PR_OR_PTR
+#define CK_F_PR_OR_PTR
+CK_PR_BIN(or, ptr, void, uintptr_t, |, void *)
+#endif /* CK_F_PR_OR_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_ADD_64
+#define CK_F_PR_ADD_64
+CK_PR_BIN_S(add, 64, uint64_t, +)
+#endif /* CK_F_PR_ADD_64 */
+
+#ifndef CK_F_PR_SUB_64
+#define CK_F_PR_SUB_64
+CK_PR_BIN_S(sub, 64, uint64_t, -)
+#endif /* CK_F_PR_SUB_64 */
+
+#ifndef CK_F_PR_AND_64
+#define CK_F_PR_AND_64
+CK_PR_BIN_S(and, 64, uint64_t, &)
+#endif /* CK_F_PR_AND_64 */
+
+#ifndef CK_F_PR_XOR_64
+#define CK_F_PR_XOR_64
+CK_PR_BIN_S(xor, 64, uint64_t, ^)
+#endif /* CK_F_PR_XOR_64 */
+
+#ifndef CK_F_PR_OR_64
+#define CK_F_PR_OR_64
+CK_PR_BIN_S(or, 64, uint64_t, |)
+#endif /* CK_F_PR_OR_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_ADD_32
+#define CK_F_PR_ADD_32
+CK_PR_BIN_S(add, 32, uint32_t, +)
+#endif /* CK_F_PR_ADD_32 */
+
+#ifndef CK_F_PR_SUB_32
+#define CK_F_PR_SUB_32
+CK_PR_BIN_S(sub, 32, uint32_t, -)
+#endif /* CK_F_PR_SUB_32 */
+
+#ifndef CK_F_PR_AND_32
+#define CK_F_PR_AND_32
+CK_PR_BIN_S(and, 32, uint32_t, &)
+#endif /* CK_F_PR_AND_32 */
+
+#ifndef CK_F_PR_XOR_32
+#define CK_F_PR_XOR_32
+CK_PR_BIN_S(xor, 32, uint32_t, ^)
+#endif /* CK_F_PR_XOR_32 */
+
+#ifndef CK_F_PR_OR_32
+#define CK_F_PR_OR_32
+CK_PR_BIN_S(or, 32, uint32_t, |)
+#endif /* CK_F_PR_OR_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_ADD_16
+#define CK_F_PR_ADD_16
+CK_PR_BIN_S(add, 16, uint16_t, +)
+#endif /* CK_F_PR_ADD_16 */
+
+#ifndef CK_F_PR_SUB_16
+#define CK_F_PR_SUB_16
+CK_PR_BIN_S(sub, 16, uint16_t, -)
+#endif /* CK_F_PR_SUB_16 */
+
+#ifndef CK_F_PR_AND_16
+#define CK_F_PR_AND_16
+CK_PR_BIN_S(and, 16, uint16_t, &)
+#endif /* CK_F_PR_AND_16 */
+
+#ifndef CK_F_PR_XOR_16
+#define CK_F_PR_XOR_16
+CK_PR_BIN_S(xor, 16, uint16_t, ^)
+#endif /* CK_F_PR_XOR_16 */
+
+#ifndef CK_F_PR_OR_16
+#define CK_F_PR_OR_16
+CK_PR_BIN_S(or, 16, uint16_t, |)
+#endif /* CK_F_PR_OR_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_ADD_8
+#define CK_F_PR_ADD_8
+CK_PR_BIN_S(add, 8, uint8_t, +)
+#endif /* CK_F_PR_ADD_8 */
+
+#ifndef CK_F_PR_SUB_8
+#define CK_F_PR_SUB_8
+CK_PR_BIN_S(sub, 8, uint8_t, -)
+#endif /* CK_F_PR_SUB_8 */
+
+#ifndef CK_F_PR_AND_8
+#define CK_F_PR_AND_8
+CK_PR_BIN_S(and, 8, uint8_t, &)
+#endif /* CK_F_PR_AND_8 */
+
+#ifndef CK_F_PR_XOR_8
+#define CK_F_PR_XOR_8
+CK_PR_BIN_S(xor, 8, uint8_t, ^)
+#endif /* CK_F_PR_XOR_8 */
+
+#ifndef CK_F_PR_OR_8
+#define CK_F_PR_OR_8
+CK_PR_BIN_S(or, 8, uint8_t, |)
+#endif /* CK_F_PR_OR_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_BIN_S
+#undef CK_PR_BIN
+
+#define CK_PR_BTX(K, S, M, T, P, C, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(M *target, unsigned int offset) \
+ { \
+ T previous; \
+ C punt; \
+ punt = ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, (C)previous, \
+ (C)(previous P (R ((T)1 << offset))), &previous) == false) \
+ ck_pr_stall(); \
+ return ((previous >> offset) & 1); \
+ }
+
+#define CK_PR_BTX_S(K, S, T, P, R) CK_PR_BTX(K, S, T, T, P, T, R)
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_INT
+CK_PR_BTX_S(btc, int, int, ^,)
+#endif /* CK_F_PR_BTC_INT */
+
+#ifndef CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_INT
+CK_PR_BTX_S(btr, int, int, &, ~)
+#endif /* CK_F_PR_BTR_INT */
+
+#ifndef CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_INT
+CK_PR_BTX_S(bts, int, int, |,)
+#endif /* CK_F_PR_BTS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_BTC_UINT
+#define CK_F_PR_BTC_UINT
+CK_PR_BTX_S(btc, uint, unsigned int, ^,)
+#endif /* CK_F_PR_BTC_UINT */
+
+#ifndef CK_F_PR_BTR_UINT
+#define CK_F_PR_BTR_UINT
+CK_PR_BTX_S(btr, uint, unsigned int, &, ~)
+#endif /* CK_F_PR_BTR_UINT */
+
+#ifndef CK_F_PR_BTS_UINT
+#define CK_F_PR_BTS_UINT
+CK_PR_BTX_S(bts, uint, unsigned int, |,)
+#endif /* CK_F_PR_BTS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_PTR
+CK_PR_BTX(btc, ptr, void, uintptr_t, ^, void *,)
+#endif /* CK_F_PR_BTC_PTR */
+
+#ifndef CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_PTR
+CK_PR_BTX(btr, ptr, void, uintptr_t, &, void *, ~)
+#endif /* CK_F_PR_BTR_PTR */
+
+#ifndef CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_PTR
+CK_PR_BTX(bts, ptr, void, uintptr_t, |, void *,)
+#endif /* CK_F_PR_BTS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_BTC_64
+#define CK_F_PR_BTC_64
+CK_PR_BTX_S(btc, 64, uint64_t, ^,)
+#endif /* CK_F_PR_BTC_64 */
+
+#ifndef CK_F_PR_BTR_64
+#define CK_F_PR_BTR_64
+CK_PR_BTX_S(btr, 64, uint64_t, &, ~)
+#endif /* CK_F_PR_BTR_64 */
+
+#ifndef CK_F_PR_BTS_64
+#define CK_F_PR_BTS_64
+CK_PR_BTX_S(bts, 64, uint64_t, |,)
+#endif /* CK_F_PR_BTS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_BTC_32
+#define CK_F_PR_BTC_32
+CK_PR_BTX_S(btc, 32, uint32_t, ^,)
+#endif /* CK_F_PR_BTC_32 */
+
+#ifndef CK_F_PR_BTR_32
+#define CK_F_PR_BTR_32
+CK_PR_BTX_S(btr, 32, uint32_t, &, ~)
+#endif /* CK_F_PR_BTR_32 */
+
+#ifndef CK_F_PR_BTS_32
+#define CK_F_PR_BTS_32
+CK_PR_BTX_S(bts, 32, uint32_t, |,)
+#endif /* CK_F_PR_BTS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_BTC_16
+#define CK_F_PR_BTC_16
+CK_PR_BTX_S(btc, 16, uint16_t, ^,)
+#endif /* CK_F_PR_BTC_16 */
+
+#ifndef CK_F_PR_BTR_16
+#define CK_F_PR_BTR_16
+CK_PR_BTX_S(btr, 16, uint16_t, &, ~)
+#endif /* CK_F_PR_BTR_16 */
+
+#ifndef CK_F_PR_BTS_16
+#define CK_F_PR_BTS_16
+CK_PR_BTX_S(bts, 16, uint16_t, |,)
+#endif /* CK_F_PR_BTS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#undef CK_PR_BTX_S
+#undef CK_PR_BTX
+
+#define CK_PR_UNARY(K, X, S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target) \
+ { \
+ ck_pr_##X##_##S(target, (T)1); \
+ return; \
+ }
+
+#define CK_PR_UNARY_Z(K, S, M, T, P, C, Z) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(M *target, bool *zero) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous P 1), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ *zero = previous == (T)Z; \
+ return; \
+ }
+
+#define CK_PR_UNARY_S(K, X, S, M) CK_PR_UNARY(K, X, S, M, M)
+#define CK_PR_UNARY_Z_S(K, S, M, P, Z) CK_PR_UNARY_Z(K, S, M, M, P, M, Z)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR
+CK_PR_UNARY_S(inc, add, char, char)
+#endif /* CK_F_PR_INC_CHAR */
+
+#ifndef CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_CHAR_ZERO
+CK_PR_UNARY_Z_S(inc, char, char, +, -1)
+#endif /* CK_F_PR_INC_CHAR_ZERO */
+
+#ifndef CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR
+CK_PR_UNARY_S(dec, sub, char, char)
+#endif /* CK_F_PR_DEC_CHAR */
+
+#ifndef CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_CHAR_ZERO
+CK_PR_UNARY_Z_S(dec, char, char, -, 1)
+#endif /* CK_F_PR_DEC_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT
+CK_PR_UNARY_S(inc, add, int, int)
+#endif /* CK_F_PR_INC_INT */
+
+#ifndef CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_INT_ZERO
+CK_PR_UNARY_Z_S(inc, int, int, +, -1)
+#endif /* CK_F_PR_INC_INT_ZERO */
+
+#ifndef CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT
+CK_PR_UNARY_S(dec, sub, int, int)
+#endif /* CK_F_PR_DEC_INT */
+
+#ifndef CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_INT_ZERO
+CK_PR_UNARY_Z_S(dec, int, int, -, 1)
+#endif /* CK_F_PR_DEC_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_INC_DOUBLE
+#define CK_F_PR_INC_DOUBLE
+CK_PR_UNARY_S(inc, add, double, double)
+#endif /* CK_F_PR_INC_DOUBLE */
+
+#ifndef CK_F_PR_DEC_DOUBLE
+#define CK_F_PR_DEC_DOUBLE
+CK_PR_UNARY_S(dec, sub, double, double)
+#endif /* CK_F_PR_DEC_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT
+CK_PR_UNARY_S(inc, add, uint, unsigned int)
+#endif /* CK_F_PR_INC_UINT */
+
+#ifndef CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_INC_UINT_ZERO
+CK_PR_UNARY_Z_S(inc, uint, unsigned int, +, UINT_MAX)
+#endif /* CK_F_PR_INC_UINT_ZERO */
+
+#ifndef CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT
+CK_PR_UNARY_S(dec, sub, uint, unsigned int)
+#endif /* CK_F_PR_DEC_UINT */
+
+#ifndef CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_DEC_UINT_ZERO
+CK_PR_UNARY_Z_S(dec, uint, unsigned int, -, 1)
+#endif /* CK_F_PR_DEC_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR
+CK_PR_UNARY(inc, add, ptr, void, uintptr_t)
+#endif /* CK_F_PR_INC_PTR */
+
+#ifndef CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_PTR_ZERO
+CK_PR_UNARY_Z(inc, ptr, void, uintptr_t, +, void *, UINT_MAX)
+#endif /* CK_F_PR_INC_PTR_ZERO */
+
+#ifndef CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR
+CK_PR_UNARY(dec, sub, ptr, void, uintptr_t)
+#endif /* CK_F_PR_DEC_PTR */
+
+#ifndef CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_PTR_ZERO
+CK_PR_UNARY_Z(dec, ptr, void, uintptr_t, -, void *, 1)
+#endif /* CK_F_PR_DEC_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_INC_64
+#define CK_F_PR_INC_64
+CK_PR_UNARY_S(inc, add, 64, uint64_t)
+#endif /* CK_F_PR_INC_64 */
+
+#ifndef CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_64_ZERO
+CK_PR_UNARY_Z_S(inc, 64, uint64_t, +, UINT64_MAX)
+#endif /* CK_F_PR_INC_64_ZERO */
+
+#ifndef CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64
+CK_PR_UNARY_S(dec, sub, 64, uint64_t)
+#endif /* CK_F_PR_DEC_64 */
+
+#ifndef CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_64_ZERO
+CK_PR_UNARY_Z_S(dec, 64, uint64_t, -, 1)
+#endif /* CK_F_PR_DEC_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_INC_32
+#define CK_F_PR_INC_32
+CK_PR_UNARY_S(inc, add, 32, uint32_t)
+#endif /* CK_F_PR_INC_32 */
+
+#ifndef CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_32_ZERO
+CK_PR_UNARY_Z_S(inc, 32, uint32_t, +, UINT32_MAX)
+#endif /* CK_F_PR_INC_32_ZERO */
+
+#ifndef CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32
+CK_PR_UNARY_S(dec, sub, 32, uint32_t)
+#endif /* CK_F_PR_DEC_32 */
+
+#ifndef CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_32_ZERO
+CK_PR_UNARY_Z_S(dec, 32, uint32_t, -, 1)
+#endif /* CK_F_PR_DEC_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_INC_16
+#define CK_F_PR_INC_16
+CK_PR_UNARY_S(inc, add, 16, uint16_t)
+#endif /* CK_F_PR_INC_16 */
+
+#ifndef CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_16_ZERO
+CK_PR_UNARY_Z_S(inc, 16, uint16_t, +, UINT16_MAX)
+#endif /* CK_F_PR_INC_16_ZERO */
+
+#ifndef CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16
+CK_PR_UNARY_S(dec, sub, 16, uint16_t)
+#endif /* CK_F_PR_DEC_16 */
+
+#ifndef CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_16_ZERO
+CK_PR_UNARY_Z_S(dec, 16, uint16_t, -, 1)
+#endif /* CK_F_PR_DEC_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_INC_8
+#define CK_F_PR_INC_8
+CK_PR_UNARY_S(inc, add, 8, uint8_t)
+#endif /* CK_F_PR_INC_8 */
+
+#ifndef CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_8_ZERO
+CK_PR_UNARY_Z_S(inc, 8, uint8_t, +, UINT8_MAX)
+#endif /* CK_F_PR_INC_8_ZERO */
+
+#ifndef CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8
+CK_PR_UNARY_S(dec, sub, 8, uint8_t)
+#endif /* CK_F_PR_DEC_8 */
+
+#ifndef CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_8_ZERO
+CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1)
+#endif /* CK_F_PR_DEC_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_UNARY_Z_S
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_Z
+#undef CK_PR_UNARY
+
+#define CK_PR_N(K, S, M, T, P, C) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(P previous), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return; \
+ }
+
+#define CK_PR_N_Z(S, M, T, C) \
+ CK_CC_INLINE static void \
+ ck_pr_neg_##S##_zero(M *target, bool *zero) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(-previous), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ *zero = previous == 0; \
+ return; \
+ }
+
+#define CK_PR_N_S(K, S, M, P) CK_PR_N(K, S, M, M, P, M)
+#define CK_PR_N_Z_S(S, M) CK_PR_N_Z(S, M, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_CHAR
+CK_PR_N_S(not, char, char, ~)
+#endif /* CK_F_PR_NOT_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR
+CK_PR_N_S(neg, char, char, -)
+#endif /* CK_F_PR_NEG_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_CHAR_ZERO
+CK_PR_N_Z_S(char, char)
+#endif /* CK_F_PR_NEG_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_INT
+CK_PR_N_S(not, int, int, ~)
+#endif /* CK_F_PR_NOT_INT */
+
+#ifndef CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT
+CK_PR_N_S(neg, int, int, -)
+#endif /* CK_F_PR_NEG_INT */
+
+#ifndef CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_INT_ZERO
+CK_PR_N_Z_S(int, int)
+#endif /* CK_F_PR_NEG_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_NEG_DOUBLE
+#define CK_F_PR_NEG_DOUBLE
+CK_PR_N_S(neg, double, double, -)
+#endif /* CK_F_PR_NEG_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_NOT_UINT
+#define CK_F_PR_NOT_UINT
+CK_PR_N_S(not, uint, unsigned int, ~)
+#endif /* CK_F_PR_NOT_UINT */
+
+#ifndef CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT
+CK_PR_N_S(neg, uint, unsigned int, -)
+#endif /* CK_F_PR_NEG_UINT */
+
+#ifndef CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NEG_UINT_ZERO
+CK_PR_N_Z_S(uint, unsigned int)
+#endif /* CK_F_PR_NEG_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_PTR
+CK_PR_N(not, ptr, void, uintptr_t, ~, void *)
+#endif /* CK_F_PR_NOT_PTR */
+
+#ifndef CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR
+CK_PR_N(neg, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_NEG_PTR */
+
+#ifndef CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_PTR_ZERO
+CK_PR_N_Z(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_NEG_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_NOT_64
+#define CK_F_PR_NOT_64
+CK_PR_N_S(not, 64, uint64_t, ~)
+#endif /* CK_F_PR_NOT_64 */
+
+#ifndef CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64
+CK_PR_N_S(neg, 64, uint64_t, -)
+#endif /* CK_F_PR_NEG_64 */
+
+#ifndef CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_64_ZERO
+CK_PR_N_Z_S(64, uint64_t)
+#endif /* CK_F_PR_NEG_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_NOT_32
+#define CK_F_PR_NOT_32
+CK_PR_N_S(not, 32, uint32_t, ~)
+#endif /* CK_F_PR_NOT_32 */
+
+#ifndef CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32
+CK_PR_N_S(neg, 32, uint32_t, -)
+#endif /* CK_F_PR_NEG_32 */
+
+#ifndef CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_32_ZERO
+CK_PR_N_Z_S(32, uint32_t)
+#endif /* CK_F_PR_NEG_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_NOT_16
+#define CK_F_PR_NOT_16
+CK_PR_N_S(not, 16, uint16_t, ~)
+#endif /* CK_F_PR_NOT_16 */
+
+#ifndef CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16
+CK_PR_N_S(neg, 16, uint16_t, -)
+#endif /* CK_F_PR_NEG_16 */
+
+#ifndef CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_16_ZERO
+CK_PR_N_Z_S(16, uint16_t)
+#endif /* CK_F_PR_NEG_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_NOT_8
+#define CK_F_PR_NOT_8
+CK_PR_N_S(not, 8, uint8_t, ~)
+#endif /* CK_F_PR_NOT_8 */
+
+#ifndef CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8
+CK_PR_N_S(neg, 8, uint8_t, -)
+#endif /* CK_F_PR_NEG_8 */
+
+#ifndef CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_8_ZERO
+CK_PR_N_Z_S(8, uint8_t)
+#endif /* CK_F_PR_NEG_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_N_Z_S
+#undef CK_PR_N_S
+#undef CK_PR_N_Z
+#undef CK_PR_N
+
+#define CK_PR_FAA(S, M, T, C) \
+ CK_CC_INLINE static C \
+ ck_pr_faa_##S(M *target, T delta) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous + delta), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return ((C)previous); \
+ }
+
+#define CK_PR_FAS(S, M, C) \
+ CK_CC_INLINE static C \
+ ck_pr_fas_##S(M *target, C update) \
+ { \
+ C previous; \
+ previous = ck_pr_md_load_##S(target); \
+ while (ck_pr_cas_##S##_value(target, \
+ previous, \
+ update, \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return (previous); \
+ }
+
+#define CK_PR_FAA_S(S, M) CK_PR_FAA(S, M, M, M)
+#define CK_PR_FAS_S(S, M) CK_PR_FAS(S, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_CHAR
+CK_PR_FAA_S(char, char)
+#endif /* CK_F_PR_FAA_CHAR */
+
+#ifndef CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_CHAR
+CK_PR_FAS_S(char, char)
+#endif /* CK_F_PR_FAS_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_INT
+CK_PR_FAA_S(int, int)
+#endif /* CK_F_PR_FAA_INT */
+
+#ifndef CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_INT
+CK_PR_FAS_S(int, int)
+#endif /* CK_F_PR_FAS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_FAA_DOUBLE
+#define CK_F_PR_FAA_DOUBLE
+CK_PR_FAA_S(double, double)
+#endif /* CK_F_PR_FAA_DOUBLE */
+
+#ifndef CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FAS_DOUBLE
+CK_PR_FAS_S(double, double)
+#endif /* CK_F_PR_FAS_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_FAA_UINT
+#define CK_F_PR_FAA_UINT
+CK_PR_FAA_S(uint, unsigned int)
+#endif /* CK_F_PR_FAA_UINT */
+
+#ifndef CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_UINT
+CK_PR_FAS_S(uint, unsigned int)
+#endif /* CK_F_PR_FAS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_PTR
+CK_PR_FAA(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_FAA_PTR */
+
+#ifndef CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_PTR
+CK_PR_FAS(ptr, void, void *)
+#endif /* CK_F_PR_FAS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_FAA_64
+#define CK_F_PR_FAA_64
+CK_PR_FAA_S(64, uint64_t)
+#endif /* CK_F_PR_FAA_64 */
+
+#ifndef CK_F_PR_FAS_64
+#define CK_F_PR_FAS_64
+CK_PR_FAS_S(64, uint64_t)
+#endif /* CK_F_PR_FAS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_FAA_32
+#define CK_F_PR_FAA_32
+CK_PR_FAA_S(32, uint32_t)
+#endif /* CK_F_PR_FAA_32 */
+
+#ifndef CK_F_PR_FAS_32
+#define CK_F_PR_FAS_32
+CK_PR_FAS_S(32, uint32_t)
+#endif /* CK_F_PR_FAS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_FAA_16
+#define CK_F_PR_FAA_16
+CK_PR_FAA_S(16, uint16_t)
+#endif /* CK_F_PR_FAA_16 */
+
+#ifndef CK_F_PR_FAS_16
+#define CK_F_PR_FAS_16
+CK_PR_FAS_S(16, uint16_t)
+#endif /* CK_F_PR_FAS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_FAA_8
+#define CK_F_PR_FAA_8
+CK_PR_FAA_S(8, uint8_t)
+#endif /* CK_F_PR_FAA_8 */
+
+#ifndef CK_F_PR_FAS_8
+#define CK_F_PR_FAS_8
+CK_PR_FAS_S(8, uint8_t)
+#endif /* CK_F_PR_FAS_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAS_S
+#undef CK_PR_FAA
+#undef CK_PR_FAS
+
+#endif /* CK_PR_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_queue.h b/freebsd/sys/contrib/ck/include/ck_queue.h
new file mode 100644
index 00000000..faf96a17
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_queue.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
+ */
+
+#ifndef CK_QUEUE_H
+#define CK_QUEUE_H
+
+#include <ck_pr.h>
+
+/*
+ * This file defines three types of data structures: singly-linked lists,
+ * singly-linked tail queues and lists.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
+ * modifications to the list. Writers to these lists must, on the other hand,
+ * implement writer-side synchronization. The _SWAP operations are not atomic.
+ * This facility is currently unsupported on architectures such as the Alpha
+ * which require load-depend memory fences.
+ *
+ * CK_SLIST CK_LIST CK_STAILQ
+ * _HEAD + + +
+ * _HEAD_INITIALIZER + + +
+ * _ENTRY + + +
+ * _INIT + + +
+ * _EMPTY + + +
+ * _FIRST + + +
+ * _NEXT + + +
+ * _FOREACH + + +
+ * _FOREACH_SAFE + + +
+ * _INSERT_HEAD + + +
+ * _INSERT_BEFORE - + -
+ * _INSERT_AFTER + + +
+ * _INSERT_TAIL - - +
+ * _REMOVE_AFTER + - +
+ * _REMOVE_HEAD + - +
+ * _REMOVE + + +
+ * _SWAP + + +
+ * _MOVE + + +
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define CK_SLIST_HEAD(name, type) \
+struct name { \
+ struct type *cslh_first; /* first element */ \
+}
+
+#define CK_SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define CK_SLIST_ENTRY(type) \
+struct { \
+ struct type *csle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define CK_SLIST_EMPTY(head) \
+ (ck_pr_load_ptr(&(head)->cslh_first) == NULL)
+
+#define CK_SLIST_FIRST(head) \
+ (ck_pr_load_ptr(&(head)->cslh_first))
+
+#define CK_SLIST_NEXT(elm, field) \
+ ck_pr_load_ptr(&((elm)->field.csle_next))
+
+#define CK_SLIST_FOREACH(var, head, field) \
+ for ((var) = CK_SLIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_SLIST_NEXT((var), field))
+
+#define CK_SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_SLIST_FIRST(head); \
+ (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1);\
+ (var) = (tvar))
+
+#define CK_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &(head)->cslh_first; \
+ ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1); \
+ (varp) = &(var)->field.csle_next)
+
+#define CK_SLIST_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->cslh_first, NULL); \
+ ck_pr_fence_store(); \
+} while (0)
+
+#define CK_SLIST_INSERT_AFTER(a, b, field) do { \
+ (b)->field.csle_next = (a)->field.csle_next; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(a)->field.csle_next, b); \
+} while (0)
+
+#define CK_SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.csle_next = (head)->cslh_first; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(head)->cslh_first, elm); \
+} while (0)
+
+#define CK_SLIST_REMOVE_AFTER(elm, field) do { \
+ ck_pr_store_ptr(&(elm)->field.csle_next, \
+ (elm)->field.csle_next->field.csle_next); \
+} while (0)
+
+#define CK_SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->cslh_first == (elm)) { \
+ CK_SLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->cslh_first; \
+ while (curelm->field.csle_next != (elm)) \
+ curelm = curelm->field.csle_next; \
+ CK_SLIST_REMOVE_AFTER(curelm, field); \
+ } \
+} while (0)
+
+#define CK_SLIST_REMOVE_HEAD(head, field) do { \
+ ck_pr_store_ptr(&(head)->cslh_first, \
+ (head)->cslh_first->field.csle_next); \
+} while (0)
+
+#define CK_SLIST_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->cslh_first, (head2)->cslh_first); \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_SLIST_SWAP(a, b, type) do { \
+ struct type *swap_first = (a)->cslh_first; \
+ (a)->cslh_first = (b)->cslh_first; \
+ (b)->cslh_first = swap_first; \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define CK_STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *cstqh_first;/* first element */ \
+ struct type **cstqh_last;/* addr of last next element */ \
+}
+
+#define CK_STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).cstqh_first }
+
+#define CK_STAILQ_ENTRY(type) \
+struct { \
+ struct type *cstqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define CK_STAILQ_CONCAT(head1, head2) do { \
+ if ((head2)->cstqh_first != NULL) { \
+ ck_pr_store_ptr((head1)->cstqh_last, (head2)->cstqh_first); \
+ ck_pr_fence_store(); \
+ (head1)->cstqh_last = (head2)->cstqh_last; \
+ CK_STAILQ_INIT((head2)); \
+ } \
+} while (0)
+
+#define CK_STAILQ_EMPTY(head) (ck_pr_load_ptr(&(head)->cstqh_first) == NULL)
+
+#define CK_STAILQ_FIRST(head) (ck_pr_load_ptr(&(head)->cstqh_first))
+
+#define CK_STAILQ_FOREACH(var, head, field) \
+ for((var) = CK_STAILQ_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_STAILQ_NEXT((var), field))
+
+#define CK_STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_STAILQ_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), (tvar) = \
+ CK_STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define CK_STAILQ_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->cstqh_first, NULL); \
+ ck_pr_fence_store(); \
+ (head)->cstqh_last = &(head)->cstqh_first; \
+} while (0)
+
+#define CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ (elm)->field.cstqe_next = (tqelm)->field.cstqe_next; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(tqelm)->field.cstqe_next, elm); \
+ if ((elm)->field.cstqe_next == NULL) \
+ (head)->cstqh_last = &(elm)->field.cstqe_next; \
+} while (0)
+
+#define CK_STAILQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cstqe_next = (head)->cstqh_first; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(head)->cstqh_first, elm); \
+ if ((elm)->field.cstqe_next == NULL) \
+ (head)->cstqh_last = &(elm)->field.cstqe_next; \
+} while (0)
+
+#define CK_STAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cstqe_next = NULL; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr((head)->cstqh_last, (elm)); \
+ (head)->cstqh_last = &(elm)->field.cstqe_next; \
+} while (0)
+
+#define CK_STAILQ_NEXT(elm, field) \
+ (ck_pr_load_ptr(&(elm)->field.cstqe_next))
+
+#define CK_STAILQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->cstqh_first == (elm)) { \
+ CK_STAILQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->cstqh_first; \
+ while (curelm->field.cstqe_next != (elm)) \
+ curelm = curelm->field.cstqe_next; \
+ CK_STAILQ_REMOVE_AFTER(head, curelm, field); \
+ } \
+} while (0)
+
+#define CK_STAILQ_REMOVE_AFTER(head, elm, field) do { \
+ ck_pr_store_ptr(&(elm)->field.cstqe_next, \
+ (elm)->field.cstqe_next->field.cstqe_next); \
+ if ((elm)->field.cstqe_next == NULL) \
+ (head)->cstqh_last = &(elm)->field.cstqe_next; \
+} while (0)
+
+#define CK_STAILQ_REMOVE_HEAD(head, field) do { \
+ ck_pr_store_ptr(&(head)->cstqh_first, \
+ (head)->cstqh_first->field.cstqe_next); \
+ if ((head)->cstqh_first == NULL) \
+ (head)->cstqh_last = &(head)->cstqh_first; \
+} while (0)
+
+#define CK_STAILQ_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->cstqh_first, (head2)->cstqh_first); \
+ (head1)->cstqh_last = (head2)->cstqh_last; \
+ if ((head2)->cstqh_last == &(head2)->cstqh_first) \
+ (head1)->cstqh_last = &(head1)->cstqh_first; \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_STAILQ_SWAP(head1, head2, type) do { \
+ struct type *swap_first = CK_STAILQ_FIRST(head1); \
+ struct type **swap_last = (head1)->cstqh_last; \
+ CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2); \
+ (head1)->cstqh_last = (head2)->cstqh_last; \
+ CK_STAILQ_FIRST(head2) = swap_first; \
+ (head2)->cstqh_last = swap_last; \
+ if (CK_STAILQ_EMPTY(head1)) \
+ (head1)->cstqh_last = &(head1)->cstqh_first; \
+ if (CK_STAILQ_EMPTY(head2)) \
+ (head2)->cstqh_last = &(head2)->cstqh_first; \
+} while (0)
+
+/*
+ * List declarations.
+ */
+#define CK_LIST_HEAD(name, type) \
+struct name { \
+ struct type *clh_first; /* first element */ \
+}
+
+#define CK_LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define CK_LIST_ENTRY(type) \
+struct { \
+ struct type *cle_next; /* next element */ \
+ struct type **cle_prev; /* address of previous next element */ \
+}
+
+#define CK_LIST_FIRST(head) ck_pr_load_ptr(&(head)->clh_first)
+#define CK_LIST_EMPTY(head) (CK_LIST_FIRST(head) == NULL)
+#define CK_LIST_NEXT(elm, field) ck_pr_load_ptr(&(elm)->field.cle_next)
+
+#define CK_LIST_FOREACH(var, head, field) \
+ for ((var) = CK_LIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_LIST_NEXT((var), field))
+
+#define CK_LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_LIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1);\
+ (var) = (tvar))
+
+#define CK_LIST_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->clh_first, NULL); \
+ ck_pr_fence_store(); \
+} while (0)
+
+#define CK_LIST_INSERT_AFTER(listelm, elm, field) do { \
+ (elm)->field.cle_next = (listelm)->field.cle_next; \
+ (elm)->field.cle_prev = &(listelm)->field.cle_next; \
+ ck_pr_fence_store(); \
+ if ((listelm)->field.cle_next != NULL) \
+ (listelm)->field.cle_next->field.cle_prev = &(elm)->field.cle_next;\
+ ck_pr_store_ptr(&(listelm)->field.cle_next, elm); \
+} while (0)
+
+#define CK_LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.cle_prev = (listelm)->field.cle_prev; \
+ (elm)->field.cle_next = (listelm); \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr((listelm)->field.cle_prev, (elm)); \
+ (listelm)->field.cle_prev = &(elm)->field.cle_next; \
+} while (0)
+
+#define CK_LIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cle_next = (head)->clh_first; \
+ ck_pr_fence_store(); \
+ if ((elm)->field.cle_next != NULL) \
+ (head)->clh_first->field.cle_prev = &(elm)->field.cle_next; \
+ ck_pr_store_ptr(&(head)->clh_first, elm); \
+ (elm)->field.cle_prev = &(head)->clh_first; \
+} while (0)
+
+#define CK_LIST_REMOVE(elm, field) do { \
+ ck_pr_store_ptr((elm)->field.cle_prev, (elm)->field.cle_next); \
+ if ((elm)->field.cle_next != NULL) \
+ (elm)->field.cle_next->field.cle_prev = (elm)->field.cle_prev; \
+} while (0)
+
+#define CK_LIST_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->clh_first, (head2)->clh_first); \
+ if ((head1)->clh_first != NULL) \
+ (head1)->clh_first->field.cle_prev = &(head1)->clh_first; \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_LIST_SWAP(head1, head2, type, field) do { \
+ struct type *swap_tmp = (head1)->clh_first; \
+ (head1)->clh_first = (head2)->clh_first; \
+ (head2)->clh_first = swap_tmp; \
+ if ((swap_tmp = (head1)->clh_first) != NULL) \
+ swap_tmp->field.cle_prev = &(head1)->clh_first; \
+ if ((swap_tmp = (head2)->clh_first) != NULL) \
+ swap_tmp->field.cle_prev = &(head2)->clh_first; \
+} while (0)
+
+#endif /* CK_QUEUE_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_stack.h b/freebsd/sys/contrib/ck/include/ck_stack.h
new file mode 100644
index 00000000..eb2b685f
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_stack.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_STACK_H
+#define CK_STACK_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_stack_entry {
+ struct ck_stack_entry *next;
+};
+typedef struct ck_stack_entry ck_stack_entry_t;
+
+struct ck_stack {
+ struct ck_stack_entry *head;
+ char *generation CK_CC_PACKED;
+} CK_CC_ALIASED;
+typedef struct ck_stack ck_stack_t;
+
+#define CK_STACK_INITIALIZER { NULL, NULL }
+
+#ifndef CK_F_STACK_PUSH_UPMC
+#define CK_F_STACK_PUSH_UPMC
+/*
+ * Stack producer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ stack = ck_pr_load_ptr(&target->head);
+ entry->next = stack;
+ ck_pr_fence_store();
+
+ while (ck_pr_cas_ptr_value(&target->head, stack, entry, &stack) == false) {
+ entry->next = stack;
+ ck_pr_fence_store();
+ }
+
+ return;
+}
+#endif /* CK_F_STACK_PUSH_UPMC */
+
+#ifndef CK_F_STACK_TRYPUSH_UPMC
+#define CK_F_STACK_TRYPUSH_UPMC
+/*
+ * Stack producer operation for multiple unique producers and multiple consumers.
+ * Returns true on success and false on failure.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypush_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ stack = ck_pr_load_ptr(&target->head);
+ entry->next = stack;
+ ck_pr_fence_store();
+
+ return ck_pr_cas_ptr(&target->head, stack, entry);
+}
+#endif /* CK_F_STACK_TRYPUSH_UPMC */
+
+#ifndef CK_F_STACK_POP_UPMC
+#define CK_F_STACK_POP_UPMC
+/*
+ * Stack consumer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_upmc(struct ck_stack *target)
+{
+ struct ck_stack_entry *entry, *next;
+
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return NULL;
+
+ ck_pr_fence_load();
+ next = entry->next;
+ while (ck_pr_cas_ptr_value(&target->head, entry, next, &entry) == false) {
+ if (entry == NULL)
+ break;
+
+ ck_pr_fence_load();
+ next = entry->next;
+ }
+
+ return entry;
+}
+#endif
+
+#ifndef CK_F_STACK_TRYPOP_UPMC
+#define CK_F_STACK_TRYPOP_UPMC
+/*
+ * Stack consumer operation for multiple unique producers and multiple consumers.
+ * Returns true on success and false on failure. The value pointed to by the second
+ * argument is set to a valid ck_stack_entry_t reference if true is returned. If
+ * false is returned, then the value pointed to by the second argument is undefined.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypop_upmc(struct ck_stack *target, struct ck_stack_entry **r)
+{
+ struct ck_stack_entry *entry;
+
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return false;
+
+ ck_pr_fence_load();
+ if (ck_pr_cas_ptr(&target->head, entry, entry->next) == true) {
+ *r = entry;
+ return true;
+ }
+
+ return false;
+}
+#endif /* CK_F_STACK_TRYPOP_UPMC */
+
+#ifndef CK_F_STACK_BATCH_POP_UPMC
+#define CK_F_STACK_BATCH_POP_UPMC
+/*
+ * Pop all items off the stack.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_upmc(struct ck_stack *target)
+{
+ struct ck_stack_entry *entry;
+
+ entry = ck_pr_fas_ptr(&target->head, NULL);
+ ck_pr_fence_load();
+ return entry;
+}
+#endif /* CK_F_STACK_BATCH_POP_UPMC */
+
+#ifndef CK_F_STACK_PUSH_MPMC
+#define CK_F_STACK_PUSH_MPMC
+/*
+ * Stack producer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ ck_stack_push_upmc(target, entry);
+ return;
+}
+#endif /* CK_F_STACK_PUSH_MPMC */
+
+#ifndef CK_F_STACK_TRYPUSH_MPMC
+#define CK_F_STACK_TRYPUSH_MPMC
+/*
+ * Stack producer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ return ck_stack_trypush_upmc(target, entry);
+}
+#endif /* CK_F_STACK_TRYPUSH_MPMC */
+
+#ifdef CK_F_PR_CAS_PTR_2_VALUE
+#ifndef CK_F_STACK_POP_MPMC
+#define CK_F_STACK_POP_MPMC
+/*
+ * Stack consumer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_mpmc(struct ck_stack *target)
+{
+ struct ck_stack original, update;
+
+ original.generation = ck_pr_load_ptr(&target->generation);
+ ck_pr_fence_load();
+ original.head = ck_pr_load_ptr(&target->head);
+ if (original.head == NULL)
+ return NULL;
+
+ /* Order with respect to next pointer. */
+ ck_pr_fence_load();
+
+ update.generation = original.generation + 1;
+ update.head = original.head->next;
+
+ while (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == false) {
+ if (original.head == NULL)
+ return NULL;
+
+ update.generation = original.generation + 1;
+
+ /* Order with respect to next pointer. */
+ ck_pr_fence_load();
+ update.head = original.head->next;
+ }
+
+ return original.head;
+}
+#endif /* CK_F_STACK_POP_MPMC */
+
+#ifndef CK_F_STACK_TRYPOP_MPMC
+#define CK_F_STACK_TRYPOP_MPMC
+CK_CC_INLINE static bool
+ck_stack_trypop_mpmc(struct ck_stack *target, struct ck_stack_entry **r)
+{
+ struct ck_stack original, update;
+
+ original.generation = ck_pr_load_ptr(&target->generation);
+ ck_pr_fence_load();
+ original.head = ck_pr_load_ptr(&target->head);
+ if (original.head == NULL)
+ return false;
+
+ update.generation = original.generation + 1;
+ ck_pr_fence_load();
+ update.head = original.head->next;
+
+ if (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == true) {
+ *r = original.head;
+ return true;
+ }
+
+ return false;
+}
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#endif /* CK_F_PR_CAS_PTR_2_VALUE */
+
+#ifndef CK_F_STACK_BATCH_POP_MPMC
+#define CK_F_STACK_BATCH_POP_MPMC
+/*
+ * This is equivalent to the UP/MC version as NULL does not need a
+ * generation count.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_mpmc(struct ck_stack *target)
+{
+
+ return ck_stack_batch_pop_upmc(target);
+}
+#endif /* CK_F_STACK_BATCH_POP_MPMC */
+
+#ifndef CK_F_STACK_PUSH_MPNC
+#define CK_F_STACK_PUSH_MPNC
+/*
+ * Stack producer operation safe with no concurrent consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_mpnc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ entry->next = NULL;
+ ck_pr_fence_store_atomic();
+ stack = ck_pr_fas_ptr(&target->head, entry);
+ ck_pr_store_ptr(&entry->next, stack);
+ ck_pr_fence_store();
+
+ return;
+}
+#endif /* CK_F_STACK_PUSH_MPNC */
+
+/*
+ * Stack producer operation for single producer and no concurrent consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_spnc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ entry->next = target->head;
+ target->head = entry;
+ return;
+}
+
+/*
+ * Stack consumer operation for no concurrent producers and single consumer.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_npsc(struct ck_stack *target)
+{
+ struct ck_stack_entry *n;
+
+ if (target->head == NULL)
+ return NULL;
+
+ n = target->head;
+ target->head = n->next;
+
+ return n;
+}
+
+/*
+ * Pop all items off a stack.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_npsc(struct ck_stack *target)
+{
+ struct ck_stack_entry *n;
+
+ n = target->head;
+ target->head = NULL;
+
+ return n;
+}
+
+/*
+ * Stack initialization function. Guarantees initialization across processors.
+ */
+CK_CC_INLINE static void
+ck_stack_init(struct ck_stack *stack)
+{
+
+ stack->head = NULL;
+ stack->generation = NULL;
+ return;
+}
+
+/* Defines a container_of function for ck_stack_entry_t members. */
+#define CK_STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(ck_stack_entry_t, T, M, N)
+
+#define CK_STACK_ISEMPTY(m) ((m)->head == NULL)
+#define CK_STACK_FIRST(s) ((s)->head)
+#define CK_STACK_NEXT(m) ((m)->next)
+#define CK_STACK_FOREACH(stack, entry) \
+ for ((entry) = CK_STACK_FIRST(stack); \
+ (entry) != NULL; \
+ (entry) = CK_STACK_NEXT(entry))
+#define CK_STACK_FOREACH_SAFE(stack, entry, T) \
+ for ((entry) = CK_STACK_FIRST(stack); \
+ (entry) != NULL && ((T) = (entry)->next, 1); \
+ (entry) = (T))
+
+#endif /* CK_STACK_H */
diff --git a/freebsd/sys/contrib/ck/include/ck_stdbool.h b/freebsd/sys/contrib/ck/include/ck_stdbool.h
new file mode 100644
index 00000000..b9a79829
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_stdbool.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/types.h>
+#else
+#include <stdbool.h>
+#endif
diff --git a/freebsd/sys/contrib/ck/include/ck_stddef.h b/freebsd/sys/contrib/ck/include/ck_stddef.h
new file mode 100644
index 00000000..6019ea95
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_stddef.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stddef.h>
+#else
+#include <stddef.h>
+#endif
diff --git a/freebsd/sys/contrib/ck/include/ck_stdint.h b/freebsd/sys/contrib/ck/include/ck_stdint.h
new file mode 100644
index 00000000..8f416a92
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_stdint.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/types.h>
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stdint.h>
+#else
+#include <stdint.h>
+#endif /* __linux__ && __KERNEL__ */
diff --git a/freebsd/sys/contrib/ck/include/ck_string.h b/freebsd/sys/contrib/ck/include/ck_string.h
new file mode 100644
index 00000000..8d2c2525
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/ck_string.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/systm.h>
+#else
+#include <string.h>
+#endif
diff --git a/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h
new file mode 100644
index 00000000..93ecee07
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h
@@ -0,0 +1,167 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_SHORT
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_SHORT
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BARRIER
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_64_2
+#define CK_F_PR_CAS_64_2_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_SHORT
+#define CK_F_PR_CAS_SHORT_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_SHORT
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_SHORT
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_SHORT
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_ATOMIC
+#define CK_F_PR_FENCE_ATOMIC_LOAD
+#define CK_F_PR_FENCE_ATOMIC_STORE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_ATOMIC
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_LOAD_STORE
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STORE_ATOMIC
+#define CK_F_PR_FENCE_STORE_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC
+#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
+#define CK_F_PR_FENCE_STRICT_LOAD_STORE
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
+#define CK_F_PR_FENCE_STRICT_STORE_LOAD
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_SHORT
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_SHORT
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_SHORT
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_SHORT
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_SHORT
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_SHORT
+#define CK_F_PR_XOR_UINT
diff --git a/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr.h
new file mode 100644
index 00000000..e739c4d5
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_H
+#define CK_PR_AARCH64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#define CK_DMB_SY __asm __volatile("dmb ish" : : "r" (0) : "memory")
+#define CK_DMB_LD __asm __volatile("dmb ishld" : : "r" (0) : "memory")
+#define CK_DMB_ST __asm __volatile("dmb ishst" : : "r" (0) : "memory")
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ I; \
+ }
+
+CK_PR_FENCE(atomic, CK_DMB_ST)
+CK_PR_FENCE(atomic_store, CK_DMB_ST)
+CK_PR_FENCE(atomic_load, CK_DMB_SY)
+CK_PR_FENCE(store_atomic, CK_DMB_ST)
+CK_PR_FENCE(load_atomic, CK_DMB_SY)
+CK_PR_FENCE(store, CK_DMB_ST)
+CK_PR_FENCE(store_load, CK_DMB_SY)
+CK_PR_FENCE(load, CK_DMB_LD)
+CK_PR_FENCE(load_store, CK_DMB_SY)
+CK_PR_FENCE(memory, CK_DMB_SY)
+CK_PR_FENCE(acquire, CK_DMB_SY)
+CK_PR_FENCE(release, CK_DMB_SY)
+CK_PR_FENCE(acqrel, CK_DMB_SY)
+CK_PR_FENCE(lock, CK_DMB_SY)
+CK_PR_FENCE(unlock, CK_DMB_SY)
+
+#undef CK_PR_FENCE
+
+#undef CK_DMB_SY
+#undef CK_DMB_LD
+#undef CK_DMB_ST
+
+#define CK_PR_LOAD(S, M, T, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %w0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+#define CK_PR_LOAD_64(S, M, T, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+
+
+CK_PR_LOAD_64(ptr, void, void *, "ldr")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, I)
+#define CK_PR_LOAD_S_64(S, T, I) CK_PR_LOAD_64(S, T, T, I)
+
+CK_PR_LOAD_S_64(64, uint64_t, "ldr")
+CK_PR_LOAD_S(32, uint32_t, "ldr")
+CK_PR_LOAD_S(16, uint16_t, "ldrh")
+CK_PR_LOAD_S(8, uint8_t, "ldrb")
+CK_PR_LOAD_S(uint, unsigned int, "ldr")
+CK_PR_LOAD_S(int, int, "ldr")
+CK_PR_LOAD_S(short, short, "ldrh")
+CK_PR_LOAD_S(char, char, "ldrb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S_64(double, double, "ldr")
+#endif
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD_S_64
+#undef CK_PR_LOAD
+#undef CK_PR_LOAD_64
+
+#define CK_PR_STORE(S, M, T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %w1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+#define CK_PR_STORE_64(S, M, T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE_64(ptr, void, const void *, "str")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, I)
+#define CK_PR_STORE_S_64(S, T, I) CK_PR_STORE_64(S, T, T, I)
+
+CK_PR_STORE_S_64(64, uint64_t, "str")
+CK_PR_STORE_S(32, uint32_t, "str")
+CK_PR_STORE_S(16, uint16_t, "strh")
+CK_PR_STORE_S(8, uint8_t, "strb")
+CK_PR_STORE_S(uint, unsigned int, "str")
+CK_PR_STORE_S(int, int, "str")
+CK_PR_STORE_S(short, short, "strh")
+CK_PR_STORE_S(char, char, "strb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE_S_64(double, double, "str")
+#endif
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE_S_64
+#undef CK_PR_STORE
+#undef CK_PR_STORE_64
+
+#ifdef CK_MD_LSE_ENABLE
+#include "ck_pr_lse.h"
+#else
+#include "ck_pr_llsc.h"
+#endif
+
+/*
+ * ck_pr_neg_*() functions can only be implemented via LL/SC, as there are no
+ * LSE alternatives.
+ */
+#define CK_PR_NEG(N, M, T, W, R) \
+ CK_CC_INLINE static void \
+ ck_pr_neg_##N(M *target) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ "neg %" R "0, %" R "0;" \
+ "stxr" W " %w1, %" R "0, [%2];" \
+ "cbnz %w1, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_NEG(ptr, void, void *, "", "")
+CK_PR_NEG(64, uint64_t, uint64_t, "", "")
+
+#define CK_PR_NEG_S(S, T, W) \
+ CK_PR_NEG(S, T, T, W, "w") \
+
+CK_PR_NEG_S(32, uint32_t, "")
+CK_PR_NEG_S(uint, unsigned int, "")
+CK_PR_NEG_S(int, int, "")
+CK_PR_NEG_S(16, uint16_t, "h")
+CK_PR_NEG_S(8, uint8_t, "b")
+CK_PR_NEG_S(short, short, "h")
+CK_PR_NEG_S(char, char, "b")
+
+#undef CK_PR_NEG_S
+#undef CK_PR_NEG
+
+#endif /* CK_PR_AARCH64_H */
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/arm/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/arm/ck_f_pr.h
new file mode 100644
index 00000000..c508f855
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/arm/ck_f_pr.h
@@ -0,0 +1,162 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_SHORT
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_SHORT
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BARRIER
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#endif
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#endif
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_SHORT
+#define CK_F_PR_CAS_SHORT_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_SHORT
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_SHORT
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_SHORT
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_ATOMIC
+#define CK_F_PR_FENCE_ATOMIC_LOAD
+#define CK_F_PR_FENCE_ATOMIC_STORE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_ATOMIC
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_LOAD_STORE
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STORE_ATOMIC
+#define CK_F_PR_FENCE_STORE_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC
+#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
+#define CK_F_PR_FENCE_STRICT_LOAD_STORE
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
+#define CK_F_PR_FENCE_STRICT_STORE_LOAD
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_SHORT
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_DOUBLE
+#endif
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_SHORT
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_SHORT
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_SHORT
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_DOUBLE
+#endif
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_SHORT
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_SHORT
+#define CK_F_PR_XOR_UINT
diff --git a/freebsd/sys/contrib/ck/include/gcc/arm/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/arm/ck_pr.h
new file mode 100644
index 00000000..b1f36997
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/arm/ck_pr.h
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2013-2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_ARM_H
+#define CK_PR_ARM_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_ISB __asm __volatile("isb" : : "r" (0) : "memory")
+#define CK_DMB __asm __volatile("dmb" : : "r" (0) : "memory")
+#define CK_DSB __asm __volatile("dsb" : : "r" (0) : "memory")
+/* FreeBSD's toolchain doesn't accept dmb st, so use the opcode instead */
+#if defined(__FreeBSD__) && !defined(__rtems__)
+#define CK_DMB_ST __asm __volatile(".word 0xf57ff05e" : : "r" (0) : "memory")
+#else
+#define CK_DMB_ST __asm __volatile("dmb st" : : "r" (0) : "memory")
+#endif /* __FreeBSD__ */
+#else
+/* armv6 doesn't have dsb/dmb/isb, and no way to wait only for stores */
+#define CK_ISB \
+ __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
+#define CK_DSB \
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
+#define CK_DMB \
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
+#define CK_DMB_ST CK_DMB
+#endif
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ I; \
+ }
+
+CK_PR_FENCE(atomic, CK_DMB_ST)
+CK_PR_FENCE(atomic_store, CK_DMB_ST)
+CK_PR_FENCE(atomic_load, CK_DMB_ST)
+CK_PR_FENCE(store_atomic, CK_DMB_ST)
+CK_PR_FENCE(load_atomic, CK_DMB)
+CK_PR_FENCE(store, CK_DMB_ST)
+CK_PR_FENCE(store_load, CK_DMB)
+CK_PR_FENCE(load, CK_DMB)
+CK_PR_FENCE(load_store, CK_DMB)
+CK_PR_FENCE(memory, CK_DMB)
+CK_PR_FENCE(acquire, CK_DMB)
+CK_PR_FENCE(release, CK_DMB)
+CK_PR_FENCE(acqrel, CK_DMB)
+CK_PR_FENCE(lock, CK_DMB)
+CK_PR_FENCE(unlock, CK_DMB)
+
+#undef CK_PR_FENCE
+
+#undef CK_ISB
+#undef CK_DSB
+#undef CK_DMB
+#undef CK_DMB_ST
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint32_t, "ldr")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(32, uint32_t, "ldr")
+CK_PR_LOAD_S(16, uint16_t, "ldrh")
+CK_PR_LOAD_S(8, uint8_t, "ldrb")
+CK_PR_LOAD_S(uint, unsigned int, "ldr")
+CK_PR_LOAD_S(int, int, "ldr")
+CK_PR_LOAD_S(short, short, "ldrh")
+CK_PR_LOAD_S(char, char, "ldrb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+
+#define CK_PR_DOUBLE_LOAD(T, N) \
+CK_CC_INLINE static T \
+ck_pr_md_load_##N(const T *target) \
+{ \
+ register T ret; \
+ \
+ __asm __volatile("ldrexd %0, [%1]" \
+ : "=&r" (ret) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return (ret); \
+}
+
+CK_PR_DOUBLE_LOAD(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_LOAD(double, double)
+#endif
+#undef CK_PR_DOUBLE_LOAD
+#endif
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint32_t, "str")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(32, uint32_t, "str")
+CK_PR_STORE_S(16, uint16_t, "strh")
+CK_PR_STORE_S(8, uint8_t, "strb")
+CK_PR_STORE_S(uint, unsigned int, "str")
+CK_PR_STORE_S(int, int, "str")
+CK_PR_STORE_S(short, short, "strh")
+CK_PR_STORE_S(char, char, "strb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+
+#define CK_PR_DOUBLE_STORE(T, N) \
+CK_CC_INLINE static void \
+ck_pr_md_store_##N(const T *target, T value) \
+{ \
+ T tmp; \
+ uint32_t flag; \
+ __asm __volatile("1: \n" \
+ "ldrexd %0, [%2]\n" \
+ "strexd %1, %3, [%2]\n" \
+ "teq %1, #0\n" \
+ "it ne \n" \
+ "bne 1b\n" \
+ : "=&r" (tmp), "=&r" (flag) \
+ : "r" (target), "r" (value) \
+ : "memory", "cc"); \
+}
+
+CK_PR_DOUBLE_STORE(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_STORE(double, double)
+#endif
+
+#undef CK_PR_DOUBLE_STORE
+
+#define CK_PR_DOUBLE_CAS_VALUE(T, N) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+{ \
+ T previous; \
+ int tmp; \
+ \
+ __asm__ __volatile__("1:" \
+ "ldrexd %0, [%4];" \
+ "cmp %Q0, %Q2;" \
+ "ittt eq;" \
+ "cmpeq %R0, %R2;" \
+ "strexdeq %1, %3, [%4];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ :"=&r" (previous), "=&r" (tmp) \
+ : "r" (compare), "r" (set) , \
+ "r"(target) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (*value == compare); \
+}
+
+CK_PR_DOUBLE_CAS_VALUE(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_CAS_VALUE(double, double)
+#endif
+
+#undef CK_PR_DOUBLE_CAS_VALUE
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+ uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
+ uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
+ uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
+ uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);
+
+ return (ck_pr_cas_64_value(CK_CPP_CAST(uint64_t *, target),
+ __compare,
+ __set,
+ CK_CPP_CAST(uint64_t *, value)));
+}
+
+#define CK_PR_DOUBLE_CAS(T, N) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##N(T *target, T compare, T set) \
+{ \
+ int ret; \
+ T tmp; \
+ \
+ __asm__ __volatile__("1:" \
+ "mov %0, #0;" \
+ "ldrexd %1, [%4];" \
+ "cmp %Q1, %Q2;" \
+ "itttt eq;" \
+ "cmpeq %R1, %R2;" \
+ "strexdeq %1, %3, [%4];" \
+ "moveq %0, #1;" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ : "=&r" (ret), "=&r" (tmp) \
+ : "r" (compare), "r" (set) , \
+ "r"(target) \
+ : "memory", "cc"); \
+ \
+ return (ret); \
+}
+
+CK_PR_DOUBLE_CAS(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_CAS(double, double)
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+ uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
+ uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
+ uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
+ uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);
+ return (ck_pr_cas_64(CK_CPP_CAST(uint64_t *, target),
+ __compare,
+ __set));
+}
+
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
+{
+ void *previous, *tmp;
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%2];"
+ "cmp %0, %4;"
+ "itt eq;"
+ "strexeq %1, %3, [%2];"
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+ *(void **)value = previous;
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+ void *previous, *tmp;
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%2];"
+ "cmp %0, %4;"
+ "itt eq;"
+ "strexeq %1, %3, [%2];"
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+ return (previous == compare);
+}
+
+#define CK_PR_CAS(N, T, W) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ T previous = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "cmp %0, %4;" \
+ "itt eq;" \
+ "strex" W "eq %1, %3, [%2];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ /* \
+ * Using "+&" instead of "=&" to avoid bogus \
+ * clang warnings. \
+ */ \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ T previous = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "cmp %0, %4;" \
+ "itt eq;" \
+ "strex" W "eq %1, %3, [%2];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(32, uint32_t, "")
+CK_PR_CAS(uint, unsigned int, "")
+CK_PR_CAS(int, int, "")
+CK_PR_CAS(16, uint16_t, "h")
+CK_PR_CAS(8, uint8_t, "b")
+CK_PR_CAS(short, short, "h")
+CK_PR_CAS(char, char, "b")
+
+
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "strex" W " %1, %3, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(32, uint32_t, uint32_t, "")
+CK_PR_FAS(ptr, void, void *, "")
+CK_PR_FAS(int, int, int, "")
+CK_PR_FAS(uint, unsigned int, unsigned int, "")
+CK_PR_FAS(16, uint16_t, uint16_t, "h")
+CK_PR_FAS(8, uint8_t, uint8_t, "b")
+CK_PR_FAS(short, short, short, "h")
+CK_PR_FAS(char, char, char, "b")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ I ";" \
+ "strex" W " %1, %0, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "")
+CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "")
+CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "add %0, %0, #1", W) \
+ CK_PR_UNARY(dec, S, T, T, "sub %0, %0, #1", W) \
+ CK_PR_UNARY(not, S, T, T, "mvn %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W) \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ I " %0, %0, %3;" \
+ "strex" W " %1, %0, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "orr", W) \
+ CK_PR_BINARY(sub, S, T, T, "sub", W) \
+ CK_PR_BINARY(xor, S, T, T, "eor", W)
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r, tmp;
+
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%3];"
+ "add %1, %4, %0;"
+ "strex %2, %1, [%3];"
+ "cmp %2, #0;"
+ "bne 1b;"
+ : "=&r" (previous),
+ "=&r" (r),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous = 0, r = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%3];" \
+ "add %1, %4, %0;" \
+ "strex" W " %2, %1, [%3];" \
+ "cmp %2, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (r), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_ARM_H */
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/ck_cc.h b/freebsd/sys/contrib/ck/include/gcc/ck_cc.h
new file mode 100644
index 00000000..6ebc59cb
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ck_cc.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_GCC_CC_H
+#define CK_GCC_CC_H
+
+#include <ck_md.h>
+
+#ifdef __SUNPRO_C
+#define CK_CC_UNUSED
+#define CK_CC_USED
+#define CK_CC_IMM
+#define CK_CC_IMM_U32
+#else
+#define CK_CC_UNUSED __attribute__((unused))
+#define CK_CC_USED __attribute__((used))
+#define CK_CC_IMM "i"
+#if defined(__x86_64__) || defined(__x86__)
+#define CK_CC_IMM_U32 "Z"
+#define CK_CC_IMM_S32 "e"
+#else
+#define CK_CC_IMM_U32 CK_CC_IMM
+#define CK_CC_IMM_S32 CK_CC_IMM
+#endif /* __x86_64__ || __x86__ */
+#endif
+
+#ifdef __OPTIMIZE__
+#define CK_CC_INLINE CK_CC_UNUSED inline
+#else
+#define CK_CC_INLINE CK_CC_UNUSED
+#endif
+
+#define CK_CC_FORCE_INLINE CK_CC_UNUSED __attribute__((always_inline)) inline
+#define CK_CC_RESTRICT __restrict__
+
+/*
+ * Packed attribute.
+ */
+#define CK_CC_PACKED __attribute__((packed))
+
+/*
+ * Weak reference.
+ */
+#define CK_CC_WEAKREF __attribute__((weakref))
+
+/*
+ * Alignment attribute.
+ */
+#define CK_CC_ALIGN(B) __attribute__((aligned(B)))
+
+/*
+ * Cache align.
+ */
+#define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE)
+
+/*
+ * These are functions which should be avoided.
+ */
+#ifdef __freestanding__
+#pragma GCC poison malloc free
+#endif
+
+/*
+ * Branch execution hints.
+ */
+#define CK_CC_LIKELY(x) (__builtin_expect(!!(x), 1))
+#define CK_CC_UNLIKELY(x) (__builtin_expect(!!(x), 0))
+
+/*
+ * Some compilers are overly strict regarding aliasing semantics.
+ * Unfortunately, in many cases it makes more sense to pay aliasing
+ * cost rather than overly expensive register spillage.
+ */
+#define CK_CC_ALIASED __attribute__((__may_alias__))
+
+/*
+ * Compile-time typeof
+ */
+#define CK_CC_TYPEOF(X, DEFAULT) __typeof__(X)
+
+/*
+ * Portability wrappers for bitwise operations.
+ */
+#ifndef CK_MD_CC_BUILTIN_DISABLE
+#define CK_F_CC_FFS
+CK_CC_INLINE static int
+ck_cc_ffs(unsigned int x)
+{
+
+ return __builtin_ffsl(x);
+}
+
+#define CK_F_CC_FFSL
+CK_CC_INLINE static int
+ck_cc_ffsl(unsigned long x)
+{
+
+ return __builtin_ffsll(x);
+}
+
+#define CK_F_CC_CTZ
+CK_CC_INLINE static int
+ck_cc_ctz(unsigned int x)
+{
+
+ return __builtin_ctz(x);
+}
+
+#define CK_F_CC_POPCOUNT
+CK_CC_INLINE static int
+ck_cc_popcount(unsigned int x)
+{
+
+ return __builtin_popcount(x);
+}
+#endif /* CK_MD_CC_BUILTIN_DISABLE */
+#endif /* CK_GCC_CC_H */
diff --git a/freebsd/sys/contrib/ck/include/gcc/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/ck_f_pr.h
new file mode 100644
index 00000000..0ef0d108
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ck_f_pr.h
@@ -0,0 +1,105 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
diff --git a/freebsd/sys/contrib/ck/include/gcc/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/ck_pr.h
new file mode 100644
index 00000000..108e983a
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ck_pr.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2010 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_GCC_H
+#define CK_PR_GCC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+
+CK_CC_INLINE static void
+ck_pr_barrier(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#ifndef CK_F_PR
+#define CK_F_PR
+
+#include <ck_stdbool.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+#define CK_PR_ACCESS(x) (*(volatile __typeof__(x) *)&(x))
+
+#define CK_PR_LOAD(S, M, T) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ ck_pr_barrier(); \
+ r = CK_PR_ACCESS(*(const T *)target); \
+ ck_pr_barrier(); \
+ return (r); \
+ } \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ ck_pr_barrier(); \
+ CK_PR_ACCESS(*(T *)target) = v; \
+ ck_pr_barrier(); \
+ return; \
+ }
+
+CK_CC_INLINE static void *
+ck_pr_md_load_ptr(const void *target)
+{
+ void *r;
+
+ ck_pr_barrier();
+ r = CK_CC_DECONST_PTR(*(volatile void *const*)(target));
+ ck_pr_barrier();
+
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_pr_md_store_ptr(void *target, const void *v)
+{
+
+ ck_pr_barrier();
+ *(volatile void **)target = CK_CC_DECONST_PTR(v);
+ ck_pr_barrier();
+ return;
+}
+
+#define CK_PR_LOAD_S(S, T) CK_PR_LOAD(S, T, T)
+
+CK_PR_LOAD_S(char, char)
+CK_PR_LOAD_S(uint, unsigned int)
+CK_PR_LOAD_S(int, int)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double)
+#endif
+CK_PR_LOAD_S(64, uint64_t)
+CK_PR_LOAD_S(32, uint32_t)
+CK_PR_LOAD_S(16, uint16_t)
+CK_PR_LOAD_S(8, uint8_t)
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ ck_pr_barrier();
+}
+
+/*
+ * Load and store fences are equivalent to full fences in the GCC port.
+ */
+#define CK_PR_FENCE(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __sync_synchronize(); \
+ }
+
+CK_PR_FENCE(atomic)
+CK_PR_FENCE(atomic_atomic)
+CK_PR_FENCE(atomic_load)
+CK_PR_FENCE(atomic_store)
+CK_PR_FENCE(store_atomic)
+CK_PR_FENCE(load_atomic)
+CK_PR_FENCE(load)
+CK_PR_FENCE(load_load)
+CK_PR_FENCE(load_store)
+CK_PR_FENCE(store)
+CK_PR_FENCE(store_store)
+CK_PR_FENCE(store_load)
+CK_PR_FENCE(memory)
+CK_PR_FENCE(acquire)
+CK_PR_FENCE(release)
+CK_PR_FENCE(acqrel)
+CK_PR_FENCE(lock)
+CK_PR_FENCE(unlock)
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ z = __sync_bool_compare_and_swap((T *)target, compare, set); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *)
+
+#define CK_PR_CAS_S(S, T) CK_PR_CAS(S, T, T)
+
+CK_PR_CAS_S(char, char)
+CK_PR_CAS_S(int, int)
+CK_PR_CAS_S(uint, unsigned int)
+CK_PR_CAS_S(64, uint64_t)
+CK_PR_CAS_S(32, uint32_t)
+CK_PR_CAS_S(16, uint16_t)
+CK_PR_CAS_S(8, uint8_t)
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v)
+{
+ set = __sync_val_compare_and_swap((void **)target, compare, set);
+ *(void **)v = set;
+ return (set == compare);
+}
+
+#define CK_PR_CAS_O(S, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(T *target, T compare, T set, T *v) \
+ { \
+ set = __sync_val_compare_and_swap(target, compare, set);\
+ *v = set; \
+ return (set == compare); \
+ }
+
+CK_PR_CAS_O(char, char)
+CK_PR_CAS_O(int, int)
+CK_PR_CAS_O(uint, unsigned int)
+CK_PR_CAS_O(64, uint64_t)
+CK_PR_CAS_O(32, uint32_t)
+CK_PR_CAS_O(16, uint16_t)
+CK_PR_CAS_O(8, uint8_t)
+
+#undef CK_PR_CAS_O
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ d = __sync_fetch_and_add((T *)target, d); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, void *)
+
+#define CK_PR_FAA_S(S, T) CK_PR_FAA(S, T, T)
+
+CK_PR_FAA_S(char, char)
+CK_PR_FAA_S(uint, unsigned int)
+CK_PR_FAA_S(int, int)
+CK_PR_FAA_S(64, uint64_t)
+CK_PR_FAA_S(32, uint32_t)
+CK_PR_FAA_S(16, uint16_t)
+CK_PR_FAA_S(8, uint8_t)
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ d = __sync_fetch_and_##K((T *)target, d); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T) CK_PR_BINARY(K, S, T, T)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, void *) \
+ CK_PR_BINARY_S(K, char, char) \
+ CK_PR_BINARY_S(K, int, int) \
+ CK_PR_BINARY_S(K, uint, unsigned int) \
+ CK_PR_BINARY_S(K, 64, uint64_t) \
+ CK_PR_BINARY_S(K, 32, uint32_t) \
+ CK_PR_BINARY_S(K, 16, uint16_t) \
+ CK_PR_BINARY_S(K, 8, uint8_t)
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+#define CK_PR_UNARY(S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_inc_##S(M *target) \
+ { \
+ ck_pr_add_##S(target, (T)1); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_pr_dec_##S(M *target) \
+ { \
+ ck_pr_sub_##S(target, (T)1); \
+ return; \
+ }
+
+#define CK_PR_UNARY_S(S, M) CK_PR_UNARY(S, M, M)
+
+CK_PR_UNARY(ptr, void, void *)
+CK_PR_UNARY_S(char, char)
+CK_PR_UNARY_S(int, int)
+CK_PR_UNARY_S(uint, unsigned int)
+CK_PR_UNARY_S(64, uint64_t)
+CK_PR_UNARY_S(32, uint32_t)
+CK_PR_UNARY_S(16, uint16_t)
+CK_PR_UNARY_S(8, uint8_t)
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+#endif /* !CK_F_PR */
+#endif /* CK_PR_GCC_H */
diff --git a/freebsd/sys/contrib/ck/include/gcc/ppc/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/ppc/ck_f_pr.h
new file mode 100644
index 00000000..0aec33e4
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ppc/ck_f_pr.h
@@ -0,0 +1,79 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/ppc/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/ppc/ck_pr.h
new file mode 100644
index 00000000..cd7935dd
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ppc/ck_pr.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2012 João Fernandes.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_PPC_H
+#define CK_PR_PPC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * This bounces the hardware thread from low to medium
+ * priority. I am unsure of the benefits of this approach
+ * but it is used by the Linux kernel.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("or 1, 1, 1;"
+ "or 2, 2, 2;" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+CK_PR_FENCE(atomic, "lwsync")
+CK_PR_FENCE(atomic_store, "lwsync")
+CK_PR_FENCE(atomic_load, "sync")
+CK_PR_FENCE(store_atomic, "lwsync")
+CK_PR_FENCE(load_atomic, "lwsync")
+CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_load, "sync")
+CK_PR_FENCE(load, "lwsync")
+CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(memory, "sync")
+CK_PR_FENCE(acquire, "lwsync")
+CK_PR_FENCE(release, "lwsync")
+CK_PR_FENCE(acqrel, "lwsync")
+CK_PR_FENCE(lock, "lwsync")
+CK_PR_FENCE(unlock, "lwsync")
+
+#undef CK_PR_FENCE
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I "%U1%X1 %0, %1" \
+ : "=r" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint32_t, "lwz")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(32, uint32_t, "lwz")
+CK_PR_LOAD_S(16, uint16_t, "lhz")
+CK_PR_LOAD_S(8, uint8_t, "lbz")
+CK_PR_LOAD_S(uint, unsigned int, "lwz")
+CK_PR_LOAD_S(int, int, "lwz")
+CK_PR_LOAD_S(short, short, "lhz")
+CK_PR_LOAD_S(char, char, "lbz")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I "%U0%X0 %1, %0" \
+ : "=m" (*(C *)target) \
+ : "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint32_t, "stw")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(32, uint32_t, "stw")
+CK_PR_STORE_S(16, uint16_t, "sth")
+CK_PR_STORE_S(8, uint8_t, "stb")
+CK_PR_STORE_S(uint, unsigned int, "stw")
+CK_PR_STORE_S(int, int, "stw")
+CK_PR_STORE_S(short, short, "sth")
+CK_PR_STORE_S(char, char, "stb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+#define CK_PR_CAS(N, T, M) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(M *target, T compare, T set, M *value) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *(T *)value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(M *target, T compare, T set) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(ptr, void *, void)
+#define CK_PR_CAS_S(a, b) CK_PR_CAS(a, b, b)
+CK_PR_CAS_S(32, uint32_t)
+CK_PR_CAS_S(uint, unsigned int)
+CK_PR_CAS_S(int, int)
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ "st" W "cx. %2, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(32, uint32_t, uint32_t, "w")
+CK_PR_FAS(ptr, void, void *, "w")
+CK_PR_FAS(int, int, int, "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "w")
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I ";" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "w")
+CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "w")
+CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "w")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "w")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W) \
+ CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W) \
+ CK_PR_UNARY(not, S, T, T, "not %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)
+
+CK_PR_UNARY_S(32, uint32_t, "w")
+CK_PR_UNARY_S(uint, unsigned int, "w")
+CK_PR_UNARY_S(int, int, "w")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I " %0, %2, %0;" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "w")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "w")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "w")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "w")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "w")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "or", W) \
+ CK_PR_BINARY(sub, S, T, T, "subf", W) \
+ CK_PR_BINARY(xor, S, T, T, "xor", W)
+
+CK_PR_BINARY_S(32, uint32_t, "w")
+CK_PR_BINARY_S(uint, unsigned int, "w")
+CK_PR_BINARY_S(int, int, "w")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r;
+
+ __asm__ __volatile__("1:"
+ "lwarx %0, 0, %2;"
+ "add %1, %3, %0;"
+ "stwcx. %1, 0, %2;"
+ "bne- 1b;"
+ : "=&r" (previous),
+ "=&r" (r)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous, r; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %2;" \
+ "add %1, %3, %0;" \
+ "st" W "cx. %1, 0, %2;" \
+ "bne- 1b;" \
+ : "=&r" (previous), \
+ "=&r" (r) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "w")
+CK_PR_FAA(uint, unsigned int, "w")
+CK_PR_FAA(int, int, "w")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_PPC_H */
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_f_pr.h
new file mode 100644
index 00000000..cd54a289
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_f_pr.h
@@ -0,0 +1,97 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_pr.h
new file mode 100644
index 00000000..3f5e5db0
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/ppc64/ck_pr.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_PPC64_H
+#define CK_PR_PPC64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * This bounces the hardware thread from low to medium
+ * priority. I am unsure of the benefits of this approach
+ * but it is used by the Linux kernel.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("or 1, 1, 1;"
+ "or 2, 2, 2;" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+/*
+ * These are derived from:
+ * http://www.ibm.com/developerworks/systems/articles/powerpc.html
+ */
+CK_PR_FENCE(atomic, "lwsync")
+CK_PR_FENCE(atomic_store, "lwsync")
+CK_PR_FENCE(atomic_load, "sync")
+CK_PR_FENCE(store_atomic, "lwsync")
+CK_PR_FENCE(load_atomic, "lwsync")
+CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_load, "sync")
+CK_PR_FENCE(load, "lwsync")
+CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(memory, "sync")
+CK_PR_FENCE(acquire, "lwsync")
+CK_PR_FENCE(release, "lwsync")
+CK_PR_FENCE(acqrel, "lwsync")
+CK_PR_FENCE(lock, "lwsync")
+CK_PR_FENCE(unlock, "lwsync")
+
+#undef CK_PR_FENCE
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I "%U1%X1 %0, %1" \
+ : "=r" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint64_t, "ld")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(64, uint64_t, "ld")
+CK_PR_LOAD_S(32, uint32_t, "lwz")
+CK_PR_LOAD_S(16, uint16_t, "lhz")
+CK_PR_LOAD_S(8, uint8_t, "lbz")
+CK_PR_LOAD_S(uint, unsigned int, "lwz")
+CK_PR_LOAD_S(int, int, "lwz")
+CK_PR_LOAD_S(short, short, "lhz")
+CK_PR_LOAD_S(char, char, "lbz")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double, "ld")
+#endif
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I "%U0%X0 %1, %0" \
+ : "=m" (*(C *)target) \
+ : "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint64_t, "std")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(64, uint64_t, "std")
+CK_PR_STORE_S(32, uint32_t, "stw")
+CK_PR_STORE_S(16, uint16_t, "sth")
+CK_PR_STORE_S(8, uint8_t, "stb")
+CK_PR_STORE_S(uint, unsigned int, "stw")
+CK_PR_STORE_S(int, int, "stw")
+CK_PR_STORE_S(short, short, "sth")
+CK_PR_STORE_S(char, char, "stb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE_S(double, double, "std")
+#endif
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
+{
+ uint64_t previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ *value = previous;
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
+{
+ void *previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ ck_pr_md_store_ptr(value, previous);
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
+{
+ uint64_t previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+ void *previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ return (previous == compare);
+}
+
+#define CK_PR_CAS(N, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(32, uint32_t)
+CK_PR_CAS(uint, unsigned int)
+CK_PR_CAS(int, int)
+
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ "st" W "cx. %2, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "d")
+CK_PR_FAS(32, uint32_t, uint32_t, "w")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_FAS(double, double, double, "d")
+#endif
+CK_PR_FAS(ptr, void, void *, "d")
+CK_PR_FAS(int, int, int, "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "w")
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I ";" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "d")
+CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "d")
+CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "d")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "d")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W) \
+ CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W) \
+ CK_PR_UNARY(not, S, T, T, "not %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)
+
+CK_PR_UNARY_S(64, uint64_t, "d")
+CK_PR_UNARY_S(32, uint32_t, "w")
+CK_PR_UNARY_S(uint, unsigned int, "w")
+CK_PR_UNARY_S(int, int, "w")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I " %0, %2, %0;" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "d")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "d")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "d")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "d")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "d")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "or", W) \
+ CK_PR_BINARY(sub, S, T, T, "subf", W) \
+ CK_PR_BINARY(xor, S, T, T, "xor", W)
+
+CK_PR_BINARY_S(64, uint64_t, "d")
+CK_PR_BINARY_S(32, uint32_t, "w")
+CK_PR_BINARY_S(uint, unsigned int, "w")
+CK_PR_BINARY_S(int, int, "w")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %2;"
+ "add %1, %3, %0;"
+ "stdcx. %1, 0, %2;"
+ "bne- 1b;"
+ : "=&r" (previous),
+ "=&r" (r)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous, r; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %2;" \
+ "add %1, %3, %0;" \
+ "st" W "cx. %1, 0, %2;" \
+ "bne- 1b;" \
+ : "=&r" (previous), \
+ "=&r" (r) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(64, uint64_t, "d")
+CK_PR_FAA(32, uint32_t, "w")
+CK_PR_FAA(uint, unsigned int, "w")
+CK_PR_FAA(int, int, "w")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_PPC64_H */
diff --git a/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_f_pr.h
new file mode 100644
index 00000000..0398680e
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_f_pr.h
@@ -0,0 +1,26 @@
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h
new file mode 100644
index 00000000..7dc71725
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2009, 2010 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_SPARCV9_H
+#define CK_PR_SPARCV9_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * Order loads at the least.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("membar #LoadLoad" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+/*
+ * Atomic operations are treated as both load and store
+ * operations on SPARCv9.
+ */
+CK_PR_FENCE(atomic, "membar #StoreStore")
+CK_PR_FENCE(atomic_store, "membar #StoreStore")
+CK_PR_FENCE(atomic_load, "membar #StoreLoad")
+CK_PR_FENCE(store_atomic, "membar #StoreStore")
+CK_PR_FENCE(load_atomic, "membar #LoadStore")
+CK_PR_FENCE(store, "membar #StoreStore")
+CK_PR_FENCE(store_load, "membar #StoreLoad")
+CK_PR_FENCE(load, "membar #LoadLoad")
+CK_PR_FENCE(load_store, "membar #LoadStore")
+CK_PR_FENCE(memory, "membar #MemIssue")
+CK_PR_FENCE(acquire, "membar #LoadLoad | #LoadStore")
+CK_PR_FENCE(release, "membar #LoadStore | #StoreStore")
+CK_PR_FENCE(acqrel, "membar #LoadLoad | #LoadStore | #StoreStore")
+CK_PR_FENCE(lock, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+CK_PR_FENCE(unlock, "membar #LoadStore | #StoreStore")
+
+#undef CK_PR_FENCE
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " [%1], %0" \
+ : "=&r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint64_t, "ldx")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(64, uint64_t, "ldx")
+CK_PR_LOAD_S(32, uint32_t, "lduw")
+CK_PR_LOAD_S(uint, unsigned int, "lduw")
+CK_PR_LOAD_S(double, double, "ldx")
+CK_PR_LOAD_S(int, int, "ldsw")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, [%1]" \
+ : \
+ : "r" (v), \
+ "r" (target) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint64_t, "stx")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(8, uint8_t, "stub")
+CK_PR_STORE_S(64, uint64_t, "stx")
+CK_PR_STORE_S(32, uint32_t, "stuw")
+CK_PR_STORE_S(uint, unsigned int, "stuw")
+CK_PR_STORE_S(double, double, "stx")
+CK_PR_STORE_S(int, int, "stsw")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
+{
+
+ __asm__ __volatile__("casx [%1], %2, %0"
+ : "+&r" (set)
+ : "r" (target),
+ "r" (compare)
+ : "memory");
+
+ *value = set;
+ return (compare == set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
+{
+
+ __asm__ __volatile__("casx [%1], %2, %0"
+ : "+&r" (set)
+ : "r" (target),
+ "r" (compare)
+ : "memory");
+
+ return (compare == set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+
+ return ck_pr_cas_64(target, (uint64_t)compare, (uint64_t)set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *previous)
+{
+
+ return ck_pr_cas_64_value(target, (uint64_t)compare, (uint64_t)set, previous);
+}
+
+#define CK_PR_CAS(N, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ __asm__ __volatile__("cas [%1], %2, %0" \
+ : "+&r" (set) \
+ : "r" (target), \
+ "r" (compare) \
+ : "memory"); \
+ *value = set; \
+ return (compare == set); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ __asm__ __volatile__("cas [%1], %2, %0" \
+ : "+&r" (set) \
+ : "r" (target), \
+ "r" (compare) \
+ : "memory"); \
+ return (compare == set); \
+ }
+
+CK_PR_CAS(32, uint32_t)
+CK_PR_CAS(uint, unsigned int)
+CK_PR_CAS(int, int)
+
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, T) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(T *target, T update) \
+ { \
+ \
+ __asm__ __volatile__("swap [%1], %0" \
+ : "+&r" (update) \
+ : "r" (target) \
+ : "memory"); \
+ return (update); \
+ }
+
+CK_PR_FAS(int, int)
+CK_PR_FAS(uint, unsigned int)
+CK_PR_FAS(32, uint32_t)
+
+#undef CK_PR_FAS
+
+#endif /* CK_PR_SPARCV9_H */
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/x86/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/x86/ck_f_pr.h
new file mode 100644
index 00000000..f82c66b0
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/x86/ck_f_pr.h
@@ -0,0 +1,152 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/x86/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/x86/ck_pr.h
new file mode 100644
index 00000000..3e36376f
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/x86/ck_pr.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2011 Devon H. O'Dell <devon.odell@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_X86_H
+#define CK_PR_X86_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=) or "predefined
+ * delay".
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+ __asm__ __volatile__("pause" ::: "memory");
+ return;
+}
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__("" ::: "memory"); \
+ return; \
+ }
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ return; \
+ }
+#endif /* CK_MD_UMP */
+
+#if defined(CK_MD_SSE_DISABLE)
+/* If SSE is disabled, then use atomic operations for serialization. */
+#define CK_MD_X86_MFENCE "lock addl $0, (%%esp)"
+#define CK_MD_X86_SFENCE CK_MD_X86_MFENCE
+#define CK_MD_X86_LFENCE CK_MD_X86_MFENCE
+#else
+#define CK_MD_X86_SFENCE "sfence"
+#define CK_MD_X86_LFENCE "lfence"
+#define CK_MD_X86_MFENCE "mfence"
+#endif /* !CK_MD_SSE_DISABLE */
+
+CK_PR_FENCE(atomic, "")
+CK_PR_FENCE(atomic_store, "")
+CK_PR_FENCE(atomic_load, "")
+CK_PR_FENCE(store_atomic, "")
+CK_PR_FENCE(load_atomic, "")
+CK_PR_FENCE(load, CK_MD_X86_LFENCE)
+CK_PR_FENCE(load_store, CK_MD_X86_MFENCE)
+CK_PR_FENCE(store, CK_MD_X86_SFENCE)
+CK_PR_FENCE(store_load, CK_MD_X86_MFENCE)
+CK_PR_FENCE(memory, CK_MD_X86_MFENCE)
+CK_PR_FENCE(release, CK_MD_X86_MFENCE)
+CK_PR_FENCE(acquire, CK_MD_X86_MFENCE)
+CK_PR_FENCE(acqrel, CK_MD_X86_MFENCE)
+CK_PR_FENCE(lock, CK_MD_X86_MFENCE)
+CK_PR_FENCE(unlock, CK_MD_X86_MFENCE)
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, %1" \
+ : "+m" (*(C *)target), \
+ "+q" (v) \
+ : \
+ : "memory"); \
+ return v; \
+ }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgl")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8, uint8_t, "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=q" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, char, "movl")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8, uint8_t, "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : CK_CC_IMM "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, char, "movl")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(char, char, "movb")
+CK_PR_STORE_S(uint, unsigned int, "movl")
+CK_PR_STORE_S(int, int, "movl")
+CK_PR_STORE_S(32, uint32_t, "movl")
+CK_PR_STORE_S(16, uint16_t, "movw")
+CK_PR_STORE_S(8, uint8_t, "movb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target), \
+ "+q" (d) \
+ : \
+ : "memory", "cc"); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddl")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8, uint8_t, "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I) \
+ CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(T *target) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \
+ : "+m" (*(C *)target) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_UNARY_V(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(T *target, bool *r) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=m" (*r) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_UNARY(K, ptr, void, char, #K "l") \
+ CK_PR_UNARY_S(K, char, char, #K "b") \
+ CK_PR_UNARY_S(K, int, int, #K "l") \
+ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* not does not affect condition flags. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target) \
+ : CK_CC_IMM "q" (d) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T, I) CK_PR_BINARY(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "l") \
+ CK_PR_BINARY_S(K, char, char, #K "b") \
+ CK_PR_BINARY_S(K, int, int, #K "l") \
+ CK_PR_BINARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_BINARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_BINARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_BINARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=a" (z) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgl")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8, uint8_t, "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+ "mov %% " R ", %2;" \
+ "setz %1;" \
+ : "+m" (*(C *)target), \
+ "=a" (z), \
+ "=m" (*(C *)v) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return (bool)z; \
+ }
+
+CK_PR_CAS_O(ptr, void, void *, char, "l", "eax")
+
+#define CK_PR_CAS_O_S(S, T, I, R) \
+ CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8, uint8_t, "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(T *target, unsigned int b) \
+ { \
+ bool c; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \
+ : "+m" (*(C *)target), \
+ "=q" (c) \
+ : "q" ((P)b) \
+ : "memory", "cc"); \
+ return (bool)c; \
+ }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BT(K, ptr, void, uint32_t, char, #K "l %2, %0") \
+ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \
+ CK_PR_BT_S(K, int, int, #K "l %2, %0") \
+ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \
+ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
+
+#endif /* CK_PR_X86_H */
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_f_pr.h b/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_f_pr.h
new file mode 100644
index 00000000..545f5fd6
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_f_pr.h
@@ -0,0 +1,202 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_64
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_64
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_64
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_8
+#define CK_F_PR_CAS_16_8_VALUE
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_4
+#define CK_F_PR_CAS_32_4_VALUE
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_2
+#define CK_F_PR_CAS_64_2_VALUE
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_16
+#define CK_F_PR_CAS_8_16_VALUE
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_16
+#define CK_F_PR_CAS_CHAR_16_VALUE
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_4
+#define CK_F_PR_CAS_INT_4_VALUE
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_2
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_4
+#define CK_F_PR_CAS_UINT_4_VALUE
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_16_8
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_32_4
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_64_2
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_8_16
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_CHAR_16
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_INT_4
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_PTR_2
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_LOAD_UINT_4
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_pr.h b/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_pr.h
new file mode 100644
index 00000000..4de13329
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/x86_64/ck_pr.h
@@ -0,0 +1,606 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_X86_64_H
+#define CK_PR_X86_64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Support for TSX extensions.
+ */
+#ifdef CK_MD_RTM_ENABLE
+#include "ck_pr_rtm.h"
+#endif
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#endif
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=) or "predefined
+ * delay".
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+ __asm__ __volatile__("pause" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+/* Atomic operations are always serializing. */
+CK_PR_FENCE(atomic, "")
+CK_PR_FENCE(atomic_store, "")
+CK_PR_FENCE(atomic_load, "")
+CK_PR_FENCE(store_atomic, "")
+CK_PR_FENCE(load_atomic, "")
+
+/* Traditional fence interface. */
+CK_PR_FENCE(load, "lfence")
+CK_PR_FENCE(load_store, "mfence")
+CK_PR_FENCE(store, "sfence")
+CK_PR_FENCE(store_load, "mfence")
+CK_PR_FENCE(memory, "mfence")
+
+/* Below are stdatomic-style fences. */
+
+/*
+ * Provides load-store and store-store ordering. However, Intel specifies that
+ * the WC memory model is relaxed. It is likely an sfence *is* sufficient (in
+ * particular, stores are not re-ordered with respect to prior loads and it is
+ * really just the stores that are subject to re-ordering). However, we take
+ * the conservative route as the manuals are too ambiguous for my taste.
+ */
+CK_PR_FENCE(release, "mfence")
+
+/*
+ * Provides load-load and load-store ordering. The lfence instruction ensures
+ * all prior load operations are complete before any subsequent instructions
+ * actually begin execution. However, the manual also ends up going to describe
+ * WC memory as a relaxed model.
+ */
+CK_PR_FENCE(acquire, "mfence")
+
+CK_PR_FENCE(acqrel, "mfence")
+CK_PR_FENCE(lock, "mfence")
+CK_PR_FENCE(unlock, "mfence")
+
+#undef CK_PR_FENCE
+
+/*
+ * Read for ownership. Older compilers will generate the 32-bit
+ * 3DNow! variant which is binary compatible with x86-64 variant
+ * of prefetchw.
+ */
+#ifndef CK_F_PR_RFO
+#define CK_F_PR_RFO
+CK_CC_INLINE static void
+ck_pr_rfo(const void *m)
+{
+
+ __asm__ __volatile__("prefetchw (%0)"
+ :
+ : "r" (m)
+ : "memory");
+
+ return;
+}
+#endif /* CK_F_PR_RFO */
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, %1" \
+ : "+m" (*(C *)target), \
+ "+q" (v) \
+ : \
+ : "memory"); \
+ return v; \
+ }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgq")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_FAS_S(double, double, "xchgq")
+#endif
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(64, uint64_t, "xchgq")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8, uint8_t, "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
+
+/*
+ * Atomic load-from-memory operations.
+ */
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=q" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, char, "movq")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double, "movq")
+#endif
+CK_PR_LOAD_S(64, uint64_t, "movq")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8, uint8_t, "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+CK_CC_INLINE static void
+ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2])
+{
+ __asm__ __volatile__("movq %%rdx, %%rcx;"
+ "movq %%rax, %%rbx;"
+ CK_PR_LOCK_PREFIX "cmpxchg16b %2;"
+ : "=a" (v[0]),
+ "=d" (v[1])
+ : "m" (*(const uint64_t *)target)
+ : "rbx", "rcx", "memory", "cc");
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pr_load_ptr_2(const void *t, void *v)
+{
+ ck_pr_load_64_2(CK_CPP_CAST(const uint64_t *, t),
+ CK_CPP_CAST(uint64_t *, v));
+ return;
+}
+
+#define CK_PR_LOAD_2(S, W, T) \
+ CK_CC_INLINE static void \
+ ck_pr_md_load_##S##_##W(const T t[2], T v[2]) \
+ { \
+ ck_pr_load_64_2((const uint64_t *)(const void *)t, \
+ (uint64_t *)(void *)v); \
+ return; \
+ }
+
+CK_PR_LOAD_2(char, 16, char)
+CK_PR_LOAD_2(int, 4, int)
+CK_PR_LOAD_2(uint, 4, unsigned int)
+CK_PR_LOAD_2(32, 4, uint32_t)
+CK_PR_LOAD_2(16, 8, uint16_t)
+CK_PR_LOAD_2(8, 16, uint8_t)
+
+#undef CK_PR_LOAD_2
+
+/*
+ * Atomic store-to-memory operations.
+ */
+#define CK_PR_STORE_IMM(S, M, T, C, I, K) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : K "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE(double, double, double, double, "movq")
+#endif
+
+#define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K)
+
+CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32)
+CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32)
+CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32)
+CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32)
+CK_PR_STORE_S(8, uint8_t, "movb", CK_CC_IMM_U32)
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE_IMM
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target), \
+ "+q" (d) \
+ : \
+ : "memory", "cc"); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(64, uint64_t, "xaddq")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8, uint8_t, "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I) \
+ CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(T *target) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \
+ : "+m" (*(C *)target) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_UNARY_V(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(T *target, bool *r) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=m" (*r) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_UNARY(K, ptr, void, char, #K "q") \
+ CK_PR_UNARY_S(K, char, char, #K "b") \
+ CK_PR_UNARY_S(K, int, int, #K "l") \
+ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_UNARY_S(K, 64, uint64_t, #K "q") \
+ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* not does not affect condition flags. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I, O) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target) \
+ : O "q" (d) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32) \
+ CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32) \
+ CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32)
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=a" (z) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgq")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "cmpxchgq")
+#endif
+CK_PR_CAS_S(64, uint64_t, "cmpxchgq")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8, uint8_t, "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+ "mov %% " R ", %2;" \
+ "setz %1;" \
+ : "+m" (*(C *)target), \
+ "=a" (z), \
+ "=m" (*(C *)v) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS_O(ptr, void, void *, char, "q", "rax")
+
+#define CK_PR_CAS_O_S(S, T, I, R) \
+ CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_O_S(double, double, "q", "rax")
+#endif
+CK_PR_CAS_O_S(64, uint64_t, "q", "rax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8, uint8_t, "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Contrary to C-interface, alignment requirements are that of uint64_t[2].
+ */
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+ bool z;
+
+ __asm__ __volatile__("movq 0(%4), %%rax;"
+ "movq 8(%4), %%rdx;"
+ CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1"
+ : "+m" (*target),
+ "=q" (z)
+ : "b" (set[0]),
+ "c" (set[1]),
+ "q" (compare)
+ : "memory", "cc", "%rax", "%rdx");
+ return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *t, void *c, void *s)
+{
+ return ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, t),
+ CK_CPP_CAST(uint64_t *, c),
+ CK_CPP_CAST(uint64_t *, s));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2],
+ uint64_t compare[2],
+ uint64_t set[2],
+ uint64_t v[2])
+{
+ bool z;
+
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;"
+ "setz %3"
+ : "+m" (*target),
+ "=a" (v[0]),
+ "=d" (v[1]),
+ "=q" (z)
+ : "a" (compare[0]),
+ "d" (compare[1]),
+ "b" (set[0]),
+ "c" (set[1])
+ : "memory", "cc");
+ return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v)
+{
+ return ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *,t),
+ CK_CPP_CAST(uint64_t *,c),
+ CK_CPP_CAST(uint64_t *,s),
+ CK_CPP_CAST(uint64_t *,v));
+}
+
+#define CK_PR_CAS_V(S, W, T) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W]) \
+{ \
+ return ck_pr_cas_64_2((uint64_t *)(void *)t, \
+ (uint64_t *)(void *)c, \
+ (uint64_t *)(void *)s); \
+} \
+CK_CC_INLINE static bool \
+ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v) \
+{ \
+ return ck_pr_cas_64_2_value((uint64_t *)(void *)t, \
+ (uint64_t *)(void *)c, \
+ (uint64_t *)(void *)s, \
+ (uint64_t *)(void *)v); \
+}
+
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_V(double, 2, double)
+#endif
+CK_PR_CAS_V(char, 16, char)
+CK_PR_CAS_V(int, 4, int)
+CK_PR_CAS_V(uint, 4, unsigned int)
+CK_PR_CAS_V(32, 4, uint32_t)
+CK_PR_CAS_V(16, 8, uint16_t)
+CK_PR_CAS_V(8, 16, uint8_t)
+
+#undef CK_PR_CAS_V
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(T *target, unsigned int b) \
+ { \
+ bool c; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \
+ : "+m" (*(C *)target), \
+ "=q" (c) \
+ : "q" ((P)b) \
+ : "memory", "cc"); \
+ return c; \
+ }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0") \
+ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \
+ CK_PR_BT_S(K, int, int, #K "l %2, %0") \
+ CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0") \
+ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \
+ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
+
+#endif /* CK_PR_X86_64_H */
+
diff --git a/freebsd/sys/contrib/ck/src/ck_epoch.c b/freebsd/sys/contrib/ck/src/ck_epoch.c
new file mode 100644
index 00000000..be0f201d
--- /dev/null
+++ b/freebsd/sys/contrib/ck/src/ck_epoch.c
@@ -0,0 +1,597 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * The implementation here is inspired from the work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ */
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_epoch.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+/*
+ * Only three distinct values are used for reclamation, but reclamation occurs
+ * at e+2 rather than e+1. Any thread in a "critical section" would have
+ * acquired some snapshot (e) of the global epoch value (e_g) and set an active
+ * flag. Any hazardous references will only occur after a full memory barrier.
+ * For example, assume an initial e_g value of 1, e value of 0 and active value
+ * of 0.
+ *
+ * ck_epoch_begin(...)
+ * e = e_g
+ * active = 1
+ * memory_barrier();
+ *
+ * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
+ * e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
+ * has already observed the value of "1" (or the value we are incrementing
+ * from). This guarantees us that for any given value e_g, any threads within
+ * critical sections (referred to as "active" threads from here on) would have
+ * an e value of e_g-1 or e_g. This also means that hazardous references may be
+ * shared in both e_g-1 and e_g even if they are logically deleted in e_g.
+ *
+ * For example, assume all threads have an e value of e_g. Another thread may
+ * increment to e_g to e_g+1. Older threads may have a reference to an object
+ * which is only deleted in e_g+1. It could be that reader threads are
+ * executing some hash table look-ups, while some other writer thread (which
+ * causes epoch counter tick) actually deletes the same items that reader
+ * threads are looking up (this writer thread having an e value of e_g+1).
+ * This is possible if the writer thread re-observes the epoch after the
+ * counter tick.
+ *
+ * Pseudo-code for writer:
+ * ck_epoch_begin()
+ * ht_delete(x)
+ * ck_epoch_end()
+ * ck_epoch_begin()
+ * ht_delete(x)
+ * ck_epoch_end()
+ *
+ * Pseudo-code for reader:
+ * for (;;) {
+ * x = ht_lookup(x)
+ * ck_pr_inc(&x->value);
+ * }
+ *
+ * Of course, it is also possible for references logically deleted at e_g-1 to
+ * still be accessed at e_g as threads are "active" at the same time
+ * (real-world time) mutating shared objects.
+ *
+ * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
+ * references could exist to objects logically deleted at e_g-1. The reason for
+ * this is that at e_g+1, all epoch read-side critical sections started at
+ * e_g-1 must have been completed. If any epoch read-side critical sections at
+ * e_g-1 were still active, then we would never increment to e_g+1 (active != 0
+ * ^ e != e_g). Additionally, e_g may still have hazardous references to
+ * objects logically deleted at e_g-1 which means objects logically deleted at
+ * e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
+ * (since it is valid for active threads to be at e_g and threads at e_g still
+ * require safe memory accesses).
+ *
+ * However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
+ * Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
+ * hazardous references to e_g, no active threads are at e_g or e_g-1. This
+ * means no hazardous references could exist to objects deleted at e_g-1 (at
+ * e_g+2).
+ *
+ * To summarize these important points,
+ * 1) Active threads will always have a value of e_g or e_g-1.
+ * 2) Items that are logically deleted e_g or e_g-1 cannot be physically
+ * deleted.
+ * 3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
+ * or at e_g+1 if no threads are at e_g.
+ *
+ * Last but not least, if we are at e_g+2, then no active thread is at e_g
+ * which means it is safe to apply modulo-3 arithmetic to e_g value in order to
+ * re-use e_g to represent the e_g+3 state. This means it is sufficient to
+ * represent e_g using only the values 0, 1 or 2. Every time a thread re-visits
+ * a e_g (which can be determined with a non-empty deferral list) it can assume
+ * objects in the e_g deferral list involved at least three e_g transitions and
+ * are thus, safe, for physical deletion.
+ *
+ * Blocking semantics for epoch reclamation have additional restrictions.
+ * Though we only require three deferral lists, reasonable blocking semantics
+ * must be able to more gracefully handle bursty write work-loads which could
+ * easily cause e_g wrap-around if modulo-3 arithmetic is used. This allows for
+ * easy-to-trigger live-lock situations. The work-around to this is to not
+ * apply modulo arithmetic to e_g but only to deferral list indexing.
+ */
+#define CK_EPOCH_GRACE 3U
+
+enum {
+ CK_EPOCH_STATE_USED = 0,
+ CK_EPOCH_STATE_FREE = 1
+};
+
+CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
+ ck_epoch_record_container)
+CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
+ ck_epoch_entry_container)
+
+#define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
+
+bool
+_ck_epoch_delref(struct ck_epoch_record *record,
+ struct ck_epoch_section *section)
+{
+ struct ck_epoch_ref *current, *other;
+ unsigned int i = section->bucket;
+
+ current = &record->local.bucket[i];
+ current->count--;
+
+ if (current->count > 0)
+ return false;
+
+ /*
+ * If the current bucket no longer has any references, then
+ * determine whether we have already transitioned into a newer
+ * epoch. If so, then make sure to update our shared snapshot
+ * to allow for forward progress.
+ *
+ * If no other active bucket exists, then the record will go
+ * inactive in order to allow for forward progress.
+ */
+ other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK];
+ if (other->count > 0 &&
+ ((int)(current->epoch - other->epoch) < 0)) {
+ /*
+ * The other epoch value is actually the newest,
+ * transition to it.
+ */
+ ck_pr_store_uint(&record->epoch, other->epoch);
+ }
+
+ return true;
+}
+
+void
+_ck_epoch_addref(struct ck_epoch_record *record,
+ struct ck_epoch_section *section)
+{
+ struct ck_epoch *global = record->global;
+ struct ck_epoch_ref *ref;
+ unsigned int epoch, i;
+
+ epoch = ck_pr_load_uint(&global->epoch);
+ i = epoch & CK_EPOCH_SENSE_MASK;
+ ref = &record->local.bucket[i];
+
+ if (ref->count++ == 0) {
+#ifndef CK_MD_TSO
+ struct ck_epoch_ref *previous;
+
+ /*
+ * The system has already ticked. If another non-zero bucket
+ * exists, make sure to order our observations with respect
+ * to it. Otherwise, it is possible to acquire a reference
+ * from the previous epoch generation.
+ *
+ * On TSO architectures, the monoticity of the global counter
+ * and load-{store, load} ordering are sufficient to guarantee
+ * this ordering.
+ */
+ previous = &record->local.bucket[(i + 1) &
+ CK_EPOCH_SENSE_MASK];
+ if (previous->count > 0)
+ ck_pr_fence_acqrel();
+#endif /* !CK_MD_TSO */
+
+ /*
+ * If this is this is a new reference into the current
+ * bucket then cache the associated epoch value.
+ */
+ ref->epoch = epoch;
+ }
+
+ section->bucket = i;
+ return;
+}
+
+void
+ck_epoch_init(struct ck_epoch *global)
+{
+
+ ck_stack_init(&global->records);
+ global->epoch = 1;
+ global->n_free = 0;
+ ck_pr_fence_store();
+ return;
+}
+
+struct ck_epoch_record *
+ck_epoch_recycle(struct ck_epoch *global, void *ct)
+{
+ struct ck_epoch_record *record;
+ ck_stack_entry_t *cursor;
+ unsigned int state;
+
+ if (ck_pr_load_uint(&global->n_free) == 0)
+ return NULL;
+
+ CK_STACK_FOREACH(&global->records, cursor) {
+ record = ck_epoch_record_container(cursor);
+
+ if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
+ /* Serialize with respect to deferral list clean-up. */
+ ck_pr_fence_load();
+ state = ck_pr_fas_uint(&record->state,
+ CK_EPOCH_STATE_USED);
+ if (state == CK_EPOCH_STATE_FREE) {
+ ck_pr_dec_uint(&global->n_free);
+ ck_pr_store_ptr(&record->ct, ct);
+
+ /*
+ * The context pointer is ordered by a
+ * subsequent protected section.
+ */
+ return record;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void
+ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record,
+ void *ct)
+{
+ size_t i;
+
+ record->global = global;
+ record->state = CK_EPOCH_STATE_USED;
+ record->active = 0;
+ record->epoch = 0;
+ record->n_dispatch = 0;
+ record->n_peak = 0;
+ record->n_pending = 0;
+ record->ct = ct;
+ memset(&record->local, 0, sizeof record->local);
+
+ for (i = 0; i < CK_EPOCH_LENGTH; i++)
+ ck_stack_init(&record->pending[i]);
+
+ ck_pr_fence_store();
+ ck_stack_push_upmc(&global->records, &record->record_next);
+ return;
+}
+
+void
+ck_epoch_unregister(struct ck_epoch_record *record)
+{
+ struct ck_epoch *global = record->global;
+ size_t i;
+
+ record->active = 0;
+ record->epoch = 0;
+ record->n_dispatch = 0;
+ record->n_peak = 0;
+ record->n_pending = 0;
+ memset(&record->local, 0, sizeof record->local);
+
+ for (i = 0; i < CK_EPOCH_LENGTH; i++)
+ ck_stack_init(&record->pending[i]);
+
+ ck_pr_store_ptr(&record->ct, NULL);
+ ck_pr_fence_store();
+ ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
+ ck_pr_inc_uint(&global->n_free);
+ return;
+}
+
+static struct ck_epoch_record *
+ck_epoch_scan(struct ck_epoch *global,
+ struct ck_epoch_record *cr,
+ unsigned int epoch,
+ bool *af)
+{
+ ck_stack_entry_t *cursor;
+
+ if (cr == NULL) {
+ cursor = CK_STACK_FIRST(&global->records);
+ *af = false;
+ } else {
+ cursor = &cr->record_next;
+ *af = true;
+ }
+
+ while (cursor != NULL) {
+ unsigned int state, active;
+
+ cr = ck_epoch_record_container(cursor);
+
+ state = ck_pr_load_uint(&cr->state);
+ if (state & CK_EPOCH_STATE_FREE) {
+ cursor = CK_STACK_NEXT(cursor);
+ continue;
+ }
+
+ active = ck_pr_load_uint(&cr->active);
+ *af |= active;
+
+ if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
+ return cr;
+
+ cursor = CK_STACK_NEXT(cursor);
+ }
+
+ return NULL;
+}
+
+static void
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
+{
+ unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
+ ck_stack_entry_t *head, *next, *cursor;
+ unsigned int n_pending, n_peak;
+ unsigned int i = 0;
+
+ head = ck_stack_batch_pop_upmc(&record->pending[epoch]);
+ for (cursor = head; cursor != NULL; cursor = next) {
+ struct ck_epoch_entry *entry =
+ ck_epoch_entry_container(cursor);
+
+ next = CK_STACK_NEXT(cursor);
+ if (deferred != NULL)
+ ck_stack_push_spnc(deferred, &entry->stack_entry);
+ else
+ entry->function(entry);
+ i++;
+ }
+
+ n_peak = ck_pr_load_uint(&record->n_peak);
+ n_pending = ck_pr_load_uint(&record->n_pending);
+
+ /* We don't require accuracy around peak calculation. */
+ if (n_pending > n_peak)
+ ck_pr_store_uint(&record->n_peak, n_peak);
+
+ if (i > 0) {
+ ck_pr_add_uint(&record->n_dispatch, i);
+ ck_pr_sub_uint(&record->n_pending, i);
+ }
+
+ return;
+}
+
+/*
+ * Reclaim all objects associated with a record.
+ */
+void
+ck_epoch_reclaim(struct ck_epoch_record *record)
+{
+ unsigned int epoch;
+
+ for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+ ck_epoch_dispatch(record, epoch, NULL);
+
+ return;
+}
+
+CK_CC_FORCE_INLINE static void
+epoch_block(struct ck_epoch *global, struct ck_epoch_record *cr,
+ ck_epoch_wait_cb_t *cb, void *ct)
+{
+
+ if (cb != NULL)
+ cb(global, cr, ct);
+
+ return;
+}
+
+/*
+ * This function must not be called with-in read section.
+ */
+void
+ck_epoch_synchronize_wait(struct ck_epoch *global,
+ ck_epoch_wait_cb_t *cb, void *ct)
+{
+ struct ck_epoch_record *cr;
+ unsigned int delta, epoch, goal, i;
+ bool active;
+
+ ck_pr_fence_memory();
+
+ /*
+ * The observation of the global epoch must be ordered with respect to
+ * all prior operations. The re-ordering of loads is permitted given
+ * monoticity of global epoch counter.
+ *
+ * If UINT_MAX concurrent mutations were to occur then it is possible
+ * to encounter an ABA-issue. If this is a concern, consider tuning
+ * write-side concurrency.
+ */
+ delta = epoch = ck_pr_load_uint(&global->epoch);
+ goal = epoch + CK_EPOCH_GRACE;
+
+ for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
+ bool r;
+
+ /*
+ * Determine whether all threads have observed the current
+ * epoch with respect to the updates on invocation.
+ */
+ while (cr = ck_epoch_scan(global, cr, delta, &active),
+ cr != NULL) {
+ unsigned int e_d;
+
+ ck_pr_stall();
+
+ /*
+ * Another writer may have already observed a grace
+ * period.
+ */
+ e_d = ck_pr_load_uint(&global->epoch);
+ if (e_d == delta) {
+ epoch_block(global, cr, cb, ct);
+ continue;
+ }
+
+ /*
+ * If the epoch has been updated, we may have already
+ * met our goal.
+ */
+ delta = e_d;
+ if ((goal > epoch) & (delta >= goal))
+ goto leave;
+
+ epoch_block(global, cr, cb, ct);
+
+ /*
+ * If the epoch has been updated, then a grace period
+ * requires that all threads are observed idle at the
+ * same epoch.
+ */
+ cr = NULL;
+ }
+
+ /*
+ * If we have observed all threads as inactive, then we assume
+ * we are at a grace period.
+ */
+ if (active == false)
+ break;
+
+ /*
+ * Increment current epoch. CAS semantics are used to eliminate
+ * increment operations for synchronization that occurs for the
+ * same global epoch value snapshot.
+ *
+ * If we can guarantee there will only be one active barrier or
+ * epoch tick at a given time, then it is sufficient to use an
+ * increment operation. In a multi-barrier workload, however,
+ * it is possible to overflow the epoch value if we apply
+ * modulo-3 arithmetic.
+ */
+ r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
+ &delta);
+
+ /* Order subsequent thread active checks. */
+ ck_pr_fence_atomic_load();
+
+ /*
+ * If CAS has succeeded, then set delta to latest snapshot.
+ * Otherwise, we have just acquired latest snapshot.
+ */
+ delta = delta + r;
+ }
+
+ /*
+ * A majority of use-cases will not require full barrier semantics.
+ * However, if non-temporal instructions are used, full barrier
+ * semantics are necessary.
+ */
+leave:
+ ck_pr_fence_memory();
+ return;
+}
+
+void
+ck_epoch_synchronize(struct ck_epoch_record *record)
+{
+
+ ck_epoch_synchronize_wait(record->global, NULL, NULL);
+ return;
+}
+
+void
+ck_epoch_barrier(struct ck_epoch_record *record)
+{
+
+ ck_epoch_synchronize(record);
+ ck_epoch_reclaim(record);
+ return;
+}
+
+void
+ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
+ void *ct)
+{
+
+ ck_epoch_synchronize_wait(record->global, cb, ct);
+ ck_epoch_reclaim(record);
+ return;
+}
+
+/*
+ * It may be worth it to actually apply these deferral semantics to an epoch
+ * that was observed at ck_epoch_call time. The problem is that the latter
+ * would require a full fence.
+ *
+ * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
+ * There are cases where it will fail to reclaim as early as it could. If this
+ * becomes a problem, we could actually use a heap for epoch buckets but that
+ * is far from ideal too.
+ */
+bool
+ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
+{
+ bool active;
+ unsigned int epoch;
+ struct ck_epoch_record *cr = NULL;
+ struct ck_epoch *global = record->global;
+
+ epoch = ck_pr_load_uint(&global->epoch);
+
+ /* Serialize epoch snapshots with respect to global epoch. */
+ ck_pr_fence_memory();
+ cr = ck_epoch_scan(global, cr, epoch, &active);
+ if (cr != NULL) {
+ record->epoch = epoch;
+ return false;
+ }
+
+ /* We are at a grace period if all threads are inactive. */
+ if (active == false) {
+ record->epoch = epoch;
+ for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+ ck_epoch_dispatch(record, epoch, deferred);
+
+ return true;
+ }
+
+ /* If an active thread exists, rely on epoch observation. */
+ (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
+
+ ck_epoch_dispatch(record, epoch + 1, deferred);
+ return true;
+}
+
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+
+ return ck_epoch_poll_deferred(record, NULL);
+}