-rw-r--r--	freebsd/sys/dev/nvme/nvme.c	385
-rw-r--r--	freebsd/sys/dev/nvme/nvme_ctrlr.c	1402
-rw-r--r--	freebsd/sys/dev/nvme/nvme_ctrlr_cmd.c	329
-rw-r--r--	freebsd/sys/dev/nvme/nvme_ns.c	627
-rw-r--r--	freebsd/sys/dev/nvme/nvme_ns_cmd.c	208
-rw-r--r--	freebsd/sys/dev/nvme/nvme_pci.c	358
-rw-r--r--	freebsd/sys/dev/nvme/nvme_private.h	562
-rw-r--r--	freebsd/sys/dev/nvme/nvme_qpair.c	1266
-rw-r--r--	freebsd/sys/dev/nvme/nvme_sysctl.c	368
-rw-r--r--	freebsd/sys/dev/nvme/nvme_util.c	65
10 files changed, 5570 insertions, 0 deletions
diff --git a/freebsd/sys/dev/nvme/nvme.c b/freebsd/sys/dev/nvme/nvme.c
new file mode 100644
index 00000000..a9ac0e77
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme.c
@@ -0,0 +1,385 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2014 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include <vm/uma.h>
+
+#include "nvme_private.h"
+
+struct nvme_consumer {
+ uint32_t id;
+ nvme_cons_ns_fn_t ns_fn;
+ nvme_cons_ctrlr_fn_t ctrlr_fn;
+ nvme_cons_async_fn_t async_fn;
+ nvme_cons_fail_fn_t fail_fn;
+};
+
+struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
+#define INVALID_CONSUMER_ID 0xFFFF
+
+uma_zone_t nvme_request_zone;
+int32_t nvme_retry_count;
+
+MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");
+
+devclass_t nvme_devclass;
+
+static void
+nvme_init(void)
+{
+ uint32_t i;
+
+ nvme_request_zone = uma_zcreate("nvme_request",
+ sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++)
+ nvme_consumer[i].id = INVALID_CONSUMER_ID;
+}
+
+SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
+
+static void
+nvme_uninit(void)
+{
+ uma_zdestroy(nvme_request_zone);
+}
+
+SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);
+
+int
+nvme_shutdown(device_t dev)
+{
+ struct nvme_controller *ctrlr;
+
+ ctrlr = DEVICE2SOFTC(dev);
+ nvme_ctrlr_shutdown(ctrlr);
+
+ return (0);
+}
+
+void
+nvme_dump_command(struct nvme_command *cmd)
+{
+
+ printf(
+"opc:%x f:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
+ cmd->opc, cmd->fuse, cmd->cid, le32toh(cmd->nsid),
+ cmd->rsvd2, cmd->rsvd3,
+ (uintmax_t)le64toh(cmd->mptr), (uintmax_t)le64toh(cmd->prp1), (uintmax_t)le64toh(cmd->prp2),
+ le32toh(cmd->cdw10), le32toh(cmd->cdw11), le32toh(cmd->cdw12),
+ le32toh(cmd->cdw13), le32toh(cmd->cdw14), le32toh(cmd->cdw15));
+}
+
+void
+nvme_dump_completion(struct nvme_completion *cpl)
+{
+ uint8_t p, sc, sct, m, dnr;
+ uint16_t status;
+
+ status = le16toh(cpl->status);
+
+ p = NVME_STATUS_GET_P(status);
+ sc = NVME_STATUS_GET_SC(status);
+ sct = NVME_STATUS_GET_SCT(status);
+ m = NVME_STATUS_GET_M(status);
+ dnr = NVME_STATUS_GET_DNR(status);
+
+ printf("cdw0:%08x sqhd:%04x sqid:%04x "
+ "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
+ le32toh(cpl->cdw0), le16toh(cpl->sqhd), le16toh(cpl->sqid),
+ cpl->cid, p, sc, sct, m, dnr);
+}
+
+int
+nvme_attach(device_t dev)
+{
+ struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+ int status;
+
+ status = nvme_ctrlr_construct(ctrlr, dev);
+
+ if (status != 0) {
+ nvme_ctrlr_destruct(ctrlr, dev);
+ return (status);
+ }
+
+ /*
+ * Reset controller twice to ensure we do a transition from cc.en==1 to
+ * cc.en==0. This is because we don't really know what status the
+	 * controller was left in when boot handed off to the OS. Linux doesn't do
+ * this, however. If we adopt that policy, see also nvme_ctrlr_resume().
+ */
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ if (status != 0) {
+ nvme_ctrlr_destruct(ctrlr, dev);
+ return (status);
+ }
+
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ if (status != 0) {
+ nvme_ctrlr_destruct(ctrlr, dev);
+ return (status);
+ }
+
+ ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
+ ctrlr->config_hook.ich_arg = ctrlr;
+
+ config_intrhook_establish(&ctrlr->config_hook);
+
+ return (0);
+}
+
+int
+nvme_detach(device_t dev)
+{
+ struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+
+ nvme_ctrlr_destruct(ctrlr, dev);
+ return (0);
+}
+
+static void
+nvme_notify(struct nvme_consumer *cons,
+ struct nvme_controller *ctrlr)
+{
+ struct nvme_namespace *ns;
+ void *ctrlr_cookie;
+ int cmpset, ns_idx;
+
+ /*
+ * The consumer may register itself after the nvme devices
+ * have registered with the kernel, but before the
+ * driver has completed initialization. In that case,
+ * return here, and when initialization completes, the
+ * controller will make sure the consumer gets notified.
+ */
+ if (!ctrlr->is_initialized)
+ return;
+
+ cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);
+ if (cmpset == 0)
+ return;
+
+ if (cons->ctrlr_fn != NULL)
+ ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
+ else
+ ctrlr_cookie = (void *)(uintptr_t)0xdeadc0dedeadc0de;
+ ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
+
+ /* ctrlr_fn has failed. Nothing to notify here any more. */
+ if (ctrlr_cookie == NULL)
+ return;
+
+ if (ctrlr->is_failed) {
+ ctrlr->cons_cookie[cons->id] = NULL;
+ if (cons->fail_fn != NULL)
+ (*cons->fail_fn)(ctrlr_cookie);
+ /*
+ * Do not notify consumers about the namespaces of a
+ * failed controller.
+ */
+ return;
+ }
+ for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
+ ns = &ctrlr->ns[ns_idx];
+ if (ns->data.nsze == 0)
+ continue;
+ if (cons->ns_fn != NULL)
+ ns->cons_cookie[cons->id] =
+ (*cons->ns_fn)(ns, ctrlr_cookie);
+ }
+}
+
+void
+nvme_notify_new_controller(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
+ nvme_notify(&nvme_consumer[i], ctrlr);
+ }
+ }
+}
+
+static void
+nvme_notify_new_consumer(struct nvme_consumer *cons)
+{
+ device_t *devlist;
+ struct nvme_controller *ctrlr;
+ int dev_idx, devcount;
+
+ if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
+ return;
+
+ for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
+ ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
+ nvme_notify(cons, ctrlr);
+ }
+
+ free(devlist, M_TEMP);
+}
+
+void
+nvme_notify_async_consumers(struct nvme_controller *ctrlr,
+ const struct nvme_completion *async_cpl,
+ uint32_t log_page_id, void *log_page_buffer,
+ uint32_t log_page_size)
+{
+ struct nvme_consumer *cons;
+ void *ctrlr_cookie;
+ uint32_t i;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ cons = &nvme_consumer[i];
+ if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL &&
+ (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
+ (*cons->async_fn)(ctrlr_cookie, async_cpl,
+ log_page_id, log_page_buffer, log_page_size);
+ }
+ }
+}
+
+void
+nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
+{
+ struct nvme_consumer *cons;
+ void *ctrlr_cookie;
+ uint32_t i;
+
+ /*
+ * This controller failed during initialization (i.e. IDENTIFY
+ * command failed or timed out). Do not notify any nvme
+ * consumers of the failure here, since the consumer does not
+ * even know about the controller yet.
+ */
+ if (!ctrlr->is_initialized)
+ return;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ cons = &nvme_consumer[i];
+ if (cons->id != INVALID_CONSUMER_ID &&
+ (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL) {
+ ctrlr->cons_cookie[i] = NULL;
+ if (cons->fail_fn != NULL)
+ cons->fail_fn(ctrlr_cookie);
+ }
+ }
+}
+
+void
+nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
+{
+ struct nvme_consumer *cons;
+ struct nvme_namespace *ns = &ctrlr->ns[nsid - 1];
+ void *ctrlr_cookie;
+ uint32_t i;
+
+ if (!ctrlr->is_initialized)
+ return;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ cons = &nvme_consumer[i];
+ if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL &&
+ (ctrlr_cookie = ctrlr->cons_cookie[i]) != NULL)
+ ns->cons_cookie[i] = (*cons->ns_fn)(ns, ctrlr_cookie);
+ }
+}
+
+struct nvme_consumer *
+nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
+ nvme_cons_async_fn_t async_fn,
+ nvme_cons_fail_fn_t fail_fn)
+{
+ int i;
+
+ /*
+ * TODO: add locking around consumer registration.
+ */
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++)
+ if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
+ nvme_consumer[i].id = i;
+ nvme_consumer[i].ns_fn = ns_fn;
+ nvme_consumer[i].ctrlr_fn = ctrlr_fn;
+ nvme_consumer[i].async_fn = async_fn;
+ nvme_consumer[i].fail_fn = fail_fn;
+
+ nvme_notify_new_consumer(&nvme_consumer[i]);
+ return (&nvme_consumer[i]);
+ }
+
+ printf("nvme(4): consumer not registered - no slots available\n");
+ return (NULL);
+}
+
+void
+nvme_unregister_consumer(struct nvme_consumer *consumer)
+{
+
+ consumer->id = INVALID_CONSUMER_ID;
+}
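+
+/*
+ * Illustrative usage (not part of this file): a consumer such as nvd(4)
+ * registers its callbacks once at load time and keeps the returned handle
+ * so it can unregister on unload.  The callback names below are
+ * placeholders:
+ *
+ *	consumer = nvme_register_consumer(my_ns_attach, my_ctrlr_attach,
+ *	    NULL, my_ctrlr_fail);
+ *	...
+ *	nvme_unregister_consumer(consumer);
+ */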
+
+void
+nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+
+ /*
+ * Copy status into the argument passed by the caller, so that
+ * the caller can check the status to determine if the
+	 * request passed or failed.
+ */
+ memcpy(&status->cpl, cpl, sizeof(*cpl));
+ atomic_store_rel_int(&status->done, 1);
+}
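+
+/*
+ * For reference, the synchronous pattern built around this callback (used
+ * throughout nvme_ctrlr.c, e.g. in nvme_ctrlr_identify()) is:
+ *
+ *	struct nvme_completion_poll_status status;
+ *
+ *	status.done = 0;
+ *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
+ *	    nvme_completion_poll_cb, &status);
+ *	nvme_completion_poll(&status);
+ *	if (nvme_completion_is_error(&status.cpl))
+ *		... handle the error ...
+ */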
+
+static int
+nvme_modevent(module_t mod __unused, int type __unused, void *argp __unused)
+{
+ return (0);
+}
+
+static moduledata_t nvme_mod = {
+ "nvme",
+ nvme_modevent,
+ 0
+};
+
+DECLARE_MODULE(nvme, nvme_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(nvme, 1);
+MODULE_DEPEND(nvme, cam, 1, 1, 1);
diff --git a/freebsd/sys/dev/nvme/nvme_ctrlr.c b/freebsd/sys/dev/nvme/nvme_ctrlr.c
new file mode 100644
index 00000000..e05cd961
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_ctrlr.c
@@ -0,0 +1,1402 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2016 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_cam.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/uio.h>
+#include <sys/endian.h>
+
+#include "nvme_private.h"
+
+#define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
+
+static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+ struct nvme_async_event_request *aer);
+
+static int
+nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
+{
+ struct nvme_qpair *qpair;
+ uint32_t num_entries;
+ int error;
+
+ qpair = &ctrlr->adminq;
+
+ num_entries = NVME_ADMIN_ENTRIES;
+ TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
+ /*
+ * If admin_entries was overridden to an invalid value, revert it
+ * back to our default value.
+ */
+ if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
+ num_entries > NVME_MAX_ADMIN_ENTRIES) {
+ nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
+ "specified\n", num_entries);
+ num_entries = NVME_ADMIN_ENTRIES;
+ }
+
+ /*
+ * The admin queue's max xfer size is treated differently than the
+ * max I/O xfer size. 16KB is sufficient here - maybe even less?
+ */
+ error = nvme_qpair_construct(qpair,
+ 0, /* qpair ID */
+ 0, /* vector */
+ num_entries,
+ NVME_ADMIN_TRACKERS,
+ ctrlr);
+ return (error);
+}
+
+static int
+nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_qpair *qpair;
+ uint32_t cap_lo;
+ uint16_t mqes;
+ int i, error, num_entries, num_trackers;
+
+ num_entries = NVME_IO_ENTRIES;
+ TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
+
+ /*
+ * NVMe spec sets a hard limit of 64K max entries, but
+ * devices may specify a smaller limit, so we need to check
+ * the MQES field in the capabilities register.
+ */
+ cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
+ mqes = NVME_CAP_LO_MQES(cap_lo);
+ num_entries = min(num_entries, mqes + 1);
+
+ num_trackers = NVME_IO_TRACKERS;
+ TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
+
+ num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
+ num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
+ /*
+ * No need to have more trackers than entries in the submit queue.
+ * Note also that for a queue size of N, we can only have (N-1)
+ * commands outstanding, hence the "-1" here.
+ */
+ num_trackers = min(num_trackers, (num_entries-1));
+
+ /*
+ * Our best estimate for the maximum number of I/Os that we should
+ * normally have in flight at one time. This should be viewed as a hint,
+ * not a hard limit and will need to be revisited when the upper layers
+	 * of the storage system grow multi-queue support.
+ */
+ ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
+
+ /*
+ * This was calculated previously when setting up interrupts, but
+ * a controller could theoretically support fewer I/O queues than
+ * MSI-X vectors. So calculate again here just to be safe.
+ */
+ ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
+
+ ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
+ M_NVME, M_ZERO | M_WAITOK);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ qpair = &ctrlr->ioq[i];
+
+ /*
+ * Admin queue has ID=0. IO queues start at ID=1 -
+ * hence the 'i+1' here.
+ *
+ * For I/O queues, use the controller-wide max_xfer_size
+ * calculated in nvme_attach().
+ */
+ error = nvme_qpair_construct(qpair,
+ i+1, /* qpair ID */
+ ctrlr->msix_enabled ? i+1 : 0, /* vector */
+ num_entries,
+ num_trackers,
+ ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * Do not bother binding interrupts if we only have one I/O
+ * interrupt thread for this controller.
+ */
+ if (ctrlr->num_io_queues > 1)
+ bus_bind_intr(ctrlr->dev, qpair->res,
+ i * ctrlr->num_cpus_per_ioq);
+ }
+
+ return (0);
+}
+
+static void
+nvme_ctrlr_fail(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ ctrlr->is_failed = TRUE;
+ nvme_admin_qpair_disable(&ctrlr->adminq);
+ nvme_qpair_fail(&ctrlr->adminq);
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ nvme_io_qpair_disable(&ctrlr->ioq[i]);
+ nvme_qpair_fail(&ctrlr->ioq[i]);
+ }
+ }
+ nvme_notify_fail_consumers(ctrlr);
+}
+
+void
+nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+
+ mtx_lock(&ctrlr->lock);
+ STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
+ mtx_unlock(&ctrlr->lock);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
+}
+
+static void
+nvme_ctrlr_fail_req_task(void *arg, int pending)
+{
+ struct nvme_controller *ctrlr = arg;
+ struct nvme_request *req;
+
+ mtx_lock(&ctrlr->lock);
+ while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
+ STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
+ mtx_unlock(&ctrlr->lock);
+ nvme_qpair_manual_complete_request(req->qpair, req,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
+ mtx_lock(&ctrlr->lock);
+ }
+ mtx_unlock(&ctrlr->lock);
+}
+
+static int
+nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
+{
+ int ms_waited;
+ uint32_t csts;
+
+ ms_waited = 0;
+ while (1) {
+ csts = nvme_mmio_read_4(ctrlr, csts);
+ if (csts == 0xffffffff) /* Hot unplug. */
+ return (ENXIO);
+ if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
+ == desired_val)
+ break;
+ if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
+ nvme_printf(ctrlr, "controller ready did not become %d "
+ "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
+ return (ENXIO);
+ }
+ DELAY(1000);
+ }
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_disable(struct nvme_controller *ctrlr)
+{
+ uint32_t cc;
+ uint32_t csts;
+ uint8_t en, rdy;
+ int err;
+
+ cc = nvme_mmio_read_4(ctrlr, cc);
+ csts = nvme_mmio_read_4(ctrlr, csts);
+
+ en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
+ rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
+
+ /*
+ * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
+ * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
+	 * CSTS.RDY is 0 "has undefined results". So make sure that CSTS.RDY
+ * isn't the desired value. Short circuit if we're already disabled.
+ */
+ if (en == 1) {
+ if (rdy == 0) {
+ /* EN == 1, wait for RDY == 1 or fail */
+ err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
+ if (err != 0)
+ return (err);
+ }
+ } else {
+		/* EN == 0 already; wait for RDY == 0. */
+ if (rdy == 0)
+ return (0);
+ else
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
+ }
+
+ cc &= ~NVME_CC_REG_EN_MASK;
+ nvme_mmio_write_4(ctrlr, cc, cc);
+ /*
+ * Some drives have issues with accessing the mmio after we
+ * disable, so delay for a bit after we write the bit to
+ * cope with these issues.
+ */
+ if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
+ pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
+}
+
+static int
+nvme_ctrlr_enable(struct nvme_controller *ctrlr)
+{
+ uint32_t cc;
+ uint32_t csts;
+ uint32_t aqa;
+ uint32_t qsize;
+ uint8_t en, rdy;
+ int err;
+
+ cc = nvme_mmio_read_4(ctrlr, cc);
+ csts = nvme_mmio_read_4(ctrlr, csts);
+
+ en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
+ rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
+
+ /*
+ * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
+ */
+ if (en == 1) {
+ if (rdy == 1)
+ return (0);
+ else
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
+ } else {
+		/* EN == 0 already; wait for RDY == 0 or fail. */
+ err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
+ if (err != 0)
+ return (err);
+ }
+
+ nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
+ DELAY(5000);
+ nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
+ DELAY(5000);
+
+ /* acqs and asqs are 0-based. */
+ qsize = ctrlr->adminq.num_entries - 1;
+
+ aqa = 0;
+ aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
+ aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
+ nvme_mmio_write_4(ctrlr, aqa, aqa);
+ DELAY(5000);
+
+ /* Initialization values for CC */
+ cc = 0;
+ cc |= 1 << NVME_CC_REG_EN_SHIFT;
+ cc |= 0 << NVME_CC_REG_CSS_SHIFT;
+ cc |= 0 << NVME_CC_REG_AMS_SHIFT;
+ cc |= 0 << NVME_CC_REG_SHN_SHIFT;
+ cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
+ cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
+
+ /* This evaluates to 0, which is according to spec. */
+ cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
+
+ nvme_mmio_write_4(ctrlr, cc, cc);
+
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
+}
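+
+/*
+ * For reference: with a 4 KB PAGE_SIZE and the standard register layout
+ * assumed by the shift constants above (EN in bit 0, IOSQES in bits 19:16,
+ * IOCQES in bits 23:20), the CC value written by nvme_ctrlr_enable() works
+ * out to 0x00460001.
+ */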
+
+static void
+nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ nvme_admin_qpair_disable(&ctrlr->adminq);
+ /*
+ * I/O queues are not allocated before the initial HW
+ * reset, so do not try to disable them. Use is_initialized
+ * to determine if this is the initial HW reset.
+ */
+ if (ctrlr->is_initialized) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_disable(&ctrlr->ioq[i]);
+ }
+}
+
+int
+nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
+{
+ int err;
+
+ nvme_ctrlr_disable_qpairs(ctrlr);
+
+ DELAY(100*1000);
+
+ err = nvme_ctrlr_disable(ctrlr);
+ if (err != 0)
+ return err;
+ return (nvme_ctrlr_enable(ctrlr));
+}
+
+void
+nvme_ctrlr_reset(struct nvme_controller *ctrlr)
+{
+ int cmpset;
+
+ cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
+
+ if (cmpset == 0 || ctrlr->is_failed)
+ /*
+ * Controller is already resetting or has failed. Return
+ * immediately since there is no need to kick off another
+ * reset in these cases.
+ */
+ return;
+
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
+}
+
+static int
+nvme_ctrlr_identify(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+
+ status.done = 0;
+ nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
+ return (ENXIO);
+ }
+
+ /* Convert data to host endian */
+ nvme_controller_data_swapbytes(&ctrlr->cdata);
+
+ /*
+ * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
+ * controller supports.
+ */
+ if (ctrlr->cdata.mdts > 0)
+ ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
+ ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ int cq_allocated, sq_allocated;
+
+ status.done = 0;
+ nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
+ return (ENXIO);
+ }
+
+ /*
+ * Data in cdw0 is 0-based.
+ * Lower 16-bits indicate number of submission queues allocated.
+ * Upper 16-bits indicate number of completion queues allocated.
+ */
+ sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
+ cq_allocated = (status.cpl.cdw0 >> 16) + 1;
+
+ /*
+ * Controller may allocate more queues than we requested,
+ * so use the minimum of the number requested and what was
+ * actually allocated.
+ */
+ ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
+ ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ struct nvme_qpair *qpair;
+ int i;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ qpair = &ctrlr->ioq[i];
+
+ status.done = 0;
+ nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
+ return (ENXIO);
+ }
+
+ status.done = 0;
+ nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ struct nvme_qpair *qpair;
+
+ for (int i = 0; i < ctrlr->num_io_queues; i++) {
+ qpair = &ctrlr->ioq[i];
+
+ status.done = 0;
+ nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
+ return (ENXIO);
+ }
+
+ status.done = 0;
+ nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
+{
+ struct nvme_namespace *ns;
+ uint32_t i;
+
+ for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
+ ns = &ctrlr->ns[i];
+ nvme_ns_construct(ns, i+1, ctrlr);
+ }
+
+ return (0);
+}
+
+static boolean_t
+is_log_page_id_valid(uint8_t page_id)
+{
+
+ switch (page_id) {
+ case NVME_LOG_ERROR:
+ case NVME_LOG_HEALTH_INFORMATION:
+ case NVME_LOG_FIRMWARE_SLOT:
+ case NVME_LOG_CHANGED_NAMESPACE:
+ case NVME_LOG_COMMAND_EFFECT:
+ case NVME_LOG_RES_NOTIFICATION:
+ case NVME_LOG_SANITIZE_STATUS:
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+static uint32_t
+nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
+{
+ uint32_t log_page_size;
+
+ switch (page_id) {
+ case NVME_LOG_ERROR:
+ log_page_size = min(
+ sizeof(struct nvme_error_information_entry) *
+ (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
+ break;
+ case NVME_LOG_HEALTH_INFORMATION:
+ log_page_size = sizeof(struct nvme_health_information_page);
+ break;
+ case NVME_LOG_FIRMWARE_SLOT:
+ log_page_size = sizeof(struct nvme_firmware_page);
+ break;
+ case NVME_LOG_CHANGED_NAMESPACE:
+ log_page_size = sizeof(struct nvme_ns_list);
+ break;
+ case NVME_LOG_COMMAND_EFFECT:
+ log_page_size = sizeof(struct nvme_command_effects_page);
+ break;
+ case NVME_LOG_RES_NOTIFICATION:
+ log_page_size = sizeof(struct nvme_res_notification_page);
+ break;
+ case NVME_LOG_SANITIZE_STATUS:
+ log_page_size = sizeof(struct nvme_sanitize_status_page);
+ break;
+ default:
+ log_page_size = 0;
+ break;
+ }
+
+ return (log_page_size);
+}
+
+static void
+nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
+ uint8_t state)
+{
+
+ if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
+ nvme_printf(ctrlr, "available spare space below threshold\n");
+
+ if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
+ nvme_printf(ctrlr, "temperature above threshold\n");
+
+ if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
+ nvme_printf(ctrlr, "device reliability degraded\n");
+
+ if (state & NVME_CRIT_WARN_ST_READ_ONLY)
+ nvme_printf(ctrlr, "media placed in read only mode\n");
+
+ if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
+ nvme_printf(ctrlr, "volatile memory backup device failed\n");
+
+ if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
+ nvme_printf(ctrlr,
+ "unknown critical warning(s): state = 0x%02x\n", state);
+}
+
+static void
+nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_async_event_request *aer = arg;
+ struct nvme_health_information_page *health_info;
+ struct nvme_ns_list *nsl;
+ struct nvme_error_information_entry *err;
+ int i;
+
+ /*
+ * If the log page fetch for some reason completed with an error,
+ * don't pass log page data to the consumers. In practice, this case
+ * should never happen.
+ */
+ if (nvme_completion_is_error(cpl))
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, NULL, 0);
+ else {
+ /* Convert data to host endian */
+ switch (aer->log_page_id) {
+ case NVME_LOG_ERROR:
+ err = (struct nvme_error_information_entry *)aer->log_page_buffer;
+ for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
+ nvme_error_information_entry_swapbytes(err++);
+ break;
+ case NVME_LOG_HEALTH_INFORMATION:
+ nvme_health_information_page_swapbytes(
+ (struct nvme_health_information_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_FIRMWARE_SLOT:
+ nvme_firmware_page_swapbytes(
+ (struct nvme_firmware_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_CHANGED_NAMESPACE:
+ nvme_ns_list_swapbytes(
+ (struct nvme_ns_list *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_COMMAND_EFFECT:
+ nvme_command_effects_page_swapbytes(
+ (struct nvme_command_effects_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_RES_NOTIFICATION:
+ nvme_res_notification_page_swapbytes(
+ (struct nvme_res_notification_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_SANITIZE_STATUS:
+ nvme_sanitize_status_page_swapbytes(
+ (struct nvme_sanitize_status_page *)aer->log_page_buffer);
+ break;
+ case INTEL_LOG_TEMP_STATS:
+ intel_log_temp_stats_swapbytes(
+ (struct intel_log_temp_stats *)aer->log_page_buffer);
+ break;
+ default:
+ break;
+ }
+
+ if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
+ health_info = (struct nvme_health_information_page *)
+ aer->log_page_buffer;
+ nvme_ctrlr_log_critical_warnings(aer->ctrlr,
+ health_info->critical_warning);
+ /*
+ * Critical warnings reported through the
+ * SMART/health log page are persistent, so
+ * clear the associated bits in the async event
+ * config so that we do not receive repeated
+ * notifications for the same event.
+ */
+ aer->ctrlr->async_event_config &=
+ ~health_info->critical_warning;
+ nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
+ aer->ctrlr->async_event_config, NULL, NULL);
+ } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
+ !nvme_use_nvd) {
+ nsl = (struct nvme_ns_list *)aer->log_page_buffer;
+ for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
+ if (nsl->ns[i] > NVME_MAX_NAMESPACES)
+ break;
+ nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
+ }
+ }
+
+ /*
+ * Pass the cpl data from the original async event completion,
+ * not the log page fetch.
+ */
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
+ }
+
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
+}
+
+static void
+nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_async_event_request *aer = arg;
+
+ if (nvme_completion_is_error(cpl)) {
+ /*
+ * Do not retry failed async event requests. This avoids
+ * infinite loops where a new async event request is submitted
+ * to replace the one just failed, only to fail again and
+ * perpetuate the loop.
+ */
+ return;
+ }
+
+ /* Associated log page is in bits 23:16 of completion entry dw0. */
+ aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
+
+ nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
+ " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
+ aer->log_page_id);
+
+ if (is_log_page_id_valid(aer->log_page_id)) {
+ aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
+ aer->log_page_id);
+ memcpy(&aer->cpl, cpl, sizeof(*cpl));
+ nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
+ NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
+ aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
+ aer);
+ /* Wait to notify consumers until after log page is fetched. */
+ } else {
+ nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
+ NULL, 0);
+
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
+ }
+}
+
+static void
+nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+ struct nvme_async_event_request *aer)
+{
+ struct nvme_request *req;
+
+ aer->ctrlr = ctrlr;
+ req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
+ aer->req = req;
+
+ /*
+ * Disable timeout here, since asynchronous event requests should by
+ * nature never be timed out.
+ */
+ req->timeout = FALSE;
+ req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+static void
+nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ struct nvme_async_event_request *aer;
+ uint32_t i;
+
+ ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
+ NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
+ NVME_CRIT_WARN_ST_READ_ONLY |
+ NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
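+	/*
+	 * On NVMe 1.2 and later controllers, also request Namespace
+	 * Attribute (bit 8) and Firmware Activation (bit 9) notices;
+	 * that is what the 0x300 below enables.
+	 */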
+ if (ctrlr->cdata.ver >= NVME_REV(1, 2))
+ ctrlr->async_event_config |= 0x300;
+
+ status.done = 0;
+ nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
+ 0, NULL, 0, nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl) ||
+ (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
+ (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
+ nvme_printf(ctrlr, "temperature threshold not supported\n");
+ } else
+ ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
+
+ nvme_ctrlr_cmd_set_async_event_config(ctrlr,
+ ctrlr->async_event_config, NULL, NULL);
+
+ /* aerl is a zero-based value, so we need to add 1 here. */
+ ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
+
+ for (i = 0; i < ctrlr->num_aers; i++) {
+ aer = &ctrlr->aer[i];
+ nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
+ }
+}
+
+static void
+nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->int_coal_time = 0;
+ TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
+ &ctrlr->int_coal_time);
+
+ ctrlr->int_coal_threshold = 0;
+ TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
+ &ctrlr->int_coal_threshold);
+
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
+ ctrlr->int_coal_threshold, NULL, NULL);
+}
+
+static void
+nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
+{
+ struct nvme_controller *ctrlr = ctrlr_arg;
+ uint32_t old_num_io_queues;
+ int i;
+
+ /*
+ * Only reset adminq here when we are restarting the
+ * controller after a reset. During initialization,
+ * we have already submitted admin commands to get
+ * the number of I/O queues supported, so cannot reset
+ * the adminq again here.
+ */
+ if (resetting)
+ nvme_qpair_reset(&ctrlr->adminq);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_reset(&ctrlr->ioq[i]);
+
+ nvme_admin_qpair_enable(&ctrlr->adminq);
+
+ if (nvme_ctrlr_identify(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+	 * The number of qpairs is determined during controller initialization,
+ * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
+ * HW limit. We call SET_FEATURES again here so that it gets called
+ * after any reset for controllers that depend on the driver to
+	 * explicitly specify how many queues it will use. This value should
+ * never change between resets, so panic if somehow that does happen.
+ */
+ if (resetting) {
+ old_num_io_queues = ctrlr->num_io_queues;
+ if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (old_num_io_queues != ctrlr->num_io_queues) {
+ panic("num_io_queues changed from %u to %u",
+ old_num_io_queues, ctrlr->num_io_queues);
+ }
+ }
+
+ if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ nvme_ctrlr_configure_aer(ctrlr);
+ nvme_ctrlr_configure_int_coalescing(ctrlr);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_enable(&ctrlr->ioq[i]);
+}
+
+void
+nvme_ctrlr_start_config_hook(void *arg)
+{
+ struct nvme_controller *ctrlr = arg;
+
+ nvme_qpair_reset(&ctrlr->adminq);
+ nvme_admin_qpair_enable(&ctrlr->adminq);
+
+ if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
+ nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
+ nvme_ctrlr_start(ctrlr, false);
+ else
+ nvme_ctrlr_fail(ctrlr);
+
+ nvme_sysctl_initialize_ctrlr(ctrlr);
+ config_intrhook_disestablish(&ctrlr->config_hook);
+
+ ctrlr->is_initialized = 1;
+ nvme_notify_new_controller(ctrlr);
+}
+
+static void
+nvme_ctrlr_reset_task(void *arg, int pending)
+{
+ struct nvme_controller *ctrlr = arg;
+ int status;
+
+ nvme_printf(ctrlr, "resetting controller\n");
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ /*
+ * Use pause instead of DELAY, so that we yield to any nvme interrupt
+ * handlers on this CPU that were blocked on a qpair lock. We want
+ * all nvme interrupts completed before proceeding with restarting the
+ * controller.
+ *
+ * XXX - any way to guarantee the interrupt handlers have quiesced?
+ */
+ pause("nvmereset", hz / 10);
+ if (status == 0)
+ nvme_ctrlr_start(ctrlr, true);
+ else
+ nvme_ctrlr_fail(ctrlr);
+
+ atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
+}
+
+/*
+ * Poll all the queues enabled on the device for completion.
+ */
+void
+nvme_ctrlr_poll(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ nvme_qpair_process_completions(&ctrlr->adminq);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ if (ctrlr->ioq && ctrlr->ioq[i].cpl)
+ nvme_qpair_process_completions(&ctrlr->ioq[i]);
+}
+
+/*
+ * Poll the single-vector interrupt case: num_io_queues will be 1 and
+ * there's only a single vector. While we're polling, we mask further
+ * interrupts in the controller.
+ */
+void
+nvme_ctrlr_intx_handler(void *arg)
+{
+ struct nvme_controller *ctrlr = arg;
+
+ nvme_mmio_write_4(ctrlr, intms, 1);
+ nvme_ctrlr_poll(ctrlr);
+ nvme_mmio_write_4(ctrlr, intmc, 1);
+}
+
+static void
+nvme_pt_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_pt_command *pt = arg;
+ struct mtx *mtx = pt->driver_lock;
+ uint16_t status;
+
+ bzero(&pt->cpl, sizeof(pt->cpl));
+ pt->cpl.cdw0 = cpl->cdw0;
+
+ status = cpl->status;
+ status &= ~NVME_STATUS_P_MASK;
+ pt->cpl.status = status;
+
+ mtx_lock(mtx);
+ pt->driver_lock = NULL;
+ wakeup(pt);
+ mtx_unlock(mtx);
+}
+
+int
+nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ int is_admin_cmd)
+{
+ struct nvme_request *req;
+ struct mtx *mtx;
+ struct buf *buf = NULL;
+ int ret = 0;
+ vm_offset_t addr, end;
+
+ if (pt->len > 0) {
+ /*
+ * vmapbuf calls vm_fault_quick_hold_pages which only maps full
+ * pages. Ensure this request has fewer than MAXPHYS bytes when
+ * extended to full pages.
+ */
+ addr = (vm_offset_t)pt->buf;
+ end = round_page(addr + pt->len);
+ addr = trunc_page(addr);
+ if (end - addr > MAXPHYS)
+ return EIO;
+
+ if (pt->len > ctrlr->max_xfer_size) {
+ nvme_printf(ctrlr, "pt->len (%d) "
+ "exceeds max_xfer_size (%d)\n", pt->len,
+ ctrlr->max_xfer_size);
+ return EIO;
+ }
+ if (is_user_buffer) {
+ /*
+ * Ensure the user buffer is wired for the duration of
+ * this pass-through command.
+ */
+ PHOLD(curproc);
+ buf = getpbuf(NULL);
+ buf->b_data = pt->buf;
+ buf->b_bufsize = pt->len;
+ buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
+ if (vmapbuf(buf, 1) < 0) {
+ ret = EFAULT;
+ goto err;
+ }
+ req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
+ nvme_pt_done, pt);
+ } else
+ req = nvme_allocate_request_vaddr(pt->buf, pt->len,
+ nvme_pt_done, pt);
+ } else
+ req = nvme_allocate_request_null(nvme_pt_done, pt);
+
+ /* Assume user space already converted to little-endian */
+ req->cmd.opc = pt->cmd.opc;
+ req->cmd.fuse = pt->cmd.fuse;
+ req->cmd.rsvd2 = pt->cmd.rsvd2;
+ req->cmd.rsvd3 = pt->cmd.rsvd3;
+ req->cmd.cdw10 = pt->cmd.cdw10;
+ req->cmd.cdw11 = pt->cmd.cdw11;
+ req->cmd.cdw12 = pt->cmd.cdw12;
+ req->cmd.cdw13 = pt->cmd.cdw13;
+ req->cmd.cdw14 = pt->cmd.cdw14;
+ req->cmd.cdw15 = pt->cmd.cdw15;
+
+ req->cmd.nsid = htole32(nsid);
+
+ mtx = mtx_pool_find(mtxpool_sleep, pt);
+ pt->driver_lock = mtx;
+
+ if (is_admin_cmd)
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+ else
+ nvme_ctrlr_submit_io_request(ctrlr, req);
+
+ mtx_lock(mtx);
+ while (pt->driver_lock != NULL)
+ mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
+ mtx_unlock(mtx);
+
+err:
+ if (buf != NULL) {
+ relpbuf(buf, NULL);
+ PRELE(curproc);
+ }
+
+ return (ret);
+}
+
+static int
+nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
+ struct thread *td)
+{
+ struct nvme_controller *ctrlr;
+ struct nvme_pt_command *pt;
+
+ ctrlr = cdev->si_drv1;
+
+ switch (cmd) {
+ case NVME_RESET_CONTROLLER:
+ nvme_ctrlr_reset(ctrlr);
+ break;
+ case NVME_PASSTHROUGH_CMD:
+ pt = (struct nvme_pt_command *)arg;
+ return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
+ 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
+ case NVME_GET_NSID:
+ {
+ struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
+ strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
+ sizeof(gnsid->cdev));
+ gnsid->nsid = 0;
+ break;
+ }
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
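+
+/*
+ * Illustrative userland usage of the ioctls handled above (not part of this
+ * file); nvmecontrol(8) drives the controller in roughly this way:
+ *
+ *	struct nvme_pt_command pt = { ... };
+ *	fd = open("/dev/nvme0", O_RDWR);
+ *	ioctl(fd, NVME_RESET_CONTROLLER);
+ *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
+ */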
+
+static struct cdevsw nvme_ctrlr_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_ioctl = nvme_ctrlr_ioctl
+};
+
+int
+nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
+{
+ struct make_dev_args md_args;
+ uint32_t cap_lo;
+ uint32_t cap_hi;
+ uint32_t to;
+ uint8_t dstrd;
+ uint8_t mpsmin;
+ int status, timeout_period;
+
+ ctrlr->dev = dev;
+
+ mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
+
+ /*
+ * Software emulators may set the doorbell stride to something
+ * other than zero, but this driver is not set up to handle that.
+ */
+ cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
+ dstrd = NVME_CAP_HI_DSTRD(cap_hi);
+ if (dstrd != 0)
+ return (ENXIO);
+
+ mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
+ ctrlr->min_page_size = 1 << (12 + mpsmin);
+
+ /* Get ready timeout value from controller, in units of 500ms. */
+ cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
+ to = NVME_CAP_LO_TO(cap_lo) + 1;
+ ctrlr->ready_timeout_in_ms = to * 500;
+
+ timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
+ TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
+ timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
+ timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
+ ctrlr->timeout_period = timeout_period;
+
+ nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
+ TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
+
+ ctrlr->enable_aborts = 0;
+ TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
+
+ ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
+ if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
+ return (ENXIO);
+
+ ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
+
+ ctrlr->is_resetting = 0;
+ ctrlr->is_initialized = 0;
+ ctrlr->notification_sent = 0;
+ TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
+ TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
+ STAILQ_INIT(&ctrlr->fail_req);
+ ctrlr->is_failed = FALSE;
+
+ make_dev_args_init(&md_args);
+ md_args.mda_devsw = &nvme_ctrlr_cdevsw;
+ md_args.mda_uid = UID_ROOT;
+ md_args.mda_gid = GID_WHEEL;
+ md_args.mda_mode = 0600;
+ md_args.mda_unit = device_get_unit(dev);
+ md_args.mda_si_drv1 = (void *)ctrlr;
+ status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
+ device_get_unit(dev));
+ if (status != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+void
+nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
+{
+ int gone, i;
+
+ if (ctrlr->resource == NULL)
+ goto nores;
+
+ /*
+ * Check whether it is a hot unplug or a clean driver detach.
+ * If device is not there any more, skip any shutdown commands.
+ */
+ gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff);
+ if (gone)
+ nvme_ctrlr_fail(ctrlr);
+ else
+ nvme_notify_fail_consumers(ctrlr);
+
+ for (i = 0; i < NVME_MAX_NAMESPACES; i++)
+ nvme_ns_destruct(&ctrlr->ns[i]);
+
+ if (ctrlr->cdev)
+ destroy_dev(ctrlr->cdev);
+
+ if (ctrlr->is_initialized) {
+ if (!gone)
+ nvme_ctrlr_delete_qpairs(ctrlr);
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_destroy(&ctrlr->ioq[i]);
+ free(ctrlr->ioq, M_NVME);
+ nvme_admin_qpair_destroy(&ctrlr->adminq);
+ }
+
+ /*
+ * Notify the controller of a shutdown, even though this is due to
+ * a driver unload, not a system shutdown (this path is not invoked
+ * during shutdown). This ensures the controller receives a
+ * shutdown notification in case the system is shutdown before
+ * reloading the driver.
+ */
+ if (!gone)
+ nvme_ctrlr_shutdown(ctrlr);
+
+ if (!gone)
+ nvme_ctrlr_disable(ctrlr);
+
+ if (ctrlr->taskqueue)
+ taskqueue_free(ctrlr->taskqueue);
+
+ if (ctrlr->tag)
+ bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(ctrlr->res), ctrlr->res);
+
+ if (ctrlr->bar4_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->bar4_resource_id, ctrlr->bar4_resource);
+ }
+
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->resource_id, ctrlr->resource);
+
+nores:
+ mtx_destroy(&ctrlr->lock);
+}
+
+void
+nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
+{
+ uint32_t cc;
+ uint32_t csts;
+ int ticks = 0;
+
+ cc = nvme_mmio_read_4(ctrlr, cc);
+ cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
+ cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
+ nvme_mmio_write_4(ctrlr, cc, cc);
+
+ while (1) {
+ csts = nvme_mmio_read_4(ctrlr, csts);
+ if (csts == 0xffffffff) /* Hot unplug. */
+ break;
+ if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
+ break;
+ if (ticks++ > 5*hz) {
+ nvme_printf(ctrlr, "did not complete shutdown within"
+ " 5 seconds of notification\n");
+ break;
+ }
+ pause("nvme shn", 1);
+ }
+}
+
+void
+nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+
+ nvme_qpair_submit_request(&ctrlr->adminq, req);
+}
+
+void
+nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+ struct nvme_qpair *qpair;
+
+ qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
+ nvme_qpair_submit_request(qpair, req);
+}
+
+device_t
+nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
+{
+
+ return (ctrlr->dev);
+}
+
+const struct nvme_controller_data *
+nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
+{
+
+ return (&ctrlr->cdata);
+}
+
+int
+nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
+{
+ int to = hz;
+
+ /*
+ * Can't touch failed controllers, so it's already suspended.
+ */
+ if (ctrlr->is_failed)
+ return (0);
+
+ /*
+ * We don't want the reset taskqueue running, since it does similar
+ * things, so prevent it from running after we start. Wait for any reset
+ * that may have been started to complete. The reset process we follow
+ * will ensure that any new I/O will queue and be given to the hardware
+ * after we resume (though there should be none).
+ */
+ while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
+ pause("nvmesusp", 1);
+ if (to <= 0) {
+ nvme_printf(ctrlr,
+ "Competing reset task didn't finish. Try again later.\n");
+ return (EWOULDBLOCK);
+ }
+
+ /*
+ * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
+ * delete the hardware I/O queues, and then shutdown. This properly
+ * flushes any metadata the drive may have stored so it can survive
+ * having its power removed and prevents the unsafe shutdown count from
+	 * incrementing. Once we delete the qpairs, we have to disable them
+ * before shutting down. The delay is out of paranoia in
+ * nvme_ctrlr_hw_reset, and is repeated here (though we should have no
+ * pending I/O that the delay copes with).
+ */
+ nvme_ctrlr_delete_qpairs(ctrlr);
+ nvme_ctrlr_disable_qpairs(ctrlr);
+ DELAY(100*1000);
+ nvme_ctrlr_shutdown(ctrlr);
+
+ return (0);
+}
+
+int
+nvme_ctrlr_resume(struct nvme_controller *ctrlr)
+{
+
+ /*
+ * Can't touch failed controllers, so nothing to do to resume.
+ */
+ if (ctrlr->is_failed)
+ return (0);
+
+ /*
+ * Have to reset the hardware twice, just like we do on attach. See
+	 * nvme_attach() for why.
+ */
+ if (nvme_ctrlr_hw_reset(ctrlr) != 0)
+ goto fail;
+ if (nvme_ctrlr_hw_reset(ctrlr) != 0)
+ goto fail;
+
+ /*
+	 * Now that we've reset the hardware, we can restart the controller. Any
+ * I/O that was pending is requeued. Any admin commands are aborted with
+ * an error. Once we've restarted, take the controller out of reset.
+ */
+ nvme_ctrlr_start(ctrlr, true);
+ atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
+
+ return (0);
+fail:
+ /*
+ * Since we can't bring the controller out of reset, announce and fail
+ * the controller. However, we have to return success for the resume
+ * itself, due to questionable APIs.
+ */
+ nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
+ nvme_ctrlr_fail(ctrlr);
+ atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
+ return (0);
+}
diff --git a/freebsd/sys/dev/nvme/nvme_ctrlr_cmd.c b/freebsd/sys/dev/nvme/nvme_ctrlr_cmd.c
new file mode 100644
index 00000000..f5c1832c
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -0,0 +1,329 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "nvme_private.h"
+
+void
+nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ sizeof(struct nvme_controller_data), cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_IDENTIFY;
+
+ /*
+ * TODO: create an identify command data structure, which
+ * includes this CNS bit in cdw10.
+ */
+ cmd->cdw10 = htole32(1);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
+ void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_IDENTIFY;
+
+ /*
+ * TODO: create an identify command data structure
+ */
+ cmd->nsid = htole32(nsid);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_CREATE_IO_CQ;
+
+ /*
+ * TODO: create a create io completion queue command data
+ * structure.
+ */
+ cmd->cdw10 = htole32(((io_que->num_entries-1) << 16) | io_que->id);
+ /* 0x3 = interrupts enabled | physically contiguous */
+ cmd->cdw11 = htole32((vector << 16) | 0x3);
+ cmd->prp1 = htole64(io_que->cpl_bus_addr);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
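+
+/*
+ * Example of the dword packing above: a 256-entry completion queue with
+ * ID 1 on interrupt vector 1 yields cdw10 = (255 << 16) | 1 = 0x00ff0001
+ * and cdw11 = (1 << 16) | 0x3 = 0x00010003.
+ */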
+
+void
+nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_CREATE_IO_SQ;
+
+ /*
+ * TODO: create a create io submission queue command data
+ * structure.
+ */
+ cmd->cdw10 = htole32(((io_que->num_entries-1) << 16) | io_que->id);
+ /* 0x1 = physically contiguous */
+ cmd->cdw11 = htole32((io_que->id << 16) | 0x1);
+ cmd->prp1 = htole64(io_que->cmd_bus_addr);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DELETE_IO_CQ;
+
+ /*
+ * TODO: create a delete io completion queue command data
+ * structure.
+ */
+ cmd->cdw10 = htole32(io_que->id);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DELETE_IO_SQ;
+
+ /*
+ * TODO: create a delete io submission queue command data
+ * structure.
+ */
+ cmd->cdw10 = htole32(io_que->id);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_SET_FEATURES;
+ cmd->cdw10 = htole32(feature);
+ cmd->cdw11 = htole32(cdw11);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_GET_FEATURES;
+ cmd->cdw10 = htole32(feature);
+ cmd->cdw11 = htole32(cdw11);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
+ uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ uint32_t cdw11;
+
+ cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
+ nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
+ NULL, 0, cb_fn, cb_arg);
+}
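+
+/*
+ * Example: requesting 4 I/O queue pairs yields cdw11 = (3 << 16) | 3 =
+ * 0x00030003, since both the submission and completion queue counts in
+ * this feature are zero-based.
+ */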
+
+void
+nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
+ uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ uint32_t cdw11;
+
+ cdw11 = state;
+ nvme_ctrlr_cmd_set_feature(ctrlr,
+ NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
+ cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
+ uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ uint32_t cdw11;
+
+ if ((microseconds/100) >= 0x100) {
+ nvme_printf(ctrlr, "invalid coal time %d, disabling\n",
+ microseconds);
+ microseconds = 0;
+ threshold = 0;
+ }
+
+ if (threshold >= 0x100) {
+ nvme_printf(ctrlr, "invalid threshold %d, disabling\n",
+ threshold);
+ threshold = 0;
+ microseconds = 0;
+ }
+
+ cdw11 = ((microseconds/100) << 8) | threshold;
+ nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
+ NULL, 0, cb_fn, cb_arg);
+}
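+
+/*
+ * Example: microseconds = 1000 and threshold = 8 yields
+ * cdw11 = ((1000 / 100) << 8) | 8 = 0x0a08, i.e. an aggregation time of
+ * ten 100-microsecond units and an aggregation threshold of 8 completions.
+ */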
+
+void
+nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
+ uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_GET_LOG_PAGE;
+ cmd->nsid = htole32(nsid);
+ cmd->cdw10 = ((payload_size/sizeof(uint32_t)) - 1) << 16;
+ cmd->cdw10 |= log_page;
+ cmd->cdw10 = htole32(cmd->cdw10);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
+ struct nvme_error_information_entry *payload, uint32_t num_entries,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
+
+	/* The controller's error log page entry count (elpe) is zero-based. */
+ KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
+ ("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
+ num_entries, ctrlr->cdata.elpe + 1));
+
+ if (num_entries > (ctrlr->cdata.elpe + 1))
+ num_entries = ctrlr->cdata.elpe + 1;
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
+ NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
+ cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
+ uint32_t nsid, struct nvme_health_information_page *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
+ nsid, payload, sizeof(*payload), cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
+ struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
+ NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
+ cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
+ uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_ABORT;
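+	/*
+	 * cdw10: submission queue id in the low 16 bits, CID of the command
+	 * to abort in the high 16 bits.
+	 */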
+ cmd->cdw10 = htole32((cid << 16) | sqid);
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
diff --git a/freebsd/sys/dev/nvme/nvme_ns.c b/freebsd/sys/dev/nvme/nvme_ns.c
new file mode 100644
index 00000000..7cc59071
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_ns.c
@@ -0,0 +1,627 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <sys/fcntl.h>
+#include <sys/ioccom.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <dev/pci/pcivar.h>
+
+#include <geom/geom.h>
+
+#include "nvme_private.h"
+
+static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
+static void nvme_bio_child_done(void *arg,
+ const struct nvme_completion *cpl);
+static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size,
+ uint32_t alignment);
+static void nvme_free_child_bios(int num_bios,
+ struct bio **child_bios);
+static struct bio ** nvme_allocate_child_bios(int num_bios);
+static struct bio ** nvme_construct_child_bios(struct bio *bp,
+ uint32_t alignment,
+ int *num_bios);
+static int nvme_ns_split_bio(struct nvme_namespace *ns,
+ struct bio *bp,
+ uint32_t alignment);
+
+static int
+nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
+ struct thread *td)
+{
+ struct nvme_namespace *ns;
+ struct nvme_controller *ctrlr;
+ struct nvme_pt_command *pt;
+
+ ns = cdev->si_drv1;
+ ctrlr = ns->ctrlr;
+
+ switch (cmd) {
+ case NVME_IO_TEST:
+ case NVME_BIO_TEST:
+ nvme_ns_test(ns, cmd, arg);
+ break;
+ case NVME_PASSTHROUGH_CMD:
+ pt = (struct nvme_pt_command *)arg;
+ return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
+ 1 /* is_user_buffer */, 0 /* is_admin_cmd */));
+ case NVME_GET_NSID:
+ {
+ struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
+ strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
+ sizeof(gnsid->cdev));
+ gnsid->nsid = ns->id;
+ break;
+ }
+ case DIOCGMEDIASIZE:
+ *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
+ break;
+ case DIOCGSECTORSIZE:
+ *(u_int *)arg = nvme_ns_get_sector_size(ns);
+ break;
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
+
+static int
+nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
+ struct thread *td)
+{
+ int error = 0;
+
+ if (flags & FWRITE)
+ error = securelevel_gt(td->td_ucred, 0);
+
+ return (error);
+}
+
+static int
+nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
+ struct thread *td)
+{
+
+ return (0);
+}
+
+static void
+nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct bio *bp = arg;
+
+ /*
+ * TODO: add more extensive translation of NVMe status codes
+	 * to different bio error codes (e.g. EIO, EINVAL, etc.)
+ */
+ if (nvme_completion_is_error(cpl)) {
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ } else
+ bp->bio_resid = 0;
+
+ biodone(bp);
+}
+
+static void
+nvme_ns_strategy(struct bio *bp)
+{
+ struct nvme_namespace *ns;
+ int err;
+
+ ns = bp->bio_dev->si_drv1;
+ err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);
+
+ if (err) {
+ bp->bio_error = err;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ biodone(bp);
+ }
+
+}
+
+static struct cdevsw nvme_ns_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_DISK,
+ .d_read = physread,
+ .d_write = physwrite,
+ .d_open = nvme_ns_open,
+ .d_close = nvme_ns_close,
+ .d_strategy = nvme_ns_strategy,
+ .d_ioctl = nvme_ns_ioctl
+};
+
+uint32_t
+nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+uint32_t
+nvme_ns_get_sector_size(struct nvme_namespace *ns)
+{
+ uint8_t flbas_fmt, lbads;
+
+ flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
+ NVME_NS_DATA_FLBAS_FORMAT_MASK;
+ lbads = (ns->data.lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
+ NVME_NS_DATA_LBAF_LBADS_MASK;
+
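+	/*
+	 * LBADS is a power-of-two exponent: e.g. a value of 9 means 512-byte
+	 * sectors and 12 means 4 KiB sectors.
+	 */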
+ return (1 << lbads);
+}
+
+uint64_t
+nvme_ns_get_num_sectors(struct nvme_namespace *ns)
+{
+ return (ns->data.nsze);
+}
+
+uint64_t
+nvme_ns_get_size(struct nvme_namespace *ns)
+{
+ return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
+}
+
+uint32_t
+nvme_ns_get_flags(struct nvme_namespace *ns)
+{
+ return (ns->flags);
+}
+
+const char *
+nvme_ns_get_serial_number(struct nvme_namespace *ns)
+{
+ return ((const char *)ns->ctrlr->cdata.sn);
+}
+
+const char *
+nvme_ns_get_model_number(struct nvme_namespace *ns)
+{
+ return ((const char *)ns->ctrlr->cdata.mn);
+}
+
+const struct nvme_namespace_data *
+nvme_ns_get_data(struct nvme_namespace *ns)
+{
+
+ return (&ns->data);
+}
+
+uint32_t
+nvme_ns_get_stripesize(struct nvme_namespace *ns)
+{
+
+ if (((ns->data.nsfeat >> NVME_NS_DATA_NSFEAT_NPVALID_SHIFT) &
+ NVME_NS_DATA_NSFEAT_NPVALID_MASK) != 0 && ns->data.npwg != 0) {
+ return ((ns->data.npwg + 1) * nvme_ns_get_sector_size(ns));
+ }
+ return (ns->boundary);
+}
+
+static void
+nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
+{
+ struct bio *bp = arg;
+ nvme_cb_fn_t bp_cb_fn;
+
+ bp_cb_fn = bp->bio_driver1;
+
+ if (bp->bio_driver2)
+ free(bp->bio_driver2, M_NVME);
+
+ if (nvme_completion_is_error(status)) {
+ bp->bio_flags |= BIO_ERROR;
+ if (bp->bio_error == 0)
+ bp->bio_error = EIO;
+ }
+
+ if ((bp->bio_flags & BIO_ERROR) == 0)
+ bp->bio_resid = 0;
+ else
+ bp->bio_resid = bp->bio_bcount;
+
+ bp_cb_fn(bp, status);
+}
+
+static void
+nvme_bio_child_inbed(struct bio *parent, int bio_error)
+{
+ struct nvme_completion parent_cpl;
+ int children, inbed;
+
+ if (bio_error != 0) {
+ parent->bio_flags |= BIO_ERROR;
+ parent->bio_error = bio_error;
+ }
+
+ /*
+	 * atomic_fetchadd returns the value before the add, so we still
+	 * must add 1 to get the updated inbed count. Save bio_children
+	 * before incrementing to guard against race conditions when
+	 * two child bios complete on different queues.
+ */
+ children = atomic_load_acq_int(&parent->bio_children);
+ inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
+ if (inbed == children) {
+ bzero(&parent_cpl, sizeof(parent_cpl));
+ if (parent->bio_flags & BIO_ERROR) {
+ parent_cpl.status &= ~(NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT);
+ parent_cpl.status |= (NVME_SC_DATA_TRANSFER_ERROR) << NVME_STATUS_SC_SHIFT;
+ }
+ nvme_ns_bio_done(parent, &parent_cpl);
+ }
+}
+
+static void
+nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct bio *child = arg;
+ struct bio *parent;
+ int bio_error;
+
+ parent = child->bio_parent;
+ g_destroy_bio(child);
+ bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
+ nvme_bio_child_inbed(parent, bio_error);
+}
+
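+/*
+ * Worked example: a 12 KiB transfer starting 1 KiB past a 4 KiB-aligned
+ * address (align = 4096) crosses the 4 KiB, 8 KiB and 12 KiB boundaries,
+ * so it needs 4 segments.
+ */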
+static uint32_t
+nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
+{
+ uint32_t num_segs, offset, remainder;
+
+ if (align == 0)
+ return (1);
+
+ KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));
+
+ num_segs = size / align;
+ remainder = size & (align - 1);
+ offset = addr & (align - 1);
+ if (remainder > 0 || offset > 0)
+ num_segs += 1 + (remainder + offset - 1) / align;
+ return (num_segs);
+}
+
+static void
+nvme_free_child_bios(int num_bios, struct bio **child_bios)
+{
+ int i;
+
+ for (i = 0; i < num_bios; i++) {
+ if (child_bios[i] != NULL)
+ g_destroy_bio(child_bios[i]);
+ }
+
+ free(child_bios, M_NVME);
+}
+
+static struct bio **
+nvme_allocate_child_bios(int num_bios)
+{
+ struct bio **child_bios;
+ int err = 0, i;
+
+ child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
+ if (child_bios == NULL)
+ return (NULL);
+
+ for (i = 0; i < num_bios; i++) {
+ child_bios[i] = g_new_bio();
+ if (child_bios[i] == NULL)
+ err = ENOMEM;
+ }
+
+ if (err == ENOMEM) {
+ nvme_free_child_bios(num_bios, child_bios);
+ return (NULL);
+ }
+
+ return (child_bios);
+}
+
+static struct bio **
+nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
+{
+ struct bio **child_bios;
+ struct bio *child;
+ uint64_t cur_offset;
+ caddr_t data;
+ uint32_t rem_bcount;
+ int i;
+ struct vm_page **ma;
+ uint32_t ma_offset;
+
+ *num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
+ alignment);
+ child_bios = nvme_allocate_child_bios(*num_bios);
+ if (child_bios == NULL)
+ return (NULL);
+
+ bp->bio_children = *num_bios;
+ bp->bio_inbed = 0;
+ cur_offset = bp->bio_offset;
+ rem_bcount = bp->bio_bcount;
+ data = bp->bio_data;
+ ma_offset = bp->bio_ma_offset;
+ ma = bp->bio_ma;
+
+ for (i = 0; i < *num_bios; i++) {
+ child = child_bios[i];
+ child->bio_parent = bp;
+ child->bio_cmd = bp->bio_cmd;
+ child->bio_offset = cur_offset;
+ child->bio_bcount = min(rem_bcount,
+ alignment - (cur_offset & (alignment - 1)));
+ child->bio_flags = bp->bio_flags;
+ if (bp->bio_flags & BIO_UNMAPPED) {
+ child->bio_ma_offset = ma_offset;
+ child->bio_ma = ma;
+ child->bio_ma_n =
+ nvme_get_num_segments(child->bio_ma_offset,
+ child->bio_bcount, PAGE_SIZE);
+ ma_offset = (ma_offset + child->bio_bcount) &
+ PAGE_MASK;
+ ma += child->bio_ma_n;
+ if (ma_offset != 0)
+ ma -= 1;
+ } else {
+ child->bio_data = data;
+ data += child->bio_bcount;
+ }
+ cur_offset += child->bio_bcount;
+ rem_bcount -= child->bio_bcount;
+ }
+
+ return (child_bios);
+}
+
+static int
+nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
+ uint32_t alignment)
+{
+ struct bio *child;
+ struct bio **child_bios;
+ int err, i, num_bios;
+
+ child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
+ if (child_bios == NULL)
+ return (ENOMEM);
+
+ for (i = 0; i < num_bios; i++) {
+ child = child_bios[i];
+ err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
+ if (err != 0) {
+ nvme_bio_child_inbed(bp, err);
+ g_destroy_bio(child);
+ }
+ }
+
+ free(child_bios, M_NVME);
+ return (0);
+}
+
+int
+nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn)
+{
+ struct nvme_dsm_range *dsm_range;
+ uint32_t num_bios;
+ int err;
+
+ bp->bio_driver1 = cb_fn;
+
+ if (ns->boundary > 0 &&
+ (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
+ num_bios = nvme_get_num_segments(bp->bio_offset,
+ bp->bio_bcount, ns->boundary);
+ if (num_bios > 1)
+ return (nvme_ns_split_bio(ns, bp, ns->boundary));
+ }
+
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
+ break;
+ case BIO_WRITE:
+ err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
+ break;
+ case BIO_FLUSH:
+ err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
+ break;
+ case BIO_DELETE:
+ dsm_range =
+ malloc(sizeof(struct nvme_dsm_range), M_NVME,
+ M_ZERO | M_WAITOK);
+ if (!dsm_range) {
+ err = ENOMEM;
+ break;
+ }
+ dsm_range->length =
+ htole32(bp->bio_bcount/nvme_ns_get_sector_size(ns));
+ dsm_range->starting_lba =
+ htole64(bp->bio_offset/nvme_ns_get_sector_size(ns));
+ bp->bio_driver2 = dsm_range;
+ err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
+ nvme_ns_bio_done, bp);
+ if (err != 0)
+ free(dsm_range, M_NVME);
+ break;
+ default:
+ err = EIO;
+ break;
+ }
+
+ return (err);
+}
+
+int
+nvme_ns_ioctl_process(struct nvme_namespace *ns, u_long cmd, caddr_t arg,
+ int flag, struct thread *td)
+{
+ return (nvme_ns_ioctl(ns->cdev, cmd, arg, flag, td));
+}
+
+int
+nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
+ struct nvme_controller *ctrlr)
+{
+ struct make_dev_args md_args;
+ struct nvme_completion_poll_status status;
+ int res;
+ int unit;
+ uint8_t flbas_fmt;
+ uint8_t vwc_present;
+
+ ns->ctrlr = ctrlr;
+ ns->id = id;
+
+ /*
+ * Namespaces are reconstructed after a controller reset, so check
+ * to make sure we only call mtx_init once on each mtx.
+ *
+ * TODO: Move this somewhere where it gets called at controller
+ * construction time, which is not invoked as part of each
+ * controller reset.
+ */
+ if (!mtx_initialized(&ns->lock))
+ mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);
+
+ status.done = 0;
+ nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
+ nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
+ return (ENXIO);
+ }
+
+ /* Convert data to host endian */
+ nvme_namespace_data_swapbytes(&ns->data);
+
+ /*
+	 * If the size is zero, chances are this isn't a valid
+	 * namespace (e.g. one that has not been configured yet).  The
+	 * standard says the entire identify data will be zeros, so this is a
+ * cheap way to test for that.
+ */
+ if (ns->data.nsze == 0)
+ return (ENXIO);
+
+ flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
+ NVME_NS_DATA_FLBAS_FORMAT_MASK;
+ /*
+ * Note: format is a 0-based value, so > is appropriate here,
+ * not >=.
+ */
+ if (flbas_fmt > ns->data.nlbaf) {
+ printf("lba format %d exceeds number supported (%d)\n",
+ flbas_fmt, ns->data.nlbaf + 1);
+ return (ENXIO);
+ }
+
+ /*
+ * Older Intel devices advertise in vendor specific space an alignment
+	 * that improves performance.  If present, use it for the stripe size.  NVMe
+ * 1.3 standardized this as NOIOB, and newer Intel drives use that.
+ */
+ switch (pci_get_devid(ctrlr->dev)) {
+	case 0x09538086:		/* Intel DC P3500 */
+	case 0x0a538086:		/* Intel DC P3520 */
+	case 0x0a548086:		/* Intel DC P4500 */
+ case 0x0a558086: /* Dell Intel P4600 */
+ if (ctrlr->cdata.vs[3] != 0)
+ ns->boundary =
+ (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;
+ else
+ ns->boundary = 0;
+ break;
+ default:
+ ns->boundary = ns->data.noiob * nvme_ns_get_sector_size(ns);
+ break;
+ }
+
+ if (nvme_ctrlr_has_dataset_mgmt(&ctrlr->cdata))
+ ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;
+
+ vwc_present = (ctrlr->cdata.vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) &
+ NVME_CTRLR_DATA_VWC_PRESENT_MASK;
+ if (vwc_present)
+ ns->flags |= NVME_NS_FLUSH_SUPPORTED;
+
+ /*
+ * cdev may have already been created, if we are reconstructing the
+ * namespace after a controller-level reset.
+ */
+ if (ns->cdev != NULL)
+ return (0);
+
+ /*
+ * Namespace IDs start at 1, so we need to subtract 1 to create a
+ * correct unit number.
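+	 * For example, with NVME_MAX_NAMESPACES of 16, namespace 2 on
+	 * controller unit 1 maps to unit 1 * 16 + 2 - 1 = 17, while the
+	 * cdev created below is still named nvme1ns2.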
+ */
+ unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;
+
+ make_dev_args_init(&md_args);
+ md_args.mda_devsw = &nvme_ns_cdevsw;
+ md_args.mda_unit = unit;
+ md_args.mda_mode = 0600;
+ md_args.mda_si_drv1 = ns;
+ res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d",
+ device_get_unit(ctrlr->dev), ns->id);
+ if (res != 0)
+ return (ENXIO);
+
+ ns->cdev->si_flags |= SI_UNMAPPED;
+
+ return (0);
+}
+
+void
+nvme_ns_destruct(struct nvme_namespace *ns)
+{
+
+ if (ns->cdev != NULL)
+ destroy_dev(ns->cdev);
+}
diff --git a/freebsd/sys/dev/nvme/nvme_ns_cmd.c b/freebsd/sys/dev/nvme/nvme_ns_cmd.c
new file mode 100644
index 00000000..5fe2820e
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_ns_cmd.c
@@ -0,0 +1,208 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "nvme_private.h"
+
+int
+nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
+ uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = nvme_allocate_request_vaddr(payload,
+ lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ uint64_t lba;
+ uint64_t lba_count;
+
+ req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
+ lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
+ nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
+ uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = nvme_allocate_request_vaddr(payload,
+ lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ uint64_t lba;
+ uint64_t lba_count;
+
+ req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+ lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
+ lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
+ nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
+ uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
+ cmd->nsid = htole32(ns->id);
+
+ /* TODO: create a delete command data structure */
+ cmd->cdw10 = htole32(num_ranges - 1);
+ cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ nvme_ns_flush_cmd(&req->cmd, ns->id);
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+/* Timeout = 1 sec */
+#define NVD_DUMP_TIMEOUT 200000
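+/* i.e. NVD_DUMP_TIMEOUT iterations of DELAY(5) in the polling loop below */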
+
+int
+nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
+{
+ struct nvme_completion_poll_status status;
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+ uint64_t lba, lba_count;
+ int i;
+
+ status.done = FALSE;
+ req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb,
+ &status);
+ if (req == NULL)
+ return (ENOMEM);
+
+ cmd = &req->cmd;
+
+ if (len > 0) {
+ lba = offset / nvme_ns_get_sector_size(ns);
+ lba_count = len / nvme_ns_get_sector_size(ns);
+ nvme_ns_write_cmd(cmd, ns->id, lba, lba_count);
+ } else
+ nvme_ns_flush_cmd(cmd, ns->id);
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+ if (req->qpair == NULL)
+ return (ENXIO);
+
+ i = 0;
+ while ((i++ < NVD_DUMP_TIMEOUT) && (status.done == FALSE)) {
+ DELAY(5);
+ nvme_qpair_process_completions(req->qpair);
+ }
+
+ /*
+ * Normally, when using the polling interface, we can't return a
+ * timeout error because we don't know when the completion routines
+	 * will be called if the command later completes.  However, in this
+	 * case we're running a system dump: all interrupts are turned off
+	 * and the scheduler isn't running, so nothing else will complete
+	 * the transaction.
+ */
+ if (status.done == FALSE)
+ return (ETIMEDOUT);
+
+ return (0);
+}
diff --git a/freebsd/sys/dev/nvme/nvme_pci.c b/freebsd/sys/dev/nvme/nvme_pci.c
new file mode 100644
index 00000000..b9d46a8b
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_pci.c
@@ -0,0 +1,358 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (C) 2012-2016 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+static int nvme_pci_probe(device_t);
+static int nvme_pci_attach(device_t);
+static int nvme_pci_detach(device_t);
+static int nvme_pci_suspend(device_t);
+static int nvme_pci_resume(device_t);
+
+static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
+
+static device_method_t nvme_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nvme_pci_probe),
+ DEVMETHOD(device_attach, nvme_pci_attach),
+ DEVMETHOD(device_detach, nvme_pci_detach),
+ DEVMETHOD(device_suspend, nvme_pci_suspend),
+ DEVMETHOD(device_resume, nvme_pci_resume),
+ DEVMETHOD(device_shutdown, nvme_shutdown),
+ { 0, 0 }
+};
+
+static driver_t nvme_pci_driver = {
+ "nvme",
+ nvme_pci_methods,
+ sizeof(struct nvme_controller),
+};
+
+DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, NULL, 0);
+
+static struct _pcsid
+{
+ uint32_t devid;
+ int match_subdevice;
+ uint16_t subdevice;
+ const char *desc;
+ uint32_t quirks;
+} pci_ids[] = {
+ { 0x01118086, 0, 0, "NVMe Controller" },
+ { IDT32_PCI_ID, 0, 0, "IDT NVMe Controller (32 channel)" },
+ { IDT8_PCI_ID, 0, 0, "IDT NVMe Controller (8 channel)" },
+ { 0x09538086, 1, 0x3702, "DC P3700 SSD" },
+ { 0x09538086, 1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
+ { 0x09538086, 1, 0x3704, "DC P3500 SSD [Add-in Card]" },
+ { 0x09538086, 1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
+ { 0x09538086, 1, 0x3709, "DC P3600 SSD [Add-in Card]" },
+ { 0x09538086, 1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
+ { 0x00031c58, 0, 0, "HGST SN100", QUIRK_DELAY_B4_CHK_RDY },
+ { 0x00231c58, 0, 0, "WDC SN200", QUIRK_DELAY_B4_CHK_RDY },
+ { 0x05401c5f, 0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
+ { 0xa821144d, 0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
+ { 0xa822144d, 0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
+ { 0x00000000, 0, 0, NULL }
+};
+
+
+static int
+nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
+{
+ if (devid != ep->devid)
+ return 0;
+
+ if (!ep->match_subdevice)
+ return 1;
+
+ if (subdevice == ep->subdevice)
+ return 1;
+ else
+ return 0;
+}
+
+static int
+nvme_pci_probe (device_t device)
+{
+ struct nvme_controller *ctrlr = DEVICE2SOFTC(device);
+ struct _pcsid *ep;
+ uint32_t devid;
+ uint16_t subdevice;
+
+ devid = pci_get_devid(device);
+ subdevice = pci_get_subdevice(device);
+ ep = pci_ids;
+
+ while (ep->devid) {
+ if (nvme_match(devid, subdevice, ep))
+ break;
+ ++ep;
+ }
+ if (ep->devid)
+ ctrlr->quirks = ep->quirks;
+
+ if (ep->desc) {
+ device_set_desc(device, ep->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+#if defined(PCIS_STORAGE_NVM)
+ if (pci_get_class(device) == PCIC_STORAGE &&
+ pci_get_subclass(device) == PCIS_STORAGE_NVM &&
+ pci_get_progif(device) == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
+ device_set_desc(device, "Generic NVMe Device");
+ return (BUS_PROBE_GENERIC);
+ }
+#endif
+
+ return (ENXIO);
+}
+
+static int
+nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->resource_id = PCIR_BAR(0);
+
+ ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->resource_id, RF_ACTIVE);
+
+	if (ctrlr->resource == NULL) {
+ nvme_printf(ctrlr, "unable to allocate pci resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
+ ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
+ ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
+
+ /*
+ * The NVMe spec allows for the MSI-X table to be placed behind
+ * BAR 4/5, separate from the control/doorbell registers. Always
+ * try to map this bar, because it must be mapped prior to calling
+ * pci_alloc_msix(). If the table isn't behind BAR 4/5,
+ * bus_alloc_resource() will just return NULL which is OK.
+ */
+ ctrlr->bar4_resource_id = PCIR_BAR(4);
+ ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->bar4_resource_id, RF_ACTIVE);
+
+ return (0);
+}
+
+static int
+nvme_pci_attach(device_t dev)
+{
+	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+ int status;
+
+ ctrlr->dev = dev;
+ status = nvme_ctrlr_allocate_bar(ctrlr);
+ if (status != 0)
+ goto bad;
+ pci_enable_busmaster(dev);
+ nvme_ctrlr_setup_interrupts(ctrlr);
+ return nvme_attach(dev);
+bad:
+ if (ctrlr->resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->resource_id, ctrlr->resource);
+ }
+
+ if (ctrlr->bar4_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->bar4_resource_id, ctrlr->bar4_resource);
+ }
+
+ if (ctrlr->tag)
+ bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ rman_get_rid(ctrlr->res), ctrlr->res);
+
+ if (ctrlr->msix_enabled)
+ pci_release_msi(dev);
+
+ return status;
+}
+
+static int
+nvme_pci_detach(device_t dev)
+{
+	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+ int rv;
+
+ rv = nvme_detach(dev);
+ if (ctrlr->msix_enabled)
+ pci_release_msi(dev);
+ pci_disable_busmaster(dev);
+ return (rv);
+}
+
+static int
+nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->msix_enabled = 0;
+ ctrlr->num_io_queues = 1;
+ ctrlr->num_cpus_per_ioq = mp_ncpus;
+ ctrlr->rid = 0;
+ ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
+
+ if (ctrlr->res == NULL) {
+ nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
+ return (ENOMEM);
+ }
+
+ bus_setup_intr(ctrlr->dev, ctrlr->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
+ ctrlr, &ctrlr->tag);
+
+ if (ctrlr->tag == NULL) {
+ nvme_printf(ctrlr, "unable to setup intx handler\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+static void
+nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
+{
+ device_t dev;
+ int per_cpu_io_queues;
+ int min_cpus_per_ioq;
+ int num_vectors_requested, num_vectors_allocated;
+ int num_vectors_available;
+
+ dev = ctrlr->dev;
+ min_cpus_per_ioq = 1;
+ TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
+
+ if (min_cpus_per_ioq < 1) {
+ min_cpus_per_ioq = 1;
+ } else if (min_cpus_per_ioq > mp_ncpus) {
+ min_cpus_per_ioq = mp_ncpus;
+ }
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
+
+ if (per_cpu_io_queues == 0) {
+ min_cpus_per_ioq = mp_ncpus;
+ }
+
+ ctrlr->force_intx = 0;
+ TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
+
+ /*
+ * FreeBSD currently cannot allocate more than about 190 vectors at
+ * boot, meaning that systems with high core count and many devices
+ * requesting per-CPU interrupt vectors will not get their full
+ * allotment. So first, try to allocate as many as we may need to
+ * understand what is available, then immediately release them.
+ * Then figure out how many of those we will actually use, based on
+ * assigning an equal number of cores to each I/O queue.
+ */
+
+	/* One vector for each per-CPU I/O queue, plus one vector for the admin queue. */
+ num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
+ if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
+ num_vectors_available = 0;
+ }
+ pci_release_msi(dev);
+
+ if (ctrlr->force_intx || num_vectors_available < 2) {
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ /*
+ * Do not use all vectors for I/O queues - one must be saved for the
+ * admin queue.
+ */
+ ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
+ howmany(mp_ncpus, num_vectors_available - 1));
+
+ ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
+ num_vectors_requested = ctrlr->num_io_queues + 1;
+ num_vectors_allocated = num_vectors_requested;
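+	/*
+	 * For example, with 8 CPUs but only 5 usable vectors this gives
+	 * num_cpus_per_ioq = max(1, howmany(8, 4)) = 2, num_io_queues = 4,
+	 * and 5 vectors requested (4 I/O queues plus the admin queue).
+	 */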
+
+ /*
+ * Now just allocate the number of vectors we need. This should
+	 * succeed, since the earlier pci_alloc_msix() call returned at
+	 * least this many vectors; but just to be safe, revert to INTx
+	 * if anything goes wrong.
+ */
+ if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ if (num_vectors_allocated < num_vectors_requested) {
+ pci_release_msi(dev);
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ ctrlr->msix_enabled = 1;
+}
+
+static int
+nvme_pci_suspend(device_t dev)
+{
+ struct nvme_controller *ctrlr;
+
+ ctrlr = DEVICE2SOFTC(dev);
+ return (nvme_ctrlr_suspend(ctrlr));
+}
+
+static int
+nvme_pci_resume(device_t dev)
+{
+ struct nvme_controller *ctrlr;
+
+ ctrlr = DEVICE2SOFTC(dev);
+ return (nvme_ctrlr_resume(ctrlr));
+}
diff --git a/freebsd/sys/dev/nvme/nvme_private.h b/freebsd/sys/dev/nvme/nvme_private.h
new file mode 100644
index 00000000..d26d376c
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_private.h
@@ -0,0 +1,562 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2014 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __NVME_PRIVATE_H__
+#define __NVME_PRIVATE_H__
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+
+#include <vm/uma.h>
+
+#include <machine/bus.h>
+
+#include "nvme.h"
+
+#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))
+
+MALLOC_DECLARE(M_NVME);
+
+#define IDT32_PCI_ID 0x80d0111d /* 32 channel board */
+#define IDT8_PCI_ID 0x80d2111d /* 8 channel board */
+
+/*
+ * For commands requiring more than 2 PRP entries, one PRP will be
+ * embedded in the command (prp1), and the rest of the PRP entries
+ * will be in a list pointed to by the command (prp2). This means
+ * that the real maximum number of PRP entries we support is 32+1, which
+ * results in a max xfer size of 32*PAGE_SIZE.
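+ * With a 4 KiB PAGE_SIZE, for example, that works out to a 128 KiB
+ * maximum transfer size.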
+ */
+#define NVME_MAX_PRP_LIST_ENTRIES (NVME_MAX_XFER_SIZE / PAGE_SIZE)
+
+#define NVME_ADMIN_TRACKERS (16)
+#define NVME_ADMIN_ENTRIES (128)
+/* min and max are defined in admin queue attributes section of spec */
+#define NVME_MIN_ADMIN_ENTRIES (2)
+#define NVME_MAX_ADMIN_ENTRIES (4096)
+
+/*
+ * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
+ * queues, while NVME_IO_TRACKERS defines the maximum number of I/Os that we
+ * will allow outstanding on an I/O qpair at any time. The only advantage in
+ * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
+ * the contents of the submission and completion queues, it will show a longer
+ * history of data.
+ */
+#define NVME_IO_ENTRIES (256)
+#define NVME_IO_TRACKERS (128)
+#define NVME_MIN_IO_TRACKERS (4)
+#define NVME_MAX_IO_TRACKERS (1024)
+
+/*
+ * NVME_MAX_IO_ENTRIES is not defined, since it is specified by CAP.MQES
+ * for each controller.
+ */
+
+#define NVME_INT_COAL_TIME (0) /* disabled */
+#define NVME_INT_COAL_THRESHOLD (0) /* 0-based */
+
+#define NVME_MAX_NAMESPACES (16)
+#define NVME_MAX_CONSUMERS (2)
+#define NVME_MAX_ASYNC_EVENTS (8)
+
+#define NVME_DEFAULT_TIMEOUT_PERIOD (30) /* in seconds */
+#define NVME_MIN_TIMEOUT_PERIOD (5)
+#define NVME_MAX_TIMEOUT_PERIOD (120)
+
+#define NVME_DEFAULT_RETRY_COUNT (4)
+
+/* Maximum log page size to fetch for AERs. */
+#define NVME_MAX_AER_LOG_SIZE (4096)
+
+/*
+ * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
+ * it.
+ */
+#ifndef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE (64)
+#endif
+
+extern uma_zone_t nvme_request_zone;
+extern int32_t nvme_retry_count;
+extern bool nvme_verbose_cmd_dump;
+
+struct nvme_completion_poll_status {
+
+ struct nvme_completion cpl;
+ int done;
+};
+
+extern devclass_t nvme_devclass;
+
+#define NVME_REQUEST_VADDR 1
+#define NVME_REQUEST_NULL 2 /* For requests with no payload. */
+#define NVME_REQUEST_UIO 3
+#define NVME_REQUEST_BIO 4
+#define NVME_REQUEST_CCB 5
+
+struct nvme_request {
+
+ struct nvme_command cmd;
+ struct nvme_qpair *qpair;
+ union {
+ void *payload;
+ struct bio *bio;
+ } u;
+ uint32_t type;
+ uint32_t payload_size;
+ boolean_t timeout;
+ nvme_cb_fn_t cb_fn;
+ void *cb_arg;
+ int32_t retries;
+ STAILQ_ENTRY(nvme_request) stailq;
+};
+
+struct nvme_async_event_request {
+
+ struct nvme_controller *ctrlr;
+ struct nvme_request *req;
+ struct nvme_completion cpl;
+ uint32_t log_page_id;
+ uint32_t log_page_size;
+ uint8_t log_page_buffer[NVME_MAX_AER_LOG_SIZE];
+};
+
+struct nvme_tracker {
+
+ TAILQ_ENTRY(nvme_tracker) tailq;
+ struct nvme_request *req;
+ struct nvme_qpair *qpair;
+ struct callout timer;
+ bus_dmamap_t payload_dma_map;
+ uint16_t cid;
+
+ uint64_t *prp;
+ bus_addr_t prp_bus_addr;
+};
+
+struct nvme_qpair {
+
+ struct nvme_controller *ctrlr;
+ uint32_t id;
+ uint32_t phase;
+
+ uint16_t vector;
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ uint32_t num_entries;
+ uint32_t num_trackers;
+ uint32_t sq_tdbl_off;
+ uint32_t cq_hdbl_off;
+
+ uint32_t sq_head;
+ uint32_t sq_tail;
+ uint32_t cq_head;
+
+ int64_t num_cmds;
+ int64_t num_intr_handler_calls;
+ int64_t num_retries;
+ int64_t num_failures;
+
+ struct nvme_command *cmd;
+ struct nvme_completion *cpl;
+
+ bus_dma_tag_t dma_tag;
+ bus_dma_tag_t dma_tag_payload;
+
+ bus_dmamap_t queuemem_map;
+ uint64_t cmd_bus_addr;
+ uint64_t cpl_bus_addr;
+
+ TAILQ_HEAD(, nvme_tracker) free_tr;
+ TAILQ_HEAD(, nvme_tracker) outstanding_tr;
+ STAILQ_HEAD(, nvme_request) queued_req;
+
+ struct nvme_tracker **act_tr;
+
+ boolean_t is_enabled;
+
+ struct mtx lock __aligned(CACHE_LINE_SIZE);
+
+} __aligned(CACHE_LINE_SIZE);
+
+struct nvme_namespace {
+
+ struct nvme_controller *ctrlr;
+ struct nvme_namespace_data data;
+ uint32_t id;
+ uint32_t flags;
+ struct cdev *cdev;
+ void *cons_cookie[NVME_MAX_CONSUMERS];
+ uint32_t boundary;
+ struct mtx lock;
+};
+
+/*
+ * One of these per allocated PCI device.
+ */
+struct nvme_controller {
+
+ device_t dev;
+
+ struct mtx lock;
+
+ uint32_t ready_timeout_in_ms;
+ uint32_t quirks;
+#define QUIRK_DELAY_B4_CHK_RDY 1 /* Can't touch MMIO on disable */
+#define QUIRK_DISABLE_TIMEOUT 2 /* Disable broken completion timeout feature */
+
+ bus_space_tag_t bus_tag;
+ bus_space_handle_t bus_handle;
+ int resource_id;
+ struct resource *resource;
+
+ /*
+ * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
+ * separate from the control registers which are in BAR 0/1. These
+ * members track the mapping of BAR 4/5 for that reason.
+ */
+ int bar4_resource_id;
+ struct resource *bar4_resource;
+
+ uint32_t msix_enabled;
+ uint32_t force_intx;
+ uint32_t enable_aborts;
+
+ uint32_t num_io_queues;
+ uint32_t num_cpus_per_ioq;
+ uint32_t max_hw_pend_io;
+
+ /* Fields for tracking progress during controller initialization. */
+ struct intr_config_hook config_hook;
+ uint32_t ns_identified;
+ uint32_t queues_created;
+
+ struct task reset_task;
+ struct task fail_req_task;
+ struct taskqueue *taskqueue;
+
+ /* For shared legacy interrupt. */
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ bus_dma_tag_t hw_desc_tag;
+ bus_dmamap_t hw_desc_map;
+
+ /** maximum i/o size in bytes */
+ uint32_t max_xfer_size;
+
+ /** minimum page size supported by this controller in bytes */
+ uint32_t min_page_size;
+
+ /** interrupt coalescing time period (in microseconds) */
+ uint32_t int_coal_time;
+
+ /** interrupt coalescing threshold */
+ uint32_t int_coal_threshold;
+
+ /** timeout period in seconds */
+ uint32_t timeout_period;
+
+ struct nvme_qpair adminq;
+ struct nvme_qpair *ioq;
+
+ struct nvme_registers *regs;
+
+ struct nvme_controller_data cdata;
+ struct nvme_namespace ns[NVME_MAX_NAMESPACES];
+
+ struct cdev *cdev;
+
+ /** bit mask of event types currently enabled for async events */
+ uint32_t async_event_config;
+
+ uint32_t num_aers;
+ struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
+
+ void *cons_cookie[NVME_MAX_CONSUMERS];
+
+ uint32_t is_resetting;
+ uint32_t is_initialized;
+ uint32_t notification_sent;
+
+ boolean_t is_failed;
+ STAILQ_HEAD(, nvme_request) fail_req;
+};
+
+#define nvme_mmio_offsetof(reg) \
+ offsetof(struct nvme_registers, reg)
+
+#define nvme_mmio_read_4(sc, reg) \
+ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg))
+
+#define nvme_mmio_write_4(sc, reg, val) \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg), val)
+
+#define nvme_mmio_write_8(sc, reg, val) \
+ do { \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg)+4, \
+ (val & 0xFFFFFFFF00000000ULL) >> 32); \
+	} while (0)
+
+#define nvme_printf(ctrlr, fmt, args...) \
+ device_printf(ctrlr->dev, fmt, ##args)
+
+void nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);
+
+void nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
+ void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
+ uint32_t nsid, void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
+ uint32_t microseconds,
+ uint32_t threshold,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
+ struct nvme_error_information_entry *payload,
+ uint32_t num_entries, /* 0 = max */
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
+ uint32_t nsid,
+ struct nvme_health_information_page *payload,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
+ struct nvme_firmware_page *payload,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, uint16_t vector,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
+ uint32_t num_queues, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
+ uint32_t state,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
+ uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
+
+void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);
+
+int nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
+void nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
+void nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
+int nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
+void nvme_ctrlr_reset(struct nvme_controller *ctrlr);
+/* ctrlr defined as void * to allow use with config_intrhook. */
+void nvme_ctrlr_start_config_hook(void *ctrlr_arg);
+void nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+void nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+void nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+
+int nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
+ uint16_t vector, uint32_t num_entries,
+ uint32_t num_trackers,
+ struct nvme_controller *ctrlr);
+void nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
+ struct nvme_tracker *tr);
+bool nvme_qpair_process_completions(struct nvme_qpair *qpair);
+void nvme_qpair_submit_request(struct nvme_qpair *qpair,
+ struct nvme_request *req);
+void nvme_qpair_reset(struct nvme_qpair *qpair);
+void nvme_qpair_fail(struct nvme_qpair *qpair);
+void nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
+ struct nvme_request *req,
+ uint32_t sct, uint32_t sc);
+
+void nvme_admin_qpair_enable(struct nvme_qpair *qpair);
+void nvme_admin_qpair_disable(struct nvme_qpair *qpair);
+void nvme_admin_qpair_destroy(struct nvme_qpair *qpair);
+
+void nvme_io_qpair_enable(struct nvme_qpair *qpair);
+void nvme_io_qpair_disable(struct nvme_qpair *qpair);
+void nvme_io_qpair_destroy(struct nvme_qpair *qpair);
+
+int nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
+ struct nvme_controller *ctrlr);
+void nvme_ns_destruct(struct nvme_namespace *ns);
+
+void nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);
+
+void nvme_dump_command(struct nvme_command *cmd);
+void nvme_dump_completion(struct nvme_completion *cpl);
+
+int nvme_attach(device_t dev);
+int nvme_shutdown(device_t dev);
+int nvme_detach(device_t dev);
+
+/*
+ * Wait for a command to complete using the nvme_completion_poll_cb.
+ * Used in limited contexts where the caller knows it's OK to block
+ * briefly while the command runs. The ISR will run the callback, which
+ * sets status->done to true, usually within microseconds. If a command
+ * has not completed after a full second, something is seriously wrong
+ * and we panic to provide the proper context to diagnose.
+ */
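+/*
+ * Typical usage (see nvme_ns_construct()): set status.done to 0, issue the
+ * command with nvme_completion_poll_cb and &status as the callback pair,
+ * call nvme_completion_poll(&status), then check status.cpl for errors with
+ * nvme_completion_is_error().
+ */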
+static __inline
+void
+nvme_completion_poll(struct nvme_completion_poll_status *status)
+{
+ int sanity = hz * 1;
+
+ while (!atomic_load_acq_int(&status->done) && --sanity > 0)
+ pause("nvme", 1);
+ if (sanity <= 0)
+ panic("NVME polled command failed to complete within 1s.");
+}
+
+static __inline void
+nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ uint64_t *bus_addr = (uint64_t *)arg;
+
+ if (error != 0)
+ printf("nvme_single_map err %d\n", error);
+ *bus_addr = seg[0].ds_addr;
+}
+
+static __inline struct nvme_request *
+_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
+ if (req != NULL) {
+ req->cb_fn = cb_fn;
+ req->cb_arg = cb_arg;
+ req->timeout = TRUE;
+ }
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL) {
+ req->type = NVME_REQUEST_VADDR;
+ req->u.payload = payload;
+ req->payload_size = payload_size;
+ }
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL)
+ req->type = NVME_REQUEST_NULL;
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL) {
+ req->type = NVME_REQUEST_BIO;
+ req->u.bio = bio;
+ }
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL) {
+ req->type = NVME_REQUEST_CCB;
+ req->u.payload = ccb;
+ }
+
+ return (req);
+}
+
+#define nvme_free_request(req) uma_zfree(nvme_request_zone, req)
+
+void nvme_notify_async_consumers(struct nvme_controller *ctrlr,
+ const struct nvme_completion *async_cpl,
+ uint32_t log_page_id, void *log_page_buffer,
+ uint32_t log_page_size);
+void nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
+void nvme_notify_new_controller(struct nvme_controller *ctrlr);
+void nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);
+
+void nvme_ctrlr_intx_handler(void *arg);
+void nvme_ctrlr_poll(struct nvme_controller *ctrlr);
+
+int nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
+int nvme_ctrlr_resume(struct nvme_controller *ctrlr);
+
+#endif /* __NVME_PRIVATE_H__ */
diff --git a/freebsd/sys/dev/nvme/nvme_qpair.c b/freebsd/sys/dev/nvme/nvme_qpair.c
new file mode 100644
index 00000000..a9a3217c
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_qpair.c
@@ -0,0 +1,1266 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2014 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+typedef enum error_print { ERROR_PRINT_NONE, ERROR_PRINT_NO_RETRY, ERROR_PRINT_ALL } error_print_t;
+#define DO_NOT_RETRY 1
+
+static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
+ struct nvme_request *req);
+static void nvme_qpair_destroy(struct nvme_qpair *qpair);
+
+struct nvme_opcode_string {
+
+ uint16_t opc;
+ const char * str;
+};
+
+static struct nvme_opcode_string admin_opcode[] = {
+ { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
+ { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
+ { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
+ { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
+ { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
+ { NVME_OPC_IDENTIFY, "IDENTIFY" },
+ { NVME_OPC_ABORT, "ABORT" },
+ { NVME_OPC_SET_FEATURES, "SET FEATURES" },
+ { NVME_OPC_GET_FEATURES, "GET FEATURES" },
+ { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
+ { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
+ { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
+ { NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
+ { NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
+ { NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
+ { NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
+ { NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
+ { NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
+ { NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
+ { NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
+ { NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
+ { NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
+ { NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
+ { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
+ { NVME_OPC_SANITIZE, "SANITIZE" },
+ { NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
+ { 0xFFFF, "ADMIN COMMAND" }
+};
+
+static struct nvme_opcode_string io_opcode[] = {
+ { NVME_OPC_FLUSH, "FLUSH" },
+ { NVME_OPC_WRITE, "WRITE" },
+ { NVME_OPC_READ, "READ" },
+ { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
+ { NVME_OPC_COMPARE, "COMPARE" },
+ { NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
+ { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
+ { NVME_OPC_VERIFY, "VERIFY" },
+ { NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
+ { NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
+ { NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
+ { NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
+ { 0xFFFF, "IO COMMAND" }
+};
+
+static const char *
+get_admin_opcode_string(uint16_t opc)
+{
+ struct nvme_opcode_string *entry;
+
+ entry = admin_opcode;
+
+ while (entry->opc != 0xFFFF) {
+ if (entry->opc == opc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+static const char *
+get_io_opcode_string(uint16_t opc)
+{
+ struct nvme_opcode_string *entry;
+
+ entry = io_opcode;
+
+ while (entry->opc != 0xFFFF) {
+ if (entry->opc == opc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+
+static void
+nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
+ struct nvme_command *cmd)
+{
+
+ nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
+ "cdw10:%08x cdw11:%08x\n",
+ get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
+ le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
+}
+
+static void
+nvme_io_qpair_print_command(struct nvme_qpair *qpair,
+ struct nvme_command *cmd)
+{
+
+ switch (cmd->opc) {
+ case NVME_OPC_WRITE:
+ case NVME_OPC_READ:
+ case NVME_OPC_WRITE_UNCORRECTABLE:
+ case NVME_OPC_COMPARE:
+ case NVME_OPC_WRITE_ZEROES:
+ case NVME_OPC_VERIFY:
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
+ "lba:%llu len:%d\n",
+ get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
+ ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
+ (le32toh(cmd->cdw12) & 0xFFFF) + 1);
+ break;
+ case NVME_OPC_FLUSH:
+ case NVME_OPC_DATASET_MANAGEMENT:
+ case NVME_OPC_RESERVATION_REGISTER:
+ case NVME_OPC_RESERVATION_REPORT:
+ case NVME_OPC_RESERVATION_ACQUIRE:
+ case NVME_OPC_RESERVATION_RELEASE:
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
+ get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
+ break;
+ default:
+ nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
+ get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
+ cmd->cid, le32toh(cmd->nsid));
+ break;
+ }
+}
+
+static void
+nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
+{
+ if (qpair->id == 0)
+ nvme_admin_qpair_print_command(qpair, cmd);
+ else
+ nvme_io_qpair_print_command(qpair, cmd);
+ if (nvme_verbose_cmd_dump) {
+ nvme_printf(qpair->ctrlr,
+ "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
+ cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
+ (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
+ nvme_printf(qpair->ctrlr,
+ "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
+ cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
+ cmd->cdw15);
+ }
+}
+
+struct nvme_status_string {
+ uint16_t sc;
+ const char * str;
+};
+
+static struct nvme_status_string generic_status[] = {
+ { NVME_SC_SUCCESS, "SUCCESS" },
+ { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
+ { NVME_SC_INVALID_FIELD, "INVALID FIELD" },
+ { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
+ { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
+ { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
+ { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
+ { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
+ { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
+ { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
+ { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
+ { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
+ { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
+ { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
+ { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
+ { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
+ { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
+ { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
+ { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
+ { NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
+ { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
+ { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
+ { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
+ { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
+ { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
+ { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
+ { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
+ { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
+ { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
+ { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
+ { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
+ { NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
+ { NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
+ { NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },
+
+ { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
+ { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
+ { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
+ { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
+ { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
+ { 0xFFFF, "GENERIC" }
+};
+
+static struct nvme_status_string command_specific_status[] = {
+ { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
+ { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
+ { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
+ { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
+ { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
+ { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
+ { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
+ { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
+ { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
+ { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
+ { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
+ { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
+ { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
+ { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
+ { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
+ { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
+ { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
+ { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
+ { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
+ { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
+ { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
+ { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
+ { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
+ { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
+ { NVME_SC_NS_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
+ { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
+ { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
+ { NVME_SC_SELT_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
+ { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
+ { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
+ { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
+ { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
+ { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
+ { NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WHILE PERSISTENT MEMORY REGION ENABLED" },
+ { NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
+ { NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },
+
+ { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
+ { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
+ { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
+ { 0xFFFF, "COMMAND SPECIFIC" }
+};
+
+static struct nvme_status_string media_error_status[] = {
+ { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
+ { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
+ { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
+ { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
+ { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
+ { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
+ { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
+ { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
+ { 0xFFFF, "MEDIA ERROR" }
+};
+
+static struct nvme_status_string path_related_status[] = {
+ { NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
+ { NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
+ { NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
+ { NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
+ { NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
+ { NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
+ { NVME_SC_COMMAND_ABOTHED_BY_HOST, "COMMAND ABORTED BY HOST" },
+ { 0xFFFF, "PATH RELATED" },
+};
+
+static const char *
+get_status_string(uint16_t sct, uint16_t sc)
+{
+ struct nvme_status_string *entry;
+
+ switch (sct) {
+ case NVME_SCT_GENERIC:
+ entry = generic_status;
+ break;
+ case NVME_SCT_COMMAND_SPECIFIC:
+ entry = command_specific_status;
+ break;
+ case NVME_SCT_MEDIA_ERROR:
+ entry = media_error_status;
+ break;
+ case NVME_SCT_PATH_RELATED:
+ entry = path_related_status;
+ break;
+ case NVME_SCT_VENDOR_SPECIFIC:
+ return ("VENDOR SPECIFIC");
+ default:
+ return ("RESERVED");
+ }
+
+ while (entry->sc != 0xFFFF) {
+ if (entry->sc == sc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+static void
+nvme_qpair_print_completion(struct nvme_qpair *qpair,
+ struct nvme_completion *cpl)
+{
+ uint16_t sct, sc;
+
+ sct = NVME_STATUS_GET_SCT(cpl->status);
+ sc = NVME_STATUS_GET_SC(cpl->status);
+
+ nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
+ get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
+ cpl->cdw0);
+}
+
+static boolean_t
+nvme_completion_is_retry(const struct nvme_completion *cpl)
+{
+ uint8_t sct, sc, dnr;
+
+ sct = NVME_STATUS_GET_SCT(cpl->status);
+ sc = NVME_STATUS_GET_SC(cpl->status);
+ dnr = NVME_STATUS_GET_DNR(cpl->status); /* Do Not Retry Bit */
+
+ /*
+ * TODO: spec is not clear how commands that are aborted due
+ * to TLER will be marked. So for now, it seems
+ * NAMESPACE_NOT_READY is the only case where we should
+ * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
+ * set the DNR bit correctly since the driver controls that.
+ */
+ switch (sct) {
+ case NVME_SCT_GENERIC:
+ switch (sc) {
+ case NVME_SC_ABORTED_BY_REQUEST:
+ case NVME_SC_NAMESPACE_NOT_READY:
+ if (dnr)
+ return (0);
+ else
+ return (1);
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_COMMAND_ID_CONFLICT:
+ case NVME_SC_DATA_TRANSFER_ERROR:
+ case NVME_SC_ABORTED_POWER_LOSS:
+ case NVME_SC_INTERNAL_DEVICE_ERROR:
+ case NVME_SC_ABORTED_SQ_DELETION:
+ case NVME_SC_ABORTED_FAILED_FUSED:
+ case NVME_SC_ABORTED_MISSING_FUSED:
+ case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
+ case NVME_SC_COMMAND_SEQUENCE_ERROR:
+ case NVME_SC_LBA_OUT_OF_RANGE:
+ case NVME_SC_CAPACITY_EXCEEDED:
+ default:
+ return (0);
+ }
+ case NVME_SCT_COMMAND_SPECIFIC:
+ case NVME_SCT_MEDIA_ERROR:
+ return (0);
+ case NVME_SCT_PATH_RELATED:
+ switch (sc) {
+ case NVME_SC_INTERNAL_PATH_ERROR:
+ if (dnr)
+ return (0);
+ else
+ return (1);
+ default:
+ return (0);
+ }
+ case NVME_SCT_VENDOR_SPECIFIC:
+ default:
+ return (0);
+ }
+}
+
+static void
+nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
+ struct nvme_completion *cpl, error_print_t print_on_error)
+{
+ struct nvme_request *req;
+ boolean_t retry, error, retriable;
+
+ req = tr->req;
+ error = nvme_completion_is_error(cpl);
+ retriable = nvme_completion_is_retry(cpl);
+ retry = error && retriable && req->retries < nvme_retry_count;
+ if (retry)
+ qpair->num_retries++;
+ if (error && req->retries >= nvme_retry_count && retriable)
+ qpair->num_failures++;
+
+ if (error && (print_on_error == ERROR_PRINT_ALL ||
+ (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
+ nvme_qpair_print_command(qpair, &req->cmd);
+ nvme_qpair_print_completion(qpair, cpl);
+ }
+
+ qpair->act_tr[cpl->cid] = NULL;
+
+ KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));
+
+ if (req->cb_fn && !retry)
+ req->cb_fn(req->cb_arg, cpl);
+
+ mtx_lock(&qpair->lock);
+ callout_stop(&tr->timer);
+
+ if (retry) {
+ req->retries++;
+ nvme_qpair_submit_tracker(qpair, tr);
+ } else {
+ if (req->type != NVME_REQUEST_NULL) {
+ bus_dmamap_sync(qpair->dma_tag_payload,
+ tr->payload_dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(qpair->dma_tag_payload,
+ tr->payload_dma_map);
+ }
+
+ nvme_free_request(req);
+ tr->req = NULL;
+
+ TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
+
+ /*
+ * If the controller is in the middle of resetting, don't
+ * try to submit queued requests here - let the reset logic
+ * handle that instead.
+ */
+ if (!STAILQ_EMPTY(&qpair->queued_req) &&
+ !qpair->ctrlr->is_resetting) {
+ req = STAILQ_FIRST(&qpair->queued_req);
+ STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
+ _nvme_qpair_submit_request(qpair, req);
+ }
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
+ struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
+ error_print_t print_on_error)
+{
+ struct nvme_completion cpl;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.sqid = qpair->id;
+ cpl.cid = tr->cid;
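+ /*
+ * Build a synthetic status word: SC, SCT and DNR are packed into
+ * their usual bit positions, while the phase bit stays clear since
+ * this completion never came from the hardware.
+ */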
+ cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
+ cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
+ cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
+ nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
+}
+
+void
+nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
+ struct nvme_request *req, uint32_t sct, uint32_t sc)
+{
+ struct nvme_completion cpl;
+ boolean_t error;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.sqid = qpair->id;
+ cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
+ cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
+
+ error = nvme_completion_is_error(&cpl);
+
+ if (error) {
+ nvme_qpair_print_command(qpair, &req->cmd);
+ nvme_qpair_print_completion(qpair, &cpl);
+ }
+
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl);
+
+ nvme_free_request(req);
+}
+
+bool
+nvme_qpair_process_completions(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_completion cpl;
+ int done = 0;
+ bool in_panic = dumping || SCHEDULER_STOPPED();
+
+ qpair->num_intr_handler_calls++;
+
+ /*
+ * qpair is not enabled, likely because a controller reset is in
+ * progress. Ignore the interrupt - any I/O that was associated with
+ * this interrupt will get retried when the reset is complete.
+ */
+ if (!qpair->is_enabled)
+ return (false);
+
+ /*
+ * A panic can stop the CPU this routine is running on at any point. If
+ * we're called during a panic, complete the cq_head wrap protocol for
+ * the case where we are interrupted just after the increment at 1
+ * below, but before we can reset cq_head to zero at 2. Also cope with
+ * the case where we do the zero at 2, but may or may not have done the
+ * phase adjustment at step 3. The panic machinery flushes all pending
+ * memory writes, so we can make these strong ordering assumptions
+ * that would otherwise be unwise if we were racing in real time.
+ */
+ if (__predict_false(in_panic)) {
+ if (qpair->cq_head == qpair->num_entries) {
+ /*
+ * Here we know that we need to zero cq_head and then negate
+ * the phase. Neither has happened yet: cq_head can only equal
+ * num_entries if the atomic_store_rel at 2 was not reached.
+ */
+ qpair->cq_head = 0;
+ qpair->phase = !qpair->phase;
+ } else if (qpair->cq_head == 0) {
+ /*
+ * In this case, we know that the assignment at 2
+ * happened below, but we don't know whether step 3
+ * did. To resynchronize, look at the last completion
+ * entry and set the phase to the opposite of the
+ * phase recorded there.
+ */
+ cpl = qpair->cpl[qpair->num_entries - 1];
+ nvme_completion_swapbytes(&cpl);
+ qpair->phase = !NVME_STATUS_GET_P(cpl.status);
+ }
+ }
+
+ bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ while (1) {
+ cpl = qpair->cpl[qpair->cq_head];
+
+ /* Convert to host endian */
+ nvme_completion_swapbytes(&cpl);
+
+ if (NVME_STATUS_GET_P(cpl.status) != qpair->phase)
+ break;
+
+ tr = qpair->act_tr[cpl.cid];
+
+ if (tr != NULL) {
+ nvme_qpair_complete_tracker(qpair, tr, &cpl, ERROR_PRINT_ALL);
+ qpair->sq_head = cpl.sqhd;
+ done++;
+ } else if (!in_panic) {
+ /*
+ * A missing tracker is normally an error. However, a
+ * panic can stop the CPU this routine is running on
+ * after completing an I/O but before updating
+ * qpair->cq_head at 1 below. Later, we re-enter this
+ * routine to poll I/O associated with the kernel
+ * dump. We find that the tr has been set to null before
+ * calling the completion routine. If it hasn't
+ * completed (or it triggers a panic), then '1' below
+ * won't have updated cq_head. Rather than panic again,
+ * ignore this condition because it's not unexpected.
+ */
+ nvme_printf(qpair->ctrlr,
+ "cpl does not map to outstanding cmd\n");
+ /* nvme_dump_completion expects device endianness */
+ nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
+ KASSERT(0, ("received completion for unknown cmd"));
+ }
+
+ /*
+ * There are a number of races in the following code (see above) when
+ * the system panics. We compensate for each one of them by
+ * using the atomic store to force strong ordering (at least when
+ * viewed in the aftermath of a panic).
+ */
+ if (++qpair->cq_head == qpair->num_entries) { /* 1 */
+ atomic_store_rel_int(&qpair->cq_head, 0); /* 2 */
+ qpair->phase = !qpair->phase; /* 3 */
+ }
+
+ nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
+ qpair->cq_head);
+ }
+ return (done != 0);
+}
+
+static void
+nvme_qpair_msix_handler(void *arg)
+{
+ struct nvme_qpair *qpair = arg;
+
+ nvme_qpair_process_completions(qpair);
+}
+
+int
+nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
+ uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
+ struct nvme_controller *ctrlr)
+{
+ struct nvme_tracker *tr;
+ size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz;
+ uint64_t queuemem_phys, prpmem_phys, list_phys;
+ uint8_t *queuemem, *prpmem, *prp_list;
+ int i, err;
+
+ qpair->id = id;
+ qpair->vector = vector;
+ qpair->num_entries = num_entries;
+ qpair->num_trackers = num_trackers;
+ qpair->ctrlr = ctrlr;
+
+ if (ctrlr->msix_enabled) {
+
+ /*
+ * MSI-X vector resource IDs start at 1, so we add one to
+ * the queue's vector to get the corresponding rid to use.
+ */
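+ /*
+ * E.g., with one vector per queue, the admin queue (vector 0)
+ * maps to rid 1 and the first I/O queue to rid 2.
+ */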
+ qpair->rid = vector + 1;
+
+ qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &qpair->rid, RF_ACTIVE);
+ bus_setup_intr(ctrlr->dev, qpair->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL,
+ nvme_qpair_msix_handler, qpair, &qpair->tag);
+ if (id == 0) {
+ bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
+ "admin");
+ } else {
+ bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
+ "io%d", id - 1);
+ }
+ }
+
+ mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
+
+ /* Note: NVMe PRP format is restricted to 4-byte alignment. */
+ err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
+ 4, PAGE_SIZE, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
+ (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
+ NULL, NULL, &qpair->dma_tag_payload);
+ if (err != 0) {
+ nvme_printf(ctrlr, "payload tag create failed %d\n", err);
+ goto out;
+ }
+
+ /*
+ * Each component must be page aligned, and individual PRP lists
+ * cannot cross a page boundary.
+ */
+ cmdsz = qpair->num_entries * sizeof(struct nvme_command);
+ cmdsz = roundup2(cmdsz, PAGE_SIZE);
+ cplsz = qpair->num_entries * sizeof(struct nvme_completion);
+ cplsz = roundup2(cplsz, PAGE_SIZE);
+ prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
+ prpmemsz = qpair->num_trackers * prpsz;
+ allocsz = cmdsz + cplsz + prpmemsz;
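+ /*
+ * The single allocation below is carved up as
+ * [ submission queue | completion queue | per-tracker PRP lists ],
+ * with each region starting on a page boundary.
+ */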
+
+ err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
+ PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
+ if (err != 0) {
+ nvme_printf(ctrlr, "tag create failed %d\n", err);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
+ BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
+ nvme_printf(ctrlr, "failed to alloc qpair memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
+ queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
+ nvme_printf(ctrlr, "failed to load qpair memory\n");
+ goto out;
+ }
+
+ qpair->num_cmds = 0;
+ qpair->num_intr_handler_calls = 0;
+ qpair->num_retries = 0;
+ qpair->num_failures = 0;
+ qpair->cmd = (struct nvme_command *)queuemem;
+ qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
+ prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
+ qpair->cmd_bus_addr = queuemem_phys;
+ qpair->cpl_bus_addr = queuemem_phys + cmdsz;
+ prpmem_phys = queuemem_phys + cmdsz + cplsz;
+
+ qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
+ qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);
+
+ TAILQ_INIT(&qpair->free_tr);
+ TAILQ_INIT(&qpair->outstanding_tr);
+ STAILQ_INIT(&qpair->queued_req);
+
+ list_phys = prpmem_phys;
+ prp_list = prpmem;
+ for (i = 0; i < qpair->num_trackers; i++) {
+
+ if (list_phys + prpsz > prpmem_phys + prpmemsz) {
+ qpair->num_trackers = i;
+ break;
+ }
+
+ /*
+ * Make sure that the PRP list for this tracker doesn't
+ * overflow to another page.
+ */
+ if (trunc_page(list_phys) !=
+ trunc_page(list_phys + prpsz - 1)) {
+ list_phys = roundup2(list_phys, PAGE_SIZE);
+ prp_list =
+ (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
+ }
+
+ tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
+ bus_dmamap_create(qpair->dma_tag_payload, 0,
+ &tr->payload_dma_map);
+ callout_init(&tr->timer, 1);
+ tr->cid = i;
+ tr->qpair = qpair;
+ tr->prp = (uint64_t *)prp_list;
+ tr->prp_bus_addr = list_phys;
+ TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
+ list_phys += prpsz;
+ prp_list += prpsz;
+ }
+
+ if (qpair->num_trackers == 0) {
+ nvme_printf(ctrlr, "failed to allocate enough trackers\n");
+ goto out;
+ }
+
+ qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
+ qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
+ return (0);
+
+out:
+ nvme_qpair_destroy(qpair);
+ return (ENOMEM);
+}
+
+static void
+nvme_qpair_destroy(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ if (qpair->tag)
+ bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
+
+ if (mtx_initialized(&qpair->lock))
+ mtx_destroy(&qpair->lock);
+
+ if (qpair->res)
+ bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(qpair->res), qpair->res);
+
+ if (qpair->cmd != NULL) {
+ bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
+ bus_dmamem_free(qpair->dma_tag, qpair->cmd,
+ qpair->queuemem_map);
+ }
+
+ if (qpair->act_tr)
+ free(qpair->act_tr, M_NVME);
+
+ while (!TAILQ_EMPTY(&qpair->free_tr)) {
+ tr = TAILQ_FIRST(&qpair->free_tr);
+ TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
+ bus_dmamap_destroy(qpair->dma_tag_payload,
+ tr->payload_dma_map);
+ free(tr, M_NVME);
+ }
+
+ if (qpair->dma_tag)
+ bus_dma_tag_destroy(qpair->dma_tag);
+
+ if (qpair->dma_tag_payload)
+ bus_dma_tag_destroy(qpair->dma_tag_payload);
+}
+
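+/*
+ * Complete any outstanding Asynchronous Event Requests with
+ * ABORTED - SQ DELETION so that their trackers are released; AERs would
+ * otherwise remain outstanding indefinitely.
+ */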
+static void
+nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ while (tr != NULL) {
+ if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
+ nvme_qpair_manual_complete_tracker(qpair, tr,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
+ ERROR_PRINT_NONE);
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ } else {
+ tr = TAILQ_NEXT(tr, tailq);
+ }
+ }
+}
+
+void
+nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
+{
+
+ nvme_admin_qpair_abort_aers(qpair);
+ nvme_qpair_destroy(qpair);
+}
+
+void
+nvme_io_qpair_destroy(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_destroy(qpair);
+}
+
+static void
+nvme_abort_complete(void *arg, const struct nvme_completion *status)
+{
+ struct nvme_tracker *tr = arg;
+
+ /*
+ * If cdw0 == 1, the controller was not able to abort the command
+ * we requested. We still need to check the active tracker array,
+ * to cover the race where the I/O timed out at the same time the
+ * controller was completing it.
+ */
+ if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
+ /*
+ * An I/O has timed out, and the controller was unable to
+ * abort it for some reason. Construct a fake completion
+ * status, and then complete the I/O's tracker manually.
+ */
+ nvme_printf(tr->qpair->ctrlr,
+ "abort command failed, aborting command manually\n");
+ nvme_qpair_manual_complete_tracker(tr->qpair, tr,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL);
+ }
+}
+
+static void
+nvme_timeout(void *arg)
+{
+ struct nvme_tracker *tr = arg;
+ struct nvme_qpair *qpair = tr->qpair;
+ struct nvme_controller *ctrlr = qpair->ctrlr;
+ uint32_t csts;
+ uint8_t cfs;
+
+ /*
+ * Read csts to get value of cfs - controller fatal status.
+ * If no fatal status, try to call the completion routine, and
+ * if it completes any transactions, report a missed interrupt and
+ * return (this may need to be rate limited). Otherwise, if
+ * aborts are enabled and the controller is not reporting
+ * fatal status, abort the command. Otherwise, just reset the
+ * controller and hope for the best.
+ */
+ csts = nvme_mmio_read_4(ctrlr, csts);
+ cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
+ if (cfs == 0 && nvme_qpair_process_completions(qpair)) {
+ nvme_printf(ctrlr, "Missing interrupt\n");
+ return;
+ }
+ if (ctrlr->enable_aborts && cfs == 0) {
+ nvme_printf(ctrlr, "Aborting command due to a timeout.\n");
+ nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
+ nvme_abort_complete, tr);
+ } else {
+ nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
+ (csts == 0xffffffff) ? " and possible hot unplug" :
+ (cfs ? " and fatal error status" : ""));
+ nvme_ctrlr_reset(ctrlr);
+ }
+}
+
+void
+nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
+{
+ struct nvme_request *req;
+ struct nvme_controller *ctrlr;
+
+ mtx_assert(&qpair->lock, MA_OWNED);
+
+ req = tr->req;
+ req->cmd.cid = tr->cid;
+ qpair->act_tr[tr->cid] = tr;
+ ctrlr = qpair->ctrlr;
+
+ if (req->timeout)
+ callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
+ nvme_timeout, tr);
+
+ /* Copy the command from the tracker to the submission queue. */
+ memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));
+
+ if (++qpair->sq_tail == qpair->num_entries)
+ qpair->sq_tail = 0;
+
+ bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#ifndef __powerpc__
+ /*
+ * powerpc's bus_dmamap_sync() already includes a heavyweight sync, but
+ * no other archs do.
+ */
+ wmb();
+#endif
+
+ nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
+ qpair->sq_tail);
+
+ qpair->num_cmds++;
+}
+
+static void
+nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ struct nvme_tracker *tr = arg;
+ uint32_t cur_nseg;
+
+ /*
+ * If the mapping operation failed, return immediately. The caller
+ * is responsible for detecting the error status and failing the
+ * tracker manually.
+ */
+ if (error != 0) {
+ nvme_printf(tr->qpair->ctrlr,
+ "nvme_payload_map err %d\n", error);
+ return;
+ }
+
+ /*
+ * Note that we specified PAGE_SIZE for alignment and max
+ * segment size when creating the bus dma tags. So here
+ * we can safely just transfer each segment to its
+ * associated PRP entry.
+ */
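+ /*
+ * One- and two-segment transfers fit directly into prp1/prp2;
+ * anything larger points prp2 at this tracker's pre-allocated PRP
+ * list and fills the list with the remaining segments.
+ */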
+ tr->req->cmd.prp1 = htole64(seg[0].ds_addr);
+
+ if (nseg == 2) {
+ tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
+ } else if (nseg > 2) {
+ cur_nseg = 1;
+ tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
+ while (cur_nseg < nseg) {
+ tr->prp[cur_nseg-1] =
+ htole64((uint64_t)seg[cur_nseg].ds_addr);
+ cur_nseg++;
+ }
+ } else {
+ /*
+ * prp2 should not be used by the controller
+ * since there is only one segment, but set
+ * to 0 just to be safe.
+ */
+ tr->req->cmd.prp2 = 0;
+ }
+
+ bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ nvme_qpair_submit_tracker(tr->qpair, tr);
+}
+
+static void
+_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
+{
+ struct nvme_tracker *tr;
+ int err = 0;
+
+ mtx_assert(&qpair->lock, MA_OWNED);
+
+ tr = TAILQ_FIRST(&qpair->free_tr);
+ req->qpair = qpair;
+
+ if (tr == NULL || !qpair->is_enabled) {
+ /*
+ * No tracker is available, or the qpair is disabled due to
+ * an in-progress controller-level reset or controller
+ * failure.
+ */
+
+ if (qpair->ctrlr->is_failed) {
+ /*
+ * The controller has failed. Post the request to a
+ * task where it will be aborted, so that we do not
+ * invoke the request's callback in the context
+ * of the submission.
+ */
+ nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
+ } else {
+ /*
+ * Put the request on the qpair's request queue to be
+ * processed when a tracker frees up via a command
+ * completion or when the controller reset is
+ * completed.
+ */
+ STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
+ }
+ return;
+ }
+
+ TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
+ tr->req = req;
+
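+ /*
+ * Map the payload (if any) for DMA. For VADDR, BIO and CCB requests
+ * the bus_dmamap_load* callback (nvme_payload_map) submits the
+ * tracker once the segments are known; NULL requests carry no
+ * payload and are submitted immediately.
+ */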
+ switch (req->type) {
+ case NVME_REQUEST_VADDR:
+ KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
+ ("payload_size (%d) exceeds max_xfer_size (%d)\n",
+ req->payload_size, qpair->ctrlr->max_xfer_size));
+ err = bus_dmamap_load(tr->qpair->dma_tag_payload,
+ tr->payload_dma_map, req->u.payload, req->payload_size,
+ nvme_payload_map, tr, 0);
+ if (err != 0)
+ nvme_printf(qpair->ctrlr,
+ "bus_dmamap_load returned 0x%x!\n", err);
+ break;
+ case NVME_REQUEST_NULL:
+ nvme_qpair_submit_tracker(tr->qpair, tr);
+ break;
+ case NVME_REQUEST_BIO:
+ KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
+ ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
+ (intmax_t)req->u.bio->bio_bcount,
+ qpair->ctrlr->max_xfer_size));
+ err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
+ tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
+ if (err != 0)
+ nvme_printf(qpair->ctrlr,
+ "bus_dmamap_load_bio returned 0x%x!\n", err);
+ break;
+ case NVME_REQUEST_CCB:
+ err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
+ tr->payload_dma_map, req->u.payload,
+ nvme_payload_map, tr, 0);
+ if (err != 0)
+ nvme_printf(qpair->ctrlr,
+ "bus_dmamap_load_ccb returned 0x%x!\n", err);
+ break;
+ default:
+ panic("unknown nvme request type 0x%x\n", req->type);
+ break;
+ }
+
+ if (err != 0) {
+ /*
+ * The dmamap operation failed, so we manually fail the
+ * tracker here with DATA_TRANSFER_ERROR status.
+ *
+ * nvme_qpair_manual_complete_tracker must not be called
+ * with the qpair lock held.
+ */
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
+ mtx_lock(&qpair->lock);
+ }
+}
+
+void
+nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
+{
+
+ mtx_lock(&qpair->lock);
+ _nvme_qpair_submit_request(qpair, req);
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_enable(struct nvme_qpair *qpair)
+{
+
+ qpair->is_enabled = TRUE;
+}
+
+void
+nvme_qpair_reset(struct nvme_qpair *qpair)
+{
+
+ qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
+
+ /*
+ * The first time through the completion queue, the hardware
+ * will set the phase bit on each completion to 1. So set this
+ * to 1 here, indicating we're looking for a 1 to know which
+ * entries have completed. We'll toggle the expected value each
+ * time the completion queue rolls over.
+ */
+ qpair->phase = 1;
+
+ memset(qpair->cmd, 0,
+ qpair->num_entries * sizeof(struct nvme_command));
+ memset(qpair->cpl, 0,
+ qpair->num_entries * sizeof(struct nvme_completion));
+}
+
+void
+nvme_admin_qpair_enable(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_tracker *tr_temp;
+
+ /*
+ * Manually abort each outstanding admin command. Do not retry
+ * admin commands found here, since they will be left over from
+ * a controller reset and it's likely that the context in which the
+ * command was issued no longer applies.
+ */
+ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
+ nvme_printf(qpair->ctrlr,
+ "aborting outstanding admin command\n");
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
+ }
+
+ nvme_qpair_enable(qpair);
+}
+
+void
+nvme_io_qpair_enable(struct nvme_qpair *qpair)
+{
+ STAILQ_HEAD(, nvme_request) temp;
+ struct nvme_tracker *tr;
+ struct nvme_tracker *tr_temp;
+ struct nvme_request *req;
+
+ /*
+ * Manually abort each outstanding I/O. This normally results in a
+ * retry, unless the retry count on the associated request has
+ * reached its limit.
+ */
+ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
+ nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
+ }
+
+ mtx_lock(&qpair->lock);
+
+ nvme_qpair_enable(qpair);
+
+ STAILQ_INIT(&temp);
+ STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);
+
+ while (!STAILQ_EMPTY(&temp)) {
+ req = STAILQ_FIRST(&temp);
+ STAILQ_REMOVE_HEAD(&temp, stailq);
+ nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
+ nvme_qpair_print_command(qpair, &req->cmd);
+ _nvme_qpair_submit_request(qpair, req);
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_disable(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ qpair->is_enabled = FALSE;
+ mtx_lock(&qpair->lock);
+ TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
+ callout_stop(&tr->timer);
+ mtx_unlock(&qpair->lock);
+}
+
+void
+nvme_admin_qpair_disable(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_disable(qpair);
+ nvme_admin_qpair_abort_aers(qpair);
+}
+
+void
+nvme_io_qpair_disable(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_disable(qpair);
+}
+
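+/*
+ * Fail every queued request and every outstanding tracker with
+ * ABORTED - BY REQUEST, typically because the controller itself is being
+ * failed; nothing completed here is retried.
+ */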
+void
+nvme_qpair_fail(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_request *req;
+
+ if (!mtx_initialized(&qpair->lock))
+ return;
+
+ mtx_lock(&qpair->lock);
+
+ while (!STAILQ_EMPTY(&qpair->queued_req)) {
+ req = STAILQ_FIRST(&qpair->queued_req);
+ STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
+ nvme_printf(qpair->ctrlr, "failing queued i/o\n");
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST);
+ mtx_lock(&qpair->lock);
+ }
+
+ /* Manually abort each outstanding I/O. */
+ while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ /*
+ * Do not remove the tracker. The abort_tracker path will
+ * do that for us.
+ */
+ nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
+ mtx_lock(&qpair->lock);
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
diff --git a/freebsd/sys/dev/nvme/nvme_sysctl.c b/freebsd/sys/dev/nvme/nvme_sysctl.c
new file mode 100644
index 00000000..7110cb80
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_sysctl.c
@@ -0,0 +1,368 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2012-2016 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_nvme.h>
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include "nvme_private.h"
+
+#ifndef NVME_USE_NVD
+#define NVME_USE_NVD 1
+#endif
+
+int nvme_use_nvd = NVME_USE_NVD;
+bool nvme_verbose_cmd_dump = false;
+
+SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD, 0, "NVMe sysctl tunables");
+SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
+ &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
+SYSCTL_BOOL(_hw_nvme, OID_AUTO, verbose_cmd_dump, CTLFLAG_RWTUN,
+ &nvme_verbose_cmd_dump, 0,
+ "enable verbose command printting when a command fails");
+
+/*
+ * CTLTYPE_S64 and sysctl_handle_64 were added in r217616. Define these
+ * explicitly here for older kernels that don't include the r217616
+ * changeset.
+ */
+#ifndef CTLTYPE_S64
+#define CTLTYPE_S64 CTLTYPE_QUAD
+#define sysctl_handle_64 sysctl_handle_quad
+#endif
+
+static void
+nvme_dump_queue(struct nvme_qpair *qpair)
+{
+ struct nvme_completion *cpl;
+ struct nvme_command *cmd;
+ int i;
+
+ printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
+
+ printf("Completion queue:\n");
+ for (i = 0; i < qpair->num_entries; i++) {
+ cpl = &qpair->cpl[i];
+ printf("%05d: ", i);
+ nvme_dump_completion(cpl);
+ }
+
+ printf("Submission queue:\n");
+ for (i = 0; i < qpair->num_entries; i++) {
+ cmd = &qpair->cmd[i];
+ printf("%05d: ", i);
+ nvme_dump_command(cmd);
+ }
+}
+
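+/*
+ * Writing a non-zero value to a queue's "dump_debug" sysctl (e.g.
+ * "sysctl dev.nvme.0.adminq.dump_debug=1", path shown for unit 0) dumps the
+ * raw submission and completion queues via nvme_dump_queue() above.
+ */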
+static int
+nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_qpair *qpair = arg1;
+ uint32_t val = 0;
+
+ int error = sysctl_handle_int(oidp, &val, 0, req);
+
+ if (error)
+ return (error);
+
+ if (val != 0)
+ nvme_dump_queue(qpair);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->int_coal_time;
+ int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
+ req);
+
+ if (error)
+ return (error);
+
+ if (oldval != ctrlr->int_coal_time)
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
+ ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
+ NULL);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->int_coal_threshold;
+ int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
+ req);
+
+ if (error)
+ return (error);
+
+ if (oldval != ctrlr->int_coal_threshold)
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
+ ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
+ NULL);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->timeout_period;
+ int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);
+
+ if (error)
+ return (error);
+
+ if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
+ ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
+ ctrlr->timeout_period = oldval;
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static void
+nvme_qpair_reset_stats(struct nvme_qpair *qpair)
+{
+
+ qpair->num_cmds = 0;
+ qpair->num_intr_handler_calls = 0;
+ qpair->num_retries = 0;
+ qpair->num_failures = 0;
+}
+
+static int
+nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_cmds = 0;
+ int i;
+
+ num_cmds = ctrlr->adminq.num_cmds;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_cmds += ctrlr->ioq[i].num_cmds;
+
+ return (sysctl_handle_64(oidp, &num_cmds, 0, req));
+}
+
+static int
+nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_intr_handler_calls = 0;
+ int i;
+
+ num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
+
+ return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
+}
+
+static int
+nvme_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_retries = 0;
+ int i;
+
+ num_retries = ctrlr->adminq.num_retries;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_retries += ctrlr->ioq[i].num_retries;
+
+ return (sysctl_handle_64(oidp, &num_retries, 0, req));
+}
+
+static int
+nvme_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_failures = 0;
+ int i;
+
+ num_failures = ctrlr->adminq.num_failures;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_failures += ctrlr->ioq[i].num_failures;
+
+ return (sysctl_handle_64(oidp, &num_failures, 0, req));
+}
+
+static int
+nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t i, val = 0;
+
+ int error = sysctl_handle_int(oidp, &val, 0, req);
+
+ if (error)
+ return (error);
+
+ if (val != 0) {
+ nvme_qpair_reset_stats(&ctrlr->adminq);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_reset_stats(&ctrlr->ioq[i]);
+ }
+
+ return (0);
+}
+
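+/*
+ * Per-queue statistics are attached under the queue's sysctl node, so they
+ * appear as e.g. dev.nvme.0.ioq0.num_cmds (illustrative path for unit 0,
+ * first I/O queue).
+ */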
+static void
+nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
+ struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
+{
+ struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
+ CTLFLAG_RD, &qpair->num_entries, 0,
+ "Number of entries in hardware queue");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
+ CTLFLAG_RD, &qpair->num_trackers, 0,
+ "Number of trackers pre-allocated for this queue pair");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
+ CTLFLAG_RD, &qpair->sq_head, 0,
+ "Current head of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
+ CTLFLAG_RD, &qpair->sq_tail, 0,
+ "Current tail of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
+ CTLFLAG_RD, &qpair->cq_head, 0,
+ "Current head of completion queue (as observed by driver)");
+
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
+ CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
+ CTLFLAG_RD, &qpair->num_intr_handler_calls,
+ "Number of times interrupt handler was invoked (will typically be "
+ "less than number of actual interrupts generated due to "
+ "coalescing)");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
+ CTLFLAG_RD, &qpair->num_retries, "Number of commands retried");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
+ CTLFLAG_RD, &qpair->num_failures,
+ "Number of commands ending in failure after all retries");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
+ "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
+ nvme_sysctl_dump_debug, "IU", "Dump debug data");
+}
+
+void
+nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
+{
+ struct sysctl_ctx_list *ctrlr_ctx;
+ struct sysctl_oid *ctrlr_tree, *que_tree;
+ struct sysctl_oid_list *ctrlr_list;
+#define QUEUE_NAME_LENGTH 16
+ char queue_name[QUEUE_NAME_LENGTH];
+ int i;
+
+ ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
+ ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
+ ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cpus_per_ioq",
+ CTLFLAG_RD, &ctrlr->num_cpus_per_ioq, 0,
+ "Number of CPUs assigned per I/O queue pair");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_int_coal_time, "IU",
+ "Interrupt coalescing timeout (in microseconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_int_coal_threshold, "IU",
+ "Interrupt coalescing threshold");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_timeout_period, "IU",
+ "Timeout period (in seconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_cmds, "IU",
+ "Number of commands submitted");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
+ "Number of times interrupt handler was invoked (will "
+ "typically be less than number of actual interrupts "
+ "generated due to coalescing)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_retries", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_retries, "IU",
+ "Number of commands retried");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_failures", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_failures, "IU",
+ "Number of commands ending in failure after all retries");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
+
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
+ CTLFLAG_RD, NULL, "Admin Queue");
+
+ nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ queue_name, CTLFLAG_RD, NULL, "IO Queue");
+ nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
+ que_tree);
+ }
+}
diff --git a/freebsd/sys/dev/nvme/nvme_util.c b/freebsd/sys/dev/nvme/nvme_util.c
new file mode 100644
index 00000000..72ec2e88
--- /dev/null
+++ b/freebsd/sys/dev/nvme/nvme_util.c
@@ -0,0 +1,65 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 1997 Justin T. Gibbs
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <dev/nvme/nvme.h>
+
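+/*
+ * Copy a fixed-width, space-padded identify string (e.g. a model or serial
+ * number) into a NUL-terminated buffer. For example (hypothetical input),
+ * "  ACME NVMe  " becomes "ACME NVMe"; unprintable bytes are shown as '?'.
+ */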
+void
+nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen)
+{
+ uint8_t *cur_pos;
+
+ /* Trim leading/trailing spaces, nulls. */
+ while (srclen > 0 && src[0] == ' ')
+ src++, srclen--;
+ while (srclen > 0
+ && (src[srclen - 1] == ' ' || src[srclen - 1] == '\0'))
+ srclen--;
+
+ while (srclen > 0 && dstlen > 1) {
+ cur_pos = dst;
+
+ /* Show '?' for non-printable characters. */
+ if (*src < 0x20 || *src >= 0x7F)
+ *cur_pos++ = '?';
+ else
+ *cur_pos++ = *src;
+ src++;
+ srclen--;
+ dstlen -= cur_pos - dst;
+ dst = cur_pos;
+ }
+ *dst = '\0';
+}
+