Diffstat (limited to 'linux/drivers/soc/fsl/qbman')
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman-debugfs.c     |  121
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman.c             |  692
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman.h             |  542
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_api.c         | 1123
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_portal.c      |  399
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_priv.h        |  136
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test.c        |   60
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test.h        |   34
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test_api.c    |  188
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test_thresh.c |  216
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_utils.c       |   76
-rw-r--r--  linux/drivers/soc/fsl/qbman/dpaa_resource.c    |  363
-rw-r--r--  linux/drivers/soc/fsl/qbman/dpaa_sys.h         |  292
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman-debugfs.c     | 1317
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman.c             | 1106
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman.h             | 1133
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_api.c         | 3026
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_driver.c      |   87
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_portal.c      |  796
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_priv.h        |  293
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test.c        |   61
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test.h        |   44
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test_api.c    |  222
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test_stash.c  |  540
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_utils.c       |  309
25 files changed, 13176 insertions(+), 0 deletions(-)
diff --git a/linux/drivers/soc/fsl/qbman/bman-debugfs.c b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
new file mode 100644
index 00000000..09f5a283
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
@@ -0,0 +1,121 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/* Query Buffer Pool State */
+
+static int query_bp_state_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct bm_pool_state state;
+ int i, j;
+ u32 mask;
+
+ memset(&state, 0, sizeof(state));
+ ret = bman_query_pools(&state);
+ if (ret) {
+ seq_printf(file, "Error %d\n", ret);
+ return ret;
+ }
+
+ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
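+ /* The pool state is reported as two 32-bit words per array: 'as' is
+ * the availability state, 'ds' the depletion state, and the
+ * most-significant bit of word 0 corresponds to pool 0. */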
+ for (i = 0; i < 2; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ seq_printf(file,
+ " %-2u %-3s %-3s\n",
+ (i * 32) + j,
+ state.as.state.__state[i] & mask ? "no" : "yes",
+ state.ds.state.__state[i] & mask ? "yes" : "no");
+ mask >>= 1;
+ }
+ }
+
+ return 0;
+}
+
+static int query_bp_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_bp_state_show, NULL);
+}
+
+static const struct file_operations query_bp_state_fops = {
+ .owner = THIS_MODULE,
+ .open = query_bp_state_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static int __init bman_debugfs_init(void)
+{
+ int ret = 0;
+ struct dentry *d;
+
+ dfs_root = debugfs_create_dir("bman", NULL);
+ if (dfs_root == NULL) {
+ pr_err("Cannot create dir\n");
+ return -ENOMEM;
+ }
+
+ d = debugfs_create_file("query_bp_state",
+ S_IRUGO,
+ dfs_root,
+ NULL,
+ &query_bp_state_fops);
+ if (d == NULL) {
+ ret = -ENOMEM;
+ pr_err("Cannot create query_bp_state\n");
+ goto _return;
+ }
+
+ return 0;
+
+_return:
+ debugfs_remove_recursive(dfs_root);
+
+ return ret;
+}
+
+static void __exit bman_debugfs_exit(void)
+{
+ debugfs_remove_recursive(dfs_root);
+}
+
+module_init(bman_debugfs_init);
+module_exit(bman_debugfs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/linux/drivers/soc/fsl/qbman/bman.c b/linux/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 00000000..35459bc7
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,692 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/* Last updated for v00.79 of the BMan Block Guide (BG) */
+
+struct bman;
+
+/* Register offsets */
+#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* Pool State Change Notification */
+
+/* BMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
+
+union bman_ecir {
+ u32 ecir_raw;
+ struct {
+ u32 __reserved1:4;
+ u32 portal_num:4;
+ u32 __reserved2:12;
+ u32 numb:4;
+ u32 __reserved3:2;
+ u32 pid:6;
+ } __packed info;
+};
+
+union bman_eadr {
+ u32 eadr_raw;
+ struct {
+ u32 __reserved1:5;
+ u32 memid:3;
+ u32 __reserved2:14;
+ u32 eadr:10;
+ } __packed info;
+};
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
+ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
+ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+};
+#define BMAN_HWE_COUNT ARRAY_SIZE(bman_hwerr_txts)
+
+struct bman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct bman_error_info_mdata error_mdata[] = {
+ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+};
+#define BMAN_ERR_MDATA_COUNT ARRAY_SIZE(error_mdata)
+
+/* TODO: make this configurable via Kconfig */
+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+
+/**
+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define bm_err_isr_status_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_status)
+#define bm_err_isr_status_clear(bm, m) \
+ __bm_err_isr_write(bm, bm_isr_status, m)
+#define bm_err_isr_enable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_enable)
+#define bm_err_isr_enable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_enable, v)
+#define bm_err_isr_disable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_disable)
+#define bm_err_isr_disable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_disable, v)
+#define bm_err_isr_inhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_err_isr_uninhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 0)
+
+#ifndef __rtems__
+static u16 bman_pool_max;
+#else /* __rtems__ */
+/* FIXME */
+extern u16 bman_ip_rev;
+extern u16 bman_pool_max;
+#endif /* __rtems__ */
+
+/*
+ * TODO: unimplemented registers
+ *
+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ */
+
+/* Encapsulate "struct bman *" as a cast of the register space address. */
+
+static struct bman *bm_create(void *regs)
+{
+ return (struct bman *)regs;
+}
+
+static inline u32 __bm_in(struct bman *bm, u32 offset)
+{
+ return ioread32be((void *)bm + offset);
+}
+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+{
+ iowrite32be(val, (void *)bm + offset);
+}
+#define bm_in(reg) __bm_in(bm, REG_##reg)
+#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
+
+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+{
+ return __bm_in(bm, REG_ERR_ISR + (n << 2));
+}
+
+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+{
+ __bm_out(bm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_in(IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+ u32 e = 0; /* exponent; 'val' is reduced to the 8-bit coefficient */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPA_ASSERT(e < 0x10);
+ return val | (e << 8);
+}
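+
+/* Worked example for __generate_thresh(): a threshold of 4096 does not fit in
+ * the 8-bit coefficient, so it is halved five times (4096 -> 128, e = 5) and
+ * encoded as 0x580, which decodes as 128 << 5 = 4096. With roundup, an input
+ * of 257 becomes coefficient 129 with e = 1 (0x181), so the decoded value 258
+ * rounds up instead of truncating down to 256. */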
+
+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
+ u32 hwdet, u32 hwdxt)
+{
+ DPA_ASSERT(pool < bman_pool_max);
+
+ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
+ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
+ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
+ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+}
+
+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPA_ASSERT(!(ba & (size - 1)));
+ bm_out(FBPR_BARE, upper_32_bits(ba));
+ bm_out(FBPR_BAR, lower_32_bits(ba));
+ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+}
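+
+/* Example: the RTEMS configuration below reserves a 16 MiB FBPR region, so
+ * bm_set_memory() programs FBPR_AR with exp - 1 = ilog2(16777216) - 1 = 23;
+ * per the assertions above, the base address must be aligned to the region
+ * size. */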
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these. */
+static struct bman *bm;
+
+/* And this state belongs to 'bm' */
+#ifndef __rtems__
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
+#define fbpr_a ((uintptr_t)&fbpr[0])
+#define fbpr_sz sizeof(fbpr)
+#endif /* __rtems__ */
+
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bm)
+ return -ENODEV;
+ bm_set_pool(bm, bpid, thresholds[0], thresholds[1],
+ thresholds[2], thresholds[3]);
+ return 0;
+}
+EXPORT_SYMBOL(bm_pool_set);
+
+static void log_edata_bits(u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ pr_warn("ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << (bit_count % 32));
+ }
+ j = 16 - i;
+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
+}
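+
+/* Example: for the 192-bit "Stockpile memory" entry of error_mdata[], i is
+ * 192 / 32 = 6 with no remainder, so j starts at 10 and the last six of the
+ * sixteen EDATA registers, EDATA(10)..EDATA(15), are dumped. */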
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+ union bman_ecir ecir_val;
+ union bman_eadr eadr_val;
+
+ ecir_val.ecir_raw = bm_in(ECIR);
+ /* Is portal info valid */
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("ErrInt: SWP id %d, numb %d, pid %d\n",
+ ecir_val.info.portal_num, ecir_val.info.numb,
+ ecir_val.info.pid);
+ }
+ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = bm_in(EADR);
+ pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info.memid].txt,
+ error_mdata[eadr_val.info.memid].addr_mask
+ & eadr_val.info.eadr);
+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+ }
+}
+
+/* BMan interrupt handler */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+ ier_val = bm_err_isr_enable_read(bm);
+ isr_val = bm_err_isr_status_read(bm);
+ ecsr_val = bm_in(ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < BMAN_HWE_COUNT; i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ pr_warn("ErrInt: %s\n", bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(isr_mask, ecsr_val);
+ /* Re-arm error capture registers */
+ bm_out(ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
+ pr_devel("Un-enabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_err_isr_enable_write(bm, ier_val);
+ }
+ }
+ }
+ bm_err_isr_status_clear(bm, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return bm_in(POOL_CONTENT(bpid));
+}
+EXPORT_SYMBOL(bm_pool_free_buffers);
+
+#ifndef __rtems__
+static ssize_t show_fbpr_fpc(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
+};
+
+static ssize_t show_pool_count(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data;
+ int i;
+
+ if (kstrtoint(dev_attr->attr.name, 10, &i))
+ return -EINVAL;
+ data = bm_in(POOL_CONTENT(i));
+ return snprintf(buf, PAGE_SIZE, "%d\n", data);
+};
+
+static ssize_t show_err_isr(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
+};
+
+static ssize_t show_sbec(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+ return -EINVAL;
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+
+/* Didn't use DEVICE_ATTR, as 64 of them would be required.
+ * Initialize them when needed. */
+static char *name_attrs_pool_count; /* "xx" + null-terminator */
+static struct device_attribute *dev_attr_buffer_pool_count;
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *bman_dev_attributes[] = {
+ &dev_attr_fbpr_fpc.attr,
+ &dev_attr_err_isr.attr,
+ NULL
+};
+
+static struct attribute *bman_dev_ecr_attributes[] = {
+ &dev_attr_sbec_0.attr,
+ &dev_attr_sbec_1.attr,
+ NULL
+};
+
+static struct attribute **bman_dev_pool_count_attributes;
+
+/* root level */
+static const struct attribute_group bman_dev_attr_grp = {
+ .name = NULL,
+ .attrs = bman_dev_attributes
+};
+static const struct attribute_group bman_dev_ecr_grp = {
+ .name = "error_capture",
+ .attrs = bman_dev_ecr_attributes
+};
+static struct attribute_group bman_dev_pool_count_grp = {
+ .name = "pool_count",
+};
+
+static int of_fsl_bman_remove(struct platform_device *ofdev)
+{
+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+ return 0;
+};
+#endif /* __rtems__ */
+
+static int of_fsl_bman_probe(struct platform_device *ofdev)
+{
+ int ret, err_irq, i;
+ struct device *dev = &ofdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res;
+ u32 __iomem *regs;
+ u16 id;
+ u8 major, minor;
+
+ if (!of_device_is_available(node))
+ return -ENODEV;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Can't get %s property 'reg'\n", node->full_name);
+ return ret;
+ }
+ regs = devm_ioremap(dev, res.start, res.end - res.start + 1);
+ if (!regs)
+ return -ENXIO;
+
+ bm = bm_create(regs);
+
+ bm_get_version(bm, &id, &major, &minor);
+ dev_info(dev, "Bman ver:%04x,%02x,%02x\n", id, major, minor);
+ if ((major == 1) && (minor == 0))
+ bman_pool_max = 64;
+ else if ((major == 2) && (minor == 0))
+ bman_pool_max = 8;
+ else if ((major == 2) && (minor == 1))
+ bman_pool_max = 64;
+ else {
+ dev_warn(dev, "unknown BMan version, defaulting to rev1.0\n");
+ bman_pool_max = 64;
+ }
+#ifdef __rtems__
+ bman_ip_rev = (u16)((major << 8) | minor);
+#endif /* __rtems__ */
+
+ bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+
+ err_irq = of_irq_to_resource(node, 0, NULL);
+ if (err_irq == NO_IRQ) {
+ dev_err(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ node);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
+ /* Write-to-clear any stale bits (eg. starvation being asserted prior
+ * to resource allocation during driver init). */
+ bm_err_isr_status_clear(bm, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_err_isr_enable_write(bm, 0xffffffff);
+
+#ifndef __rtems__
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp);
+ if (ret)
+ goto done;
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp);
+ if (ret)
+ goto del_group_0;
+
+ name_attrs_pool_count = devm_kmalloc(dev,
+ sizeof(char) * bman_pool_max * 3, GFP_KERNEL);
+ if (!name_attrs_pool_count) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ dev_attr_buffer_pool_count = devm_kmalloc(dev,
+ sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL);
+ if (!dev_attr_buffer_pool_count) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ bman_dev_pool_count_attributes = devm_kmalloc(dev,
+ sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL);
+ if (!bman_dev_pool_count_attributes) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ for (i = 0; i < bman_pool_max; i++) {
+ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
+ if (!ret) {
+ ret = -EINVAL;
+ goto del_group_1;
+ }
+ dev_attr_buffer_pool_count[i].attr.name =
+ (name_attrs_pool_count + i * 3);
+ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
+ dev_attr_buffer_pool_count[i].show = show_pool_count;
+ bman_dev_pool_count_attributes[i] =
+ &dev_attr_buffer_pool_count[i].attr;
+ }
+ bman_dev_pool_count_attributes[bman_pool_max] = NULL;
+
+ bman_dev_pool_count_grp.attrs = bman_dev_pool_count_attributes;
+
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_count_grp);
+ if (ret)
+ goto del_group_1;
+
+ goto done;
+
+del_group_1:
+ sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp);
+del_group_0:
+ sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp);
+done:
+ if (ret)
+ dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+#else /* __rtems__ */
+ (void)i;
+#endif /* __rtems__ */
+
+ return ret;
+};
+
+#ifndef __rtems__
+static const struct of_device_id of_fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver of_fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_fsl_bman_ids,
+ },
+ .probe = of_fsl_bman_probe,
+ .remove = of_fsl_bman_remove,
+};
+
+builtin_platform_driver(of_fsl_bman_driver);
+#else /* __rtems__ */
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <rtems.h>
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+
+static struct bm_portal_config bman_configs[NR_CPUS];
+
+u16 bman_ip_rev;
+
+u16 bman_pool_max;
+
+SYSINIT_REFERENCE(irqs);
+
+static void
+bman_sysinit(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct device_node dn;
+ struct platform_device ofdev = {
+ .dev = {
+ .of_node = &dn,
+ .base = (uintptr_t)&qoriq
+ }
+ };
+ const char *name;
+ int cpu_count = (int)rtems_get_processor_count();
+ int cpu;
+ int ret;
+ int node;
+ int parent;
+
+ qoriq_reset_qman_and_bman();
+ qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
+ sizeof(qoriq_bman_portal[0]));
+ qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
+ sizeof(qoriq_bman_portal[1]));
+
+ memset(&dn, 0, sizeof(dn));
+
+ name = "fsl,bman";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no bman in FDT");
+
+ dn.full_name = name;
+ dn.offset = node;
+ ret = of_fsl_bman_probe(&ofdev);
+ if (ret != 0)
+ panic("bman: probe failed");
+
+ name = "fsl,bman-portal";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no portals in FDT");
+ parent = fdt_parent_offset(fdt, node);
+ if (parent < 0)
+ panic("bman: no parent of portals in FDT");
+ node = fdt_first_subnode(fdt, parent);
+
+ dn.full_name = name;
+ dn.offset = node;
+
+ for (cpu = 0; cpu < cpu_count; ++cpu) {
+ struct bm_portal_config *pcfg = &bman_configs[cpu];
+ struct bman_portal *portal;
+ struct resource res;
+
+ if (node < 0)
+ panic("bman: missing portal in FDT");
+
+ ret = of_address_to_resource(&dn, 0, &res);
+ if (ret != 0)
+ panic("bman: no portal CE address");
+ pcfg->addr_virt[0] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+ (uintptr_t)&qoriq_bman_portal[0][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+
+ ret = of_address_to_resource(&dn, 1, &res);
+ if (ret != 0)
+ panic("bman: no portal CI address");
+ pcfg->addr_virt[1] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+ (uintptr_t)&qoriq_bman_portal[2][0]);
+
+ pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->public_cfg.irq == NO_IRQ)
+ panic("bman: no portal interrupt");
+
+ pcfg->public_cfg.cpu = cpu;
+ bman_depletion_fill(&pcfg->public_cfg.mask);
+
+ portal = bman_create_affine_portal(pcfg);
+ if (portal == NULL)
+ panic("bman: cannot create portal");
+
+ bman_p_irqsource_add(portal, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+
+ node = fdt_next_subnode(fdt, node);
+ dn.offset = node;
+ }
+
+ bman_seed_bpid_range(0, bman_pool_max);
+}
+SYSINIT(bman_sysinit, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
+#endif /* __rtems__ */
diff --git a/linux/drivers/soc/fsl/qbman/bman.h b/linux/drivers/soc/fsl/qbman/bman.h
new file mode 100644
index 00000000..c9879386
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.h
@@ -0,0 +1,542 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+extern u16 bman_pool_max;
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) __raw_readl((bm)->addr_ci + (o))
+#define __bm_out(bm, o, val) __raw_writel((val), (bm)->addr_ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
+#define __bm_cl_in(bm, o) __raw_readl((bm)->addr_ce + (o))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->addr_ce + (o); \
+ __raw_writel((val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
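+
+/* Example: with ringsize 8, bm_cyc_diff(8, 6, 2) returns 8 + 2 - 6 = 4,
+ * covering entries 6, 7, 0 and 1 ('first' included, 'last' excluded). */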
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3 letter codes. The first letter matches the portal mode,
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *addr_ce; /* cache-enabled */
+ void __iomem *addr_ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* --- RCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'. */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
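+
+/* Example of the pointer math above (ring entries are 64 bytes, hence the
+ * shift by 6): the 8-entry ring spans 512 bytes, so incrementing the cursor
+ * past the last entry carries into bit 0x200 (BM_RCR_SIZE << 6);
+ * RCR_CARRYCLEAR() clears that carry to wrap back to the ring base, and
+ * RCR_INC() detects the wrap (partial != cursor) to toggle the valid-bit. */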
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case. */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ DPA_ASSERT(!rcr->busy);
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommited RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pci);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cci);
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.addr_ce + BM_CL_CR;
+ mc->rr = portal->addr.addr_ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering... */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+/* --- Portal interrupt register API --- */
+
+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
+{
+ return 0;
+}
+
+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
+{
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPA_ASSERT(bpid < bman_pool_max);
+
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
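+
+/* Example: for bpid 40, SCN_REG(40) selects BM_REG_SCN(1), the word covering
+ * pools 32..63, and SCN_BIT(40) is 0x80000000 >> 8, i.e. the lowest pool of
+ * each word maps to the most-significant bit. */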
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
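+ /* The low bits of the ACQUIRE verb encode how many buffers are
+ * requested per command (one here); the BUFCOUNT field of the
+ * result's verb reports how many were actually handed back. */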
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ /* TBD: Should we do a few extra iterations in
+ * case some other blocks keep buffers 'on deck',
+ * which may also be problematic? */
+ stop = true;
+ } else
+ ++aq_count;
+ }
+ return 0;
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_api.c b/linux/drivers/soc/fsl/qbman/bman_api.c
new file mode 100644
index 00000000..cdfcebbd
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_api.c
@@ -0,0 +1,1123 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spinlock_t sharing_lock; /* only used if is_shared */
+#ifndef __rtems__
+ int is_shared;
+ struct bman_portal *sharing_redirect;
+#endif /* __rtems__ */
+#endif
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ /* 64-entry hash-table of pool objects that are tracking depletion
+ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
+ * we're not fussy about cache-misses and so forth - whereas the above
+ * members should all fit in one cacheline.
+ * BTW, with 64 entries in the hash table and 64 buffer pools to track,
+ * you'll never guess the hash-function ... */
+ struct bman_pool *cb[64];
+ char irqname[MAX_IRQNAME];
+ /* Track if the portal was alloced by the driver */
+ u8 alloced;
+};
+
+#ifdef FSL_DPA_PORTAL_SHARE
+/* For an explanation of the locking, redirection, or affine-portal logic,
+ * please consult the QMan driver for details. This is the same, only simpler
+ * (no fiddly QMan-specific bits). */
+#ifndef __rtems__
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+ else \
+ local_irq_save(irqflags); \
+ } while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+ irqflags); \
+ else \
+ local_irq_restore(irqflags); \
+ } while (0)
+#else /* __rtems__ */
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
+#endif /* __rtems__ */
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+#ifndef __rtems__
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+static inline struct bman_portal *get_raw_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct bman_portal *get_affine_portal(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return p->sharing_redirect;
+#endif /* __rtems__ */
+ return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+static inline struct bman_portal *get_poll_portal(void)
+{
+ return this_cpu_ptr(&bman_affine_portal);
+}
+#define put_poll_portal()
+
+/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals. */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
+ struct bm_buffer *sp;
+ unsigned int sp_fill;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_t in_use;
+#endif
+};
+
+/* (De)Registration of depletion notification callbacks */
+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
+{
+ __maybe_unused unsigned long irqflags;
+
+ pool->portal = portal;
+ PORTAL_IRQ_LOCK(portal, irqflags);
+ pool->next = portal->cb[pool->params.bpid];
+ portal->cb[pool->params.bpid] = pool;
+ if (!pool->next)
+ /* First object for that bpid on this portal, enable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
+ PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+static void depletion_unlink(struct bman_pool *pool)
+{
+ struct bman_pool *it, *last = NULL;
+ struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(pool->portal, irqflags);
+ it = *base; /* <-- gotcha, don't do this prior to the irq_save */
+ while (it != pool) {
+ last = it;
+ it = it->next;
+ }
+ if (!last)
+ *base = pool->next;
+ else
+ last->next = pool->next;
+ if (!last && !pool->next) {
+ /* Last object for that bpid on this portal, disable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
+ /* And "forget" that we last saw this pool as depleted */
+ bman_depletion_unset(&pool->portal->pools[1],
+ pool->params.bpid);
+ }
+ PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
+}
+
+/* In the case that the application's core loop calls qman_poll() and
+ * bman_poll(), we ought to balance how often we incur the overheads of the
+ * slow-path poll. We'll use two decrementer sources. The idle decrementer
+ * constant is used when the last slow-poll detected no work to do, and the busy
+ * decrementer constant when the last slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
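+
+/* A minimal sketch of the core loop that the comment above assumes
+ * (do_application_work() is a hypothetical application routine, not part of
+ * this driver):
+ *
+ * for (;;) {
+ * qman_poll();
+ * bman_poll();
+ * do_application_work();
+ * }
+ *
+ * Each bman_poll() call then decides, via the decrementer constants above,
+ * whether to incur the slow-path overhead on this iteration. */
+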
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ u32 clear = p->irq_sources;
+ u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
+
+ clear |= __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+struct bman_portal *bman_create_portal(
+ struct bman_portal *portal,
+ const struct bm_portal_config *config)
+{
+ struct bm_portal *__p;
+ const struct bman_depletion *pools = &config->public_cfg.mask;
+ int ret;
+ u8 bpid = 0;
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ if (!portal)
+ return portal;
+ portal->alloced = 1;
+ } else
+ portal->alloced = 0;
+
+ __p = &portal->p;
+
+ /* prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference... */
+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(__p)) {
+ pr_err("MC initialisation failed\n");
+ goto fail_mc;
+ }
+ if (bm_isr_init(__p)) {
+ pr_err("ISR initialisation failed\n");
+ goto fail_isr;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /* Default to all BPIDs disabled, we enable as required at
+ * run-time. */
+ bm_isr_bscn_mask(__p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ portal->rcri_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spin_lock_init(&portal->sharing_lock);
+#ifndef __rtems__
+ portal->is_shared = config->public_cfg.is_shared;
+ portal->sharing_redirect = NULL;
+#endif /* __rtems__ */
+#endif
+ memset(&portal->cb, 0, sizeof(portal->cb));
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(__p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(__p, portal->irq_sources);
+ bm_isr_status_clear(__p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+#ifndef __rtems__
+ if ((config->public_cfg.cpu != -1) &&
+ irq_can_set_affinity(config->public_cfg.irq) &&
+ irq_set_affinity(config->public_cfg.irq,
+ cpumask_of(config->public_cfg.cpu))) {
+ pr_err("irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+#endif /* __rtems__ */
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(__p);
+ if (ret) {
+ pr_err("RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = config;
+
+ bm_isr_disable_write(__p, 0);
+ bm_isr_uninhibit(__p);
+ return portal;
+fail_rcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_isr_finish(__p);
+fail_isr:
+ bm_mc_finish(__p);
+fail_mc:
+ bm_rcr_finish(__p);
+fail_rcr:
+ if (portal->alloced)
+ kfree(portal);
+ return NULL;
+}
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config)
+{
+ struct bman_portal *portal;
+
+ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
+ portal = bman_create_portal(portal, config);
+#ifndef __rtems__
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+#endif /* __rtems__ */
+ return portal;
+}
+
+#ifndef __rtems__
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+ int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+ struct bman_portal *p = &per_cpu(bman_affine_portal, cpu);
+
+ BUG_ON(p->config);
+ BUG_ON(p->is_shared);
+ BUG_ON(!redirect->config->public_cfg.is_shared);
+ p->irq_sources = 0;
+ p->sharing_redirect = redirect;
+ put_affine_portal();
+ return p;
+#else
+ BUG();
+ return NULL;
+#endif
+}
+#endif /* __rtems__ */
+
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg = bm->config;
+
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->public_cfg.irq, bm);
+
+ kfree(bm->pools);
+ bm_isr_finish(&bm->p);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+ if (bm->alloced)
+ kfree(bm);
+}
+
+const struct bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_raw_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (bm->sharing_redirect) {
+ bm->sharing_redirect = NULL;
+ put_affine_portal();
+ return NULL;
+ }
+ bm->is_shared = 0;
+#endif /* __rtems__ */
+#endif
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+#ifndef __rtems__
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+ put_affine_portal();
+ return pcfg;
+}
+
+/* When release logic waits on available RCR space, we need a global waitqueue
+ * in the case of "affine" use (as the waits wake on different cpus which means
+ * different portals - so we can't wait on any per-portal waitqueue). */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ struct bman_depletion tmp;
+ u32 ret = is;
+
+ /* There is a gotcha to be aware of. If we do the query before clearing
+ * the status register, we may miss state changes that occur between the
+ * two. If we write to clear the status register before the query, the
+ * cache-enabled query command may overtake the status register write
+ * unless we use a heavyweight sync (which we don't want). Instead, we
+ * write-to-clear the status register then *read it back* before doing
+ * the query, hence the odd while loop with the 'is' accumulation. */
+ if (is & BM_PIRQ_BSCN) {
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ unsigned int i, j;
+ u32 __is;
+
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
+ is |= __is;
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ }
+ is &= ~BM_PIRQ_BSCN;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ tmp = mcr->query.ds.state;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ for (i = 0; i < 2; i++) {
+ int idx = i * 32;
+ /* tmp is a mask of currently-depleted pools.
+ * pools[0] is mask of those we care about.
+ * pools[1] is our previous view (we only want to
+ * be told about changes). */
+ tmp.__state[i] &= p->pools[0].__state[i];
+ if (tmp.__state[i] == p->pools[1].__state[i])
+ /* fast-path, nothing to see, move along */
+ continue;
+ for (j = 0; j <= 31; j++, idx++) {
+ struct bman_pool *pool = p->cb[idx];
+ int b4 = bman_depletion_get(&p->pools[1], idx);
+ int af = bman_depletion_get(&tmp, idx);
+
+ if (b4 == af)
+ continue;
+ while (pool) {
+ pool->params.cb(p, pool,
+ pool->params.cb_ctx, af);
+ pool = pool->next;
+ }
+ }
+ }
+ p->pools[1] = tmp;
+ }
+
+ if (is & BM_PIRQ_RCRI) {
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_rcr_cce_update(&p->p);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ /* If waiting for sync, we only cancel the interrupt threshold
+ * when the ring utilisation hits zero. */
+ if (p->rcri_owned) {
+ if (!bm_rcr_get_fill(&p->p)) {
+ p->rcri_owned = NULL;
+ bm_rcr_set_ithresh(&p->p, 0);
+ }
+ } else
+#endif
+ bm_rcr_set_ithresh(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left unhandled */
+ DPA_ASSERT(!is);
+ return ret;
+}
+
+const struct bman_portal_config *bman_get_portal_config(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ const struct bman_portal_config *ret = &p->config->public_cfg;
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_get_portal_config);
+
+u32 bman_irqsource_get(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_get);
+
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
+{
+ __maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return -EINVAL;
+#endif /* __rtems__ */
+#endif
+ PORTAL_IRQ_LOCK(p, irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return 0;
+}
+EXPORT_SYMBOL(bman_p_irqsource_add);
+
+int bman_irqsource_add(__maybe_unused u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ int ret = bman_p_irqsource_add(p, bits);
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_add);
+
+int bman_irqsource_remove(u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ __maybe_unused unsigned long irqflags;
+ u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect) {
+ put_affine_portal();
+ return -EINVAL;
+ }
+#endif /* __rtems__ */
+#endif
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert. */
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bits &= BM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ ier = bm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering. */
+ bm_isr_status_clear(&p->p, ~ier);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_irqsource_remove);
+
+#ifndef __rtems__
+const cpumask_t *bman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(bman_affine_cpus);
+#endif /* __rtems__ */
+
+u32 bman_poll_slow(void)
+{
+ struct bman_portal *p = get_poll_portal();
+ u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ ret = (u32)-1;
+ else
+#endif /* __rtems__ */
+#endif
+ {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+
+ ret = __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, ret);
+ }
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_poll_slow);
+
+/* Legacy wrapper */
+void bman_poll(void)
+{
+ struct bman_portal *p = get_poll_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ goto done;
+#endif /* __rtems__ */
+#endif
+ if (!(p->slowpoll--)) {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active)
+ p->slowpoll = SLOW_POLL_BUSY;
+ else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+done:
+#endif /* __rtems__ */
+#endif
+ put_poll_portal();
+}
+EXPORT_SYMBOL(bman_poll);
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ if (bman_alloc_bpid(&bpid))
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+#ifdef CONFIG_FSL_BMAN
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ if (bm_pool_set(bpid, params->thresholds))
+ goto err;
+ }
+#else
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ goto err;
+#endif
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->sp = NULL;
+ pool->sp_fill = 0;
+ pool->params = *params;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
+ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
+ GFP_KERNEL);
+ if (!pool->sp)
+ goto err;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
+ struct bman_portal *p = get_affine_portal();
+
+ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
+ pr_err("Depletion events disabled for bpid %d\n", bpid);
+ goto err;
+ }
+ depletion_link(p, pool);
+ put_affine_portal();
+ }
+ return pool;
+err:
+#ifdef CONFIG_FSL_BMAN
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ if (pool) {
+ kfree(pool->sp);
+ kfree(pool);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
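+
+/*
+ * A minimal usage sketch (hypothetical caller; cf. bman_test_api.c below):
+ * create a pool on a dynamically-allocated BPID, then free it again.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *
+ *	if (pool)
+ *		bman_free_pool(pool);
+ */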
+
+void bman_free_pool(struct bman_pool *pool)
+{
+#ifdef CONFIG_FSL_BMAN
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+#endif
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
+ depletion_unlink(pool);
+ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
+ if (pool->sp_fill)
+ pr_err("Stockpile not flushed, has %u in bpid %u.\n",
+ pool->sp_fill, pool->params.bpid);
+ kfree(pool->sp);
+ pool->sp = NULL;
+ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+EXPORT_SYMBOL(bman_get_params);
+
+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_rcr_is_empty(void)
+{
+ __maybe_unused unsigned long irqflags;
+ struct bman_portal *p = get_affine_portal();
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ update_rcr_ci(p, 0);
+ avail = bm_rcr_get_fill(&p->p);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return avail == 0;
+}
+EXPORT_SYMBOL(bman_rcr_is_empty);
+
+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
+#ifdef FSL_DPA_CAN_WAIT
+ __maybe_unused struct bman_pool *pool,
+#endif
+ __maybe_unused unsigned long *irqflags,
+ __maybe_unused u32 flags)
+{
+ struct bm_rcr_entry *r;
+ u8 avail;
+
+ *p = get_affine_portal();
+ PORTAL_IRQ_LOCK(*p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ if ((*p)->rcri_owned) {
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ return NULL;
+ }
+ (*p)->rcri_owned = pool;
+ }
+#endif
+ avail = bm_rcr_get_avail(&(*p)->p);
+ if (avail < 2)
+ update_rcr_ci(*p, avail);
+ r = bm_rcr_start(&(*p)->p);
+ if (unlikely(!r)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
+ (*p)->rcri_owned = NULL;
+#endif
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ }
+ return r;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
+
+ if (!rcr)
+ bm_rcr_set_ithresh(&(*p)->p, 1);
+ return rcr;
+}
+
+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr;
+#ifndef FSL_DPA_CAN_WAIT_SYNC
+ pool = NULL;
+#endif
+#ifndef __rtems__
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ return rcr;
+}
+#endif
+
+/* To facilitate better copying of bufs into the ring without either (a)
+ * copying noise into the first byte (prematurely triggering the command), or
+ * (b) being very inefficient by copying small fields using read-modify-write */
+struct overlay_bm_buffer {
+ u32 first;
+ u32 second;
+};
+
+static inline int __bman_release(struct bman_pool *pool,
+ const struct bm_buffer *bufs, u8 num, u32 flags)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ struct overlay_bm_buffer *o_dest;
+ struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
+ __maybe_unused unsigned long irqflags;
+ u32 i = num - 1;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & BMAN_RELEASE_FLAG_WAIT)
+ r = wait_rel_start(&p, pool, &irqflags, flags);
+ else
+ r = try_rel_start(&p, pool, &irqflags, flags);
+#else
+ r = try_rel_start(&p, &irqflags, flags);
+#endif
+ if (!r)
+ return -EBUSY;
+ /* We can copy all but the first entry, as copying the first word directly
+ * could trigger badness with the valid-bit. Use the overlay to mask the
+ * verb byte. */
+ o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
+ o_dest->first = (o_src->first & 0x0000ffff) |
+ (((u32)pool->params.bpid << 16) & 0x00ff0000);
+ o_dest->second = o_src->second;
+ if (i)
+ copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ /* If we wish to sync, we need to set the threshold after h/w sees the
+ * new ring entry. As we're mixing cache-enabled and cache-inhibited
+ * accesses, this requires a heavy-weight sync. */
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ hwsync();
+ bm_rcr_set_ithresh(&p->p, 1);
+ }
+#endif
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->rcri_owned != pool));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->rcri_owned != pool));
+ }
+#endif
+ return 0;
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+ pr_crit("Parallel attempts to enter bman_released() detected.");
+ panic("only one instance of bman_released/acquired allowed");
+ }
+#endif
+ /* This needs some explanation. Adding the given buffers may take the
+ * stockpile over the threshold, but in fact the stockpile may already
+ * *be* over the threshold if a previous release-to-hw attempt had
+ * failed. So we have 3 cases to cover;
+ * 1. we add to the stockpile and don't hit the threshold,
+ * 2. we add to the stockpile, hit the threshold and release-to-hw,
+ * 3. we have to release-to-hw before adding to the stockpile
+ * (not enough room in the stockpile for case 2).
+ * Our constraints on thresholds guarantee that in case 3, there must be
+ * at least 8 bufs already in the stockpile, so all release-to-hw ops
+ * are for 8 bufs. Despite all this, the API must indicate whether the
+ * given buffers were taken off the caller's hands, irrespective of
+ * whether a release-to-hw was attempted. */
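+ /* Worked example using the build constants from bman_priv.h (SZ=16,
+ * HIGH=14): with sp_fill=10 and num=8, 10+8 is not < 16, so the bufs
+ * don't fit (case 3); 18 >= 14, so 8 bufs are released to h/w first
+ * (sp_fill drops to 2). On the next iteration 2+8 < 16, so all 8 bufs
+ * are stockpiled (sp_fill becomes 10, num becomes 0) and the loop
+ * exits with success. */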
+ while (num) {
+ /* Add buffers to stockpile if they fit */
+ if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
+ copy_words(pool->sp + pool->sp_fill, bufs,
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill += num;
+ num = 0; /* --> will return success no matter what */
+ }
+ /* Do hw op if hitting the high-water threshold */
+ if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
+ ret = __bman_release(pool,
+ pool->sp + (pool->sp_fill - 8), 8, flags);
+ if (ret) {
+ ret = (num ? ret : 0);
+ goto release_done;
+ }
+ pool->sp_fill -= 8;
+ }
+ }
+release_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_inc(&pool->in_use);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(bman_release);
+
+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
+ u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ int ret;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ copy_words(&bufs[0], &mcr->acquire.bufs[0],
+ num * sizeof(bufs[0]));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+ pr_crit("Parallel attempts to enter bman_acquire() detected.");
+ panic("only one instance of bman_released/acquired allowed");
+ }
+#endif
+ /* Only need a h/w op if we'll hit the low-water thresh */
+ if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
+ (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
+ /* Refill the stockpile with the max amount; if that much isn't
+ * available, fall back to the amount the user wants. */
+ int bufcount = 8;
+
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
+ if (ret < 0 && bufcount != num) {
+ bufcount = num;
+ /* Maybe the buffer pool has fewer than 8 */
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill,
+ bufcount);
+ }
+ if (ret < 0)
+ goto hw_starved;
+ DPA_ASSERT(ret == bufcount);
+ pool->sp_fill += bufcount;
+ } else {
+hw_starved:
+ if (pool->sp_fill < num) {
+ ret = -ENOMEM;
+ goto acquire_done;
+ }
+ }
+ copy_words(bufs, pool->sp + (pool->sp_fill - num),
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill -= num;
+ ret = num;
+acquire_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_inc(&pool->in_use);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
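+
+/*
+ * Sketch of a release/acquire round trip on a pool without a stockpile
+ * (hypothetical caller; 'addr' stands for a real buffer address):
+ *
+ *	struct bm_buffer buf;
+ *	int ret;
+ *
+ *	bm_buffer_set64(&buf, addr);
+ *	if (!bman_release(pool, &buf, 1, BMAN_RELEASE_FLAG_WAIT))
+ *		ret = bman_acquire(pool, &buf, 1, 0);
+ *
+ * On success bman_acquire() returns the number of buffers obtained (here 1);
+ * on failure it returns a negative errno such as -ENOMEM.
+ */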
+
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
+{
+ u8 num;
+ int ret;
+
+ while (pool->sp_fill) {
+ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
+ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
+ num, flags);
+ if (ret)
+ return ret;
+ pool->sp_fill -= num;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(bman_flush_stockpile);
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_query_pools);
+
+#ifdef CONFIG_FSL_BMAN
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+EXPORT_SYMBOL(bman_query_free_buffers);
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+EXPORT_SYMBOL(bman_update_pool_thresholds);
+#endif
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ __maybe_unused unsigned long irqflags;
+ int ret;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ ret = bm_shutdown_pool(&p->p, bpid);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_shutdown_pool);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+#ifndef __rtems__
+ return portal->sharing_redirect ? NULL : portal->config;
+#else /* __rtems__ */
+ return portal->config;
+#endif /* __rtems__ */
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_portal.c b/linux/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 00000000..f9fd022c
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,399 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/*
+ * Global variables for the max portal/pool numbers supported by this BMan
+ * revision
+ */
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+u16 bman_pool_max;
+EXPORT_SYMBOL(bman_pool_max);
+
+/* After initialising cpus that own shared portal configs, we cache the
+ * resulting portals (ie. not just the configs) in this array. Then we
+ * initialise slave cpus that don't have their own portals, redirecting them to
+ * portals from this cache in a round-robin assignment. */
+static struct bman_portal *shared_portals[NR_CPUS] __initdata;
+static int num_shared_portals __initdata;
+static int shared_portals_idx __initdata;
+
+static LIST_HEAD(unused_pcfgs);
+static void *affine_bportals[NR_CPUS];
+
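+/* Mapping flags for the two portal regions: [0] is the cache-enabled (CE)
+ * mapping, [1] the cache-inhibited (CI) one (indexed by DPA_PORTAL_CE/CI in
+ * the probe loop below). */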
+#ifndef __rtems__
+static const int flags[] = {0, _PAGE_GUARDED | _PAGE_NO_CACHE};
+#else /* __rtems__ */
+static const int flags[] = {0, 0};
+#endif /* __rtems__ */
+
+static struct bm_portal_config * __init get_pcfg(struct list_head *list)
+{
+ struct bm_portal_config *pcfg;
+
+ if (list_empty(list))
+ return NULL;
+ pcfg = list_entry(list->prev, struct bm_portal_config, list);
+ list_del(&pcfg->list);
+
+ return pcfg;
+}
+
+static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (p) {
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+#endif
+ pr_info("Portal %sinitialised, cpu %d\n",
+ pcfg->public_cfg.is_shared ? "(shared) " : "",
+ pcfg->public_cfg.cpu);
+ affine_bportals[pcfg->public_cfg.cpu] = p;
+ } else
+ pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
+
+ return p;
+}
+
+static void __init init_slave(int cpu)
+{
+ struct bman_portal *p;
+
+ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+ if (!p)
+ pr_err("Slave portal failure on cpu %d\n", cpu);
+ else
+ pr_info("Portal %s initialised, cpu %d\n", "(slave) ", cpu);
+ if (shared_portals_idx >= num_shared_portals)
+ shared_portals_idx = 0;
+ affine_bportals[cpu] = p;
+}
+
+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
+ * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes
+ * and/or ranges of indexes, with each being optionally prefixed by "s" to
+ * explicitly mark it or them for sharing.
+ * Eg;
+ * bportals=s0,1-3,s4
+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
+ * portals, and any remaining cpus share the portals that are assigned to cpus 0
+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
+ * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu
+ * 0's portal.) */
+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
+static struct cpumask want_shared __initdata; /* cpus requested with "s" */
+
+static int __init parse_bportals(char *str)
+{
+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
+ "bportals");
+}
+__setup("bportals=", parse_bportals);
+
+static void __cold bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (p) {
+ pcfg = bman_get_bm_portal_config(p);
+ if (pcfg)
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cold bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (p) {
+ pcfg = bman_get_bm_portal_config(p);
+ if (pcfg)
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ }
+}
+
+static int __cold bman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ bman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ bman_offline_cpu(cpu);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+ .notifier_call = bman_hotplug_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int __cold bman_portal_probe(struct platform_device *of_dev)
+{
+ struct device *dev = &of_dev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ int i, irq, ret;
+
+ if (!of_device_is_available(node))
+ return -ENODEV;
+
+ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") ||
+ of_device_is_compatible(node, "fsl,bman-portal-2.1.1") ||
+ of_device_is_compatible(node, "fsl,bman-portal-2.1.2") ||
+ of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ dev_err(dev, "Can't allocate portal config\n");
+ return -ENOMEM;
+ }
+
+ for (i = DPA_PORTAL_CE; i <= DPA_PORTAL_CI; i++) {
+ ret = of_address_to_resource(node, i, pcfg->addr_phys + i);
+ if (ret < 0) {
+ dev_err(dev, "Can't get %s property 'reg::%d'\n",
+ node->full_name, i);
+ return ret;
+ }
+ ret = devm_request_resource(dev, &iomem_resource,
+ pcfg->addr_phys + i);
+ if (ret < 0)
+ return ret;
+ pcfg->addr_virt[i] = devm_ioremap_prot(dev,
+ pcfg->addr_phys[i].start,
+ resource_size(pcfg->addr_phys + i),
+ flags[i]);
+ if (!pcfg->addr_virt[i])
+ return -ENXIO;
+ }
+
+ pcfg->public_cfg.cpu = -1;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == NO_IRQ) {
+ dev_err(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ pcfg->public_cfg.irq = irq;
+
+ bman_depletion_fill(&pcfg->public_cfg.mask);
+
+ list_add_tail(&pcfg->list, &unused_pcfgs);
+
+ return 0;
+}
+
+static int __cold bman_portal_remove(struct platform_device *of_dev)
+{
+ return 0;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+ .remove = bman_portal_remove,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int _errno;
+ struct cpumask slave_cpus;
+ struct cpumask unshared_cpus = *cpu_none_mask;
+ struct cpumask shared_cpus = *cpu_none_mask;
+ LIST_HEAD(unshared_pcfgs);
+ LIST_HEAD(shared_pcfgs);
+ struct bm_portal_config *pcfg;
+ struct bman_portal *p;
+ int cpu;
+ struct cpumask offline_cpus;
+
+ _errno = platform_driver_register(drv);
+ if (_errno < 0)
+ return _errno;
+
+/* Initialise the BMan driver. The meat of this function deals with portals. The
+ * following describes the flow of portal-handling, the code "steps" refer to
+ * this description;
+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
+ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
+ * bound).
+ * 2. The "want_shared" and "want_unshared" lists (as filled by the
+ * "bportals=[...]" bootarg) are processed, allocating portals and assigning
+ * them to cpus, placing them in the relevant list and setting ::cpu as
+ * appropriate. If no "bportals" bootarg was present, the default is to try to
+ * assign portals to all online cpus at the time of driver initialisation.
+ * Any failure to allocate portals (when parsing the "want" lists or when
+ * using default behaviour) will be silently tolerated (the "fixup" logic in
+ * step 3 will determine what happens in this case).
+ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
+ * sharing and sharing is required (because not all cpus have been assigned
+ * portals), then one portal will be marked for sharing. Conversely, if no
+ * sharing is required, any portals marked for sharing will not be shared. It
+ * may be that sharing occurs when it wasn't expected, if portal allocation
+ * failed to honour all the requested assignments (including the default
+ * assignments if no bootarg is present).
+ * 4. Unshared portals are initialised on their respective cpus.
+ * 5. Shared portals are initialised on their respective cpus.
+ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
+ * which are selected in a round-robin fashion.
+ */
+ /* Step 2. */
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &want_shared)) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ cpumask_set_cpu(cpu, &shared_cpus);
+ }
+ if (cpumask_test_cpu(cpu, &want_unshared)) {
+ if (cpumask_test_cpu(cpu, &shared_cpus))
+ continue;
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+ /* Default: give an unshared portal to each possible cpu */
+ for_each_possible_cpu(cpu) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ /* Step 3. */
+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+ if (cpumask_empty(&slave_cpus)) {
+ /* No sharing required */
+ if (!list_empty(&shared_pcfgs)) {
+ /* Migrate "shared" to "unshared" */
+ cpumask_or(&unshared_cpus, &unshared_cpus,
+ &shared_cpus);
+ cpumask_clear(&shared_cpus);
+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+ INIT_LIST_HEAD(&shared_pcfgs);
+ }
+ } else {
+ /* Sharing required */
+ if (list_empty(&shared_pcfgs)) {
+ /* Migrate one "unshared" to "shared" */
+ pcfg = get_pcfg(&unshared_pcfgs);
+ if (!pcfg) {
+ pr_crit("No portals available!\n");
+ return 0;
+ }
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ }
+ }
+ /* Step 4. */
+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 0;
+ p = init_pcfg(pcfg);
+ }
+ /* Step 5. */
+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 1;
+ p = init_pcfg(pcfg);
+ if (p)
+ shared_portals[num_shared_portals++] = p;
+ }
+ /* Step 6. */
+ if (!cpumask_empty(&slave_cpus))
+ for_each_cpu(cpu, &slave_cpus)
+ init_slave(cpu);
+ pr_info("Portals initialised\n");
+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, &offline_cpus)
+ bman_offline_cpu(cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+#endif
+
+ bman_seed_bpid_range(0, bman_pool_max);
+
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/linux/drivers/soc/fsl/qbman/bman_priv.h b/linux/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 00000000..e87f17a3
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,136 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+#ifdef CONFIG_FSL_BMAN
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to BMan CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree). */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+#define BM_POOL_THRESH_SW_ENTER 0
+#define BM_POOL_THRESH_SW_EXIT 1
+#define BM_POOL_THRESH_HW_ENTER 2
+#define BM_POOL_THRESH_HW_EXIT 3
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* CONFIG_FSL_BMAN */
+
+#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE)
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+struct bm_portal_config {
+ /* Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited. */
+ __iomem void *addr_virt[2];
+#ifndef __rtems__
+ struct resource addr_phys[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+#endif /* __rtems__ */
+ /* User-visible portal configuration settings */
+ struct bman_portal_config public_cfg;
+};
+
+/* Hooks from bman_driver.c in to bman_high.c */
+struct bman_portal *bman_create_portal(
+ struct bman_portal *portal,
+ const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+ int cpu);
+void bman_destroy_portal(struct bman_portal *bm);
+
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
+ * might fail (if the buffer pool is depleted). So this value provides some
+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
+ * are requested at once or if h/w has been tried a couple of times without
+ * luck. The _HIGH value: when bman_release() is called and the stockpile
+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
+ * the release ring is full). So this value provides some "stagger" so that
+ * ring-access is retried a couple of times prior to the API returning a
+ * failure. The following *must* be true:
+ * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
+ * (to avoid thrashing)
+ * BMAN_STOCKPILE_SZ >= 16
+ * (as the release logic expects to either send 8 buffers to hw prior to
+ * adding the given buffers to the stockpile or add the buffers to the
+ * stockpile before sending 8 to hw, as the API must be an all-or-nothing
+ * success/fail.)
+ */
+#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
+#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
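+
+/* Sanity check of the above: BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW = 12,
+ * which is > 8, and BMAN_STOCKPILE_SZ = 16, which is >= 16, so both
+ * constraints hold. */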
+
+/*************************************************/
+/* BMan s/w corenet portal, low-level i/face */
+/*************************************************/
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write". */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
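+
+/* E.g. bm_isr_status_clear(&p->p, BM_PIRQ_RCRI) writes the RCRI bit back to
+ * the status register to clear it (the status register is write-to-clear, as
+ * relied upon by the portal poll/IRQ code). */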
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
+#endif /* CONFIG_FSL_BMAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.c b/linux/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 00000000..154b7374
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,60 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+#ifdef CONFIG_FSL_BMAN_TEST_THRESH
+ bman_test_thresh();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.h b/linux/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 00000000..9c51c38b
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+void bman_test_api(void);
+void bman_test_thresh(void);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_api.c b/linux/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 00000000..5585bdf6
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,188 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define POOL_OPAQUE ((void *)0xdeadabba)
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+/***************/
+/* global vars */
+/***************/
+
+static struct bman_pool *pool;
+static int depleted;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+/* Predeclare the callback so we can instantiate pool parameters */
+static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
+
+/**********************/
+/* internal functions */
+/**********************/
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
+
+ /* On SoCs with BMan revision 2.0 or 2.1, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ BUG_ON(matches != 1);
+ }
+}
+
+/********/
+/* test */
+/********/
+
+static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
+ void *pool_ctx, int __depleted)
+{
+ BUG_ON(__pool != pool);
+ BUG_ON(pool_ctx != POOL_OPAQUE);
+ depleted = __depleted;
+}
+
+void bman_test_api(void)
+{
+ struct bman_pool_params pparams = {
+ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
+ .cb = depletion_cb,
+ .cb_ctx = POOL_OPAQUE,
+ };
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info(" --- Starting high-level test ---\n");
+
+ pool = bman_new_pool(&pparams);
+ BUG_ON(!pool);
+
+ /*******************/
+ /* Release buffers */
+ /*******************/
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ u32 flags = BMAN_RELEASE_FLAG_WAIT;
+ int num = 8;
+
+ if ((i + num) > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if ((i + num) == NUM_BUFS)
+ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
+ if (bman_release(pool, bufs_in + i, num, flags))
+ panic("bman_release() failed\n");
+ i += num;
+ }
+
+ /*******************/
+ /* Acquire buffers */
+ /*******************/
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
+ BUG_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1, 0);
+ BUG_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /************/
+ /* Clean up */
+ /************/
+ bman_free_pool(pool);
+ pr_info(" --- Finished high-level test ---\n");
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
new file mode 100644
index 00000000..c0f045be
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
@@ -0,0 +1,216 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/* Test constants */
+#define TEST_NUMBUFS 129728
+#define TEST_EXIT 129536
+#define TEST_ENTRY 129024
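+
+/* The constants are ordered TEST_NUMBUFS > TEST_EXIT > TEST_ENTRY, so the
+ * pool is seeded above the depletion-exit threshold; when the drainer empties
+ * it, each cpu should see exactly one depletion-entry callback and no exit
+ * callback (this is what the teardown BUG_ON()s below verify). */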
+
+struct affine_test_data {
+ struct task_struct *t;
+ int cpu;
+#ifndef __rtems__
+ int expect_affinity;
+#endif /* __rtems__ */
+ int drain;
+ int num_enter;
+ int num_exit;
+ struct list_head node;
+ struct completion wakethread;
+ struct completion wakeparent;
+};
+
+static void cb_depletion(struct bman_portal *portal,
+ struct bman_pool *pool,
+ void *opaque,
+ int depleted)
+{
+ struct affine_test_data *data = opaque;
+ int c = smp_processor_id();
+
+ pr_info("%s(): bpid=%d, depleted=%d, cpu=%d, original=%d\n", __func__,
+ bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
+ /* We should be executing on the CPU of the thread that owns the pool,
+ * if that CPU has an affine portal (ie. it isn't slaved). */
+#ifndef __rtems__
+ BUG_ON((c != data->cpu) && data->expect_affinity);
+ BUG_ON((c == data->cpu) && !data->expect_affinity);
+#endif /* __rtems__ */
+ if (depleted)
+ data->num_enter++;
+ else
+ data->num_exit++;
+}
+
+/* Params used to set up a pool, this also dynamically allocates a BPID */
+static const struct bman_pool_params params_nocb = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
+ .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
+};
+
+/* Params used to set up each cpu's pool with callbacks enabled */
+static struct bman_pool_params params_cb = {
+ .bpid = 0, /* will be replaced to match pool_nocb */
+ .flags = BMAN_POOL_FLAG_DEPLETION,
+ .cb = cb_depletion
+};
+
+static struct bman_pool *pool_nocb;
+static LIST_HEAD(threads);
+
+static int affine_test(void *__data)
+{
+ struct bman_pool *pool;
+ struct affine_test_data *data = __data;
+ struct bman_pool_params my_params = params_cb;
+
+ pr_info("Thread %d: starting\n", data->cpu);
+ /* create the pool */
+ my_params.cb_ctx = data;
+ pool = bman_new_pool(&my_params);
+ BUG_ON(!pool);
+ complete(&data->wakeparent);
+ wait_for_completion(&data->wakethread);
+ init_completion(&data->wakethread);
+
+ /* if we're the drainer, we get signalled for that */
+ if (data->drain) {
+ struct bm_buffer buf;
+ int ret;
+
+ pr_info("Thread %d: draining...\n", data->cpu);
+ do {
+ ret = bman_acquire(pool, &buf, 1, 0);
+ } while (ret > 0);
+ pr_info("Thread %d: draining done.\n", data->cpu);
+ complete(&data->wakeparent);
+ wait_for_completion(&data->wakethread);
+ init_completion(&data->wakethread);
+ }
+
+ /* cleanup */
+ bman_free_pool(pool);
+ while (!kthread_should_stop())
+ cpu_relax();
+ pr_info("Thread %d: exiting\n", data->cpu);
+ return 0;
+}
+
+static struct affine_test_data *start_affine_test(int cpu, int drain)
+{
+ struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return NULL;
+ data->cpu = cpu;
+#ifndef __rtems__
+ data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
+#endif /* __rtems__ */
+ data->drain = drain;
+ data->num_enter = 0;
+ data->num_exit = 0;
+ init_completion(&data->wakethread);
+ init_completion(&data->wakeparent);
+ list_add_tail(&data->node, &threads);
+ data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
+#ifndef __rtems__
+ BUG_ON(IS_ERR(data->t));
+#else /* __rtems__ */
+ BUG_ON(data->t == NULL);
+#endif /* __rtems__ */
+ kthread_bind(data->t, cpu);
+ wake_up_process(data->t);
+ return data;
+}
+
+void bman_test_thresh(void)
+{
+ int loop = TEST_NUMBUFS;
+ int ret, num_cpus = 0;
+ struct affine_test_data *data, *drainer = NULL;
+
+ pr_info("%s(): Start\n", __func__);
+
+ /* allocate a BPID and seed it */
+ pool_nocb = bman_new_pool(&params_nocb);
+ BUG_ON(!pool_nocb);
+ while (loop--) {
+ struct bm_buffer buf;
+
+ bm_buffer_set64(&buf, 0x0badbeef + loop);
+ ret = bman_release(pool_nocb, &buf, 1,
+ BMAN_RELEASE_FLAG_WAIT);
+ BUG_ON(ret);
+ }
+ while (!bman_rcr_is_empty())
+ cpu_relax();
+ pr_info("%s(): Buffers are in\n", __func__);
+
+ /* create threads and wait for them to create pools */
+ params_cb.bpid = bman_get_params(pool_nocb)->bpid;
+#ifndef __rtems__
+ for_each_cpu(loop, cpu_online_mask) {
+#else /* __rtems__ */
+ for (loop = 0; loop < rtems_get_processor_count(); ++loop) {
+#endif /* __rtems__ */
+ data = start_affine_test(loop, drainer ? 0 : 1);
+ BUG_ON(!data);
+ if (!drainer)
+ drainer = data;
+ num_cpus++;
+ wait_for_completion(&data->wakeparent);
+ }
+
+ /* signal the drainer to start draining */
+ complete(&drainer->wakethread);
+ wait_for_completion(&drainer->wakeparent);
+ init_completion(&drainer->wakeparent);
+
+ /* tear down */
+ list_for_each_entry_safe(data, drainer, &threads, node) {
+ complete(&data->wakethread);
+ ret = kthread_stop(data->t);
+ BUG_ON(ret);
+ list_del(&data->node);
+ /* check that we get the expected callbacks (and no others) */
+ BUG_ON(data->num_enter != 1);
+ BUG_ON(data->num_exit != 0);
+ kfree(data);
+ }
+ bman_free_pool(pool_nocb);
+
+ pr_info("%s(): Done\n", __func__);
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_utils.c b/linux/drivers/soc/fsl/qbman/bman_utils.c
new file mode 100644
index 00000000..c6fa0b33
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_utils.c
@@ -0,0 +1,76 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* BMan APIs are front-ends to the common code */
+
+static DECLARE_DPAA_RESOURCE(bpalloc); /* BPID allocator */
+
+/* BPID allocator front-end */
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpaa_resource_new(&bpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(bman_alloc_bpid_range);
+
+static int bp_cleanup(u32 bpid)
+{
+ return bman_shutdown_pool(bpid) == 0;
+}
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ u32 total_invalid = dpaa_resource_release(&bpalloc,
+ bpid, count, bp_cleanup);
+
+ if (total_invalid)
+ pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
+ bpid, bpid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(bman_release_bpid_range);
+
+void bman_seed_bpid_range(u32 bpid, u32 count)
+{
+ dpaa_resource_seed(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_seed_bpid_range);
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return dpaa_resource_reserve(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_reserve_bpid_range);
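+
+/*
+ * A minimal usage sketch of the BPID allocator front-end (illustrative
+ * only, not compiled): grab a single BPID, use it, and hand it back.
+ */
+#if 0 /* example */
+static int example_use_bpid(void)
+{
+	u32 bpid;
+	int ret;
+
+	/* one BPID, no alignment constraint, fail rather than go partial */
+	ret = bman_alloc_bpid_range(&bpid, 1, 0, 0);
+	if (ret < 0)
+		return ret; /* -ENOMEM: no free BPIDs */
+	/* ... create and use a pool with this bpid ... */
+	bman_release_bpid_range(bpid, 1);
+	return 0;
+}
+#endif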
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_resource.c b/linux/drivers/soc/fsl/qbman/dpaa_resource.c
new file mode 100644
index 00000000..3f7b74bf
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/dpaa_resource.c
@@ -0,0 +1,363 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(CONFIG_FSL_BMAN_PORTAL) || \
+ defined(CONFIG_FSL_BMAN_PORTAL_MODULE) || \
+ defined(CONFIG_FSL_QMAN_PORTAL) || \
+ defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
+#include "dpaa_sys.h"
+
+/* The allocator is a (possibly-empty) list of these */
+struct dpaa_resource_node {
+ struct list_head list;
+ u32 base;
+ u32 num;
+ /* refcount and is_alloced are only set
+ when the node is in the used list */
+ unsigned int refcount;
+ int is_alloced;
+};
+
+#ifdef DPAA_RESOURCE_DEBUG
+#define DPRINT pr_info
+static void DUMP(struct dpaa_resource *alloc)
+{
+ int off = 0;
+ char buf[256];
+ struct dpaa_resource_node *p;
+
+ pr_info("Free Nodes\n");
+ list_for_each_entry(p, &alloc->free, list) {
+ if (off < 255)
+ off += snprintf(buf + off, 255-off, "{%d,%d}",
+ p->base, p->base + p->num - 1);
+ }
+ pr_info("%s\n", buf);
+
+ off = 0;
+ pr_info("Used Nodes\n");
+ list_for_each_entry(p, &alloc->used, list) {
+ if (off < 255)
+ off += snprintf(buf + off, 255-off, "{%d,%d}",
+ p->base, p->base + p->num - 1);
+ }
+ pr_info("%s\n", buf);
+}
+#else
+#define DPRINT(x...)
+#define DUMP(a)
+#endif
+
+int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
+ u32 count, u32 align, int partial)
+{
+ struct dpaa_resource_node *i = NULL, *next_best = NULL,
+ *used_node = NULL;
+ u32 base, next_best_base = 0, num = 0, next_best_num = 0;
+ struct dpaa_resource_node *margin_left, *margin_right;
+
+ *result = (u32)-1;
+ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
+ DUMP(alloc);
+ /* If 'align' is 0, it should behave as though it was 1 */
+ if (!align)
+ align = 1;
+ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
+ if (!margin_left)
+ goto err;
+ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
+ if (!margin_right) {
+ kfree(margin_left);
+ goto err;
+ }
+ spin_lock_irq(&alloc->lock);
+ list_for_each_entry(i, &alloc->free, list) {
+ base = (i->base + align - 1) / align;
+ base *= align;
+ if ((base - i->base) >= i->num)
+ /* alignment is impossible, regardless of count */
+ continue;
+ num = i->num - (base - i->base);
+ if (num >= count) {
+ /* this one will do nicely */
+ num = count;
+ goto done;
+ }
+ if (num > next_best_num) {
+ next_best = i;
+ next_best_base = base;
+ next_best_num = num;
+ }
+ }
+ if (partial && next_best) {
+ i = next_best;
+ base = next_best_base;
+ num = next_best_num;
+ } else
+ i = NULL;
+done:
+ if (i) {
+ if (base != i->base) {
+ margin_left->base = i->base;
+ margin_left->num = base - i->base;
+ list_add_tail(&margin_left->list, &i->list);
+ } else
+ kfree(margin_left);
+ if ((base + num) < (i->base + i->num)) {
+ margin_right->base = base + num;
+ margin_right->num = (i->base + i->num) -
+ (base + num);
+ list_add(&margin_right->list, &i->list);
+ } else
+ kfree(margin_right);
+ list_del(&i->list);
+ kfree(i);
+ *result = base;
+ }
+ spin_unlock_irq(&alloc->lock);
+err:
+ DPRINT("returning %d\n", i ? num : -ENOMEM);
+ DUMP(alloc);
+ if (!i)
+ return -ENOMEM;
+
+ /* Add the allocation to the used list with a refcount of 1 */
+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+ if (!used_node)
+ return -ENOMEM;
+ used_node->base = *result;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 1;
+ list_add_tail(&used_node->list, &alloc->used);
+ return (int)num;
+}
+EXPORT_SYMBOL(dpaa_resource_new);
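+
+/*
+ * Worked example of the carve above (illustrative): given a free node
+ * {base=5, num=20} and a request for count=4, align=8, the aligned base is
+ * 8, so the node splits into a left margin {5,3}, the allocation {8,4} and
+ * a right margin {12,13}; *result is set to 8 and 4 is returned.
+ */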
+
+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
+ * forcing error-handling onto users in the deallocation path. */
+static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
+ u32 count)
+{
+ struct dpaa_resource_node *i,
+ *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+
+ BUG_ON(!node);
+ DPRINT("release_range(%d,%d)\n", base_id, count);
+ DUMP(alloc);
+ BUG_ON(!count);
+ spin_lock_irq(&alloc->lock);
+
+ node->base = base_id;
+ node->num = count;
+ list_for_each_entry(i, &alloc->free, list) {
+ if (i->base >= node->base) {
+ /* BUG_ON(any overlapping) */
+ BUG_ON(i->base < (node->base + node->num));
+ list_add_tail(&node->list, &i->list);
+ goto done;
+ }
+ }
+ list_add_tail(&node->list, &alloc->free);
+done:
+ /* Merge to the left */
+ i = list_entry(node->list.prev, struct dpaa_resource_node, list);
+ if (node->list.prev != &alloc->free) {
+ BUG_ON((i->base + i->num) > node->base);
+ if ((i->base + i->num) == node->base) {
+ node->base = i->base;
+ node->num += i->num;
+ list_del(&i->list);
+ kfree(i);
+ }
+ }
+ /* Merge to the right */
+ i = list_entry(node->list.next, struct dpaa_resource_node, list);
+ if (node->list.next != &alloc->free) {
+ BUG_ON((node->base + node->num) > i->base);
+ if ((node->base + node->num) == i->base) {
+ node->num += i->num;
+ list_del(&i->list);
+ kfree(i);
+ }
+ }
+ spin_unlock_irq(&alloc->lock);
+ DUMP(alloc);
+}
+
+static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
+ u32 count)
+{
+ struct dpaa_resource_node *i = NULL;
+
+ spin_lock_irq(&alloc->lock);
+
+ /* First find the node in the used list and decrement its ref count */
+ list_for_each_entry(i, &alloc->used, list) {
+ if (i->base == base_id && i->num == count) {
+ --i->refcount;
+ if (i->refcount == 0) {
+ list_del(&i->list);
+ spin_unlock_irq(&alloc->lock);
+ if (i->is_alloced)
+ _dpaa_resource_free(alloc, base_id,
+ count);
+ kfree(i);
+ return;
+ }
+ spin_unlock_irq(&alloc->lock);
+ return;
+ }
+ }
+ /* Couldn't find the allocation */
+ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
+ base_id, count);
+ spin_unlock_irq(&alloc->lock);
+}
+
+/* Same as free but no previous allocation checking is needed */
+void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count)
+{
+ _dpaa_resource_free(alloc, base_id, count);
+}
+EXPORT_SYMBOL(dpaa_resource_seed);
+
+/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
+ * desired range is not available, or 0 for success
+ */
+int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num)
+{
+ struct dpaa_resource_node *i = NULL, *used_node;
+
+ DPRINT("alloc_reserve(%d,%d)\n", base, num);
+ DUMP(alloc);
+
+ spin_lock_irq(&alloc->lock);
+
+	/* Check for the node in the used list.
+	   If found, increase its refcount */
+ list_for_each_entry(i, &alloc->used, list) {
+ if ((i->base == base) && (i->num == num)) {
+ ++i->refcount;
+ spin_unlock_irq(&alloc->lock);
+ return 0;
+ }
+ if ((base >= i->base) && (base < (i->base + i->num))) {
+ /* This is an attempt to reserve a region that was
+ already reserved or alloced with a different
+ base or num */
+			pr_err("Cannot reserve %d - %d, it overlaps with existing reservation from %d - %d\n",
+			       base, base + num - 1, i->base,
+			       i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
+ }
+ /* Check to make sure this ID isn't in the free list */
+ list_for_each_entry(i, &alloc->free, list) {
+ if ((base >= i->base) && (base < (i->base + i->num))) {
+ /* yep, the reservation is within this node */
+			pr_err("Cannot reserve %d - %d, it overlaps with free range %d - %d and must be alloced\n",
+			       base, base + num - 1,
+			       i->base, i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
+ }
+ /* Add the allocation to the used list with a refcount of 1 */
+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+ if (!used_node) {
+ spin_unlock_irq(&alloc->lock);
+ return -ENOMEM;
+
+ }
+ used_node->base = base;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 0;
+ list_add_tail(&used_node->list, &alloc->used);
+ spin_unlock_irq(&alloc->lock);
+ return 0;
+}
+EXPORT_SYMBOL(dpaa_resource_reserve);
+
+/* This is a sort-of-conditional dpaa_resource_free() routine. E.g. when
+ * releasing FQIDs (probably from user-space), it can filter out those
+ * that aren't in the OOS state (better to leak a h/w resource than to
+ * crash). This function returns the number of invalid IDs that were not
+ * released.
+ */
+u32 dpaa_resource_release(struct dpaa_resource *alloc,
+ u32 id, u32 count, int (*is_valid)(u32 id))
+{
+ int valid_mode = 0;
+ u32 loop = id, total_invalid = 0;
+
+ while (loop < (id + count)) {
+ int isvalid = is_valid ? is_valid(loop) : 1;
+
+ if (!valid_mode) {
+ /* We're looking for a valid ID to terminate an invalid
+ * range */
+ if (isvalid) {
+ /* We finished a range of invalid IDs, a valid
+ * range is now underway */
+ valid_mode = 1;
+ count -= (loop - id);
+ id = loop;
+ } else
+ total_invalid++;
+ } else {
+ /* We're looking for an invalid ID to terminate a
+ * valid range */
+ if (!isvalid) {
+			/* Release the range of valid IDs, an invalid
+			 * range is now underway */
+ if (loop > id)
+ dpaa_resource_free(alloc, id,
+ loop - id);
+ valid_mode = 0;
+ }
+ }
+ loop++;
+ }
+ /* Release any unterminated range of valid IDs */
+ if (valid_mode && count)
+ dpaa_resource_free(alloc, id, count);
+ return total_invalid;
+}
+EXPORT_SYMBOL(dpaa_resource_release);
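+
+/*
+ * Filtering example (illustrative): releasing IDs 10..13 with an is_valid()
+ * callback that rejects 10 returns {11,3} to the free list and reports 1,
+ * the number of IDs that were leaked rather than freed.
+ */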
+#endif /* CONFIG_FSL_*MAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_sys.h b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 00000000..85f87800
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,292 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#endif
+
+#include <asm/pgtable.h>
+#ifdef __rtems__
+#include <asm/cache.h>
+#include <asm/mpc85xx.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <bsp/linker-symbols.h>
+#define DPAA_NOCACHENOLOAD_ALIGNED_REGION(designator, size) \
+ BSP_NOCACHENOLOAD_SUBSECTION(designator) __aligned(size) \
+ uint8_t designator[size]
+#endif /* __rtems__ */
+
+struct dpaa_resource {
+ struct list_head free;
+ spinlock_t lock;
+ struct list_head used;
+};
+
+#define DECLARE_DPAA_RESOURCE(name) \
+struct dpaa_resource name = { \
+ .free = { \
+ .prev = &name.free, \
+ .next = &name.free \
+ }, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .used = { \
+ .prev = &name.used, \
+ .next = &name.used \
+ } \
+}
+
+int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
+ u32 count, u32 align, int partial);
+u32 dpaa_resource_release(struct dpaa_resource *alloc,
+ u32 id, u32 count, int (*is_valid)(u32 id));
+void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count);
+int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num);
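+
+/*
+ * Typical allocator lifecycle (illustrative sketch, not compiled): a
+ * subsystem declares a static allocator, seeds it with the ID space it
+ * owns, then hands out ranges through the front-end wrappers.
+ */
+#if 0 /* example */
+static DECLARE_DPAA_RESOURCE(example_alloc);	/* hypothetical allocator */
+
+static void example_init(void)
+{
+	u32 id;
+
+	dpaa_resource_seed(&example_alloc, 0, 64);	/* IDs 0..63 usable */
+	/* eight IDs, 8-aligned, no partial allocation */
+	if (dpaa_resource_new(&example_alloc, &id, 8, 8, 0) == 8)
+		pr_info("got range starting at %u\n", id);
+}
+#endif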
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPA_PORTAL_CE 0
+#define DPA_PORTAL_CI 1
+
+/* Misc inline assists */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+#ifndef __rtems__
+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#else /* __rtems__ */
+ #ifdef __PPC_CPU_E6500__
+ #define lwsync() ppc_light_weight_synchronize()
+ #else
+ #define lwsync() ppc_synchronize_data()
+ #endif
+#endif /* __rtems__ */
+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
+#define dcbi(p) dcbf(p)
+#ifdef CONFIG_PPC_E500MC
+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
+#define dcbz_64(p) dcbzl(p)
+#define dcbf_64(p) dcbf(p)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi(p); \
+ dcbt_ro(p); \
+ } while (0)
+#else
+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
+#define dcbz_64(p) \
+ do { \
+ dcbz((u32)p + 32); \
+ dcbz(p); \
+ } while (0)
+#define dcbf_64(p) \
+ do { \
+ dcbf((u32)p + 32); \
+ dcbf(p); \
+ } while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi(p); \
+ dcbi((u32)p + 32); \
+ dcbt_ro(p); \
+ dcbt_ro((u32)p + 32); \
+ } while (0)
+#endif /* CONFIG_PPC_E500MC */
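+
+/*
+ * Usage note (illustrative): a 64-byte portal ring entry spans two 32-byte
+ * cache blocks on cores without dcbzl, hence the paired forms above, e.g.
+ * dcbz_64(entry) to pre-zero an entry and dcbf_64(entry) to flush it out
+ * once it has been written.
+ */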
+
+static inline u64 mfatb(void)
+{
+ u32 hi, lo, chk;
+
+ do {
+ hi = mfspr(SPRN_ATBU);
+ lo = mfspr(SPRN_ATBL);
+ chk = mfspr(SPRN_ATBU);
+ } while (unlikely(hi != chk));
+ return ((u64)hi << 32) | (u64)lo;
+}
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+#define DPA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPA_ASSERT(x)
+#endif
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ BUG_ON((unsigned long)dest & 0x3);
+ BUG_ON((unsigned long)src & 0x3);
+ BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#endif
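+
+/*
+ * Note that copy_words() insists (via BUG_ON) on 4-byte-aligned pointers
+ * and a size that is a multiple of 4, e.g. copying a 64-byte ring entry
+ * between portal memory and a shadow structure.
+ */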
+
+/* RB-trees */
+
+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
+ * non-linux systems. This also encapsulates the extra plumbing that linux code
+ * usually provides when using RB-trees. This encapsulation assumes that the
+ * data type held by the tree is u32. */
+
+struct dpa_rbtree {
+ struct rb_root root;
+};
+#define DPA_RBTREE { .root = RB_ROOT }
+
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->root = RB_ROOT;
+}
+
+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
+ while (*p) { \
+ u32 item; \
+ parent = *p; \
+ item = rb_entry(parent, type, node_field)->val_field; \
+ if (obj->val_field < item) \
+ p = &parent->rb_left; \
+ else if (obj->val_field > item) \
+ p = &parent->rb_right; \
+ else \
+ return -EBUSY; \
+ } \
+ rb_link_node(&obj->node_field, parent, p); \
+ rb_insert_color(&obj->node_field, &tree->root); \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ rb_erase(&obj->node_field, &tree->root); \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ type *ret; \
+ struct rb_node *p = tree->root.rb_node; \
+ while (p) { \
+ ret = rb_entry(p, type, node_field); \
+ if (val < ret->val_field) \
+ p = p->rb_left; \
+ else if (val > ret->val_field) \
+ p = p->rb_right; \
+ else \
+ return ret; \
+ } \
+ return NULL; \
+}
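+
+/*
+ * Instantiation sketch (illustrative, not compiled): for a hypothetical
+ * struct keyed by a u32 'fqid' with an embedded rb_node 'node', the macro
+ * generates example_push(), example_del() and example_find().
+ */
+#if 0 /* example */
+struct example_obj {
+	struct rb_node node;
+	u32 fqid;
+};
+IMPLEMENT_DPA_RBTREE(example, struct example_obj, node, fqid);
+
+static struct dpa_rbtree example_tree = DPA_RBTREE;
+/* example_push(&example_tree, obj) returns -EBUSY on a duplicate fqid;
+ * example_find(&example_tree, fqid) returns NULL when absent. */
+#endif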
+
+#ifndef __rtems__
+/* Bootargs */
+
+/* QMan has "qportals=" and BMan has "bportals="; both use the same syntax:
+ * a comma-separated list of items, each item being a cpu index and/or a
+ * range of cpu indices, each item optionally prefixed by "s" to indicate
+ * that the portal associated with that cpu should be shared. See
+ * bman_driver.c for more specifics. */
+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
+{
+ *cpu = 0;
+ if (!isdigit(**s))
+ return -EINVAL;
+ while (isdigit(**s))
+ *cpu = *cpu * 10 + (*((*s)++) - '0');
+ return 0;
+}
+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
+ struct cpumask *want_unshared,
+ const char *argname)
+{
+ const char *s = str;
+ unsigned int shared, cpu1, cpu2, loop;
+
+keep_going:
+ if (*s == 's') {
+ shared = 1;
+ s++;
+ } else
+ shared = 0;
+ if (__parse_portals_cpu(&s, &cpu1))
+ goto err;
+ if (*s == '-') {
+ s++;
+ if (__parse_portals_cpu(&s, &cpu2))
+ goto err;
+ if (cpu2 < cpu1)
+ goto err;
+ } else
+ cpu2 = cpu1;
+ for (loop = cpu1; loop <= cpu2; loop++)
+ cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
+ if (*s == ',') {
+ s++;
+ goto keep_going;
+ } else if ((*s == '\0') || isspace(*s))
+ return 0;
+err:
+ pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
+ (unsigned long)s - (unsigned long)str);
+ return -EINVAL;
+}
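+
+/*
+ * Worked example (illustrative): parsing "s0,2-3" marks cpu 0 in
+ * want_shared and cpus 2 and 3 in want_unshared, then returns 0; "2-1"
+ * fails the cpu2 < cpu1 check and returns -EINVAL.
+ */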
+#endif /* __rtems__ */
+#endif /* __DPAA_SYS_H */
diff --git a/linux/drivers/soc/fsl/qbman/qman-debugfs.c b/linux/drivers/soc/fsl/qbman/qman-debugfs.c
new file mode 100644
index 00000000..ecdb6be6
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman-debugfs.c
@@ -0,0 +1,1317 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "qman_priv.h"
+
+#define MAX_FQID (0x00ffffff)
+#define QM_FQD_BLOCK_SIZE 64
+#define QM_FQD_AR (0xC10)
+
+static u32 fqid_max;
+static u64 qman_ccsr_start;
+static u64 qman_ccsr_size;
+
+static const char * const state_txt[] = {
+ "Out of Service",
+ "Retired",
+ "Tentatively Scheduled",
+ "Truly Scheduled",
+ "Parked",
+ "Active, Active Held or Held Suspended",
+ "Unknown State 6",
+ "Unknown State 7",
+ NULL,
+};
+
+static const u8 fqd_states[] = {
+ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
+ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
+ QM_MCR_NP_STATE_ACTIVE};
+
+struct mask_to_text {
+ u16 mask;
+ const char *txt;
+};
+
+struct mask_filter_s {
+ u16 mask;
+ u8 filter;
+};
+
+static const struct mask_filter_s mask_filter[] = {
+ {QM_FQCTRL_PREFERINCACHE, 0},
+ {QM_FQCTRL_PREFERINCACHE, 1},
+ {QM_FQCTRL_HOLDACTIVE, 0},
+ {QM_FQCTRL_HOLDACTIVE, 1},
+ {QM_FQCTRL_AVOIDBLOCK, 0},
+ {QM_FQCTRL_AVOIDBLOCK, 1},
+ {QM_FQCTRL_FORCESFDR, 0},
+ {QM_FQCTRL_FORCESFDR, 1},
+ {QM_FQCTRL_CPCSTASH, 0},
+ {QM_FQCTRL_CPCSTASH, 1},
+ {QM_FQCTRL_CTXASTASHING, 0},
+ {QM_FQCTRL_CTXASTASHING, 1},
+ {QM_FQCTRL_ORP, 0},
+ {QM_FQCTRL_ORP, 1},
+ {QM_FQCTRL_TDE, 0},
+ {QM_FQCTRL_TDE, 1},
+ {QM_FQCTRL_CGE, 0},
+ {QM_FQCTRL_CGE, 1}
+};
+
+static const struct mask_to_text fq_ctrl_text_list[] = {
+ {
+ .mask = QM_FQCTRL_PREFERINCACHE,
+ .txt = "Prefer in cache",
+ },
+ {
+ .mask = QM_FQCTRL_HOLDACTIVE,
+ .txt = "Hold active in portal",
+ },
+ {
+ .mask = QM_FQCTRL_AVOIDBLOCK,
+ .txt = "Avoid Blocking",
+ },
+ {
+ .mask = QM_FQCTRL_FORCESFDR,
+ .txt = "High-priority SFDRs",
+ },
+ {
+ .mask = QM_FQCTRL_CPCSTASH,
+ .txt = "CPC Stash Enable",
+ },
+ {
+ .mask = QM_FQCTRL_CTXASTASHING,
+ .txt = "Context-A stashing",
+ },
+ {
+ .mask = QM_FQCTRL_ORP,
+ .txt = "ORP Enable",
+ },
+ {
+ .mask = QM_FQCTRL_TDE,
+ .txt = "Tail-Drop Enable",
+ },
+ {
+ .mask = QM_FQCTRL_CGE,
+ .txt = "Congestion Group Enable",
+ },
+ {
+ .mask = 0,
+ .txt = NULL,
+ }
+};
+
+static const char *get_fqd_ctrl_text(u16 mask)
+{
+ int i = 0;
+
+ while (fq_ctrl_text_list[i].txt != NULL) {
+ if (fq_ctrl_text_list[i].mask == mask)
+ return fq_ctrl_text_list[i].txt;
+ i++;
+ }
+ return NULL;
+}
+
+static const struct mask_to_text stashing_text_list[] = {
+ {
+ .mask = QM_STASHING_EXCL_CTX,
+ .txt = "FQ Ctx Stash"
+ },
+ {
+ .mask = QM_STASHING_EXCL_DATA,
+ .txt = "Frame Data Stash",
+ },
+ {
+ .mask = QM_STASHING_EXCL_ANNOTATION,
+ .txt = "Frame Annotation Stash",
+ },
+ {
+ .mask = 0,
+ .txt = NULL,
+ },
+};
+
+static int user_input_convert(const char __user *user_buf, size_t count,
+ unsigned long *val)
+{
+ char buf[12];
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ if (kstrtoul(buf, 0, val))
+ return -EINVAL;
+ return 0;
+}
+
+struct line_buffer_fq {
+ u32 buf[8];
+ u32 buf_cnt;
+ int line_cnt;
+};
+
+static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
+ struct seq_file *file)
+{
+ line_buf->buf[line_buf->buf_cnt] = fqid;
+ line_buf->buf_cnt++;
+ if (line_buf->buf_cnt == 8) {
+ /* Buffer is full, flush it */
+ if (line_buf->line_cnt != 0)
+ seq_puts(file, ",\n");
+		seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,0x%06x",
+			   line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
+			   line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
+			   line_buf->buf[6], line_buf->buf[7]);
+ line_buf->buf_cnt = 0;
+ line_buf->line_cnt++;
+ }
+}
+
+static void flush_line_buffer(struct line_buffer_fq *line_buf,
+ struct seq_file *file)
+{
+ if (line_buf->buf_cnt) {
+ int y = 0;
+
+ if (line_buf->line_cnt != 0)
+ seq_puts(file, ",\n");
+ while (y != line_buf->buf_cnt) {
+ if (y+1 == line_buf->buf_cnt)
+ seq_printf(file, "0x%06x", line_buf->buf[y]);
+ else
+ seq_printf(file, "0x%06x,", line_buf->buf[y]);
+ y++;
+ }
+ line_buf->line_cnt++;
+ }
+ if (line_buf->line_cnt)
+ seq_putc(file, '\n');
+}
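+
+/*
+ * Example (illustrative): feeding eleven FQIDs through the two helpers
+ * above prints one full line of eight comma-separated values, and the
+ * final flush_line_buffer() emits the remaining three plus a newline.
+ */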
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/* Query Frame Queue Non Programmable Fields */
+
+struct query_fq_np_fields_data_s {
+ u32 fqid;
+};
+static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
+ .fqid = 1,
+};
+
+static int query_fq_np_fields_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+
+ fq.fqid = query_fq_np_fields_data.fqid;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ /* Print state */
+ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
+ fq.fqid);
+ seq_printf(file, " force eligible pending: %s\n",
+ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
+ seq_printf(file, " retirement pending: %s\n",
+ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
+ seq_printf(file, " state: %s\n",
+ state_txt[np.state & QM_MCR_NP_STATE_MASK]);
+ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
+ seq_printf(file, " odp_seq: %u\n", np.odp_seq);
+ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
+ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
+ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
+ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
+ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
+ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
+ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
+ seq_printf(file, " is: ics_surp contains a %s\n",
+ (np.is) ? "deficit" : "surplus");
+ seq_printf(file, " ics_surp: %u\n", np.ics_surp);
+ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
+ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
+ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
+ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
+ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
+ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
+ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
+ return 0;
+}
+
+static int query_fq_np_fields_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_fq_np_fields_show, NULL);
+}
+
+static ssize_t query_fq_np_fields_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > MAX_FQID)
+ return -EINVAL;
+ query_fq_np_fields_data.fqid = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_fq_np_fields_fops = {
+ .owner = THIS_MODULE,
+ .open = query_fq_np_fields_open,
+ .read = seq_read,
+ .write = query_fq_np_fields_write,
+ .release = single_release,
+};
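+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   echo 0x42 > /sys/kernel/debug/qman/query_fq_np_fields
+ *   cat /sys/kernel/debug/qman/query_fq_np_fields
+ *
+ * The write selects the FQID to query; the read issues the Management
+ * Command and dumps the non-programmable fields listed above.
+ */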
+
+/* Frame Queue Programmable Fields */
+
+struct query_fq_fields_data_s {
+ u32 fqid;
+};
+
+static struct query_fq_fields_data_s query_fq_fields_data = {
+ .fqid = 1,
+};
+
+static int query_fq_fields_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int i = 0;
+
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ fq.fqid = query_fq_fields_data.fqid;
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
+ fq.fqid);
+ seq_printf(file, " orprws: %u\n", fqd.orprws);
+ seq_printf(file, " oa: %u\n", fqd.oa);
+ seq_printf(file, " olws: %u\n", fqd.olws);
+
+ seq_printf(file, " cgid: %u\n", fqd.cgid);
+
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
+ seq_puts(file, " fq_ctrl: None\n");
+ else {
+ i = 0;
+ seq_puts(file, " fq_ctrl:\n");
+ while (fq_ctrl_text_list[i].txt != NULL) {
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+ fq_ctrl_text_list[i].mask)
+ seq_printf(file, " %s\n",
+ fq_ctrl_text_list[i].txt);
+ i++;
+ }
+ }
+ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
+ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
+ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
+ seq_printf(file, " td_mant: %u\n", fqd.td.mant);
+ seq_printf(file, " td_exp: %u\n", fqd.td.exp);
+
+ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
+
+ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
+ /* Any stashing configured */
+ if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
+ seq_puts(file, " ctx_a_stash_exclusive: None\n");
+ else {
+ seq_puts(file, " ctx_a_stash_exclusive:\n");
+ i = 0;
+ while (stashing_text_list[i].txt != NULL) {
+			if ((fqd.context_a.stashing.exclusive & 0x7) &
+			    stashing_text_list[i].mask)
+ seq_printf(file, " %s\n",
+ stashing_text_list[i].txt);
+ i++;
+ }
+ }
+ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
+ fqd.context_a.stashing.annotation_cl);
+ seq_printf(file, " ctx_a_stash_data_cl: %u\n",
+ fqd.context_a.stashing.data_cl);
+ seq_printf(file, " ctx_a_stash_context_cl: %u\n",
+ fqd.context_a.stashing.context_cl);
+ return 0;
+}
+
+static int query_fq_fields_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_fq_fields_show, NULL);
+}
+
+static ssize_t query_fq_fields_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > MAX_FQID)
+ return -EINVAL;
+ query_fq_fields_data.fqid = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_fq_fields_fops = {
+ .owner = THIS_MODULE,
+ .open = query_fq_fields_open,
+ .read = seq_read,
+ .write = query_fq_fields_write,
+ .release = single_release,
+};
+
+/* Query WQ lengths */
+
+struct query_wq_lengths_data_s {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+ } __packed channel;
+ };
+};
+static struct query_wq_lengths_data_s query_wq_lengths_data;
+static int query_wq_lengths_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querywq wq;
+ int i;
+
+ memset(&wq, 0, sizeof(struct qm_mcr_querywq));
+ wq.channel.id = query_wq_lengths_data.channel.id;
+ ret = qman_query_wq(0, &wq);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
+ for (i = 0; i < 8; i++)
+ /* mask out upper 4 bits since they are not part of length */
+ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
+ return 0;
+}
+
+static int query_wq_lengths_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_wq_lengths_show, NULL);
+}
+
+static ssize_t query_wq_lengths_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xfff8)
+ return -EINVAL;
+ query_wq_lengths_data.channel.id = (u16)val;
+ return count;
+}
+
+static const struct file_operations query_wq_lengths_fops = {
+ .owner = THIS_MODULE,
+ .open = query_wq_lengths_open,
+ .read = seq_read,
+ .write = query_wq_lengths_write,
+ .release = single_release,
+};
+
+/* Query CGR */
+
+struct query_cgr_s {
+ u8 cgid;
+};
+static struct query_cgr_s query_cgr_data;
+
+static int query_cgr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querycgr cgrd;
+ struct qman_cgr cgr;
+ int i, j;
+ u32 mask;
+
+ memset(&cgr, 0, sizeof(cgr));
+ memset(&cgrd, 0, sizeof(cgrd));
+ cgr.cgrid = query_cgr_data.cgid;
+ ret = qman_query_cgr(&cgr, &cgrd);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
+ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
+ cgrd.cgr.wr_parm_g.Pn);
+
+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
+ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
+ cgrd.cgr.wr_parm_y.Pn);
+
+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
+ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
+ cgrd.cgr.wr_parm_r.Pn);
+
+	seq_printf(file, " wr_en_g: %u, wr_en_y: %u, wr_en_r: %u\n",
+		   cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
+
+ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ seq_puts(file, " cscn_targ_dcp:\n");
+ mask = 0x80000000;
+ for (i = 0; i < 32; i++) {
+ if (cgrd.cgr.cscn_targ & mask)
+ seq_printf(file, " send CSCN to dcp %u\n",
+ (31 - i));
+ mask >>= 1;
+ }
+
+ seq_puts(file, " cscn_targ_swp:\n");
+ for (i = 0; i < 4; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+				if (cgrd.cscn_targ_swp[i] & mask)
+					seq_printf(file, " send CSCN to swp %u\n",
+						   (127 - (i * 32) - j));
+ mask >>= 1;
+ }
+ }
+ } else {
+ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
+ }
+ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
+ seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
+
+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
+
+ seq_printf(file, " mode: %s\n",
+ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+ "frame count" : "byte count");
+ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
+ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
+
+ return 0;
+}
+
+static int query_cgr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_cgr_show, NULL);
+}
+
+static ssize_t query_cgr_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xff)
+ return -EINVAL;
+ query_cgr_data.cgid = (u8)val;
+ return count;
+}
+
+static const struct file_operations query_cgr_fops = {
+ .owner = THIS_MODULE,
+ .open = query_cgr_open,
+ .read = seq_read,
+ .write = query_cgr_write,
+ .release = single_release,
+};
+
+/* Test Write CGR */
+
+struct test_write_cgr_s {
+ u64 i_bcnt;
+ u8 cgid;
+};
+static struct test_write_cgr_s test_write_cgr_data;
+
+static int testwrite_cgr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_cgrtestwrite result;
+ struct qman_cgr cgr;
+ u64 i_bcnt;
+
+ memset(&cgr, 0, sizeof(struct qman_cgr));
+ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
+ cgr.cgrid = test_write_cgr_data.cgid;
+ i_bcnt = test_write_cgr_data.i_bcnt;
+ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
+ if (ret)
+ return ret;
+ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
+ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
+ result.cgr.wr_parm_g.Pn);
+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
+ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
+ result.cgr.wr_parm_y.Pn);
+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
+ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
+ result.cgr.wr_parm_r.Pn);
+	seq_printf(file, " wr_en_g: %u, wr_en_y: %u, wr_en_r: %u\n",
+		   result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
+ seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
+ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
+ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
+ seq_printf(file, " cs: %u\n", result.cgr.cs);
+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
+
+ /* Add Mode for Si 2 */
+ seq_printf(file, " mode: %s\n",
+ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+ "frame count" : "byte count");
+
+ seq_printf(file, " i_bcnt: %llu\n",
+ qm_mcr_cgrtestwrite_i_get64(&result));
+ seq_printf(file, " a_bcnt: %llu\n",
+ qm_mcr_cgrtestwrite_a_get64(&result));
+ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
+ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
+ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
+ return 0;
+}
+
+static int testwrite_cgr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_show, NULL);
+}
+
+static const struct file_operations testwrite_cgr_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
+ return 0;
+}
+static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_ibcnt_show, NULL);
+}
+
+static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ test_write_cgr_data.i_bcnt = val;
+ return count;
+}
+
+static const struct file_operations testwrite_cgr_ibcnt_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_ibcnt_open,
+ .read = seq_read,
+ .write = testwrite_cgr_ibcnt_write,
+ .release = single_release,
+};
+
+static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
+ return 0;
+}
+static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_cgrid_show, NULL);
+}
+
+static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xff)
+ return -EINVAL;
+ test_write_cgr_data.cgid = (u8)val;
+ return count;
+}
+
+static const struct file_operations testwrite_cgr_cgrid_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_cgrid_open,
+ .read = seq_read,
+ .write = testwrite_cgr_cgrid_write,
+ .release = single_release,
+};
+
+/* Query Congestion State */
+
+static int query_congestion_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querycongestion cs;
+ int i, j, in_cong = 0;
+ u32 mask;
+
+ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
+ ret = qman_query_congestion(&cs);
+ if (ret)
+ return ret;
+ seq_puts(file, "Query Congestion Result\n");
+ for (i = 0; i < 8; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ if (cs.state.__state[i] & mask) {
+ in_cong = 1;
+ seq_printf(file, " cg %u: %s\n", (i*32)+j,
+ "in congestion");
+ }
+ mask >>= 1;
+ }
+ }
+ if (!in_cong)
+		seq_puts(file, " No congestion groups are congested.\n");
+ return 0;
+}
+
+static int query_congestion_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_congestion_show, NULL);
+}
+
+static const struct file_operations query_congestion_fops = {
+ .owner = THIS_MODULE,
+ .open = query_congestion_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/* QMan register */
+
+struct qman_register_s {
+ u32 val;
+};
+static struct qman_register_s qman_register_data;
+
+static void init_ccsrmempeek(void)
+{
+ struct device_node *dn;
+ const u32 *regaddr_p;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dn) {
+ pr_info("No fsl,qman node\n");
+ return;
+ }
+ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
+ if (!regaddr_p) {
+ of_node_put(dn);
+ return;
+ }
+ qman_ccsr_start = of_translate_address(dn, regaddr_p);
+ of_node_put(dn);
+}
+/* This function provides access to the QMan CCSR memory map */
+static int qman_ccsrmempeek(u32 *val, u32 offset)
+{
+ void __iomem *addr;
+ u64 phys_addr;
+
+ if (!qman_ccsr_start)
+ return -EINVAL;
+
+ if (offset > qman_ccsr_size - sizeof(u32))
+ return -EINVAL;
+
+ phys_addr = qman_ccsr_start + offset;
+ addr = ioremap(phys_addr, sizeof(u32));
+ if (!addr) {
+ pr_err("%s(): ioremap() failed\n", __func__);
+ return -EINVAL;
+ }
+ *val = in_be32(addr);
+ iounmap(addr);
+ return 0;
+}
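+
+/*
+ * Example (illustrative): qman_ccsrmempeek(&val, QM_FQD_AR) reads the FQD
+ * address-range register at CCSR offset 0xC10 via a temporary single-word
+ * ioremap(); qman_debugfs_init() below uses exactly this to derive
+ * fqid_max.
+ */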
+
+static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
+{
+ u32 b;
+
+ qman_ccsrmempeek(&b, qman_register_data.val);
+ seq_printf(file, "QMan register offset = 0x%x\n",
+ qman_register_data.val);
+ seq_printf(file, "value = 0x%08x\n", b);
+
+ return 0;
+}
+
+static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_ccsrmempeek_show, NULL);
+}
+
+static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+	if (val > qman_ccsr_size - sizeof(u32)) {
+		pr_info("Input 0x%lx > 0x%llx\n",
+			val, qman_ccsr_size - sizeof(u32));
+		return -EINVAL;
+	}
+	/* offset must be a multiple of 4 */
+	if (val & 0x3) {
+ pr_info("Input 0x%lx not multiple of 4\n", val);
+ return -EINVAL;
+ }
+ qman_register_data.val = val;
+ return count;
+}
+
+static const struct file_operations qman_ccsrmempeek_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_ccsrmempeek_open,
+ .read = seq_read,
+ .write = qman_ccsrmempeek_write,
+};
+
+/* QMan state */
+
+static int qman_fqd_state_show(struct seq_file *file, void *offset)
+{
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+ struct line_buffer_fq line_buf;
+ int ret, i;
+ u8 *state = file->private;
+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+ memset(&line_buf, 0, sizeof(line_buf));
+
+ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ if (*state == (np.state & QM_MCR_NP_STATE_MASK))
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ /* Keep a summary count of all states */
+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+ seq_printf(file, "%s count = %u\n", state_txt[i],
+ qm_fq_state_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_state_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_state_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_state_open,
+ .read = seq_read,
+};
+
+static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ u32 fq_en_cnt = 0, fq_di_cnt = 0;
+ int ret, i;
+ struct mask_filter_s *data = file->private;
+ const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+ seq_printf(file, "List of fq ids with: %s :%s\n",
+ ctrl_txt, (data->filter) ? "enabled" : "disabled");
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ if (data->filter) {
+ if (fqd.fq_ctrl & data->mask)
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ } else {
+ if (!(fqd.fq_ctrl & data->mask))
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ }
+ if (fqd.fq_ctrl & data->mask)
+ fq_en_cnt++;
+ else
+ fq_di_cnt++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_printf(file, "Total FQD with: %s : enabled = %u\n",
+ ctrl_txt, fq_en_cnt);
+ seq_printf(file, "Total FQD with: %s : disabled = %u\n",
+ ctrl_txt, fq_di_cnt);
+ return 0;
+}
+
+/* QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE */
+static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_ctrl_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_ctrl_open,
+ .read = seq_read,
+};
+
+/* QMan ctrl and state summary */
+
+static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
+{
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+ int ret, i;
+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ /* Keep a summary count of all states */
+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+ seq_printf(file, "%s count = %u\n", state_txt[i],
+ qm_fq_state_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i, j;
+ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
+
+ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
+
+ for (i = 1; i < fqid_max; i++) {
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ fq.fqid = i;
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ /* Keep a summary count of all states */
+ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+ mask_filter[j].mask)
+ qm_prog_cnt[j/2]++;
+ }
+ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
+ seq_printf(file, "%s count = %u\n",
+ get_fqd_ctrl_text(mask_filter[i*2].mask),
+ qm_prog_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_summary_show(struct seq_file *file, void *offset)
+{
+ int ret;
+
+ /* Display summary of non programmable fields */
+ ret = qman_fqd_non_prog_summary_show(file, offset);
+ if (ret)
+ return ret;
+ seq_puts(file, "-----------------------------------------\n");
+ /* Display programmable fields */
+ ret = qman_fqd_prog_summary_show(file, offset);
+ return ret;
+}
+
+static int qman_fqd_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_summary_show, NULL);
+}
+
+static const struct file_operations qman_fqd_summary_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_summary_open,
+ .read = seq_read,
+};
+
+/* QMan destination work queue */
+
+struct qman_dest_wq_s {
+ u16 wq_id;
+};
+static struct qman_dest_wq_s qman_dest_wq_data = {
+ .wq_id = 0,
+};
+
+static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i;
+ u16 *wq, wq_id = qman_dest_wq_data.wq_id;
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+	/* use vmalloc: we need a large allocation and don't require it to be
+	 * physically contiguous. */
+ wq = vzalloc(sizeof(u16) * (0xFFFF+1));
+ if (!wq)
+ return -ENOMEM;
+
+	seq_printf(file, "List of fq ids with destination work queue id = 0x%x\n",
+		   wq_id);
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret) {
+ vfree(wq);
+ return ret;
+ }
+ if (wq_id == fqd.dest_wq)
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ wq[fqd.dest_wq]++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_puts(file, "Summary of all FQD destination work queue values\n");
+	for (i = 0; i <= 0xFFFF; i++) {
+		if (wq[i])
+			seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, count = %u\n",
+				   i >> 3, i & 0x7, i, wq[i]);
+	}
+ vfree(wq);
+ return 0;
+}
+
+static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+ qman_dest_wq_data.wq_id = val;
+ return count;
+}
+
+static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_dest_wq_show, NULL);
+}
+
+static const struct file_operations qman_fqd_dest_wq_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_dest_wq_open,
+ .read = seq_read,
+ .write = qman_fqd_dest_wq_write,
+};
+
+/* QMan Intra-Class Scheduling Credit */
+
+static int qman_fqd_cred_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i;
+ u32 fq_cnt = 0;
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+	seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0\n");
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ if (fqd.ics_cred > 0) {
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ fq_cnt++;
+ }
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
+ return 0;
+}
+
+static int qman_fqd_cred_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_cred_show, NULL);
+}
+
+static const struct file_operations qman_fqd_cred_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_cred_open,
+ .read = seq_read,
+};
+
+/* helper macros used in qman_debugfs_init */
+#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
+ do { \
+ d = debugfs_create_file(name, \
+ mode, parent, \
+ data, \
+ fops); \
+ if (d == NULL) { \
+ ret = -ENOMEM; \
+ goto _return; \
+ } \
+ } while (0)
+
+/* dfs_root as parent */
+#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
+ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
+
+/* fqd_root as parent */
+#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
+ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
+
+/* fqd state */
+#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
+ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
+ (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
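+
+/*
+ * For instance, QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
+ * &query_cgr_data, &query_cgr_fops) expands to a debugfs_create_file()
+ * call with dfs_root as parent, jumping to the common cleanup path if the
+ * dentry cannot be created.
+ */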
+
+static int __init qman_debugfs_init(void)
+{
+ int ret = 0;
+ struct dentry *d, *fqd_root;
+ u32 reg;
+
+ fqid_max = 0;
+ init_ccsrmempeek();
+ if (qman_ccsr_start) {
+ if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
+ /* extract the size of the FQD window */
+ reg = reg & 0x3f;
+ /* calculate valid frame queue descriptor range */
+ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
+ }
+ }
+
+ dfs_root = debugfs_create_dir("qman", NULL);
+ fqd_root = debugfs_create_dir("fqd", dfs_root);
+	if (dfs_root == NULL || fqd_root == NULL) {
+		pr_err("Cannot create dir\n");
+		ret = -ENOMEM;
+		goto _return;
+	}
+
+ if (fqid_max) {
+ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
+ NULL, &qman_ccsrmempeek_fops);
+ }
+ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
+ &query_fq_np_fields_data, &query_fq_np_fields_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
+ &query_fq_fields_data, &query_fq_fields_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
+ &query_wq_lengths_data, &query_wq_lengths_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
+ &query_cgr_data, &query_cgr_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
+ NULL, &query_congestion_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
+ NULL, &testwrite_cgr_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
+		NULL, &testwrite_cgr_cgrid_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
+		NULL, &testwrite_cgr_ibcnt_fops);
+
+ /* Create files with fqd_root as parent */
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_oos", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
+ NULL, &qman_fqd_summary_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
+ NULL, &qman_fqd_dest_wq_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
+ NULL, &qman_fqd_cred_fops);
+
+ return 0;
+
+_return:
+ debugfs_remove_recursive(dfs_root);
+
+ return ret;
+}
+
+static void __exit qman_debugfs_exit(void)
+{
+ debugfs_remove_recursive(dfs_root);
+}
+
+module_init(qman_debugfs_init);
+module_exit(qman_debugfs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/linux/drivers/soc/fsl/qbman/qman.c b/linux/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 00000000..aa188888
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,1106 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <asm/cacheflush.h>
+
+/* Last updated for v00.800 of the BG */
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/* Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2. */
+#define FSL_QMAN_CI_SCHED_CFG_SRCCIV 4
+#define FSL_QMAN_CI_SCHED_CFG_SRQ_W 3
+#define FSL_QMAN_CI_SCHED_CFG_RW_W 2
+#define FSL_QMAN_CI_SCHED_CFG_BMAN_W 2
+
+struct qman;
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+union qman_ecir {
+ u32 ecir_raw;
+ struct {
+ u32 __reserved:2;
+ u32 portal_type:1;
+ u32 portal_num:5;
+ u32 fqid:24;
+ } __packed info;
+};
+
+union qman_ecir2 {
+ u32 ecir2_raw;
+ struct {
+ u32 portal_type:1;
+ u32 __reserved:21;
+ u32 portal_num:10;
+ } __packed info;
+};
+
+union qman_eadr {
+ u32 eadr_raw;
+ struct {
+ u32 __reserved1:4;
+ u32 memid:4;
+ u32 __reserved2:12;
+ u32 eadr:12;
+ } __packed info;
+ struct {
+ u32 __reserved1:3;
+ u32 memid:5;
+ u32 __reserved:8;
+ u32 eadr:16;
+ } __packed info_rev3;
+};
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
+ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
+ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
+ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
+ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
+ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
+ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
+ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
+ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
+ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
+ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
+ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
+ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
+ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
+ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
+ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
+};
+#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct qman_error_info_mdata error_mdata[] = {
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
+ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
+ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
+ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
+ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
+ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
+ QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
+};
+#define QMAN_ERR_MDATA_COUNT \
+ (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
+
+/* TODO: make this a Kconfig option */
+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/**
+ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define qm_err_isr_status_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_status)
+#define qm_err_isr_status_clear(qm, m) \
+ __qm_err_isr_write(qm, qm_isr_status, m)
+#define qm_err_isr_enable_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_enable)
+#define qm_err_isr_enable_write(qm, v) \
+ __qm_err_isr_write(qm, qm_isr_enable, v)
+#define qm_err_isr_disable_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_disable)
+#define qm_err_isr_disable_write(qm, v) \
+ __qm_err_isr_write(qm, qm_isr_disable, v)
+#define qm_err_isr_inhibit(qm) \
+ __qm_err_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_err_isr_uninhibit(qm) \
+ __qm_err_isr_write(qm, qm_isr_inhibit, 0)
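+
+/*
+ * Example: qman_init_ccsr() below uses qm_err_isr_status_clear(qm,
+ * QM_EIRQ_PEBI) to drop a stale PFDR-blocked indication before binding the
+ * error interrupt.
+ */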
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Encapsulate "struct qman *" as a cast of the register space address. */
+
+static struct qman *qm_create(void *regs)
+{
+ return (struct qman *)regs;
+}
+
+static inline u32 __qm_in(struct qman *qm, u32 offset)
+{
+ return ioread32be((void *)qm + offset);
+}
+static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
+{
+ iowrite32be(val, (void *)qm + offset);
+}
+#define qm_in(reg) __qm_in(qm, REG_##reg)
+#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
+
+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
+{
+ return __qm_in(qm, REG_ERR_ISR + (n << 2));
+}
+
+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
+{
+ __qm_out(qm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
+ int ed, u8 sernd)
+{
+ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
+ (portal == qm_dc_portal_fman1));
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
+ u8 csw6, u8 csw7)
+{
+ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(struct qman *qm)
+{
+ qm_out(HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(struct qman *qm)
+{
+ qm_out(CI_SCHED_CFG,
+ 0x80000000 | /* write srcciv enable */
+ (FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
+ (FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
+ (FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
+ FSL_QMAN_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_in(IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
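+/*
+ * The AR write below encodes the window size as (ilog2(size) - 1): e.g. the
+ * 4 MiB FQD region gives exp = 22, so 21 is programmed alongside the
+ * enable/priority/stash flag bits.
+ */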
+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
+ int enable, int prio, int stash, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPA_ASSERT(!(ba & (size - 1)));
+ __qm_out(qm, offset, upper_32_bits(ba));
+ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
+ __qm_out(qm, offset + REG_offset_AR,
+ (enable ? 0x80000000 : 0) |
+ (prio ? 0x40000000 : 0) |
+ (stash ? 0x20000000 : 0) |
+ (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
+{
+ qm_out(PFDR_FP_LWIT, th & 0xffffff);
+ qm_out(PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
+{
+ qm_out(SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_in(MCR));
+
+ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt))
+ panic("QMAN_MCR isn't idle");
+
+ /* Write the MCR command params then the verb */
+ qm_out(MCP(0), pfdr_start);
+ /* TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded. */
+ qm_out(MCP(1), (pfdr_start + num - 16));
+ lwsync();
+ qm_out(MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_in(MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these */
+static struct qman *qm;
+#ifndef __rtems__
+static struct device_node *qm_node;
+#endif /* __rtems__ */
+
+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
+ * during qman_init_ccsr(). */
+#ifndef __rtems__
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+#else /* __rtems__ */
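+/* Statically reserved, uncacheable backing stores: 4 MiB FQD, 32 MiB PFDR. */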
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
+#define fqd_a ((uintptr_t)&fqd[0])
+#define fqd_sz sizeof(fqd)
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432);
+#define pfdr_a ((uintptr_t)&pfdr[0])
+#define pfdr_sz sizeof(pfdr)
+#endif /* __rtems__ */
+
+size_t qman_fqd_size(void)
+{
+ return fqd_sz;
+}
+
+#ifndef __rtems__
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
+
+/* Parse the <name> property to extract the memory location and size, and
+ * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default
+ * size. Also flush this memory range from the data cache so that
+ * QMAN-originated transactions for this memory region can be marked
+ * non-coherent.
+ */
+static __init int parse_mem_property(struct device_node *node,
+ dma_addr_t *addr, size_t *sz, int zero)
+{
+ int ret;
+
+ /* If using a "zero-pma", don't try to zero it, even if you asked */
+ if (zero && of_find_property(node, "zero-pma", &ret)) {
+ pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
+ zero = 0;
+ }
+
+ if (zero) {
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(*addr, *sz, 0);
+
+ memset_io(tmpp, 0, *sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + *sz);
+ iounmap(tmpp);
+ }
+
+ return 0;
+}
+#else /* __rtems__ */
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+/* TODO:
+ * - there is obviously no handling of errors,
+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
+ * both memory resources to zero.
+ */
+static int __init fsl_qman_init(struct device_node *node)
+{
+#ifndef __rtems__
+ struct resource res;
+#endif /* __rtems__ */
+ u32 __iomem *regs;
+#ifndef __rtems__
+ const char *s;
+ int ret, standby = 0;
+#endif /* __rtems__ */
+ u16 id;
+ u8 major, minor;
+
+#ifndef __rtems__
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Can't get %s property 'reg'\n", node->full_name);
+ return ret;
+ }
+ s = of_get_property(node, "fsl,hv-claimable", &ret);
+ if (s && !strcmp(s, "standby"))
+ standby = 1;
+ if (!standby) {
+ ret = parse_mem_property(node, &fqd_a, &fqd_sz, 1);
+ BUG_ON(ret);
+ ret = parse_mem_property(node, &pfdr_a, &pfdr_sz, 0);
+ BUG_ON(ret);
+ }
+#else /* __rtems__ */
+ memset((void *)fqd_a, 0, fqd_sz);
+#endif /* __rtems__ */
+ /* Global configuration */
+#ifndef __rtems__
+ regs = ioremap(res.start, res.end - res.start + 1);
+#else /* __rtems__ */
+ regs = (u32 __iomem *)&qoriq.qman;
+#endif /* __rtems__ */
+ qm = qm_create(regs);
+#ifndef __rtems__
+ qm_node = node;
+#endif /* __rtems__ */
+ qm_get_version(qm, &id, &major, &minor);
+ pr_info("Ver: %04x,%02x,%02x\n", id, major, minor);
+ if (!qman_ip_rev) {
+ if ((major == 1) && (minor == 0)) {
+ pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+#ifndef __rtems__
+ iounmap(regs);
+#endif /* __rtems__ */
+ return -ENODEV;
+ } else if ((major == 1) && (minor == 1))
+ qman_ip_rev = QMAN_REV11;
+ else if ((major == 1) && (minor == 2))
+ qman_ip_rev = QMAN_REV12;
+ else if ((major == 2) && (minor == 0))
+ qman_ip_rev = QMAN_REV20;
+ else if ((major == 3) && (minor == 0))
+ qman_ip_rev = QMAN_REV30;
+ else if ((major == 3) && (minor == 1))
+ qman_ip_rev = QMAN_REV31;
+ else {
+			pr_warn("Unknown version, defaulting to rev1.1\n");
+ qman_ip_rev = QMAN_REV11;
+ }
+ }
+
+#ifndef __rtems__
+ if (standby) {
+ pr_info(" -> in standby mode\n");
+ return 0;
+ }
+#endif /* __rtems__ */
+ return 0;
+}
+
+int qman_have_ccsr(void)
+{
+ return qm ? 1 : 0;
+}
+
+#ifndef __rtems__
+__init void qman_init_early(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ for_each_compatible_node(dn, NULL, "fsl,qman") {
+ if (qm)
+ pr_err("%s: only one 'fsl,qman' allowed\n",
+ dn->full_name);
+ else {
+ if (!of_device_is_available(dn))
+ continue;
+
+ ret = fsl_qman_init(dn);
+ BUG_ON(ret);
+ }
+ }
+}
+#else /* __rtems__ */
+#include <bsp/fdt.h>
+
+static void
+qman_sysinit(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct device_node dn;
+ const char *name;
+ int node;
+ int ret;
+
+ memset(&dn, 0, sizeof(dn));
+
+ name = "fsl,qman";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("qman: no qman in FDT");
+
+ dn.full_name = name;
+ dn.offset = node;
+
+ ret = fsl_qman_init(&dn);
+ if (ret != 0)
+ panic("qman: init 1 failed");
+
+ ret = qman_init_ccsr(&dn);
+ if (ret != 0)
+ panic("qman: init CCSR failed");
+
+ ret = qman_init(&dn);
+ if (ret != 0)
+ panic("qman: init 2 failed");
+
+ ret = qman_resource_init();
+ if (ret != 0)
+ panic("qman: resource init failed");
+}
+SYSINIT(qman_sysinit, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
+#endif /* __rtems__ */
+
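+/*
+ * Example: a bit_count of 72 yields i = 3 and mask = 0xff, so EDATA(13) to
+ * EDATA(15) are dumped, with the first word printed masked down to its 8
+ * valid bits.
+ */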
+static void log_edata_bits(u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ pr_warn("ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << (bit_count % 32));
+	}
+	j = 16 - i;
+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
+}
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+ union qman_ecir ecir_val;
+ union qman_eadr eadr_val;
+
+ ecir_val.ecir_raw = qm_in(ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ union qman_ecir2 ecir2_val;
+
+ ecir2_val.ecir2_raw = qm_in(ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("ErrInt: %s id %d\n",
+ ecir2_val.info.portal_type ? "DCP" : "SWP",
+ ecir2_val.info.portal_num);
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = qm_in(EADR);
+ pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info_rev3.memid].txt,
+ error_mdata[eadr_val.info_rev3.memid].addr_mask
+ & eadr_val.info_rev3.eadr);
+ log_edata_bits(
+ error_mdata[eadr_val.info_rev3.memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("ErrInt: %s id %d\n",
+ ecir_val.info.portal_type ? "DCP" : "SWP",
+ ecir_val.info.portal_num);
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = qm_in(EADR);
+ pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info.memid].txt,
+ error_mdata[eadr_val.info.memid].addr_mask
+ & eadr_val.info.eadr);
+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+ }
+ }
+}
+
+/* QMan interrupt handler */
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+ ier_val = qm_err_isr_enable_read(qm);
+ isr_val = qm_err_isr_status_read(qm);
+ ecsr_val = qm_in(ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+ for (i = 0; i < QMAN_HWE_COUNT; i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ pr_warn("ErrInt: %s\n", qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(isr_mask, ecsr_val);
+ /* Re-arm error capture registers */
+ qm_out(ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
+ pr_devel("Un-enabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_err_isr_enable_write(qm, ier_val);
+ }
+ }
+ }
+ qm_err_isr_status_clear(qm, isr_val);
+ return IRQ_HANDLED;
+}
+
+static int __bind_irq(struct device_node *node)
+{
+ int ret, err_irq;
+
+ err_irq = of_irq_to_resource(node, 0, NULL);
+ if (err_irq == NO_IRQ) {
+		pr_err("Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", node);
+ if (ret) {
+ pr_err("request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return -ENODEV;
+ }
+	/* Write-to-clear any stale bits (e.g. starvation asserted prior to
+	 * resource allocation during driver init). */
+ qm_err_isr_status_clear(qm, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_err_isr_enable_write(qm, 0xffffffff);
+ return 0;
+}
+
+int qman_init_ccsr(struct device_node *node)
+{
+ int ret;
+
+#ifndef __rtems__
+ if (!qman_have_ccsr())
+ return 0;
+ if (node != qm_node)
+ return -EINVAL;
+#endif /* __rtems__ */
+ /* FQD memory */
+ qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
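+	/* seed the free pool of 64-byte PFDRs, starting at index 8 */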
+ qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+ /* thresholds */
+ qm_set_pfdr_threshold(qm, 512, 64);
+ qm_set_sfdr_threshold(qm, 128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator(qm);
+ /* HID settings */
+ qm_set_hid(qm);
+ /* Set scheduling weights to defaults */
+ for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
+ qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
+ /* Initialise Error Interrupt Handler */
+ ret = __bind_irq(node);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if (!qman_have_ccsr())
+ return;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_in(REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_in(QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_out(REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_out(QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_in(REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_out(REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_in(QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_out(QCSP_IO_CFG(idx), after);
+ }
+ return 0;
+}
+
+#define MISC_CFG_WPM_MASK 0x00000002
+int qm_set_wpm(int wpm)
+{
+ u32 before;
+ u32 after;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ before = qm_in(MISC_CFG);
+ after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
+ qm_out(MISC_CFG, after);
+ return 0;
+}
+
+int qm_get_wpm(int *wpm)
+{
+ u32 before;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ before = qm_in(MISC_CFG);
+ *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
+ return 0;
+}
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME "fsl-qman"
+
+static ssize_t show_pfdr_fpc(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
+};
+
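+/*
+ * The DLM/RLM averages are fixed-point values with an 8-bit binary fraction;
+ * multiplying the low byte by 390625 (10^8 / 2^8) renders it as eight
+ * decimal digits.
+ */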
+static ssize_t show_dlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data;
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+ return -EINVAL;
+ data = qm_in(DCP_DLM_AVG(i));
+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+ (data & 0x000000ff)*390625);
+};
+
+static ssize_t set_dlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+ return -EINVAL;
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(DCP_DLM_AVG(i), val);
+ return count;
+};
+
+static ssize_t show_pfdr_cfg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
+};
+
+static ssize_t set_pfdr_cfg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(PFDR_CFG, val);
+ return count;
+};
+
+static ssize_t show_sfdr_in_use(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
+};
+
+static ssize_t show_idle_stat(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
+};
+
+static ssize_t show_ci_rlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data = qm_in(CI_RLM_AVG);
+
+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+ (data & 0x000000ff)*390625);
+};
+
+static ssize_t set_ci_rlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(CI_RLM_AVG, val);
+ return count;
+};
+
+static ssize_t show_err_isr(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
+};
+
+
+static ssize_t show_sbec(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+ return -EINVAL;
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
+ show_ci_rlm_avg, set_ci_rlm_avg);
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
+
+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *qman_dev_attributes[] = {
+ &dev_attr_pfdr_fpc.attr,
+ &dev_attr_pfdr_cfg.attr,
+ &dev_attr_idle_stat.attr,
+ &dev_attr_ci_rlm_avg.attr,
+ &dev_attr_err_isr.attr,
+ &dev_attr_dcp0_dlm_avg.attr,
+ &dev_attr_dcp1_dlm_avg.attr,
+ &dev_attr_dcp2_dlm_avg.attr,
+ &dev_attr_dcp3_dlm_avg.attr,
+ /* sfdr_in_use will be added if necessary */
+ NULL
+};
+
+static struct attribute *qman_dev_ecr_attributes[] = {
+ &dev_attr_sbec_0.attr,
+ &dev_attr_sbec_1.attr,
+ &dev_attr_sbec_2.attr,
+ &dev_attr_sbec_3.attr,
+ &dev_attr_sbec_4.attr,
+ &dev_attr_sbec_5.attr,
+ &dev_attr_sbec_6.attr,
+ &dev_attr_sbec_7.attr,
+ &dev_attr_sbec_8.attr,
+ &dev_attr_sbec_9.attr,
+ &dev_attr_sbec_10.attr,
+ &dev_attr_sbec_11.attr,
+ &dev_attr_sbec_12.attr,
+ &dev_attr_sbec_13.attr,
+ &dev_attr_sbec_14.attr,
+ NULL
+};
+
+/* root level */
+static const struct attribute_group qman_dev_attr_grp = {
+ .name = NULL,
+ .attrs = qman_dev_attributes
+};
+static const struct attribute_group qman_dev_ecr_grp = {
+ .name = "error_capture",
+ .attrs = qman_dev_ecr_attributes
+};
+
+static int of_fsl_qman_remove(struct platform_device *ofdev)
+{
+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+ return 0;
+};
+
+static int of_fsl_qman_probe(struct platform_device *ofdev)
+{
+ int ret;
+ struct device *dev = &ofdev->dev;
+
+ ret = sysfs_create_group(&dev->kobj, &qman_dev_attr_grp);
+ if (ret)
+ goto done;
+ ret = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
+ if (ret)
+ goto del_group_0;
+ ret = sysfs_create_group(&dev->kobj, &qman_dev_ecr_grp);
+ if (ret)
+ goto del_group_0;
+
+ goto done;
+
+del_group_0:
+ sysfs_remove_group(&dev->kobj, &qman_dev_attr_grp);
+done:
+ if (ret)
+ dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+ return ret;
+};
+
+static const struct of_device_id of_fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver of_fsl_qman_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_fsl_qman_ids,
+ },
+ .probe = of_fsl_qman_probe,
+ .remove = of_fsl_qman_remove,
+};
+
+builtin_platform_driver(of_fsl_qman_driver);
+
+#endif /* CONFIG_SYSFS */
diff --git a/linux/drivers/soc/fsl/qbman/qman.h b/linux/drivers/soc/fsl/qbman/qman.h
new file mode 100644
index 00000000..331db7c7
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman.h
@@ -0,0 +1,1133 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces,
+ * which merely ensure that the compiler treats the portal registers as
+ * volatile (i.e. non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) __raw_readl((qm)->addr_ci + (o))
+#define __qm_out(qm, o, val) __raw_writel((val), (qm)->addr_ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
+#define __qm_cl_in(qm, o) __raw_readl((qm)->addr_ce + (o))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->addr_ce + (o); \
+ __raw_writel((val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
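+/*
+ * Example: with ringsize 8, first 6 and last 2, the producer has wrapped,
+ * so the occupied span is 8 + 2 - 6 = 4 entries.
+ */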
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode.
+ * Enum values use 3-letter codes. The first letter matches the portal mode,
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
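+/* Ring entries are 64 bytes (one cacheline) each; see the qm_cl() helper. */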
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *addr_ce; /* cache-enabled */
+ void __iomem *addr_ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ... */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* --- EQCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
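+/*
+ * Example: EQCR entries are 64 bytes and the ring is 8 deep, so it occupies
+ * a 512-byte aligned span. Stepping one entry past the end sets the 0x200
+ * "carry" bit, which the mask above clears, wrapping the pointer back to
+ * the ring base.
+ */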
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'. */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case. */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPA_ASSERT(!eqcr->busy);
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+		pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dcbz_64(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dcbz_64(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPA_ASSERT(eqcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPA_ASSERT(eqcr->busy);
+ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbz_64(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPA_ASSERT(eqcr->busy); \
+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
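+/* ORP and FQID are 24-bit fields; the asserts above catch any overflow. */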
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+/* FIXME: many possible improvements;
+ * - look at changing the API to use pointer rather than index parameters now
+ * that 'cursor' is a pointer,
+ * - consider moving other parameters to pointer if it could help (ci)
+ */
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline const struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dcbi(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+#ifndef __rtems__
+ 0xa0 | /* RE+SE */
+#endif /* __rtems__ */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads". */
+#ifdef __rtems__
+ dcbi(res);
+#endif /* __rtems__ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPA_ASSERT((dqrr->ring + idx) == dq);
+ DPA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
+{
+ qm_out(DQRR_PDQCR, pdqcr);
+}
+
+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_PDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.addr_ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_cursor(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ return MR_PTR2IDX(mr->cursor);
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline u8 qm_mr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u8 diff, old_pi = mr->pi;
+
+ DPA_ASSERT(mr->pmode == qm_mr_pci);
+ mr->pi = qm_in(MR_PI_CINH);
+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+ mr->fill += diff;
+ return diff;
+}
+
+static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->pmode == qm_mr_pce);
+ qm_cl_invalidate(MR_PI);
+ qm_cl_touch_ro(MR_PI);
+}
+
+static inline u8 qm_mr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u8 diff, old_pi = mr->pi;
+
+ DPA_ASSERT(mr->pmode == qm_mr_pce);
+ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+ mr->fill += diff;
+ return diff;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPA_ASSERT(mr->pmode == qm_mr_pvb);
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads". */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ qm_cl_invalidate(MR_CI);
+ qm_cl_touch_rw(MR_CI);
+}
+
+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline u8 qm_mr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ return mr->ci;
+}
+
+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ return mr->ithresh;
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.addr_ce + QM_CL_CR;
+ mc->rr = portal->addr.addr_ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ QM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == qman_mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == qman_mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == qman_mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering... */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
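+
+/*
+ * Editor's sketch: the canonical management-command sequence built from the
+ * API above, as used throughout this driver (QUERYFQ is only an example verb;
+ * "portal" and "fqid" are assumed to be in scope):
+ *
+ *	struct qm_mc_command *mcc = qm_mc_start(portal);
+ *	struct qm_mc_result *mcr;
+ *
+ *	mcc->queryfq.fqid = fqid;
+ *	qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
+ *	while (!(mcr = qm_mc_result(portal)))
+ *		cpu_relax();
+ */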
+
+/* --- Portal interrupt register API --- */
+
+static inline int qm_isr_init(__always_unused struct qm_portal *portal)
+{
+ return 0;
+}
+
+static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
+{
+}
+
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_api.c b/linux/drivers/soc/fsl/qbman/qman_api.c
new file mode 100644
index 00000000..e838d08f
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_api.c
@@ -0,0 +1,3026 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman.h"
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it is
+ * positive, and rounding to the closest value if it is zero. NB, this macro
+ * implicitly promotes its parameters to unsigned 64-bit, so pass types that
+ * are compatible with that. Also note that each argument may be evaluated
+ * more than once, so do not pass expressions with side effects (e.g.
+ * "some_value++") to the macro! */
+#define ROUNDING(n, d, r) \
+ (((r) < 0) ? div64_u64((n), (d)) : \
+ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
+ div64_u64(((n) + ((d) / 2)), (d))))
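+
+/*
+ * Editor's note: a worked example of ROUNDING(). With n = 10 and d = 4,
+ * ROUNDING(10, 4, -1) = div64_u64(10, 4) = 2 (round down),
+ * ROUNDING(10, 4, 1) = div64_u64(13, 4) = 3 (round up), and
+ * ROUNDING(10, 4, 0) = div64_u64(12, 4) = 3 (10/4 = 2.5 rounds to the
+ * closest value).
+ */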
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...). */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
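+
+/*
+ * Editor's sketch: FQLOCK()/FQUNLOCK() bracket FQ state updates throughout
+ * this file, e.g. (mirroring clear_vdqcr() later on):
+ *
+ *	FQLOCK(fq);
+ *	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ *	FQUNLOCK(fq);
+ */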
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ clear_bits(mask, &fq->flags);
+}
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spinlock_t sharing_lock; /* only used if is_shared */
+#ifndef __rtems__
+ int is_shared;
+ struct qman_portal *sharing_redirect;
+#endif /* __rtems__ */
+#endif
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead. */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+#ifndef __rtems__
+ /* This is needed for providing a non-NULL device to dma_map_***() */
+ struct platform_device *pdev;
+#endif /* __rtems__ */
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ /* track if memory was allocated by the driver */
+ u8 alloced;
+};
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+ else \
+ local_irq_save(irqflags); \
+ } while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+ irqflags); \
+ else \
+ local_irq_restore(irqflags); \
+ } while (0)
+#else /* __rtems__ */
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
+#endif /* __rtems__ */
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler. */
+static qman_cb_dc_ern cb_dc_ern;
+
+#ifndef __rtems__
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+#ifndef __rtems__
+void *affine_portals[NR_CPUS];
+#endif /* __rtems__ */
+
+/* "raw" gets the cpu-local struct whether it's a redirect or not. */
+static inline struct qman_portal *get_raw_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+/* For ops that can redirect, this obtains the portal to use */
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct qman_portal *get_affine_portal(void)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return p->sharing_redirect;
+#endif /* __rtems__ */
+ return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+/* For every "get", there must be a "put" */
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+/* Exception: poll functions assume the caller is cpu-affine and at no risk of
+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
+ * semantics, i.e. to disable pre-emption. Some use-cases expect the execution
+ * context to remain as non-atomic during poll-triggered callbacks as it was
+ * when the poll API was first called (e.g. NAPI), so in this case we go out of
+ * our way not to disable pre-emption. */
+static inline struct qman_portal *get_poll_portal(void)
+{
+ return this_cpu_ptr(&qman_affine_portal);
+}
+#define put_poll_portal()
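+
+/*
+ * Editor's sketch: every "get" must be balanced by the matching "put" on the
+ * same code path, since the get side disables pre-emption via get_cpu_var():
+ *
+ *	struct qman_portal *p = get_affine_portal();
+ *
+ *	... operate on the portal ...
+ *	put_affine_portal();
+ */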
+
+/* This gives an FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table. */
+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
+
+/* This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on. */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+	/* Allocate 1 more entry since the first entry is not used */
+	num_entries++;
+ qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table)
+ return -ENOMEM;
+ qman_fq_lookup_table_size = num_entries;
+ pr_info("Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table, (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+	/* Can't use index zero because this has special meaning
+	 * in the context_b field. */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
+#endif
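+
+/*
+ * Editor's note: the lifecycle of a lookup-table slot. qman_create_fq()
+ * claims one via find_empty_fq_table_entry(&fq->key, fq); qman_init_fq()
+ * then programs the key into the FQD's contextB field so the DQRR/MR
+ * handlers can recover the object with get_fq_table_entry(), and
+ * qman_destroy_fq() releases the slot with clear_fq_table_entry(fq->key).
+ */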
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (i.e. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We use a decrementer
+ * with two reload constants, so the fast poll runs 'n' times for every one
+ * slow poll. The idle constant is used when the last slow-poll detected no
+ * work to do, and the busy constant when the last slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK;
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI) {
+ clear |= QM_PIRQ_DQRI;
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ }
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues(). */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /* if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here. */
+ u64 now, then = mfatb();
+
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *__p;
+#ifndef __rtems__
+ char buf[16];
+#endif /* __rtems__ */
+ int ret;
+ u32 isdr;
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ if (!portal)
+ return portal;
+ portal->alloced = 1;
+ } else
+ portal->alloced = 0;
+
+ __p = &portal->p;
+
+#ifndef __rtems__
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
+ 1 : 0);
+#else /* __rtems__ */
+ portal->use_eqcr_ci_stashing = 0;
+#endif /* __rtems__ */
+
+	/* Prep the low-level portal struct with the mapped addresses from the
+	 * config; everything that follows depends on it, and "config" is kept
+	 * mostly for (de)referencing later. */
+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(__p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ pr_err("EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(__p)) {
+ pr_err("MC initialisation failed\n");
+ goto fail_mc;
+ }
+ if (qm_isr_init(__p)) {
+ pr_err("ISR initialisation failed\n");
+ goto fail_isr;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(__p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(__p, QMAN_PIRQ_MR_ITHRESH);
+ qm_isr_set_iperiod(__p, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ portal->eqci_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spin_lock_init(&portal->sharing_lock);
+#ifndef __rtems__
+ portal->is_shared = config->public_cfg.is_shared;
+ portal->sharing_redirect = NULL;
+#endif /* __rtems__ */
+#endif
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+#ifndef __rtems__
+ sprintf(buf, "qportal-%d", config->public_cfg.channel);
+ portal->pdev = platform_device_alloc(buf, -1);
+ if (!portal->pdev)
+ goto fail_devalloc;
+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+ goto fail_devadd;
+ ret = platform_device_add(portal->pdev);
+ if (ret)
+ goto fail_devadd;
+#endif /* __rtems__ */
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(__p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(__p, portal->irq_sources);
+ qm_isr_status_clear(__p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+#ifndef __rtems__
+ if ((config->public_cfg.cpu != -1) &&
+ irq_can_set_affinity(config->public_cfg.irq) &&
+ irq_set_affinity(config->public_cfg.irq,
+ cpumask_of(config->public_cfg.cpu))) {
+ pr_err("irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+#endif /* __rtems__ */
+
+ /* Need EQCR to be empty before continuing */
+ isdr ^= QM_PIRQ_EQCI;
+ qm_isr_disable_write(__p, isdr);
+ ret = qm_eqcr_get_fill(__p);
+ if (ret) {
+ pr_err("EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(__p, isdr);
+ if (qm_dqrr_current(__p) != NULL) {
+ pr_err("DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(__p, 0xffff);
+ }
+ if (qm_mr_current(__p) != NULL) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(__p)) {
+ const struct qm_mr_entry *e = qm_mr_current(__p);
+
+			pr_err("MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ goto fail_dqrr_mr_empty;
+ }
+ }
+ /* Success */
+ portal->config = config;
+ qm_isr_disable_write(__p, 0);
+ qm_isr_uninhibit(__p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(__p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
+#ifndef __rtems__
+ platform_device_del(portal->pdev);
+fail_devadd:
+ platform_device_put(portal->pdev);
+fail_devalloc:
+#endif /* __rtems__ */
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_isr_finish(__p);
+fail_isr:
+ qm_mc_finish(__p);
+fail_mc:
+ qm_mr_finish(__p);
+fail_mr:
+ qm_dqrr_finish(__p);
+fail_dqrr:
+ qm_eqcr_finish(__p);
+fail_eqcr:
+ return NULL;
+}
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal;
+
+ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
+ res = qman_create_portal(portal, config, cgrs);
+ if (res) {
+#ifndef __rtems__
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+ affine_channels[config->public_cfg.cpu] =
+ config->public_cfg.channel;
+ affine_portals[config->public_cfg.cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+ }
+ return res;
+}
+
+#ifndef __rtems__
+/* These checks are BUG_ON()s because the driver is already supposed to avoid
+ * these cases. */
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+ struct qman_portal *p = &per_cpu(qman_affine_portal, cpu);
+
+ /* Check that we don't already have our own portal */
+ BUG_ON(p->config);
+ /* Check that we aren't already slaving to another portal */
+ BUG_ON(p->is_shared);
+ /* Check that 'redirect' is prepared to have us */
+ BUG_ON(!redirect->config->public_cfg.is_shared);
+ /* These are the only elements to initialise when redirecting */
+ p->irq_sources = 0;
+ p->sharing_redirect = redirect;
+ affine_portals[cpu] = p;
+ return p;
+#else
+ BUG();
+ return NULL;
+#endif
+}
+#endif /* __rtems__ */
+
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began. */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->public_cfg.irq, qm);
+
+ kfree(qm->cgrs);
+ qm_isr_finish(&qm->p);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+#ifndef __rtems__
+ platform_device_del(qm->pdev);
+ platform_device_put(qm->pdev);
+#endif /* __rtems__ */
+
+ qm->config = NULL;
+ if (qm->alloced)
+ kfree(qm);
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm = get_raw_affine_portal();
+ const struct qm_portal_config *pcfg;
+#ifndef __rtems__
+ int cpu;
+#endif /* __rtems__ */
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (qm->sharing_redirect) {
+ qm->sharing_redirect = NULL;
+ put_affine_portal();
+ return NULL;
+ }
+ qm->is_shared = 0;
+#endif /* __rtems__ */
+#endif
+ pcfg = qm->config;
+#ifndef __rtems__
+ cpu = pcfg->public_cfg.cpu;
+#endif /* __rtems__ */
+
+ qman_destroy_portal(qm);
+
+#ifndef __rtems__
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+ put_affine_portal();
+ return pcfg;
+}
+
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
+{
+ return &p->config->public_cfg;
+}
+EXPORT_SYMBOL(qman_p_get_portal_config);
+
+const struct qman_portal_config *qman_get_portal_config(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qman_portal_config *ret = qman_p_get_portal_config(p);
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_get_portal_config);
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPA_ASSERT(fq->state == qman_fq_state_sched);
+ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+ unsigned long irqflags __maybe_unused;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ }
+
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (is & QM_PIRQ_EQCI) {
+ unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ p->eqci_owned = NULL;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ }
+#endif
+
+ if (is & QM_PIRQ_EQRI) {
+ unsigned long irqflags __maybe_unused;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p, msg->fq.fqid);
+ BUG_ON(!fq);
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(msg->fq.contextB);
+#else
+ fq = (void *)(uintptr_t)msg->fq.contextB;
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+			/* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(msg->ern.tag);
+#else
+ fq = (void *)(uintptr_t)msg->ern.tag;
+#endif
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined. */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
+ * that would conflict with other things if they ran at the same time on the
+ * same cpu are;
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API *except* qman_poll() (as that's the
+ * sole API that could be invoking the callback through this function).
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+loop:
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ goto done;
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /* VDQCR: don't trust contextB as the FQ may have been
+ * configured for h/w consumption and we're draining it
+ * post-retirement. */
+ fq = p->vdqcr_owned;
+ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
+ * to check for clearing it when doing volatile dequeues. It's
+ * one less thing to check in the critical path (SDQCR). */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /* this is duplicated from the SDQCR code, but we have stuff to
+ * do before *and* after this callback, and we don't want
+ * multiple if()s in the critical path (SDQCR). */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ goto done;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: contextB points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+		/* The callback can request that we exit without consuming this
+		 * entry or advancing. */
+ if (res == qman_cb_dqrr_stop)
+ goto done;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /* Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both. */
+ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* Defer just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /* Entry processed and consumed, increment our counter. The callback can
+ * request that we exit after consuming the entry, and we also exit if
+ * we reach our processing limit, so loop back only if neither of these
+ * conditions is met. */
+ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
+ goto loop;
+done:
+ return limit;
+}
+
+u32 qman_irqsource_get(void)
+{
+	/* "irqsource" and "poll" APIs mustn't redirect when sharing; they
+ * should shut the user out if they are not the primary CPU hosting the
+ * portal. That's why we use the "raw" interface. */
+ struct qman_portal *p = get_raw_affine_portal();
+ u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_get);
+
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
+{
+ __maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return -EINVAL;
+#endif /* __rtems__ */
+#endif
+ PORTAL_IRQ_LOCK(p, irqflags);
+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+int qman_irqsource_add(u32 bits __maybe_unused)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+
+ ret = qman_p_irqsource_add(p, bits);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_add);
+
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ __maybe_unused unsigned long irqflags;
+ u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect) {
+ put_affine_portal();
+ return -EINVAL;
+ }
+#endif /* __rtems__ */
+#endif
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. I.e. we clear them from ISR once it's certain
+ * IER won't allow them to reassert. */
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering. */
+ qm_isr_status_clear(&p->p, ~ier);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+
+ ret = qman_p_irqsource_remove(p, bits);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_remove);
+
+#ifndef __rtems__
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_raw_affine_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ BUG_ON(portal->sharing_redirect);
+#endif /* __rtems__ */
+#endif
+ cpu = portal->config->public_cfg.cpu;
+ put_affine_portal();
+ }
+ BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+#endif /* __rtems__ */
+
+void *qman_get_affine_portal(int cpu)
+{
+#ifndef __rtems__
+ return affine_portals[cpu];
+#else /* __rtems__ */
+ return &per_cpu(qman_affine_portal, cpu);
+#endif /* __rtems__ */
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ int ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ ret = -EINVAL;
+ else
+#endif /* __rtems__ */
+#endif
+ {
+ BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
+ ret = __poll_portal_fast(p, limit);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_poll_portal();
+ int ret;
+
+ ret = qman_p_poll_dqrr(p, limit);
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_poll_dqrr);
+
+u32 qman_p_poll_slow(struct qman_portal *p)
+{
+ u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ ret = (u32)-1;
+ else
+#endif /* __rtems__ */
+#endif
+ {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+
+ ret = __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_slow);
+
+u32 qman_poll_slow(void)
+{
+ struct qman_portal *p = get_poll_portal();
+ u32 ret;
+
+ ret = qman_p_poll_slow(p);
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_poll_slow);
+
+/* Legacy wrapper */
+void qman_p_poll(struct qman_portal *p)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ return;
+#endif /* __rtems__ */
+#endif
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+}
+EXPORT_SYMBOL(qman_p_poll);
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_poll_portal();
+
+ qman_p_poll(p);
+ put_poll_portal();
+}
+EXPORT_SYMBOL(qman_poll);
+
+void qman_p_stop_dequeues(struct qman_portal *p)
+{
+ qman_stop_dequeues_ex(p);
+}
+EXPORT_SYMBOL(qman_p_stop_dequeues);
+
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_p_stop_dequeues(p);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_stop_dequeues);
+
+void qman_p_start_dequeues(struct qman_portal *p)
+{
+ unsigned long irqflags __maybe_unused;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ DPA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_start_dequeues);
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_p_start_dequeues(p);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_start_dequeues);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags __maybe_unused;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ pools &= p->config->public_cfg.pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+void qman_static_dequeue_add(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_p_static_dequeue_add(p, pools);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_add);
+
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags __maybe_unused;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ pools &= p->config->public_cfg.pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_del);
+
+void qman_static_dequeue_del(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_p_static_dequeue_del(p, pools);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_del);
+
+u32 qman_p_static_dequeue_get(struct qman_portal *p)
+{
+ return p->sdqcr;
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_get);
+
+u32 qman_static_dequeue_get(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ret = qman_p_static_dequeue_get(p);
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_static_dequeue_get);
+
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+ int park_request)
+{
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+EXPORT_SYMBOL(qman_p_dca);
+
+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_p_dca(p, dq, park_request);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_dca);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
+ return -ENOMEM;
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those. */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq->state |= QMAN_FQ_STATE_CGR_EN;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+err:
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
+EXPORT_SYMBOL(qman_create_fq);
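+
+/*
+ * Editor's sketch (not part of the driver): minimal consumer-side use of the
+ * FQ API, assuming a hypothetical DQRR callback my_dqrr_cb supplied by the
+ * caller:
+ *
+ *	static struct qman_fq fq = {
+ *		.cb.dqrr = my_dqrr_cb,
+ *	};
+ *	int err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
+ *
+ *	if (!err)
+ *		err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ */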
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+
+ /* We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks. */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
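+		/* fall through */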
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+	DPA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+EXPORT_SYMBOL(qman_fq_state);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+		/* OAC can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = fq->fqid;
+ mcc->initfq.count = 0;
+ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand; don't overwrite it. */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = fq->key;
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /* and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings. */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+#ifndef __rtems__
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
+#else /* __rtems__ */
+ phys_fq = (dma_addr_t)fq;
+#endif /* __rtems__ */
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+	/* Issue an ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change(). */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /* Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI... */
+ struct qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return rval;
+}
+EXPORT_SYMBOL(qman_retire_fq);
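+
+/*
+ * Caller pattern (a sketch, not part of this file): the three outcomes of
+ * qman_retire_fq() map to distinct caller actions, assuming a 'fq' that is
+ * currently parked or scheduled.
+ *
+ *   u32 fqflags;
+ *   int rc = qman_retire_fq(&fq, &fqflags);
+ *
+ *   if (rc == 0)
+ *           ; // retired immediately; cb.fqs already saw a faked FQRNI
+ *   else if (rc == 1)
+ *           ; // retirement pending; wait for the FQRN via cb.fqs
+ *   else
+ *           ; // -EINVAL or -EBUSY (bad state), -EIO (command failed)
+ */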
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
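+
+/*
+ * Teardown sketch (illustrative, assuming retirement has completed): an FQ
+ * must be in qman_fq_state_retired before it can go OOS, so a minimal
+ * teardown is retire, wait out any pending FQRN, then:
+ *
+ *   if (qman_oos_fq(&fq) == 0)
+ *           qman_destroy_fq(&fq, 0); // destructor assumed from this API
+ */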
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_fq_flow_control);
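+
+/*
+ * Usage sketch (illustrative): XOFF a scheduled FQ and XON it again later.
+ * Both calls fail with -EINVAL/-EBUSY unless the FQ is in a schedulable
+ * state, per the checks above.
+ *
+ *   int rc = qman_fq_flow_control(&fq, 0); // XOFF: stop dequeues
+ *   ...
+ *   if (!rc)
+ *           rc = qman_fq_flow_control(&fq, 1); // XON: resume
+ */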
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_fq);
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
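+
+/*
+ * Query sketch (illustrative; the frm_cnt field name is an assumption
+ * based on this driver's QUERYFQ_NP result layout):
+ *
+ *   struct qm_mcr_queryfq_np np;
+ *   int rc = qman_query_fq_np(&fq, &np);
+ *
+ *   if (rc == -ERANGE)
+ *           ; // FQID out of range on this part
+ *   else if (!rc)
+ *           pr_info("frame count %u\n", np.frm_cnt); // assumed field
+ */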
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res, myverb;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = wq->channel.id;
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *wq = mcr->querywq;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_wq);
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_testwrite_cgr);
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr);
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *congestion = mcr->querycongestion;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_congestion);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags __maybe_unused;
+ int ret = -EBUSY;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+#ifndef __rtems__
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+#ifndef __rtems__
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !(ret = set_vdqcr(p, fq, vdqcr)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
+ return ret;
+}
+#endif
+
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+ u32 flags __maybe_unused, u32 vdqcr)
+{
+ int ret;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
+ else
+#endif
+ ret = set_p_vdqcr(p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+#ifndef __rtems__
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /* NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue. */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_volatile_dequeue);
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+#endif
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+#ifndef __rtems__
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /* NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue. */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
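+
+/*
+ * Volatile-dequeue sketch (illustrative): drain up to three frames from a
+ * parked/retired FQ and block until the command completes. The NUMFRAMES
+ * macro matches its use in qm_shutdown_fq() below.
+ *
+ *   int rc = qman_volatile_dequeue(&fq,
+ *                   QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ *                   QM_VDQCR_NUMFRAMES_SET(3));
+ *
+ * The dequeued frames themselves arrive through the portal's DQRR
+ * callbacks, not through this call's return value.
+ */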
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return avail == 0;
+}
+EXPORT_SYMBOL(qman_eqcr_is_empty);
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ unsigned long irqflags __maybe_unused;
+ struct qman_portal *p = get_affine_portal();
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ p->cb_dc_ern = handler;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ } else
+ cb_dc_ern = handler;
+}
+EXPORT_SYMBOL(qman_set_dc_ern);
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (p->eqci_owned) {
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
+ return NULL;
+ }
+ p->eqci_owned = fq;
+ }
+#endif
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy: only update if we need to, in order to
+ * try to liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder: we need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
+ p->eqci_owned = NULL;
+#endif
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
+ return NULL;
+ }
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = fq->key;
+#else
+ eq->tag = (u32)(uintptr_t)fq;
+#endif
+ eq->fd = *fd;
+ return eq;
+}
+
+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+
+ *p = get_affine_portal();
+ eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
+ if (!eq)
+ put_affine_portal();
+ return eq;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
+
+ if (!eq)
+ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
+ return eq;
+}
+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+ return eq;
+}
+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
+
+ if (!eq)
+ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
+ return eq;
+}
+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ return eq;
+}
+#endif
+
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue);
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
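+
+/*
+ * Enqueue sketch (illustrative): a non-blocking enqueue with a bounded
+ * retry on a full EQCR, assuming 'fd' was prepared by the caller.
+ *
+ *   int tries = 100;
+ *   int rc;
+ *
+ *   do {
+ *           rc = qman_enqueue(&fq, &fd, 0);
+ *   } while (rc == -EBUSY && --tries);
+ *
+ * Passing QMAN_ENQUEUE_FLAG_WAIT instead makes the call sleep for EQCR
+ * space, via the wait_eq_start() path above.
+ */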
+
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = orp_seqnum;
+ eq->orp = orp->fqid;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_orp);
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = orp_seqnum;
+ eq->orp = orp->fqid;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_orp);
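+
+/*
+ * ORP sketch (illustrative): enqueue frame 'seqnum' through an order
+ * restoration point. 'orp_fq' is assumed to be an FQ initialised for ORP
+ * use; the sequence number is the caller's per-frame ordinal.
+ *
+ *   rc = qman_enqueue_orp(&fq, &fd, 0, &orp_fq, seqnum);
+ *
+ * QMAN_ENQUEUE_FLAG_NLIS marks the frame "not last in sequence", while
+ * QMAN_ENQUEUE_FLAG_HOLE/NESN advance the ORP window without enqueueing,
+ * as the verb selection above shows.
+ */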
+
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ qman_cb_precommit cb, void *cb_arg)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* invoke user supplied callback function before writing commit verb */
+ if (cb(cb_arg)) {
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return -EINVAL;
+ }
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_precommit);
+
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+ u32 flags, qman_cb_precommit cb, void *cb_arg)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* invoke user supplied callback function before writing commit verb */
+ if (cb(cb_arg)) {
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EINVAL;
+ }
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_precommit);
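+
+/*
+ * Precommit sketch (illustrative): the callback runs after the EQCR entry
+ * is built but before the commit verb is written, so a non-zero return
+ * aborts the enqueue. A hypothetical last-moment validity check:
+ *
+ *   static int still_valid(void *arg)
+ *   {
+ *           return *(int *)arg ? 0 : 1; // 0 means "go ahead and commit"
+ *   }
+ *
+ *   rc = qman_enqueue_precommit(&fq, &fd, 0, still_valid, &valid_flag);
+ */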
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(qman_modify_cgr);
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
+
+static u8 qman_cgr_cpus[__CGR_NUM];
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data structures, for obvious reasons. However, we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC. */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->public_cfg.channel;
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success, but scream
+ * and wail to the log file */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
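+
+/*
+ * CGR sketch (illustrative; the callback signature follows the cgr->cb()
+ * invocation above, and 'initcgr_opts' is assumed prepared by the caller):
+ * create a congestion group whose callback fires on congestion
+ * state-change notifications.
+ *
+ *   static void my_cscn(struct qman_portal *qp, struct qman_cgr *c,
+ *                       int congested)
+ *   {
+ *           pr_info("CGR %d %s\n", c->cgrid,
+ *                   congested ? "congested" : "uncongested");
+ *   }
+ *
+ *   cgr.cgrid = id; // id assumed allocated from the seeded CGRID range
+ *   cgr.cb = my_cscn;
+ *   rc = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr_opts);
+ */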
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("This version doesn't support sending CSCN to a DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data structures, for obvious reasons. However, we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->public_cfg.channel) {
+ pr_crit("Attempting to delete CGR from a different portal "
+ "than it was created on: created 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->public_cfg.channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list, update
+ * CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+#ifndef __rtems__
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+ struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+ int res;
+
+ res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
+ complete(&cgr_comp->completion);
+
+ return res;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ struct task_struct *thread;
+ struct cgr_comp cgr_comp;
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ init_completion(&cgr_comp.completion);
+ cgr_comp.cgr = cgr;
+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+ "cgr_del");
+
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+ wake_up_process(thread);
+ wait_for_completion(&cgr_comp.completion);
+ preempt_enable();
+ return;
+ }
+ }
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+#endif /* __rtems__ */
+
+int qman_set_wpm(int wpm_enable)
+{
+ return qm_set_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_set_wpm);
+
+int qman_get_wpm(int *wpm_enable)
+{
+ return qm_get_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_get_wpm);
+
+/* Clean up FQs */
+static int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
+ u32 fqid)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, i, drain = 0;
+ u32 result;
+ u32 channel, wq;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(portal[0]);
+ mcc->queryfq_np.fqid = fqid;
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(portal[0]);
+ mcc->queryfq.fqid = fqid;
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ channel = mcr->queryfq.fqd.dest.channel;
+ wq = mcr->queryfq.fqd.dest.wq;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ will only occur once the FQ has been drained. In
+ order for the FQ to drain, the portal needs to be set
+ to dequeue from the channel the FQ is scheduled on. */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x, it is "
+ "scheduled on channel 0x%x\n",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(
+ portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+ /* Keep draining DQRR while checking the MR */
+ for (i = 0; i < portal_count; i++) {
+ qm_dqrr_pvb_update(portal[i]);
+ dqrr = qm_dqrr_current(portal[i]);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ portal[i], dqrr, 0);
+ qm_dqrr_pvb_update(portal[i]);
+ qm_dqrr_next(portal[i]);
+ dqrr = qm_dqrr_current(
+ portal[i]);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ while (msg) {
+ if ((msg->verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(portal[i]);
+ qm_mr_cci_consume_to_current(
+ portal[i]);
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ }
+ cpu_relax();
+ }
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
+ fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ ERNs come in */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ to be drained */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+ qm_dqrr_vdqcr_set(portal[0], vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ empty the ring completely */
+ while (dqrr) {
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(portal[0],
+ dqrr, 0);
+ qm_dqrr_pvb_update(portal[0]);
+ qm_dqrr_next(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
+ }
+ } while (fq_empty == 0);
+ }
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i], 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(portal[0]);
+ qm_mr_cci_consume_to_current(portal[0]);
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ return 0;
+ }
+ return -1;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret;
+ struct qm_portal *low_p;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ low_p = &p->p;
+ ret = qm_shutdown_fq(&low_p, 1, fqid);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
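+
+/*
+ * Recovery sketch (illustrative): force an FQ back to OOS by FQID alone,
+ * e.g. when reclaiming resources left behind by a crashed user; this runs
+ * the retire/drain/OOS sequence in qm_shutdown_fq() above.
+ *
+ *   if (qman_shutdown_fq(fqid))
+ *           pr_err("could not recover FQ 0x%x\n", fqid);
+ */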
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+#ifndef __rtems__
+ return portal->sharing_redirect ? NULL : portal->config;
+#else /* __rtems__ */
+ return portal->config;
+#endif /* __rtems__ */
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_driver.c b/linux/drivers/soc/fsl/qbman/qman_driver.c
new file mode 100644
index 00000000..6923504e
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_driver.c
@@ -0,0 +1,87 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <linux/time.h>
+
+static int __init early_qman_init(void)
+{
+ struct device_node *dn;
+ u32 is_portal_available;
+
+ qman_init();
+
+ is_portal_available = 0;
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (of_device_is_available(dn)) {
+ is_portal_available = 1;
+ break;
+ }
+ }
+
+ if (!qman_have_ccsr() && is_portal_available) {
+ struct qman_fq fq = {.fqid = 1};
+ struct qm_mcr_queryfq_np np;
+ int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
+ struct timespec nowts, diffts, startts = current_kernel_time();
+
+ /* Loop until querying the given FQID succeeds or we time out */
+ while (1) {
+ err = qman_query_fq_np(&fq, &np);
+ if (!err) {
+ /* success, control-plane has configured QMan */
+ break;
+ } else if (err != -ERANGE) {
+ pr_err("I/O error, continuing anyway\n");
+ break;
+ }
+ nowts = current_kernel_time();
+ diffts = timespec_sub(nowts, startts);
+ if (diffts.tv_sec > 0) {
+ if (!retry--) {
+ pr_err("Time out, control-plane dead?\n");
+ break;
+ }
+ pr_warn("Polling for the control-plane (%d)\n",
+ retry);
+ }
+ }
+ }
+
+ qman_resource_init();
+
+ return 0;
+}
+subsys_initcall(early_qman_init);
diff --git a/linux/drivers/soc/fsl/qbman/qman_portal.c b/linux/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 00000000..c74059b1
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,796 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+#ifdef __rtems__
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available) */
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+EXPORT_SYMBOL(qm_channel_pme);
+u16 qm_channel_dce = QMAN_CHANNEL_DCE;
+EXPORT_SYMBOL(qm_channel_dce);
+u16 qman_portal_max;
+EXPORT_SYMBOL(qman_portal_max);
+
+#ifndef __rtems__
+/* For these variables, and the portal-initialisation logic, the
+ * comments in bman_driver.c apply here, so they won't be repeated. */
+static struct qman_portal *shared_portals[NR_CPUS];
+static int num_shared_portals;
+static int shared_portals_idx;
+static LIST_HEAD(unused_pcfgs);
+#endif /* __rtems__ */
+
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 pools_sdqcr;
+
+#define STR_ERR_NOPROP "No '%s' property in node %s\n"
+#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
+#define STR_FQID_RANGE "fsl,fqid-range"
+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
+#define STR_CGRID_RANGE "fsl,cgrid-range"
+
+/* A "fsl,fqid-range" node; release the given range to the allocator */
+static __init int fsl_fqid_range_init(struct device_node *node)
+{
+ int ret;
+ const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
+
+ if (!range) {
+ pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_fqid_range(range[0], range[1]);
+ pr_info("FQID allocator includes range %d:%d\n",
+ range[0], range[1]);
+ return 0;
+}
+
+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
+{
+ int ret;
+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+ if (!chanid) {
+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
+ return -EINVAL;
+ }
+ for (ret = 0; ret < chanid[1]; ret++)
+ pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
+ return 0;
+}
+
+/* A "fsl,pool-channel-range" node; release the given range to the allocator */
+static __init int fsl_pool_channel_range_init(struct device_node *node)
+{
+ int ret;
+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+ if (!chanid) {
+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_pool_range(chanid[0], chanid[1]);
+ pr_info("Pool channel allocator includes range %d:%d\n",
+ chanid[0], chanid[1]);
+ return 0;
+}
+
+/* A "fsl,cgrid-range" node; release the given range to the allocator */
+static __init int fsl_cgrid_range_init(struct device_node *node)
+{
+ struct qman_cgr cgr;
+ int ret, errors = 0;
+ const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
+
+ if (!range) {
+ pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_cgrid_range(range[0], range[1]);
+ pr_info("CGRID allocator includes range %d:%d\n",
+ range[0], range[1]);
+ for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
+ ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
+ if (ret)
+ errors++;
+ }
+ if (errors)
+ pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
+ errors, (errors > 1) ? "s" : "", range[0], range[1]);
+ return 0;
+}
+
+static void qman_get_ip_revision(struct device_node *dn)
+{
+#ifdef __rtems__
+ struct device_node of_dns;
+#endif /* __rtems__ */
+ u16 ip_rev = 0;
+
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (!of_device_is_available(dn))
+ continue;
+ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
+ pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+ BUG_ON(1);
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
+ ip_rev = QMAN_REV11;
+ qman_portal_max = 10;
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
+ ip_rev = QMAN_REV12;
+ qman_portal_max = 10;
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
+ ip_rev = QMAN_REV20;
+ qman_portal_max = 3;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.0.0")) {
+ ip_rev = QMAN_REV30;
+ qman_portal_max = 50;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.0.1")) {
+ ip_rev = QMAN_REV30;
+ qman_portal_max = 25;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.0")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 50;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.1")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 25;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.2")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 18;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.3")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 10;
+ } else {
+ pr_warn("Unknown version in portal node, default to rev1.1\n");
+ ip_rev = QMAN_REV11;
+ qman_portal_max = 10;
+ }
+
+ if (!qman_ip_rev) {
+ if (ip_rev) {
+ qman_ip_rev = ip_rev;
+ } else {
+ pr_warn("Unknown version, default to rev1.1\n");
+ qman_ip_rev = QMAN_REV11;
+ }
+ } else if (ip_rev && (qman_ip_rev != ip_rev))
+ pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
+ qman_ip_rev, dn->full_name, ip_rev);
+ if (qman_ip_rev == ip_rev)
+ break;
+ }
+}
+
+#ifndef __rtems__
+/* Parse a portal node, perform generic mapping duties and return the config. It
+ * is not known at this stage for what purpose (or even if) the portal will be
+ * used. */
+static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+ struct qm_portal_config *pcfg;
+ const u32 *channel;
+ int irq, ret;
+ struct resource res;
+
+ pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return NULL;
+
+ /*
+ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
+ * 'struct device' in order to get the PAMU stashing setup, and the
+ * QMan portal driver won't function at all without ring stashing.
+ *
+ * Making the QMan portal driver nice and proper is part of the
+ * upstreaming effort.
+ */
+ pcfg->dev.bus = &platform_bus_type;
+ pcfg->dev.of_node = node;
+#ifdef CONFIG_IOMMU_API
+ pcfg->dev.archdata.iommu_domain = NULL;
+#endif
+
+ ret = of_address_to_resource(node, DPA_PORTAL_CE,
+ &pcfg->addr_phys[DPA_PORTAL_CE]);
+ if (ret) {
+ pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
+ goto err;
+ }
+ ret = of_address_to_resource(node, DPA_PORTAL_CI,
+ &pcfg->addr_phys[DPA_PORTAL_CI]);
+ if (ret) {
+ pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
+ goto err;
+ }
+
+ channel = of_get_property(node, "fsl,qman-channel-id", &ret);
+ if (!channel || (ret != 4)) {
+ pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
+ node->full_name);
+ goto err;
+ }
+ pcfg->public_cfg.channel = *channel;
+ pcfg->public_cfg.cpu = -1;
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == NO_IRQ) {
+ pr_err("Can't get %s property 'interrupts'\n", node->full_name);
+ goto err;
+ }
+ pcfg->public_cfg.irq = irq;
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->public_cfg.channel);
+#endif
+
+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
+ pcfg->addr_phys[DPA_PORTAL_CE].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
+ 0);
+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
+ pcfg->addr_phys[DPA_PORTAL_CI].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+
+ return pcfg;
+err:
+ kfree(pcfg);
+ return NULL;
+}
+
+static struct qm_portal_config *get_pcfg(struct list_head *list)
+{
+ struct qm_portal_config *pcfg;
+
+ if (list_empty(list))
+ return NULL;
+ pcfg = list_entry(list->prev, struct qm_portal_config, list);
+ list_del(&pcfg->list);
+ return pcfg;
+}
+#endif /* __rtems__ */
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ int ret;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ pr_err("%s(): iommu_domain_alloc() failed", __func__);
+ goto _no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+ goto _iommu_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ pr_err("%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ pr_err("%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+ if (ret < 0) {
+ pr_err("%s(): iommu_device_attach() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ pr_err("%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_detach_device;
+ }
+
+_no_iommu:
+#endif
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+ pr_warn("Failed to set the stash request queue\n");
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+_iommu_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, NULL);
+_iommu_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+
+#ifndef __rtems__
+ pcfg->iommu_domain = NULL;
+#endif /* __rtems__ */
+ portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (p) {
+ u32 irq_sources = 0;
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+ pr_info("Portal %sinitialised, cpu %d\n",
+#ifndef __rtems__
+ pcfg->public_cfg.is_shared ? "(shared) " : "",
+#else /* __rtems__ */
+ "",
+#endif /* __rtems__ */
+ pcfg->public_cfg.cpu);
+ } else
+ pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
+ return p;
+}
+
+#ifndef __rtems__
+static void init_slave(int cpu)
+{
+ struct qman_portal *p;
+ struct cpumask oldmask = *tsk_cpus_allowed(current);
+
+ set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+ if (!p)
+ pr_err("Slave portal failure on cpu %d\n", cpu);
+ else
+ pr_info("Portal (slave) initialised, cpu %d\n", cpu);
+ set_cpus_allowed_ptr(current, &oldmask);
+ if (shared_portals_idx >= num_shared_portals)
+ shared_portals_idx = 0;
+}
+
+static struct cpumask want_unshared __initdata;
+static struct cpumask want_shared __initdata;
+
+static int __init parse_qportals(char *str)
+{
+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
+ "qportals");
+}
+__setup("qportals=", parse_qportals);
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ pr_err("Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+ pr_warn("Failed to update portal's stash request queue\n");
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+}
+#endif /* __rtems__ */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ qman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ qman_offline_cpu(cpu);
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+ .notifier_call = qman_hotplug_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef __rtems__
+#include <bsp/fdt.h>
+static struct qm_portal_config qman_configs[NR_CPUS];
+static void
+qman_init_portals(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct device_node dn;
+ const char *name;
+ int cpu_count = (int)rtems_get_processor_count();
+ int cpu;
+ int ret;
+ int node;
+ int parent;
+
+ memset(&dn, 0, sizeof(dn));
+
+ name = "fsl,qman-portal";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("qman: no portals in FDT");
+ parent = fdt_parent_offset(fdt, node);
+ if (parent < 0)
+ panic("qman: no parent of portals in FDT");
+ node = fdt_first_subnode(fdt, parent);
+
+ dn.full_name = name;
+ dn.offset = node;
+
+ qoriq_clear_ce_portal(&qoriq_qman_portal[0][0],
+ sizeof(qoriq_qman_portal[0]));
+ qoriq_clear_ci_portal(&qoriq_qman_portal[1][0],
+ sizeof(qoriq_qman_portal[1]));
+
+ for (cpu = 0; cpu < cpu_count; ++cpu) {
+ struct qm_portal_config *pcfg = &qman_configs[cpu];
+ struct qman_portal *portal;
+ struct resource res;
+ const u32 *channel;
+
+ if (node < 0)
+ panic("qman: missing portal in FDT");
+
+ ret = of_address_to_resource(&dn, 0, &res);
+ if (ret != 0)
+ panic("qman: no portal CE address");
+ pcfg->addr_virt[0] = (__iomem void *)
+ ((uintptr_t)&qoriq_qman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+ (uintptr_t)&qoriq_qman_portal[0][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+ (uintptr_t)&qoriq_qman_portal[1][0]);
+
+ ret = of_address_to_resource(&dn, 1, &res);
+ if (ret != 0)
+ panic("qman: no portal CI address");
+ pcfg->addr_virt[1] = (__iomem void *)
+ ((uintptr_t)&qoriq_qman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+ (uintptr_t)&qoriq_qman_portal[1][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+ (uintptr_t)&qoriq_qman_portal[2][0]);
+
+ pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->public_cfg.irq == NO_IRQ)
+ panic("qman: no portal interrupt");
+
+ channel = of_get_property(&dn, "fsl,qman-channel-id", &ret);
+ if (channel == NULL || ret != 4)
+ panic("qman: no portal channel ID");
+ pcfg->public_cfg.channel = *channel;
+
+ pcfg->public_cfg.cpu = cpu;
+ pcfg->public_cfg.pools = pools_sdqcr;
+
+ portal = init_pcfg(pcfg);
+ if (portal == NULL)
+ panic("qman: cannot create portal");
+
+ node = fdt_next_subnode(fdt, node);
+ dn.offset = node;
+ }
+}
+#endif /* __rtems__ */
+#ifndef __rtems__
+__init int qman_init(void)
+{
+ struct cpumask slave_cpus;
+ struct cpumask unshared_cpus = *cpu_none_mask;
+ struct cpumask shared_cpus = *cpu_none_mask;
+ LIST_HEAD(unshared_pcfgs);
+ LIST_HEAD(shared_pcfgs);
+ struct device_node *dn;
+ struct qm_portal_config *pcfg;
+ struct qman_portal *p;
+ int cpu, ret;
+ struct cpumask offline_cpus;
+
+ /* Initialise the QMan (CCSR) device */
+ for_each_compatible_node(dn, NULL, "fsl,qman") {
+ if (!qman_init_ccsr(dn))
+ pr_info("Err interrupt handler present\n");
+ else
+ pr_err("CCSR setup failed\n");
+ }
+#else /* __rtems__ */
+int
+qman_init(struct device_node *dn)
+{
+ struct device_node of_dns;
+ int ret;
+#endif /* __rtems__ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ /* Setup lookup table for FQ demux */
+ ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
+ if (ret)
+ return ret;
+#endif
+
+ /* Get qman ip revision */
+ qman_get_ip_revision(dn);
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ /* Parse pool channels into the SDQCR mask. (Must happen before portals
+ * are initialised.) */
+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+ ret = fsl_pool_channel_range_sdqcr(dn);
+ if (ret)
+ return ret;
+ }
+
+#ifndef __rtems__
+ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
+ /* Initialise portals. See bman_driver.c for comments */
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (!of_device_is_available(dn))
+ continue;
+ pcfg = parse_pcfg(dn);
+ if (pcfg) {
+ pcfg->public_cfg.pools = pools_sdqcr;
+ list_add_tail(&pcfg->list, &unused_pcfgs);
+ }
+ }
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &want_shared)) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ cpumask_set_cpu(cpu, &shared_cpus);
+ }
+ if (cpumask_test_cpu(cpu, &want_unshared)) {
+ if (cpumask_test_cpu(cpu, &shared_cpus))
+ continue;
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+ for_each_possible_cpu(cpu) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+ if (cpumask_empty(&slave_cpus)) {
+ if (!list_empty(&shared_pcfgs)) {
+ cpumask_or(&unshared_cpus, &unshared_cpus,
+ &shared_cpus);
+ cpumask_clear(&shared_cpus);
+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+ INIT_LIST_HEAD(&shared_pcfgs);
+ }
+ } else {
+ if (list_empty(&shared_pcfgs)) {
+ pcfg = get_pcfg(&unshared_pcfgs);
+ if (!pcfg) {
+ pr_crit("No portals available!\n");
+ return 0;
+ }
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ }
+ }
+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 0;
+ p = init_pcfg(pcfg);
+ }
+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 1;
+ p = init_pcfg(pcfg);
+ if (p)
+ shared_portals[num_shared_portals++] = p;
+ }
+ if (!cpumask_empty(&slave_cpus))
+ for_each_cpu(cpu, &slave_cpus)
+ init_slave(cpu);
+#else /* __rtems__ */
+ qman_init_portals();
+#endif /* __rtems__ */
+ pr_info("Portals initialised\n");
+#ifndef __rtems__
+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, &offline_cpus)
+ qman_offline_cpu(cpu);
+#endif /* __rtems__ */
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+#endif
+ return 0;
+}
+
+__init int qman_resource_init(void)
+{
+#ifdef __rtems__
+ struct device_node of_dns;
+#endif /* __rtems__ */
+ struct device_node *dn;
+ int ret;
+
+ /* Initialise FQID allocation ranges */
+ for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
+ ret = fsl_fqid_range_init(dn);
+ if (ret)
+ return ret;
+ }
+ /* Initialise CGRID allocation ranges */
+ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
+ ret = fsl_cgrid_range_init(dn);
+ if (ret)
+ return ret;
+ }
+ /* Parse pool channels into the allocator. (Must happen after portals
+ * are initialised.) */
+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+ ret = fsl_pool_channel_range_init(dn);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_priv.h b/linux/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 00000000..f04bd476
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,293 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+#include <asm/fsl_pamu_stash.h>
+
+/* Congestion Groups */
+
+/* This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so that we
+ * can ignore those that don't concern us. We reuse the structure and accessor
+ * details already used by the management command that queries congestion
+ * groups.
+ */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.__state;
+ const u32 *_a = a->q.__state;
+ const u32 *_b = b->q.__state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.__state;
+ const u32 *_a = a->q.__state;
+ const u32 *_b = b->q.__state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
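+
+/* Illustrative sketch only (not part of the driver): the accessors above
+ * compose naturally, e.g. to walk every congestion group set in a snapshot
+ * 'c'. qman_cgrs_next() pre-increments, so seeding with -1 starts the scan
+ * at group 0, and it returns __CGR_NUM once no later group is set
+ * (handle_congested_group() is a hypothetical consumer):
+ *
+ *	struct qman_cgrs c;
+ *	int i;
+ *
+ *	qman_cgrs_init(&c);
+ *	qman_cgrs_set(&c, 5);
+ *	for (i = qman_cgrs_next(&c, -1); i < __CGR_NUM;
+ *	     i = qman_cgrs_next(&c, i))
+ *		handle_congested_group(i);
+ */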
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /* Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited. */
+ __iomem void *addr_virt[2];
+#ifndef __rtems__
+ struct resource addr_phys[2];
+ struct device dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+#endif /* __rtems__ */
+ /* User-visible portal configuration settings */
+ struct qman_portal_config public_cfg;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+extern u16 qman_portal_max;
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+/* Hooks from qman_driver.c to qman_config.c */
+int qman_init_ccsr(struct device_node *node);
+void qman_liodn_fixup(u16 channel);
+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
+size_t qman_fqd_size(void);
+#endif
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+/* Hooks from qman_driver.c in to qman_high.c */
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+void qman_destroy_portal(struct qman_portal *qm);
+
+/* This CGR feature is supported by h/w and required by the unit-tests and the
+ * debugfs hooks, so it is implemented in the driver. However, it lets s/w
+ * explicitly corrupt h/w fields that are normally incorruptible (because the
+ * counters are normally maintained entirely within h/w). As such, we keep
+ * this API internal. */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is larger than the context_b field, then a lookup
+ * table is required. */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+/*************************************************/
+/* QMan s/w corenet portal, low-level i/face */
+/*************************************************/
+
+/* Note: most functions are only used by the high-level interface, so are
+ * inlined from qman.h. The stuff below is for use by other parts of the
+ * driver. */
+
+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
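+
+/* Illustrative sketch only: an SDQCR value built from the macros above,
+ * dequeuing up to three frames per command from the dedicated channel plus
+ * pool channel 2 with the active-QoS type (the token and pool number are
+ * hypothetical; compare PORTAL_SDQCR in qman_test_api.c):
+ *
+ *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
+ *		    QM_SDQCR_COUNT_UPTO3 |
+ *		    QM_SDQCR_TYPE_ACTIVE_QOS |
+ *		    QM_SDQCR_TOKEN_SET(0x98) |
+ *		    QM_SDQCR_CHANNELS_DEDICATED |
+ *		    QM_SDQCR_CHANNELS_POOL(2);
+ */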
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
+ * If MODE==SCHEDULED
+ * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
+ * If CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ * If MODE==UNSCHEDULED
+ * Choose FQID().
+ */
+#define QM_PDQCR_MODE_SCHEDULED 0x0
+#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
+#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
+#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
+#define QM_PDQCR_COUNT_EXACT1 0x0
+#define QM_PDQCR_COUNT_UPTO3 0x20000000
+#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_PDQCR_TYPE_MASK 0x03000000
+#define QM_PDQCR_TYPE_NULL 0x0
+#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_PDQCR_TYPE_ACTIVE 0x03000000
+#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
+#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
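+
+/* Illustrative sketch only: an unscheduled PDQCR for exactly one frame from
+ * a hypothetical frame queue, FQID 0x42:
+ *
+ *	u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED |
+ *		    QM_PDQCR_COUNT_EXACT1 |
+ *		    QM_PDQCR_FQID(0x42);
+ */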
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_<reg>_<verb>(). So for example, qm_isr_disable_write() means
+ * "write the disable register" rather than "disable the ability to write". */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
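+
+/* Illustrative sketch only: acknowledging the fast (DQRI) source and leaving
+ * it enabled, using the accessors above on a low-level portal 'qm':
+ *
+ *	u32 ack = qm_isr_status_read(qm) & QM_PIRQ_DQRI;
+ *
+ *	qm_isr_status_clear(qm, ack);
+ *	qm_isr_enable_write(qm, qm_isr_enable_read(qm) | QM_PIRQ_DQRI);
+ */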
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+int qman_have_ccsr(void);
+#else
+#define qman_have_ccsr 0
+#endif
+
+#ifndef __rtems__
+__init int qman_init(void);
+#else /* __rtems__ */
+int qman_init(struct device_node *dn);
+#endif /* __rtems__ */
+__init int qman_resource_init(void);
+
+extern void *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.c b/linux/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 00000000..18c04482
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,61 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ qman_test_stash();
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ qman_test_api();
+#endif
+ }
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.h b/linux/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 00000000..0b34a670
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,44 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+
+#include <soc/fsl/qman.h>
+
+void qman_test_stash(void);
+void qman_test_api(void);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_api.c b/linux/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 00000000..63a6d11d
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,222 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+/*************************************/
+/* Predeclarations (e.g. for fq_base) */
+/*************************************/
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const struct qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const struct qm_mr_entry *);
+
+/***************/
+/* global vars */
+/***************/
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/**********************/
+/* internal functions */
+/**********************/
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *__fd)
+{
+ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
+ __fd->format = qm_fd_contig_big;
+ __fd->length29 = 0x0000ffff;
+ __fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *__fd)
+{
+ u64 t = qm_fd_addr_get64(__fd);
+ int z = t >> 40;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(__fd, t);
+ __fd->length29--;
+ __fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+ if (!r)
+ r = a->format - b->format;
+ if (!r)
+ r = a->opaque - b->opaque;
+ if (!r)
+ r = a->cmd - b->cmd;
+ return r;
+}
+
+/********/
+/* test */
+/********/
+
+static void do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
+ (((loop + 1) == NUM_ENQUEUES) ?
+ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
+ panic("qman_enqueue() failed\n");
+ fd_inc(&fd);
+ }
+}
+
+void qman_test_api(void)
+{
+ u32 flags;
+ int res;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ if (qman_create_fq(0, FQ_FLAGS, fq))
+ panic("qman_create_fq() failed\n");
+ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
+ panic("qman_init_fq() failed\n");
+
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ do_enqueues(fq);
+ pr_info("VDQCR (till-empty);\n");
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_TILLEMPTY))
+ panic("qman_volatile_dequeue() failed\n");
+ do_enqueues(fq);
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
+ panic("qman_volatile_dequeue() failed\n");
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
+ panic("qman_volatile_dequeue() failed\n");
+
+ do_enqueues(fq);
+ pr_info("scheduled dequeue (till-empty)\n");
+ if (qman_schedule_fq(fq))
+ panic("qman_schedule_fq() failed\n");
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ res = qman_retire_fq(fq, &flags);
+ if (res < 0)
+ panic("qman_retire_fq() failed\n");
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS)
+ panic("leaking frames\n");
+ if (qman_oos_fq(fq))
+ panic("qman_oos_fq() failed\n");
+ qman_destroy_fq(fq, 0);
+ pr_info("%s(): Finished\n", __func__);
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (fd_cmp(&fd_dq, &dq->fd)) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ BUG();
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ panic("cb_ern() unimplemented");
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
+ panic("unexpected FQS message");
+#ifndef __rtems__
+ pr_info("Retirement message received\n");
+#endif /* __rtems__ */
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_stash.c b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 00000000..a3ca6603
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,540 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#ifdef __rtems__
+#include <rtems/malloc.h>
+#undef msleep
+#define msleep(x) usleep((x) * 1000)
+#define L1_CACHE_BYTES 64
+#endif /* __rtems__ */
+
+/* Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
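+
+/* Worked example of the mixer scheme above: if the previous handler enqueued
+ * a word as 'w ^ prev->tx_mixer', the receiver computes
+ * (w ^ prev->tx_mixer) ^ curr->rx_mixer == w, because init phase 2 assigns
+ * curr->rx_mixer = prev->tx_mixer. The recovered word is checked against the
+ * original LFSR sequence before curr->tx_mixer is XOR'd in for the next hop.
+ */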
+
+/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive). */
+struct bstrap {
+ void (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *__bstrap)
+{
+ struct bstrap *bstrap = __bstrap;
+
+ atomic_inc(&bstrap->started);
+ bstrap->fn();
+ while (!kthread_should_stop())
+ msleep(1);
+ return 0;
+}
+static int on_all_cpus(void (*fn)(void))
+{
+ int cpu;
+
+#ifndef __rtems__
+ for_each_cpu(cpu, cpu_online_mask) {
+#else /* __rtems__ */
+ for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
+#endif /* __rtems__ */
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+#ifndef __rtems__
+ if (IS_ERR(k))
+#else /* __rtems__ */
+ if (k == NULL)
+#endif /* __rtems__ */
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /* If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop. */
+ while (!atomic_read(&bstrap.started))
+ msleep(10);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /* when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+#ifndef __rtems__
+static void *__frame_ptr;
+#endif /* __rtems__ */
+static u32 *frame_ptr;
+#ifndef __rtems__
+static dma_addr_t frame_dma;
+#endif /* __rtems__ */
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 words (320 bytes), like a small ethernet frame, and spills across
+ * several cachelines */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
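+/* 32-bit Galois LFSR that generates the frame contents and the per-handler
+ * mixers; 0xd0000001 is a commonly used maximal-length tap mask. */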
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static void allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+#ifndef __rtems__
+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+ if (!pdev)
+ panic("platform_device_alloc() failed");
+ if (platform_device_add(pdev))
+ panic("platform_device_add() failed");
+	/* over-allocate so that the 64-byte alignment fixup below cannot run
+	 * past the end of the buffer */
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS + 63, GFP_KERNEL);
+ if (!__frame_ptr)
+ panic("kmalloc() failed");
+ frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
+ ~(unsigned long)63);
+#else /* __rtems__ */
+ frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0);
+ if (frame_ptr == NULL)
+ panic("rtems_heap_allocate_aligned_with_boundary() failed");
+#endif /* __rtems__ */
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+#ifndef __rtems__
+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+#endif /* __rtems__ */
+}
+
+static void deallocate_frame_data(void)
+{
+#ifndef __rtems__
+ kfree(__frame_ptr);
+#endif /* __rtems__ */
+}
+
+static inline void process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr)
+ panic("bad frame address");
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr)
+ panic("corrupt frame data");
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+ panic("qman_enqueue() failed");
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+ panic("qman_enqueue() failed");
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler)
+ panic("kmem_cache_alloc() failed");
+ handler->processor_id = hp_cpu->processor_id;
+#ifndef __rtems__
+ handler->addr = frame_dma;
+#else /* __rtems__ */
+ handler->addr = (dma_addr_t)frame_ptr;
+#endif /* __rtems__ */
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+}
+
+static void destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags))
+ panic("qman_retire_fq(rx) failed");
+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+ if (qman_oos_fq(&handler->rx))
+ panic("qman_oos_fq(rx) failed");
+ qman_destroy_fq(&handler->rx, 0);
+ qman_destroy_fq(&handler->tx, 0);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+}
+
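+/* The FQD stashing fields are two bits wide, so at most three cachelines can
+ * be requested per category; hence the clamp to 3 below. */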
+static inline u8 num_cachelines(u32 offset)
+{
+	u8 res = (offset + (L1_CACHE_BYTES - 1)) / L1_CACHE_BYTES;
+
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
+
+static void init_handler(void *__handler)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = __handler;
+
+ BUG_ON(handler->processor_id != smp_processor_id());
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
+ panic("qman_create_fq(rx) failed");
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
+ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
+ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts))
+ panic("qman_init_fq(rx) failed");
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx))
+ panic("qman_create_fq(tx) failed");
+}
+
+static void init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int ret;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ ret = qman_alloc_fqid(&fqid);
+ if (ret)
+ panic("qman_alloc_fqid() failed");
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+}
+
+static void init_phase3(void)
+{
+ int loop;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id())
+ init_handler(hp_cpu->iterator);
+ else
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler, hp_cpu->iterator, 1);
+ preempt_enable();
+ }
+ }
+}
+
+static void send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct qm_fd fd;
+
+ BUG_ON(special_handler->processor_id != smp_processor_id());
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ fd.format = qm_fd_contig_big;
+ fd.length29 = HP_NUM_WORDS * 4;
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr)
+ panic("corrupt frame data");
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ if (qman_enqueue(&special_handler->tx, &fd, 0))
+ panic("qman_enqueue() failed");
+}
+
+void qman_test_stash(void)
+{
+#ifndef __rtems__
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return;
+ }
+#endif /* __rtems__ */
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab)
+ panic("kmem_cache_create() failed");
+
+ allocate_frame_data();
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+	if (on_all_cpus(create_per_cpu_handlers))
+		panic("on_all_cpus() failed");
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ init_phase2();
+
+ init_phase3();
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id())
+ send_first_frame(NULL);
+ else
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame, NULL, 1);
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+	if (on_all_cpus(destroy_per_cpu_handlers))
+		panic("on_all_cpus() failed");
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_utils.c b/linux/drivers/soc/fsl/qbman/qman_utils.c
new file mode 100644
index 00000000..5b85f037
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_utils.c
@@ -0,0 +1,309 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* --- FQID Pool --- */
+
+struct qman_fqid_pool {
+ /* Base and size of the FQID range */
+ u32 fqid_base;
+ u32 total;
+ /* Number of FQIDs currently "allocated" */
+ u32 used;
+ /* Allocation optimisation. When 'used<total', it is the index of an
+ * available FQID. Otherwise there are no available FQIDs, and this
+ * will be set when the next deallocation occurs. */
+ u32 next;
+ /* A bit-field representation of the FQID range. */
+ unsigned long *bits;
+};
+
+#define QLONG_BYTES sizeof(unsigned long)
+#define QLONG_BITS (QLONG_BYTES * 8)
+/* Number of 'longs' required for the given number of bits */
+#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
+/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
+#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
+/* And in bits */
+#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
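+/* E.g. on a 64-bit build (QLONG_BITS == 64), a pool of 100 FQIDs needs
+ * QNUM_LONGS(100) == 2 longs, i.e. QNUM_BYTES(100) == 16 bytes holding
+ * QNUM_BITS(100) == 128 bits; the trailing 28 bits are padding. */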
+
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
+{
+ struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ unsigned int i;
+
+ BUG_ON(!num);
+ if (!pool)
+ return NULL;
+ pool->fqid_base = fqid_start;
+ pool->total = num;
+ pool->used = 0;
+ pool->next = 0;
+ pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
+ if (!pool->bits) {
+ kfree(pool);
+ return NULL;
+ }
+ /* If num is not an even multiple of QLONG_BITS (or even 8, for
+ * byte-oriented searching) then we fill the trailing bits with 1, to
+ * make them look allocated (permanently). */
+	for (i = num; i < QNUM_BITS(num); i++)
+ set_bit(i, pool->bits);
+ return pool;
+}
+EXPORT_SYMBOL(qman_fqid_pool_create);
+
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
+{
+ int ret = pool->used;
+
+ kfree(pool->bits);
+ kfree(pool);
+ return ret;
+}
+EXPORT_SYMBOL(qman_fqid_pool_destroy);
+
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
+{
+ int ret;
+
+ if (pool->used == pool->total)
+ return -ENOMEM;
+ *fqid = pool->fqid_base + pool->next;
+ ret = test_and_set_bit(pool->next, pool->bits);
+ BUG_ON(ret);
+ if (++pool->used == pool->total)
+ return 0;
+ pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
+ if (pool->next >= pool->total)
+ pool->next = find_first_zero_bit(pool->bits, pool->total);
+ BUG_ON(pool->next >= pool->total);
+ return 0;
+}
+EXPORT_SYMBOL(qman_fqid_pool_alloc);
+
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
+{
+ int ret;
+
+ fqid -= pool->fqid_base;
+ ret = test_and_clear_bit(fqid, pool->bits);
+ BUG_ON(!ret);
+ if (pool->used-- == pool->total)
+ pool->next = fqid;
+}
+EXPORT_SYMBOL(qman_fqid_pool_free);
+
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
+{
+ return pool->used;
+}
+EXPORT_SYMBOL(qman_fqid_pool_used);
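+
+/* Illustrative sketch only (the base FQID, count and use_fqid() consumer are
+ * hypothetical): typical use of the pool API above:
+ *
+ *	struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
+ *	u32 fqid;
+ *
+ *	if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
+ *		use_fqid(fqid);
+ *		qman_fqid_pool_free(pool, fqid);
+ *	}
+ *	if (pool)
+ *		qman_fqid_pool_destroy(pool);
+ */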
+
+static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */
+static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */
+static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */
+
+/* FQID allocator front-end */
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpaa_resource_new(&fqalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+static int fq_cleanup(u32 fqid)
+{
+ return qman_shutdown_fq(fqid) == 0;
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ u32 total_invalid = dpaa_resource_release(&fqalloc,
+ fqid, count, fq_cleanup);
+
+ if (total_invalid)
+ pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
+ fqid, fqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_fqid_range);
+
+int qman_reserve_fqid_range(u32 fqid, u32 count)
+{
+ return dpaa_resource_reserve(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_reserve_fqid_range);
+
+void qman_seed_fqid_range(u32 fqid, u32 count)
+{
+ dpaa_resource_seed(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_seed_fqid_range);
+
+/* Pool-channel allocator front-end */
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpaa_resource_new(&qpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+static int qpool_cleanup(u32 qp)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
+	 * channel is the pool-channel being released. When a non-OOS FQD is
+	 * found we attempt to clean it up. */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 1;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ BUG_ON(err);
+ if (fqd.dest.channel == qp) {
+ /* The channel is the FQ's target, clean it */
+ if (qman_shutdown_fq(fq.fqid) != 0)
+				/* Couldn't shut down the FQ, so the
+				 * pool must be leaked */
+ return 0;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+void qman_release_pool_range(u32 qp, u32 count)
+{
+ u32 total_invalid = dpaa_resource_release(&qpalloc,
+ qp, count, qpool_cleanup);
+
+ if (total_invalid) {
+ /* Pool channels are almost always used individually */
+ if (count == 1)
+ pr_err("Pool channel 0x%x had %d leaks\n",
+ qp, total_invalid);
+ else
+ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
+ qp, qp + count - 1, count, total_invalid);
+ }
+}
+EXPORT_SYMBOL(qman_release_pool_range);
+
+void qman_seed_pool_range(u32 poolid, u32 count)
+{
+	dpaa_resource_seed(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_seed_pool_range);
+
+int qman_reserve_pool_range(u32 poolid, u32 count)
+{
+ return dpaa_resource_reserve(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_reserve_pool_range);
+
+/* CGR ID allocator front-end */
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpaa_resource_new(&cgralloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+static int cqr_cleanup(u32 cgrid)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose CGR is the
+	 * CGR being released.
+	 */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 1;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ BUG_ON(err);
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ (fqd.cgid == cgrid)) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+				       cgrid, fq.fqid);
+ return 1;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ u32 total_invalid = dpaa_resource_release(&cgralloc,
+ cgrid, count, cqr_cleanup);
+ if (total_invalid)
+ pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
+ cgrid, cgrid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_cgrid_range);
+
+void qman_seed_cgrid_range(u32 cgrid, u32 count)
+{
+	dpaa_resource_seed(&cgralloc, cgrid, count);
+}
+EXPORT_SYMBOL(qman_seed_cgrid_range);