Diffstat (limited to 'linux/drivers/soc/fsl/qbman')
24 files changed, 5497 insertions, 9493 deletions
diff --git a/linux/drivers/soc/fsl/qbman/bman.c b/linux/drivers/soc/fsl/qbman/bman.c index 35459bc7..2cc7f5c5 100644 --- a/linux/drivers/soc/fsl/qbman/bman.c +++ b/linux/drivers/soc/fsl/qbman/bman.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,659 +34,776 @@ #include "bman_priv.h" -/* Last updated for v00.79 of the BG */ - -struct bman; - -/* Register offsets */ -#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04)) -#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04)) -#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04)) -#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04)) -#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04)) -#define REG_FBPR_FPC 0x0800 -#define REG_ECSR 0x0a00 -#define REG_ECIR 0x0a04 -#define REG_EADR 0x0a08 -#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) -#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) -#define REG_IP_REV_1 0x0bf8 -#define REG_IP_REV_2 0x0bfc -#define REG_FBPR_BARE 0x0c00 -#define REG_FBPR_BAR 0x0c04 -#define REG_FBPR_AR 0x0c10 -#define REG_SRCIDR 0x0d04 -#define REG_LIODNR 0x0d08 -#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */ - -/* Used by all error interrupt registers except 'inhibit' */ -#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ -#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ -#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ -#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ -#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ - -/* BMAN_ECIR valid error bit */ -#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI) - -union bman_ecir { - u32 ecir_raw; - struct { - u32 __reserved1:4; - u32 portal_num:4; - u32 __reserved2:12; - u32 numb:4; - u32 __reserved3:2; - u32 pid:6; - } __packed info; -}; +#define IRQNAME "BMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define BM_REG_RCR_PI_CINH 0x0000 +#define BM_REG_RCR_CI_CINH 0x0004 +#define BM_REG_RCR_ITR 0x0008 +#define BM_REG_CFG 0x0100 +#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) +#define BM_REG_ISR 0x0e00 +#define BM_REG_IER 0x0e04 +#define BM_REG_ISDR 0x0e08 +#define BM_REG_IIR 0x0e0c + +/* Cache-enabled register offsets */ +#define BM_CL_CR 0x0000 +#define BM_CL_RR0 0x0100 +#define BM_CL_RR1 0x0140 +#define BM_CL_RCR 0x1000 +#define BM_CL_RCR_PI_CENA 0x3000 +#define BM_CL_RCR_CI_CENA 0x3100 -union bman_eadr { - u32 eadr_raw; - struct { - u32 __reserved1:5; - u32 memid:3; - u32 __reserved2:14; - u32 eadr:10; - } __packed info; +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * Enum values use 3 letter codes. 
First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + */ +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ + bm_rcr_pci = 0, /* PI index, cache-inhibited */ + bm_rcr_pce = 1, /* PI index, cache-enabled */ + bm_rcr_pvb = 2 /* valid-bit */ }; - -struct bman_hwerr_txt { - u32 mask; - const char *txt; +enum bm_rcr_cmode { /* s/w-only */ + bm_rcr_cci, /* CI index, cache-inhibited */ + bm_rcr_cce /* CI index, cache-enabled */ }; -#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b } -static const struct bman_hwerr_txt bman_hwerr_txts[] = { - BMAN_HWE_TXT(IVCI, "Invalid Command Verb"), - BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"), - BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), - BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), - BMAN_HWE_TXT(BSCN, "Pool State Change Notification"), +/* --- Portal structures --- */ + +#define BM_RCR_SIZE 8 + +/* Release Command */ +struct bm_rcr_entry { + union { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ + u8 __reserved1[62]; + }; + struct bm_buffer bufs[8]; + }; +}; +#define BM_RCR_VERB_VBIT 0x80 +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ + +struct bm_rcr { + struct bm_rcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum bm_rcr_pmode pmode; + enum bm_rcr_cmode cmode; +#endif }; -#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt)) -struct bman_error_info_mdata { - u16 addr_mask; - u16 bits; - const char *txt; +/* MC (Management Command) command */ +struct bm_mc_command { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used by acquire command */ + u8 __reserved[62]; +}; +#define BM_MCC_VERB_VBIT 0x80 +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ +#define BM_MCC_VERB_CMD_ACQUIRE 0x10 +#define BM_MCC_VERB_CMD_QUERY 0x40 +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ + +/* MC result, Acquire and Query Response */ +union bm_mc_result { + struct { + u8 verb; + u8 bpid; + u8 __reserved[62]; + }; + struct bm_buffer bufs[8]; +}; +#define BM_MCR_VERB_VBIT 0x80 +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 +#define BM_MCR_VERB_CMD_ERR_ECC 0x70 +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ +#define BM_MCR_TIMEOUT 10000 /* us */ + +struct bm_mc { + struct bm_mc_command *cr; + union bm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can only be _mc_start()ed */ + mc_idle, + /* Can only be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif }; -#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} -static const struct bman_error_info_mdata error_mdata[] = { - BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"), - BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"), - BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"), +struct bm_addr { + void __iomem *ce; /* cache-enabled */ + void __iomem *ci; /* cache-inhibited */ }; -#define BMAN_ERR_MDATA_COUNT \ - 
(sizeof(error_mdata)/sizeof(struct bman_error_info_mdata)) -/* Add this in Kconfig */ -#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI) +struct bm_portal { + struct bm_addr addr; + struct bm_rcr rcr; + struct bm_mc mc; +} ____cacheline_aligned; -/** - * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers - * @v: for accessors that write values, this is the 32-bit value - * - * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All - * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of - * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means - * "write the enable register" rather than "enable the write register"! - */ -#define bm_err_isr_status_read(bm) \ - __bm_err_isr_read(bm, bm_isr_status) -#define bm_err_isr_status_clear(bm, m) \ - __bm_err_isr_write(bm, bm_isr_status, m) -#define bm_err_isr_enable_read(bm) \ - __bm_err_isr_read(bm, bm_isr_enable) -#define bm_err_isr_enable_write(bm, v) \ - __bm_err_isr_write(bm, bm_isr_enable, v) -#define bm_err_isr_disable_read(bm) \ - __bm_err_isr_read(bm, bm_isr_disable) -#define bm_err_isr_disable_write(bm, v) \ - __bm_err_isr_write(bm, bm_isr_disable, v) -#define bm_err_isr_inhibit(bm) \ - __bm_err_isr_write(bm, bm_isr_inhibit, 1) -#define bm_err_isr_uninhibit(bm) \ - __bm_err_isr_write(bm, bm_isr_inhibit, 0) +/* Cache-inhibited register access. */ +static inline u32 bm_in(struct bm_portal *p, u32 offset) +{ + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); +} + +static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) +{ + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); +} + +/* Cache Enabled Portal Access */ +static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset) +{ + dpaa_invalidate(p->addr.ce + offset); +} + +static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) +{ + dpaa_touch_ro(p->addr.ce + offset); +} + +static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) +{ + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); +} + +struct bman_portal { + struct bm_portal p; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + /* probing time config params for cpu-affine portals */ + const struct bm_portal_config *config; + char irqname[MAX_IRQNAME]; +}; #ifndef __rtems__ -static u16 bman_pool_max; -#else /* __rtems__ */ -/* FIXME */ -extern u16 bman_ip_rev; -extern u16 bman_pool_max; +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); #endif /* __rtems__ */ +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); + +static inline struct bman_portal *get_affine_portal(void) +{ + return &get_cpu_var(bman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(bman_affine_portal); +} /* - * TODO: unimplemented registers - * - * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT, - * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ + * This object type refers to a pool, it isn't *the* pool. There may be + * more than one such object per BMan buffer pool, eg. if different users of the + * pool are operating via different portals. */ +struct bman_pool { + /* index of the buffer pool to encapsulate (0-63) */ + u32 bpid; + /* Used for hash-table admin when using depletion notifications. */ + struct bman_portal *portal; + struct bman_pool *next; +}; -/* Encapsulate "struct bman *" as a cast of the register space address. 
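For orientation, an illustrative fragment (not part of the patch) showing how the new accessors just defined are meant to be used together: portal work is bracketed by get_affine_portal()/put_affine_portal(), and registers are reached through the cache-inhibited (CINH) window via bm_in()/bm_out(). This mirrors what portal_isr() below does with the interrupt status register:

	struct bman_portal *p = get_affine_portal();	/* per-CPU portal, preemption off */
	u32 is = bm_in(&p->p, BM_REG_ISR);		/* read IRQ status (CINH window) */
	bm_out(&p->p, BM_REG_ISR, is);			/* write-to-clear exactly what we saw */
	put_affine_portal();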
*/ +static u32 poll_portal_slow(struct bman_portal *p, u32 is); -static struct bman *bm_create(void *regs) +static irqreturn_t portal_isr(int irq, void *ptr) { - return (struct bman *)regs; + struct bman_portal *p = ptr; + struct bm_portal *portal = &p->p; + u32 clear = p->irq_sources; + u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + clear |= poll_portal_slow(p, is); + bm_out(portal, BM_REG_ISR, clear); + return IRQ_HANDLED; } -static inline u32 __bm_in(struct bman *bm, u32 offset) +/* --- RCR API --- */ + +#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry)) +#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT) + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p) { - return ioread32be((void *)bm + offset); + uintptr_t addr = (uintptr_t)p; + + addr &= ~RCR_CARRY; + + return (struct bm_rcr_entry *)addr; } -static inline void __bm_out(struct bman *bm, u32 offset, u32 val) + +#ifdef CONFIG_FSL_DPAA_CHECKING +/* Bit-wise logic to convert a ring pointer to a ring index */ +static int rcr_ptr2idx(struct bm_rcr_entry *e) { - iowrite32be(val, (void*) bm + offset); + return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1); } -#define bm_in(reg) __bm_in(bm, REG_##reg) -#define bm_out(reg, val) __bm_out(bm, REG_##reg, val) +#endif -static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n) +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void rcr_inc(struct bm_rcr *rcr) { - return __bm_in(bm, REG_ERR_ISR + (n << 2)); + /* increment to the next RCR pointer and handle overflow and 'vbit' */ + struct bm_rcr_entry *partial = rcr->cursor + 1; + + rcr->cursor = rcr_carryclear(partial); + if (partial != rcr->cursor) + rcr->vbit ^= BM_RCR_VERB_VBIT; } -static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val) +static int bm_rcr_get_avail(struct bm_portal *portal) { - __bm_out(bm, REG_ERR_ISR + (n << 2), val); + struct bm_rcr *rcr = &portal->rcr; + + return rcr->available; } -static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor) +static int bm_rcr_get_fill(struct bm_portal *portal) { - u32 v = bm_in(IP_REV_1); - *id = (v >> 16); - *major = (v >> 8) & 0xff; - *minor = v & 0xff; + struct bm_rcr *rcr = &portal->rcr; + + return BM_RCR_SIZE - 1 - rcr->available; } -static u32 __generate_thresh(u32 val, int roundup) +static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) { - u32 e = 0; /* co-efficient, exponent */ - int oddbit = 0; + struct bm_rcr *rcr = &portal->rcr; - while (val > 0xff) { - oddbit = val & 1; - val >>= 1; - e++; - if (roundup && oddbit) - val++; - } - DPA_ASSERT(e < 0x10); - return val | (e << 8); + rcr->ithresh = ithresh; + bm_out(portal, BM_REG_RCR_ITR, ithresh); } -static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt, - u32 hwdet, u32 hwdxt) +static void bm_rcr_cce_prefetch(struct bm_portal *portal) { - DPA_ASSERT(pool < bman_pool_max); + __maybe_unused struct bm_rcr *rcr = &portal->rcr; - bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0)); - bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1)); - bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0)); - bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1)); + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA); } -static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size) +static u8 bm_rcr_cce_update(struct bm_portal *portal) { - u32 exp = 
ilog2(size); - /* choke if size isn't within range */ - DPA_ASSERT((size >= 4096) && (size <= 1073741824) && - is_power_of_2(size)); - /* choke if '[e]ba' has lower-alignment than 'size' */ - DPA_ASSERT(!(ba & (size - 1))); - bm_out(FBPR_BARE, upper_32_bits(ba)); - bm_out(FBPR_BAR, lower_32_bits(ba)); - bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1)); + struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1); + bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA); + diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + return diff; } -/*****************/ -/* Config driver */ -/*****************/ +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + DPAA_ASSERT(!rcr->busy); + if (!rcr->available) + return NULL; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 1; +#endif + dpaa_zero(rcr->cursor); + return rcr->cursor; +} -/* We support only one of these. */ -static struct bman *bm; +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) +{ + struct bm_rcr *rcr = &portal->rcr; + struct bm_rcr_entry *rcursor; + + DPAA_ASSERT(rcr->busy); + DPAA_ASSERT(rcr->pmode == bm_rcr_pvb); + DPAA_ASSERT(rcr->available >= 1); + dma_wmb(); + rcursor = rcr->cursor; + rcursor->_ncw_verb = myverb | rcr->vbit; + dpaa_flush(rcursor); + rcr_inc(rcr); + rcr->available--; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 0; +#endif +} -/* And this state belongs to 'bm' */ -#ifndef __rtems__ -static dma_addr_t fbpr_a; -static size_t fbpr_sz; +static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, + enum bm_rcr_cmode cmode) +{ + struct bm_rcr *rcr = &portal->rcr; + u32 cfg; + u8 pi; + + rcr->ring = portal->addr.ce + BM_CL_RCR; + rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); + pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); + rcr->cursor = rcr->ring + pi; + rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ? 
+ BM_RCR_VERB_VBIT : 0; + rcr->available = BM_RCR_SIZE - 1 + - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi); + rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 0; + rcr->pmode = pmode; + rcr->cmode = cmode; +#endif + cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0) + | (pmode & 0x3); /* BCSP_CFG::RPM */ + bm_out(portal, BM_REG_CFG, cfg); + return 0; +} -static int bman_fbpr(struct reserved_mem *rmem) +static void bm_rcr_finish(struct bm_portal *portal) { - fbpr_a = rmem->base; - fbpr_sz = rmem->size; +#ifdef CONFIG_FSL_DPAA_CHECKING + struct bm_rcr *rcr = &portal->rcr; + int i; - WARN_ON(!(fbpr_a && fbpr_sz)); + DPAA_ASSERT(!rcr->busy); - return 0; + i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); + if (i != rcr_ptr2idx(rcr->cursor)) + pr_crit("losing uncommitted RCR entries\n"); + + i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); + if (i != rcr->ci) + pr_crit("missing existing RCR completions\n"); + if (rcr->ci != rcr_ptr2idx(rcr->cursor)) + pr_crit("RCR destroyed unquiesced\n"); +#endif } -RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); -#else /* __rtems__ */ -static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216); -#define fbpr_a ((uintptr_t)&fbpr[0]) -#define fbpr_sz sizeof(fbpr) -#endif /* __rtems__ */ -int bm_pool_set(u32 bpid, const u32 *thresholds) +/* --- Management command API --- */ +static int bm_mc_init(struct bm_portal *portal) { - if (!bm) - return -ENODEV; - bm_set_pool(bm, bpid, thresholds[0], thresholds[1], - thresholds[2], thresholds[3]); + struct bm_mc *mc = &portal->mc; + + mc->cr = portal->addr.ce + BM_CL_CR; + mc->rr = portal->addr.ce + BM_CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ? + 0 : 1; + mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_idle; +#endif return 0; } -EXPORT_SYMBOL(bm_pool_set); -static void log_edata_bits(u32 bit_count) +static void bm_mc_finish(struct bm_portal *portal) { - u32 i, j, mask = 0xffffffff; +#ifdef CONFIG_FSL_DPAA_CHECKING + struct bm_mc *mc = &portal->mc; - pr_warn("ErrInt, EDATA:\n"); - i = bit_count/32; - if (bit_count%32) { - i++; - mask = ~(mask << bit_count%32); - } - j = 16-i; - pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask); - j++; - for (; j < 16; j++) - pr_warn(" 0x%08x\n", bm_in(EDATA(j))); + DPAA_ASSERT(mc->state == mc_idle); + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif } -static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) { - union bman_ecir ecir_val; - union bman_eadr eadr_val; + struct bm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_user; +#endif + dpaa_zero(mc->cr); + return mc->cr; +} - ecir_val.ecir_raw = bm_in(ECIR); - /* Is portal info valid */ - if (ecsr_val & PORTAL_ECSR_ERR) { - pr_warn("ErrInt: SWP id %d, numb %d, pid %d\n", - ecir_val.info.portal_num, ecir_val.info.numb, - ecir_val.info.pid); - } - if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) { - eadr_val.eadr_raw = bm_in(EADR); - pr_warn("ErrInt: EADR Memory: %s, 0x%x\n", - error_mdata[eadr_val.info.memid].txt, - error_mdata[eadr_val.info.memid].addr_mask - & eadr_val.info.eadr); - log_edata_bits(error_mdata[eadr_val.info.memid].bits); - } +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) +{ + struct bm_mc *mc = &portal->mc; + union bm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == 
mc_user); + dma_wmb(); + mc->cr->_ncw_verb = myverb | mc->vbit; + dpaa_flush(mc->cr); + dpaa_invalidate_touch_ro(rr); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_hw; +#endif } -/* BMan interrupt handler */ -static irqreturn_t bman_isr(int irq, void *ptr) +static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal) { - u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct bm_mc *mc = &portal->mc; + union bm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == mc_hw); + /* + * The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... + */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= BM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} - ier_val = bm_err_isr_enable_read(bm); - isr_val = bm_err_isr_status_read(bm); - ecsr_val = bm_in(ECSR); - isr_mask = isr_val & ier_val; +static inline int bm_mc_result_timeout(struct bm_portal *portal, + union bm_mc_result **mcr) +{ + int timeout = BM_MCR_TIMEOUT; - if (!isr_mask) - return IRQ_NONE; + do { + *mcr = bm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); - for (i = 0; i < BMAN_HWE_COUNT; i++) { - if (bman_hwerr_txts[i].mask & isr_mask) { - pr_warn("ErrInt: %s\n", bman_hwerr_txts[i].txt); - if (bman_hwerr_txts[i].mask & ecsr_val) { - log_additional_error_info(isr_mask, ecsr_val); - /* Re-arm error capture registers */ - bm_out(ECSR, ecsr_val); - } - if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) { - pr_devel("Un-enabling error 0x%x\n", - bman_hwerr_txts[i].mask); - ier_val &= ~bman_hwerr_txts[i].mask; - bm_err_isr_enable_write(bm, ier_val); - } - } - } - bm_err_isr_status_clear(bm, isr_val); - - return IRQ_HANDLED; + return timeout; } -u32 bm_pool_free_buffers(u32 bpid) +/* Disable all BSCN interrupts for the portal */ +static void bm_isr_bscn_disable(struct bm_portal *portal) { - return bm_in(POOL_CONTENT(bpid)); + bm_out(portal, BM_REG_SCN(0), 0); + bm_out(portal, BM_REG_SCN(1), 0); } -EXPORT_SYMBOL(bm_pool_free_buffers); -#ifndef __rtems__ -static ssize_t show_fbpr_fpc(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int bman_create_portal(struct bman_portal *portal, + const struct bm_portal_config *c) { - return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC)); -}; + struct bm_portal *p; + int ret; -static ssize_t show_pool_count(struct device *dev, - struct device_attribute *dev_attr, char *buf) -{ - u32 data; - int i; + p = &portal->p; + /* + * prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... + */ + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; + if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { + dev_err(c->dev, "RCR initialisation failed\n"); + goto fail_rcr; + } + if (bm_mc_init(p)) { + dev_err(c->dev, "MC initialisation failed\n"); + goto fail_mc; + } + /* + * Default to all BPIDs disabled, we enable as required at + * run-time. 
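The management-command interface defined above (bm_mc_start()/bm_mc_commit()/bm_mc_result_timeout()) follows a strict start/commit/poll protocol: the command body is filled first, and the verb byte, carrying the valid-bit, is written last behind a write barrier. A minimal sketch of a caller, modelled on bman_acquire() later in this file; 'portal' and 'bpid' are assumed to be a mapped struct bm_portal and a valid pool id:

	static int example_acquire_one(struct bm_portal *portal, u8 bpid)
	{
		struct bm_mc_command *mcc;
		union bm_mc_result *mcr;

		mcc = bm_mc_start(portal);	/* zeroed command buffer */
		mcc->bpid = bpid;		/* fill the body first... */
		/* ...commit writes the verb (+ valid-bit) last, after dma_wmb() */
		bm_mc_commit(portal, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(portal, &mcr))
			return -ETIMEDOUT;	/* no response within ~10 ms */
		return mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; /* 0 or 1 */
	}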
+ */ + bm_isr_bscn_disable(p); + + /* Write-to-clear any stale interrupt status bits */ + bm_out(p, BM_REG_ISDR, 0xffffffff); + portal->irq_sources = 0; + bm_out(p, BM_REG_IER, 0); + bm_out(p, BM_REG_ISR, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { + dev_err(c->dev, "request_irq() failed\n"); + goto fail_irq; + } +#ifndef __rtems__ + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { + dev_err(c->dev, "irq_set_affinity() failed\n"); + goto fail_affinity; + } +#endif /* __rtems__ */ - if (kstrtoint(dev_attr->attr.name, 10, &i)) - return -EINVAL; - data = bm_in(POOL_CONTENT(i)); - return snprintf(buf, PAGE_SIZE, "%d\n", data); -}; + /* Need RCR to be empty before continuing */ + ret = bm_rcr_get_fill(p); + if (ret) { + dev_err(c->dev, "RCR unclean\n"); + goto fail_rcr_empty; + } + /* Success */ + portal->config = c; -static ssize_t show_err_isr(struct device *dev, - struct device_attribute *dev_attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR)); -}; + bm_out(p, BM_REG_ISDR, 0); + bm_out(p, BM_REG_IIR, 0); + + return 0; -static ssize_t show_sbec(struct device *dev, - struct device_attribute *dev_attr, char *buf) +fail_rcr_empty: +#ifndef __rtems__ +fail_affinity: +#endif /* __rtems__ */ + free_irq(c->irq, portal); +fail_irq: + bm_mc_finish(p); +fail_mc: + bm_rcr_finish(p); +fail_rcr: + return -EIO; +} + +struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c) { - int i; + struct bman_portal *portal; + int err; - if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1) - return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); -}; + portal = &per_cpu(bman_affine_portal, c->cpu); + err = bman_create_portal(portal, c); + if (err) + return NULL; -static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); -static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL); +#ifndef __rtems__ + spin_lock(&affine_mask_lock); + cpumask_set_cpu(c->cpu, &affine_mask); + spin_unlock(&affine_mask_lock); +#endif /* __rtems__ */ -/* Didn't use DEVICE_ATTR as 64 of this would be required. - * Initialize them when needed. 
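Stepping back to the new RCR code earlier in this file: bm_rcr_start()/bm_rcr_pvb_commit() implement the same valid-bit production idea on the release ring — the entry body is written first and the verb byte last, so hardware never consumes a half-written command. An illustrative single-buffer release, assuming a portal 'p', a physical buffer address 'phys' and pool id 'bpid' (the real bman_release() additionally handles ring-full retry and IRQ masking):

	struct bm_rcr_entry *r = bm_rcr_start(&p->p);	/* NULL if the ring is full */
	if (r) {
		bm_buffer_set64(r->bufs, phys);		/* body first */
		bm_buffer_set_bpid(r->bufs, bpid);
		/* publish: verb (+ valid-bit) written last by the commit */
		bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | 1);
	}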
*/ -static char *name_attrs_pool_count; /* "xx" + null-terminator */ -static struct device_attribute *dev_attr_buffer_pool_count; + return portal; +} -static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); +static u32 poll_portal_slow(struct bman_portal *p, u32 is) +{ + u32 ret = is; -static struct attribute *bman_dev_attributes[] = { - &dev_attr_fbpr_fpc.attr, - &dev_attr_err_isr.attr, - NULL -}; + if (is & BM_PIRQ_RCRI) { + bm_rcr_cce_update(&p->p); + bm_rcr_set_ithresh(&p->p, 0); + bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI); + is &= ~BM_PIRQ_RCRI; + } -static struct attribute *bman_dev_ecr_attributes[] = { - &dev_attr_sbec_0.attr, - &dev_attr_sbec_1.attr, - NULL -}; + /* There should be no status register bits left undefined */ + DPAA_ASSERT(!is); + return ret; +} -static struct attribute **bman_dev_pool_count_attributes; +int bman_p_irqsource_add(struct bman_portal *p, u32 bits) +{ + unsigned long irqflags; -/* root level */ -static const struct attribute_group bman_dev_attr_grp = { - .name = NULL, - .attrs = bman_dev_attributes -}; -static const struct attribute_group bman_dev_ecr_grp = { - .name = "error_capture", - .attrs = bman_dev_ecr_attributes -}; -static struct attribute_group bman_dev_pool_countent_grp = { - .name = "pool_count", -}; + local_irq_save(irqflags); + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); + bm_out(&p->p, BM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); + return 0; +} -static int of_fsl_bman_remove(struct platform_device *ofdev) +static int bm_shutdown_pool(u32 bpid) { - sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); + struct bm_mc_command *bm_cmd; + union bm_mc_result *bm_res; + + while (1) { + struct bman_portal *p = get_affine_portal(); + /* Acquire buffers until empty */ + bm_cmd = bm_mc_start(&p->p); + bm_cmd->bpid = bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); + if (!bm_mc_result_timeout(&p->p, &bm_res)) { + put_affine_portal(); + pr_crit("BMan Acquire Command timedout\n"); + return -ETIMEDOUT; + } + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { + put_affine_portal(); + /* Pool is empty */ + return 0; + } + put_affine_portal(); + } + return 0; -}; -#endif /* __rtems__ */ +} + +struct gen_pool *bm_bpalloc; -static int of_fsl_bman_probe(struct platform_device *ofdev) +static int bm_alloc_bpid_range(u32 *result, u32 count) { - int ret, err_irq, i; - struct device *dev = &ofdev->dev; - struct device_node *node = dev->of_node; - struct resource res; - u32 __iomem *regs; - u16 id; - u8 major, minor; + unsigned long addr; - if (!of_device_is_available(node)) - return -ENODEV; + addr = gen_pool_alloc(bm_bpalloc, count); + if (!addr) + return -ENOMEM; - ret = of_address_to_resource(node, 0, &res); - if (ret) { - dev_err(dev, "Can't get %s property 'reg'\n", node->full_name); - return ret; - } - regs = devm_ioremap(dev, res.start, res.end - res.start + 1); - if (!regs) - return -ENXIO; - - bm = bm_create(regs); - - bm_get_version(bm, &id, &major, &minor); - dev_info(dev, "Bman ver:%04x,%02x,%02x\n", id, major, minor); - if ((major == 1) && (minor == 0)) - bman_pool_max = 64; - else if ((major == 2) && (minor == 0)) - bman_pool_max = 8; - else if ((major == 2) && (minor == 1)) - bman_pool_max = 64; - else - dev_warn(dev, "unknown Bman version, default to rev1.0\n"); -#ifdef __rtems__ - bman_ip_rev = (u16)((major << 8) | minor); -#endif /* __rtems__ */ + *result = addr & ~DPAA_GENALLOC_OFF; + return 0; +} - bm_set_memory(bm, fbpr_a, 0, fbpr_sz); +static int 
bm_release_bpid(u32 bpid) +{ + int ret; - err_irq = of_irq_to_resource(node, 0, NULL); - if (err_irq == NO_IRQ) { - dev_info(dev, "Can't get %s property 'interrupts'\n", - node->full_name); - return -ENODEV; - } - ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err", - node); - if (ret) { - dev_err(dev, "devm_request_irq() failed %d for '%s'\n", - ret, node->full_name); + ret = bm_shutdown_pool(bpid); + if (ret) { + pr_debug("BPID %d leaked\n", bpid); return ret; } - /* Disable Buffer Pool State Change */ - bm_err_isr_disable_write(bm, BM_EIRQ_BSCN); - /* Write-to-clear any stale bits, (eg. starvation being asserted prior - * to resource allocation during driver init). */ - bm_err_isr_status_clear(bm, 0xffffffff); - /* Enable Error Interrupts */ - bm_err_isr_enable_write(bm, 0xffffffff); -#ifndef __rtems__ - ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp); - if (ret) - goto done; - ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp); - if (ret) - goto del_group_0; - - name_attrs_pool_count = devm_kmalloc(dev, - sizeof(char) * bman_pool_max * 3, GFP_KERNEL); - if (!name_attrs_pool_count) - goto del_group_1; - - dev_attr_buffer_pool_count = devm_kmalloc(dev, - sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL); - if (!dev_attr_buffer_pool_count) - goto del_group_1; - - bman_dev_pool_count_attributes = devm_kmalloc(dev, - sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL); - if (!bman_dev_pool_count_attributes) - goto del_group_1; - - for (i = 0; i < bman_pool_max; i++) { - ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i); - if (!ret) - goto del_group_1; - dev_attr_buffer_pool_count[i].attr.name = - (name_attrs_pool_count + i * 3); - dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR; - dev_attr_buffer_pool_count[i].show = show_pool_count; - bman_dev_pool_count_attributes[i] = - &dev_attr_buffer_pool_count[i].attr; - } - bman_dev_pool_count_attributes[bman_pool_max] = NULL; - - bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes; - - ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_countent_grp); - if (ret) - goto del_group_1; + gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1); + return 0; +} - goto done; +struct bman_pool *bman_new_pool(void) +{ + struct bman_pool *pool = NULL; + u32 bpid; -del_group_1: - sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp); -del_group_0: - sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp); -done: - if (ret) - dev_err(dev, "Cannot create dev attributes ret=%d\n", ret); -#else /* __rtems__ */ - (void)i; -#endif /* __rtems__ */ + if (bm_alloc_bpid_range(&bpid, 1)) + return NULL; - return ret; -}; + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + goto err; -#ifndef __rtems__ -static const struct of_device_id of_fsl_bman_ids[] = { - { - .compatible = "fsl,bman", - }, - {} -}; + pool->bpid = bpid; -static struct platform_driver of_fsl_bman_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = of_fsl_bman_ids, - }, - .probe = of_fsl_bman_probe, - .remove = of_fsl_bman_remove, -}; + return pool; +err: + bm_release_bpid(bpid); + kfree(pool); + return NULL; +} +EXPORT_SYMBOL(bman_new_pool); -builtin_platform_driver(of_fsl_bman_driver); -#else /* __rtems__ */ -#include <sys/types.h> -#include <sys/kernel.h> -#include <rtems.h> -#include <bsp/fdt.h> -#include <bsp/qoriq.h> +void bman_free_pool(struct bman_pool *pool) +{ + bm_release_bpid(pool->bpid); -static struct bm_portal_config bman_configs[NR_CPUS]; + kfree(pool); +} +EXPORT_SYMBOL(bman_free_pool); -u16 
bman_ip_rev; +int bman_get_bpid(const struct bman_pool *pool) +{ + return pool->bpid; +} +EXPORT_SYMBOL(bman_get_bpid); -u16 bman_pool_max; +static void update_rcr_ci(struct bman_portal *p, int avail) +{ + if (avail) + bm_rcr_cce_prefetch(&p->p); + else + bm_rcr_cce_update(&p->p); +} -SYSINIT_REFERENCE(irqs); +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num) +{ + struct bman_portal *p; + struct bm_rcr_entry *r; + unsigned long irqflags; + int avail, timeout = 1000; /* 1ms */ + int i = num - 1; + + DPAA_ASSERT(num > 0 && num <= 8); + + do { + p = get_affine_portal(); + local_irq_save(irqflags); + avail = bm_rcr_get_avail(&p->p); + if (avail < 2) + update_rcr_ci(p, avail); + r = bm_rcr_start(&p->p); + local_irq_restore(irqflags); + put_affine_portal(); + if (likely(r)) + break; + + udelay(1); + } while (--timeout); + + if (unlikely(!timeout)) + return -ETIMEDOUT; + + p = get_affine_portal(); + local_irq_save(irqflags); + /* + * we can copy all but the first entry, as this can trigger badness + * with the valid-bit + */ + bm_buffer_set64(r->bufs, bm_buffer_get64(bufs)); + bm_buffer_set_bpid(r->bufs, pool->bpid); + if (i) + memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); + + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | + (num & BM_RCR_VERB_BUFCOUNT_MASK)); + + local_irq_restore(irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_release); -static void -bman_sysinit(void) +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num) { - const char *fdt = bsp_fdt_get(); - struct device_node dn; - struct platform_device ofdev = { - .dev = { - .of_node = &dn, - .base = (uintptr_t)&qoriq - } - }; - const char *name; - int cpu_count = (int)rtems_get_processor_count(); - int cpu; + struct bman_portal *p = get_affine_portal(); + struct bm_mc_command *mcc; + union bm_mc_result *mcr; int ret; - int node; - int parent; - - qoriq_reset_qman_and_bman(); - qoriq_clear_ce_portal(&qoriq_bman_portal[0][0], - sizeof(qoriq_bman_portal[0])); - qoriq_clear_ci_portal(&qoriq_bman_portal[1][0], - sizeof(qoriq_bman_portal[1])); - - memset(&dn, 0, sizeof(dn)); - - name = "fsl,bman"; - node = fdt_node_offset_by_compatible(fdt, 0, name); - if (node < 0) - panic("bman: no bman in FDT"); - - dn.full_name = name; - dn.offset = node; - ret = of_fsl_bman_probe(&ofdev); - if (ret != 0) - panic("bman: probe failed"); - - name = "fsl,bman-portal"; - node = fdt_node_offset_by_compatible(fdt, 0, name); - if (node < 0) - panic("bman: no portals in FDT"); - parent = fdt_parent_offset(fdt, node); - if (parent < 0) - panic("bman: no parent of portals in FDT"); - node = fdt_first_subnode(fdt, parent); - - dn.full_name = name; - dn.offset = node; - - for (cpu = 0; cpu < cpu_count; ++cpu) { - struct bm_portal_config *pcfg = &bman_configs[cpu]; - struct bman_portal *portal; - struct resource res; - - if (node < 0) - panic("bman: missing portal in FDT"); - - ret = of_address_to_resource(&dn, 0, &res); - if (ret != 0) - panic("bman: no portal CE address"); - pcfg->addr_virt[0] = (__iomem void *) - ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start); - BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >= - (uintptr_t)&qoriq_bman_portal[0][0]); - BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] < - (uintptr_t)&qoriq_bman_portal[1][0]); - - ret = of_address_to_resource(&dn, 1, &res); - if (ret != 0) - panic("bman: no portal CI address"); - pcfg->addr_virt[1] = (__iomem void *) - ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start); - 
BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >= - (uintptr_t)&qoriq_bman_portal[1][0]); - BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] < - (uintptr_t)&qoriq_bman_portal[2][0]); - - pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL); - if (pcfg->public_cfg.irq == NO_IRQ) - panic("bman: no portal interrupt"); - - pcfg->public_cfg.cpu = cpu; - bman_depletion_fill(&pcfg->public_cfg.mask); - - portal = bman_create_affine_portal(pcfg); - if (portal == NULL) - panic("bman: cannot create portal"); - - bman_p_irqsource_add(portal, BM_PIRQ_RCRI | BM_PIRQ_BSCN); - - node = fdt_next_subnode(fdt, node); - dn.offset = node; + + DPAA_ASSERT(num > 0 && num <= 8); + + mcc = bm_mc_start(&p->p); + mcc->bpid = pool->bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); + if (!bm_mc_result_timeout(&p->p, &mcr)) { + put_affine_portal(); + pr_crit("BMan Acquire Timeout\n"); + return -ETIMEDOUT; } + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; + if (bufs) + memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0])); - bman_seed_bpid_range(0, bman_pool_max); + put_affine_portal(); + if (ret != num) + ret = -ENOMEM; + return ret; +} +EXPORT_SYMBOL(bman_acquire); + +const struct bm_portal_config * +bman_get_bm_portal_config(const struct bman_portal *portal) +{ + return portal->config; } -SYSINIT(bman_sysinit, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL); -#endif /* __rtems__ */ diff --git a/linux/drivers/soc/fsl/qbman/bman_api.c b/linux/drivers/soc/fsl/qbman/bman_api.c deleted file mode 100644 index cdfcebbd..00000000 --- a/linux/drivers/soc/fsl/qbman/bman_api.c +++ /dev/null @@ -1,1123 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
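One detail of the new bman.c above worth calling out: buffer-pool ids come from a genalloc pool (bm_bpalloc). Since gen_pool_alloc() returns 0 on failure, valid ids are biased by DPAA_GENALLOC_OFF on allocation and the bias is masked off before use. A hedged sketch of the round-trip, following bm_alloc_bpid_range()/bm_release_bpid() (the real release path also drains the pool via bm_shutdown_pool() first):

	unsigned long addr = gen_pool_alloc(bm_bpalloc, 1);	/* one BPID */
	if (addr) {
		u32 bpid = addr & ~DPAA_GENALLOC_OFF;		/* strip the bias */
		/* ... use the pool ... */
		gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	}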
- */ - -#include "bman.h" - -/* Compilation constants */ -#define RCR_THRESH 2 /* reread h/w CI when running out of space */ -#define IRQNAME "BMan portal %d" -#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ -#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */ - -struct bman_portal { - struct bm_portal p; - /* 2-element array. pools[0] is mask, pools[1] is snapshot. */ - struct bman_depletion *pools; - int thresh_set; - unsigned long irq_sources; - u32 slowpoll; /* only used when interrupts are off */ -#ifdef FSL_DPA_CAN_WAIT_SYNC - struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */ -#endif -#ifdef FSL_DPA_PORTAL_SHARE - raw_spinlock_t sharing_lock; /* only used if is_shared */ -#ifndef __rtems__ - int is_shared; - struct bman_portal *sharing_redirect; -#endif /* __rtems__ */ -#endif - /* When the cpu-affine portal is activated, this is non-NULL */ - const struct bm_portal_config *config; - /* 64-entry hash-table of pool objects that are tracking depletion - * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so - * we're not fussy about cache-misses and so forth - whereas the above - * members should all fit in one cacheline. - * BTW, with 64 entries in the hash table and 64 buffer pools to track, - * you'll never guess the hash-function ... */ - struct bman_pool *cb[64]; - char irqname[MAX_IRQNAME]; - /* Track if the portal was alloced by the driver */ - u8 alloced; -}; - - -#ifdef FSL_DPA_PORTAL_SHARE -/* For an explanation of the locking, redirection, or affine-portal logic, - * please consult the QMan driver for details. This is the same, only simpler - * (no fiddly QMan-specific bits.) */ -#ifndef __rtems__ -#define PORTAL_IRQ_LOCK(p, irqflags) \ - do { \ - if ((p)->is_shared) \ - raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ - else \ - local_irq_save(irqflags); \ - } while (0) -#define PORTAL_IRQ_UNLOCK(p, irqflags) \ - do { \ - if ((p)->is_shared) \ - raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ - irqflags); \ - else \ - local_irq_restore(irqflags); \ - } while (0) -#else /* __rtems__ */ -#define PORTAL_IRQ_LOCK(p, irqflags) \ - raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags) -#define PORTAL_IRQ_UNLOCK(p, irqflags) \ - raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags) -#endif /* __rtems__ */ -#else -#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) -#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) -#endif - -#ifndef __rtems__ -static cpumask_t affine_mask; -static DEFINE_SPINLOCK(affine_mask_lock); -#endif /* __rtems__ */ -static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); -static inline struct bman_portal *get_raw_affine_portal(void) -{ - return &get_cpu_var(bman_affine_portal); -} -#ifdef FSL_DPA_PORTAL_SHARE -static inline struct bman_portal *get_affine_portal(void) -{ - struct bman_portal *p = get_raw_affine_portal(); - -#ifndef __rtems__ - if (p->sharing_redirect) - return p->sharing_redirect; -#endif /* __rtems__ */ - return p; -} -#else -#define get_affine_portal() get_raw_affine_portal() -#endif -static inline void put_affine_portal(void) -{ - put_cpu_var(bman_affine_portal); -} -static inline struct bman_portal *get_poll_portal(void) -{ - return this_cpu_ptr(&bman_affine_portal); -} -#define put_poll_portal() - -/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be - * more than one such object per BMan buffer pool, eg. if different users of the - * pool are operating via different portals. 
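Against the old API being deleted below, the new public surface is deliberately small: bman_new_pool() takes no parameters (the BPID is allocated internally), and release/acquire are plain counted operations with no flags. A usage sketch of the new calls defined earlier in bman.c; 'phys' is a hypothetical buffer physical address, and bm_buffer_get64() is assumed from the driver's bman.h:

	struct bman_pool *pool = bman_new_pool();
	struct bm_buffer buf;

	if (pool) {
		bm_buffer_set64(&buf, phys);
		if (bman_release(pool, &buf, 1) == 0 &&	/* give buffer to BMan */
		    bman_acquire(pool, &buf, 1) == 1)	/* take one back */
			phys = bm_buffer_get64(&buf);
		bman_free_pool(pool);
	}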
*/ -struct bman_pool { - struct bman_pool_params params; - /* Used for hash-table admin when using depletion notifications. */ - struct bman_portal *portal; - struct bman_pool *next; - /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */ - struct bm_buffer *sp; - unsigned int sp_fill; -#ifdef CONFIG_FSL_DPA_CHECKING - atomic_t in_use; -#endif -}; - -/* (De)Registration of depletion notification callbacks */ -static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) -{ - __maybe_unused unsigned long irqflags; - - pool->portal = portal; - PORTAL_IRQ_LOCK(portal, irqflags); - pool->next = portal->cb[pool->params.bpid]; - portal->cb[pool->params.bpid] = pool; - if (!pool->next) - /* First object for that bpid on this portal, enable the BSCN - * mask bit. */ - bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); - PORTAL_IRQ_UNLOCK(portal, irqflags); -} -static void depletion_unlink(struct bman_pool *pool) -{ - struct bman_pool *it, *last = NULL; - struct bman_pool **base = &pool->portal->cb[pool->params.bpid]; - __maybe_unused unsigned long irqflags; - - PORTAL_IRQ_LOCK(pool->portal, irqflags); - it = *base; /* <-- gotcha, don't do this prior to the irq_save */ - while (it != pool) { - last = it; - it = it->next; - } - if (!last) - *base = pool->next; - else - last->next = pool->next; - if (!last && !pool->next) { - /* Last object for that bpid on this portal, disable the BSCN - * mask bit. */ - bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0); - /* And "forget" that we last saw this pool as depleted */ - bman_depletion_unset(&pool->portal->pools[1], - pool->params.bpid); - } - PORTAL_IRQ_UNLOCK(pool->portal, irqflags); -} - -/* In the case that the application's core loop calls qman_poll() and - * bman_poll(), we ought to balance how often we incur the overheads of the - * slow-path poll. We'll use two decrementer sources. The idle decrementer - * constant is used when the last slow-poll detected no work to do, and the busy - * decrementer constant when the last slow-poll had work to do. */ -#define SLOW_POLL_IDLE 1000 -#define SLOW_POLL_BUSY 10 -static u32 __poll_portal_slow(struct bman_portal *p, u32 is); - -/* Portal interrupt handler */ -static irqreturn_t portal_isr(__always_unused int irq, void *ptr) -{ - struct bman_portal *p = ptr; - u32 clear = p->irq_sources; - u32 is = bm_isr_status_read(&p->p) & p->irq_sources; - - clear |= __poll_portal_slow(p, is); - bm_isr_status_clear(&p->p, clear); - return IRQ_HANDLED; -} - - -struct bman_portal *bman_create_portal( - struct bman_portal *portal, - const struct bm_portal_config *config) -{ - struct bm_portal *__p; - const struct bman_depletion *pools = &config->public_cfg.mask; - int ret; - u8 bpid = 0; - - if (!portal) { - portal = kmalloc(sizeof(*portal), GFP_KERNEL); - if (!portal) - return portal; - portal->alloced = 1; - } else - portal->alloced = 0; - - __p = &portal->p; - - /* prep the low-level portal struct with the mapped addresses from the - * config, everything that follows depends on it and "config" is more - * for (de)reference... 
*/ - __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; - __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; - if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) { - pr_err("RCR initialisation failed\n"); - goto fail_rcr; - } - if (bm_mc_init(__p)) { - pr_err("MC initialisation failed\n"); - goto fail_mc; - } - if (bm_isr_init(__p)) { - pr_err("ISR initialisation failed\n"); - goto fail_isr; - } - portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL); - if (!portal->pools) - goto fail_pools; - portal->pools[0] = *pools; - bman_depletion_init(portal->pools + 1); - while (bpid < bman_pool_max) { - /* Default to all BPIDs disabled, we enable as required at - * run-time. */ - bm_isr_bscn_mask(__p, bpid, 0); - bpid++; - } - portal->slowpoll = 0; -#ifdef FSL_DPA_CAN_WAIT_SYNC - portal->rcri_owned = NULL; -#endif -#ifdef FSL_DPA_PORTAL_SHARE - raw_spin_lock_init(&portal->sharing_lock); -#ifndef __rtems__ - portal->is_shared = config->public_cfg.is_shared; - portal->sharing_redirect = NULL; -#endif /* __rtems__ */ -#endif - memset(&portal->cb, 0, sizeof(portal->cb)); - /* Write-to-clear any stale interrupt status bits */ - bm_isr_disable_write(__p, 0xffffffff); - portal->irq_sources = 0; - bm_isr_enable_write(__p, portal->irq_sources); - bm_isr_status_clear(__p, 0xffffffff); - snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); - if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, - portal)) { - pr_err("request_irq() failed\n"); - goto fail_irq; - } -#ifndef __rtems__ - if ((config->public_cfg.cpu != -1) && - irq_can_set_affinity(config->public_cfg.irq) && - irq_set_affinity(config->public_cfg.irq, - cpumask_of(config->public_cfg.cpu))) { - pr_err("irq_set_affinity() failed\n"); - goto fail_affinity; - } -#endif /* __rtems__ */ - - /* Need RCR to be empty before continuing */ - ret = bm_rcr_get_fill(__p); - if (ret) { - pr_err("RCR unclean\n"); - goto fail_rcr_empty; - } - /* Success */ - portal->config = config; - - bm_isr_disable_write(__p, 0); - bm_isr_uninhibit(__p); - return portal; -fail_rcr_empty: -#ifndef __rtems__ -fail_affinity: -#endif /* __rtems__ */ - free_irq(config->public_cfg.irq, portal); -fail_irq: - kfree(portal->pools); -fail_pools: - bm_isr_finish(__p); -fail_isr: - bm_mc_finish(__p); -fail_mc: - bm_rcr_finish(__p); -fail_rcr: - if (portal->alloced) - kfree(portal); - return NULL; -} - -struct bman_portal *bman_create_affine_portal( - const struct bm_portal_config *config) -{ - struct bman_portal *portal; - - portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu); - portal = bman_create_portal(portal, config); -#ifndef __rtems__ - if (portal) { - spin_lock(&affine_mask_lock); - cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); - spin_unlock(&affine_mask_lock); - } -#endif /* __rtems__ */ - return portal; -} - - -#ifndef __rtems__ -struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect, - int cpu) -{ -#ifdef FSL_DPA_PORTAL_SHARE - struct bman_portal *p = &per_cpu(bman_affine_portal, cpu); - - BUG_ON(p->config); - BUG_ON(p->is_shared); - BUG_ON(!redirect->config->public_cfg.is_shared); - p->irq_sources = 0; - p->sharing_redirect = redirect; - put_affine_portal(); - return p; -#else - BUG(); - return NULL; -#endif -} -#endif /* __rtems__ */ - -void bman_destroy_portal(struct bman_portal *bm) -{ - const struct bm_portal_config *pcfg = bm->config; - - bm_rcr_cce_update(&bm->p); - bm_rcr_cce_update(&bm->p); - - free_irq(pcfg->public_cfg.irq, bm); - - kfree(bm->pools); - bm_isr_finish(&bm->p); - 
bm_mc_finish(&bm->p); - bm_rcr_finish(&bm->p); - bm->config = NULL; - if (bm->alloced) - kfree(bm); -} - -const struct bm_portal_config *bman_destroy_affine_portal(void) -{ - struct bman_portal *bm = get_raw_affine_portal(); - const struct bm_portal_config *pcfg; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (bm->sharing_redirect) { - bm->sharing_redirect = NULL; - put_affine_portal(); - return NULL; - } - bm->is_shared = 0; -#endif /* __rtems__ */ -#endif - pcfg = bm->config; - bman_destroy_portal(bm); -#ifndef __rtems__ - spin_lock(&affine_mask_lock); - cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask); - spin_unlock(&affine_mask_lock); -#endif /* __rtems__ */ - put_affine_portal(); - return pcfg; -} - -/* When release logic waits on available RCR space, we need a global waitqueue - * in the case of "affine" use (as the waits wake on different cpus which means - * different portals - so we can't wait on any per-portal waitqueue). */ -static DECLARE_WAIT_QUEUE_HEAD(affine_queue); - -static u32 __poll_portal_slow(struct bman_portal *p, u32 is) -{ - struct bman_depletion tmp; - u32 ret = is; - - /* There is a gotcha to be aware of. If we do the query before clearing - * the status register, we may miss state changes that occur between the - * two. If we write to clear the status register before the query, the - * cache-enabled query command may overtake the status register write - * unless we use a heavyweight sync (which we don't want). Instead, we - * write-to-clear the status register then *read it back* before doing - * the query, hence the odd while loop with the 'is' accumulation. */ - if (is & BM_PIRQ_BSCN) { - struct bm_mc_result *mcr; - __maybe_unused unsigned long irqflags; - unsigned int i, j; - u32 __is; - - bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); - while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) { - is |= __is; - bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); - } - is &= ~BM_PIRQ_BSCN; - PORTAL_IRQ_LOCK(p, irqflags); - bm_mc_start(&p->p); - bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); - while (!(mcr = bm_mc_result(&p->p))) - cpu_relax(); - tmp = mcr->query.ds.state; - PORTAL_IRQ_UNLOCK(p, irqflags); - for (i = 0; i < 2; i++) { - int idx = i * 32; - /* tmp is a mask of currently-depleted pools. - * pools[0] is mask of those we care about. - * pools[1] is our previous view (we only want to - * be told about changes). */ - tmp.__state[i] &= p->pools[0].__state[i]; - if (tmp.__state[i] == p->pools[1].__state[i]) - /* fast-path, nothing to see, move along */ - continue; - for (j = 0; j <= 31; j++, idx++) { - struct bman_pool *pool = p->cb[idx]; - int b4 = bman_depletion_get(&p->pools[1], idx); - int af = bman_depletion_get(&tmp, idx); - - if (b4 == af) - continue; - while (pool) { - pool->params.cb(p, pool, - pool->params.cb_ctx, af); - pool = pool->next; - } - } - } - p->pools[1] = tmp; - } - - if (is & BM_PIRQ_RCRI) { - __maybe_unused unsigned long irqflags; - - PORTAL_IRQ_LOCK(p, irqflags); - bm_rcr_cce_update(&p->p); -#ifdef FSL_DPA_CAN_WAIT_SYNC - /* If waiting for sync, we only cancel the interrupt threshold - * when the ring utilisation hits zero. 
*/ - if (p->rcri_owned) { - if (!bm_rcr_get_fill(&p->p)) { - p->rcri_owned = NULL; - bm_rcr_set_ithresh(&p->p, 0); - } - } else -#endif - bm_rcr_set_ithresh(&p->p, 0); - PORTAL_IRQ_UNLOCK(p, irqflags); - wake_up(&affine_queue); - bm_isr_status_clear(&p->p, BM_PIRQ_RCRI); - is &= ~BM_PIRQ_RCRI; - } - - /* There should be no status register bits left undefined */ - DPA_ASSERT(!is); - return ret; -} - -const struct bman_portal_config *bman_get_portal_config(void) -{ - struct bman_portal *p = get_affine_portal(); - const struct bman_portal_config *ret = &p->config->public_cfg; - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(bman_get_portal_config); - -u32 bman_irqsource_get(void) -{ - struct bman_portal *p = get_raw_affine_portal(); - u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(bman_irqsource_get); - -int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits) -{ - __maybe_unused unsigned long irqflags; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (p->sharing_redirect) - return -EINVAL; -#endif /* __rtems__ */ -#endif - PORTAL_IRQ_LOCK(p, irqflags); - set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); - bm_isr_enable_write(&p->p, p->irq_sources); - PORTAL_IRQ_UNLOCK(p, irqflags); - return 0; -} -EXPORT_SYMBOL(bman_p_irqsource_add); - -int bman_irqsource_add(__maybe_unused u32 bits) -{ - struct bman_portal *p = get_raw_affine_portal(); - int ret = bman_p_irqsource_add(p, bits); - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(bman_irqsource_add); - -int bman_irqsource_remove(u32 bits) -{ - struct bman_portal *p = get_raw_affine_portal(); - __maybe_unused unsigned long irqflags; - u32 ier; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (p->sharing_redirect) { - put_affine_portal(); - return -EINVAL; - } -#endif /* __rtems__ */ -#endif - /* Our interrupt handler only processes+clears status register bits that - * are in p->irq_sources. As we're trimming that mask, if one of them - * were to assert in the status register just before we remove it from - * the enable register, there would be an interrupt-storm when we - * release the IRQ lock. So we wait for the enable register update to - * take effect in h/w (by reading it back) and then clear all other bits - * in the status register. Ie. we clear them from ISR once it's certain - * IER won't allow them to reassert. */ - PORTAL_IRQ_LOCK(p, irqflags); - bits &= BM_PIRQ_VISIBLE; - clear_bits(bits, &p->irq_sources); - bm_isr_enable_write(&p->p, p->irq_sources); - ier = bm_isr_enable_read(&p->p); - /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a - * data-dependency, ie. to protect against re-ordering. 
*/ - bm_isr_status_clear(&p->p, ~ier); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return 0; -} -EXPORT_SYMBOL(bman_irqsource_remove); - -#ifndef __rtems__ -const cpumask_t *bman_affine_cpus(void) -{ - return &affine_mask; -} -EXPORT_SYMBOL(bman_affine_cpus); -#endif /* __rtems__ */ - -u32 bman_poll_slow(void) -{ - struct bman_portal *p = get_poll_portal(); - u32 ret; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (unlikely(p->sharing_redirect)) - ret = (u32)-1; - else -#endif /* __rtems__ */ -#endif - { - u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; - - ret = __poll_portal_slow(p, is); - bm_isr_status_clear(&p->p, ret); - } - put_poll_portal(); - return ret; -} -EXPORT_SYMBOL(bman_poll_slow); - -/* Legacy wrapper */ -void bman_poll(void) -{ - struct bman_portal *p = get_poll_portal(); - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (unlikely(p->sharing_redirect)) - goto done; -#endif /* __rtems__ */ -#endif - if (!(p->slowpoll--)) { - u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; - u32 active = __poll_portal_slow(p, is); - - if (active) - p->slowpoll = SLOW_POLL_BUSY; - else - p->slowpoll = SLOW_POLL_IDLE; - } -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ -done: -#endif /* __rtems__ */ -#endif - put_poll_portal(); -} -EXPORT_SYMBOL(bman_poll); - -static const u32 zero_thresholds[4] = {0, 0, 0, 0}; - -struct bman_pool *bman_new_pool(const struct bman_pool_params *params) -{ - struct bman_pool *pool = NULL; - u32 bpid; - - if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { - if (bman_alloc_bpid(&bpid)) - return NULL; - } else { - if (params->bpid >= bman_pool_max) - return NULL; - bpid = params->bpid; - } -#ifdef CONFIG_FSL_BMAN - if (params->flags & BMAN_POOL_FLAG_THRESH) { - if (bm_pool_set(bpid, params->thresholds)) - goto err; - } -#else - if (params->flags & BMAN_POOL_FLAG_THRESH) - goto err; -#endif - pool = kmalloc(sizeof(*pool), GFP_KERNEL); - if (!pool) - goto err; - pool->sp = NULL; - pool->sp_fill = 0; - pool->params = *params; -#ifdef CONFIG_FSL_DPA_CHECKING - atomic_set(&pool->in_use, 1); -#endif - if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) - pool->params.bpid = bpid; - if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { - pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, - GFP_KERNEL); - if (!pool->sp) - goto err; - } - if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { - struct bman_portal *p = get_affine_portal(); - - if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { - pr_err("Depletion events disabled for bpid %d\n", bpid); - goto err; - } - depletion_link(p, pool); - put_affine_portal(); - } - return pool; -err: -#ifdef CONFIG_FSL_BMAN - if (params->flags & BMAN_POOL_FLAG_THRESH) - bm_pool_set(bpid, zero_thresholds); -#endif - if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) - bman_release_bpid(bpid); - if (pool) { - kfree(pool->sp); - kfree(pool); - } - return NULL; -} -EXPORT_SYMBOL(bman_new_pool); - -void bman_free_pool(struct bman_pool *pool) -{ -#ifdef CONFIG_FSL_BMAN - if (pool->params.flags & BMAN_POOL_FLAG_THRESH) - bm_pool_set(pool->params.bpid, zero_thresholds); -#endif - if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) - depletion_unlink(pool); - if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) { - if (pool->sp_fill) - pr_err("Stockpile not flushed, has %u in bpid %u.\n", - pool->sp_fill, pool->params.bpid); - kfree(pool->sp); - pool->sp = NULL; - pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE; - } - if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID) - 
bman_release_bpid(pool->params.bpid); - kfree(pool); -} -EXPORT_SYMBOL(bman_free_pool); - -const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) -{ - return &pool->params; -} -EXPORT_SYMBOL(bman_get_params); - -static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) -{ - if (avail) - bm_rcr_cce_prefetch(&p->p); - else - bm_rcr_cce_update(&p->p); -} - -int bman_rcr_is_empty(void) -{ - __maybe_unused unsigned long irqflags; - struct bman_portal *p = get_affine_portal(); - u8 avail; - - PORTAL_IRQ_LOCK(p, irqflags); - update_rcr_ci(p, 0); - avail = bm_rcr_get_fill(&p->p); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return avail == 0; -} -EXPORT_SYMBOL(bman_rcr_is_empty); - -static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p, -#ifdef FSL_DPA_CAN_WAIT - __maybe_unused struct bman_pool *pool, -#endif - __maybe_unused unsigned long *irqflags, - __maybe_unused u32 flags) -{ - struct bm_rcr_entry *r; - u8 avail; - - *p = get_affine_portal(); - PORTAL_IRQ_LOCK(*p, (*irqflags)); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && - (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { - if ((*p)->rcri_owned) { - PORTAL_IRQ_UNLOCK(*p, (*irqflags)); - put_affine_portal(); - return NULL; - } - (*p)->rcri_owned = pool; - } -#endif - avail = bm_rcr_get_avail(&(*p)->p); - if (avail < 2) - update_rcr_ci(*p, avail); - r = bm_rcr_start(&(*p)->p); - if (unlikely(!r)) { -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && - (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) - (*p)->rcri_owned = NULL; -#endif - PORTAL_IRQ_UNLOCK(*p, (*irqflags)); - put_affine_portal(); - } - return r; -} - -#ifdef FSL_DPA_CAN_WAIT -static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, - struct bman_pool *pool, - __maybe_unused unsigned long *irqflags, - u32 flags) -{ - struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); - - if (!rcr) - bm_rcr_set_ithresh(&(*p)->p, 1); - return rcr; -} - -static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, - struct bman_pool *pool, - __maybe_unused unsigned long *irqflags, - u32 flags) -{ - struct bm_rcr_entry *rcr; -#ifndef FSL_DPA_CAN_WAIT_SYNC - pool = NULL; -#endif -#ifndef __rtems__ - if (flags & BMAN_RELEASE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (rcr = __wait_rel_start(p, pool, irqflags, flags))); - else -#endif /* __rtems__ */ - wait_event(affine_queue, - (rcr = __wait_rel_start(p, pool, irqflags, flags))); - return rcr; -} -#endif - -/* to facilitate better copying of bufs into the ring without either (a) copying - * noise into the first byte (prematurely triggering the command), nor (b) being - * very inefficient by copying small fields using read-modify-write */ -struct overlay_bm_buffer { - u32 first; - u32 second; -}; - -static inline int __bman_release(struct bman_pool *pool, - const struct bm_buffer *bufs, u8 num, u32 flags) -{ - struct bman_portal *p; - struct bm_rcr_entry *r; - struct overlay_bm_buffer *o_dest; - struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0]; - __maybe_unused unsigned long irqflags; - u32 i = num - 1; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & BMAN_RELEASE_FLAG_WAIT) - r = wait_rel_start(&p, pool, &irqflags, flags); - else - r = try_rel_start(&p, pool, &irqflags, flags); -#else - r = try_rel_start(&p, &irqflags, flags); -#endif - if (!r) - return -EBUSY; - /* We can copy all but the first entry, as this can trigger badness - * with the valid-bit. 
Use the overlay to mask the verb byte. */ - o_dest = (struct overlay_bm_buffer *)&r->bufs[0]; - o_dest->first = (o_src->first & 0x0000ffff) | - (((u32)pool->params.bpid << 16) & 0x00ff0000); - o_dest->second = o_src->second; - if (i) - copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); - bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | - (num & BM_RCR_VERB_BUFCOUNT_MASK)); -#ifdef FSL_DPA_CAN_WAIT_SYNC - /* if we wish to sync we need to set the threshold after h/w sees the - * new ring entry. As we're mixing cache-enabled and cache-inhibited - * accesses, this requires a heavy-weight sync. */ - if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && - (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { - hwsync(); - bm_rcr_set_ithresh(&p->p, 1); - } -#endif - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && - (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & BMAN_RELEASE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->rcri_owned != pool)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->rcri_owned != pool)); - } -#endif - return 0; -} - -int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, - u32 flags) -{ - int ret = 0; - -#ifdef CONFIG_FSL_DPA_CHECKING - if (!num || (num > 8)) - return -EINVAL; - if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE) - return -EINVAL; -#endif - /* Without stockpile, this API is a pass-through to the h/w operation */ - if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) - return __bman_release(pool, bufs, num, flags); -#ifdef CONFIG_FSL_DPA_CHECKING - if (!atomic_dec_and_test(&pool->in_use)) { - pr_crit("Parallel attempts to enter bman_released() detected."); - panic("only one instance of bman_released/acquired allowed"); - } -#endif - /* This needs some explanation. Adding the given buffers may take the - * stockpile over the threshold, but in fact the stockpile may already - * *be* over the threshold if a previous release-to-hw attempt had - * failed. So we have 3 cases to cover; - * 1. we add to the stockpile and don't hit the threshold, - * 2. we add to the stockpile, hit the threshold and release-to-hw, - * 3. we have to release-to-hw before adding to the stockpile - * (not enough room in the stockpile for case 2). - * Our constraints on thresholds guarantee that in case 3, there must be - * at least 8 bufs already in the stockpile, so all release-to-hw ops - * are for 8 bufs. Despite all this, the API must indicate whether the - * given buffers were taken off the caller's hands, irrespective of - * whether a release-to-hw was attempted. */ - while (num) { - /* Add buffers to stockpile if they fit */ - if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) { - copy_words(pool->sp + pool->sp_fill, bufs, - sizeof(struct bm_buffer) * num); - pool->sp_fill += num; - num = 0; /* --> will return success no matter what */ - } - /* Do hw op if hitting the high-water threshold */ - if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) { - ret = __bman_release(pool, - pool->sp + (pool->sp_fill - 8), 8, flags); - if (ret) { - ret = (num ? 
ret : 0); - goto release_done; - } - pool->sp_fill -= 8; - } - } -release_done: -#ifdef CONFIG_FSL_DPA_CHECKING - atomic_inc(&pool->in_use); -#endif - return ret; -} -EXPORT_SYMBOL(bman_release); - -static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, - u8 num) -{ - struct bman_portal *p = get_affine_portal(); - struct bm_mc_command *mcc; - struct bm_mc_result *mcr; - __maybe_unused unsigned long irqflags; - int ret; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = bm_mc_start(&p->p); - mcc->acquire.bpid = pool->params.bpid; - bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | - (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); - while (!(mcr = bm_mc_result(&p->p))) - cpu_relax(); - ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; - if (bufs) - copy_words(&bufs[0], &mcr->acquire.bufs[0], - num * sizeof(bufs[0])); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (ret != num) - ret = -ENOMEM; - return ret; -} - -int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, - u32 flags) -{ - int ret = 0; - -#ifdef CONFIG_FSL_DPA_CHECKING - if (!num || (num > 8)) - return -EINVAL; - if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE) - return -EINVAL; -#endif - /* Without stockpile, this API is a pass-through to the h/w operation */ - if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) - return __bman_acquire(pool, bufs, num); -#ifdef CONFIG_FSL_DPA_CHECKING - if (!atomic_dec_and_test(&pool->in_use)) { - pr_crit("Parallel attempts to enter bman_acquire() detected."); - panic("only one instance of bman_released/acquired allowed"); - } -#endif - /* Only need a h/w op if we'll hit the low-water thresh */ - if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) && - (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) { - /* refill stockpile with max amount, but if max amount - * isn't available, try amount the user wants */ - int bufcount = 8; - - ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount); - if (ret < 0 && bufcount != num) { - bufcount = num; - /* Maybe buffer pool has less than 8 */ - ret = __bman_acquire(pool, pool->sp + pool->sp_fill, - bufcount); - } - if (ret < 0) - goto hw_starved; - DPA_ASSERT(ret == bufcount); - pool->sp_fill += bufcount; - } else { -hw_starved: - if (pool->sp_fill < num) { - ret = -ENOMEM; - goto acquire_done; - } - } - copy_words(bufs, pool->sp + (pool->sp_fill - num), - sizeof(struct bm_buffer) * num); - pool->sp_fill -= num; - ret = num; -acquire_done: -#ifdef CONFIG_FSL_DPA_CHECKING - atomic_inc(&pool->in_use); -#endif - return ret; -} -EXPORT_SYMBOL(bman_acquire); - -int bman_flush_stockpile(struct bman_pool *pool, u32 flags) -{ - u8 num; - int ret; - - while (pool->sp_fill) { - num = ((pool->sp_fill > 8) ? 
8 : pool->sp_fill); - ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), - num, flags); - if (ret) - return ret; - pool->sp_fill -= num; - } - return 0; -} -EXPORT_SYMBOL(bman_flush_stockpile); - -int bman_query_pools(struct bm_pool_state *state) -{ - struct bman_portal *p = get_affine_portal(); - struct bm_mc_result *mcr; - __maybe_unused unsigned long irqflags; - - PORTAL_IRQ_LOCK(p, irqflags); - bm_mc_start(&p->p); - bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); - while (!(mcr = bm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); - *state = mcr->query; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return 0; -} -EXPORT_SYMBOL(bman_query_pools); - -#ifdef CONFIG_FSL_BMAN -u32 bman_query_free_buffers(struct bman_pool *pool) -{ - return bm_pool_free_buffers(pool->params.bpid); -} -EXPORT_SYMBOL(bman_query_free_buffers); - -int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds) -{ - u32 bpid; - - bpid = bman_get_params(pool)->bpid; - - return bm_pool_set(bpid, thresholds); -} -EXPORT_SYMBOL(bman_update_pool_thresholds); -#endif - -int bman_shutdown_pool(u32 bpid) -{ - struct bman_portal *p = get_affine_portal(); - __maybe_unused unsigned long irqflags; - int ret; - - PORTAL_IRQ_LOCK(p, irqflags); - ret = bm_shutdown_pool(&p->p, bpid); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(bman_shutdown_pool); - -const struct bm_portal_config * -bman_get_bm_portal_config(const struct bman_portal *portal) -{ -#ifndef __rtems__ - return portal->sharing_redirect ? NULL : portal->config; -#else /* __rtems__ */ - return portal->config; -#endif /* __rtems__ */ -} diff --git a/linux/drivers/soc/fsl/qbman/bman_ccsr.c b/linux/drivers/soc/fsl/qbman/bman_ccsr.c new file mode 100644 index 00000000..1df1d9c8 --- /dev/null +++ b/linux/drivers/soc/fsl/qbman/bman_ccsr.c @@ -0,0 +1,392 @@ +#include <machine/rtems-bsd-kernel-space.h> + +#include <rtems/bsd/local/opt_dpaa.h> + +/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_ERR_ISDR		0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
+	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
+	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low-water-mark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+	return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+		    is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
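
The REG_FBPR_AR write in bm_set_memory() above is easy to misread: the register takes a power-of-two exponent, not a byte count, and the driver stores ilog2(size) - 1. The RTEMS build below reserves a 16 MiB (16777216-byte) FBPR region, which is therefore programmed as 23. A minimal stand-alone sketch of that encoding; fbpr_ar_encode() and fbpr_ar_decode() are hypothetical names for illustration, not driver API:

#include <assert.h>
#include <stdint.h>

/* Encode a power-of-two FBPR region size the way bm_set_memory() does:
 * the AR register field holds ilog2(size) - 1. Hypothetical helpers,
 * for illustration only.
 */
static uint32_t fbpr_ar_encode(uint32_t size)
{
	/* same range/alignment constraints the driver asserts */
	assert(size >= 4096 && (size & (size - 1)) == 0);
	return (31 - (uint32_t)__builtin_clz(size)) - 1; /* ilog2(size) - 1 */
}

static uint64_t fbpr_ar_decode(uint32_t ar)
{
	return 1ULL << (ar + 1);
}

int main(void)
{
	assert(fbpr_ar_encode(16777216) == 23);	/* 16 MiB: ilog2 = 24, AR = 23 */
	assert(fbpr_ar_decode(23) == 16777216);
	return 0;
}

The same power-of-two convention is what the base-address assertion enforces: a region of size 2^n must be 2^n-aligned, hence DPAA_ASSERT(!(ba & (size - 1))).
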
+#ifndef __rtems__
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
+#define fbpr_a ((uintptr_t)&fbpr[0])
+#define fbpr_sz sizeof(fbpr)
+#endif /* __rtems__ */
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = bm_ccsr_in(REG_ERR_IER);
+	isr_val = bm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = bm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+#ifndef __rtems__
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    bman_hwerr_txts[i].txt);
+#endif /* __rtems__ */
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				/* Re-arm error capture registers */
+				bm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+	int ret, err_irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+#ifdef __rtems__
+	struct resource res_storage;
+#endif /* __rtems__ */
+	struct resource *res;
+	u16 id, bm_pool_cnt;
+	u8 major, minor;
+
+#ifndef __rtems__
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+#else /* __rtems__ */
+	res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
+#endif /* __rtems__ */
+	if (!res) {
+		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+	if (!bm_ccsr_start)
+		return -ENXIO;
+
+	bm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		bman_ip_rev = BMAN_REV10;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else if (major == 2 && minor == 0) {
+		bman_ip_rev = BMAN_REV20;
+		bm_pool_cnt = 8;
+	} else if (major == 2 && minor == 1) {
+		bman_ip_rev = BMAN_REV21;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else {
+		dev_err(dev, "Unknown BMan version:%04x,%02x,%02x\n",
+			id, major, minor);
+		return -ENODEV;
+	}
+
+	bm_set_memory(fbpr_a, fbpr_sz);
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+			       dev);
+	if (ret) {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+	/*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init).
+ */ + bm_ccsr_out(REG_ERR_ISR, 0xffffffff); + /* Enable Error Interrupts */ + bm_ccsr_out(REG_ERR_IER, 0xffffffff); + + bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc"); + if (IS_ERR(bm_bpalloc)) { + ret = PTR_ERR(bm_bpalloc); + dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret); + return ret; + } + + /* seed BMan resource pool */ + ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1); + if (ret) { + dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n", + 0, bm_pool_cnt - 1, ret); + return ret; + } + + return 0; +}; + +#ifndef __rtems__ +static const struct of_device_id fsl_bman_ids[] = { + { + .compatible = "fsl,bman", + }, + {} +}; + +static struct platform_driver fsl_bman_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = fsl_bman_ids, + .suppress_bind_attrs = true, + }, + .probe = fsl_bman_probe, +}; + +builtin_platform_driver(fsl_bman_driver); +#else /* __rtems__ */ +#include <sys/types.h> +#include <sys/kernel.h> +#include <rtems.h> +#include <bsp/fdt.h> +#include <bsp/qoriq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +static struct bm_portal_config bman_configs[NR_CPUS]; + +u16 bman_pool_max; + +SYSINIT_REFERENCE(irqs); + +static void +bman_sysinit(void) +{ + const char *fdt = bsp_fdt_get(); + struct device_node dn; + struct platform_device ofdev = { + .dev = { + .of_node = &dn, + .base = (uintptr_t)&qoriq + } + }; + const char *name; + int cpu_count = (int)rtems_get_processor_count(); + int cpu; + int ret; + int node; + int parent; + + qoriq_reset_qman_and_bman(); + qoriq_clear_ce_portal(&qoriq_bman_portal[0][0], + sizeof(qoriq_bman_portal[0])); + qoriq_clear_ci_portal(&qoriq_bman_portal[1][0], + sizeof(qoriq_bman_portal[1])); + + memset(&dn, 0, sizeof(dn)); + + name = "fsl,bman"; + node = fdt_node_offset_by_compatible(fdt, 0, name); + if (node < 0) + panic("bman: no bman in FDT"); + + dn.full_name = name; + dn.offset = node; + ret = fsl_bman_probe(&ofdev); + if (ret != 0) + panic("bman: probe failed"); + + name = "fsl,bman-portal"; + node = fdt_node_offset_by_compatible(fdt, 0, name); + if (node < 0) + panic("bman: no portals in FDT"); + parent = fdt_parent_offset(fdt, node); + if (parent < 0) + panic("bman: no parent of portals in FDT"); + node = fdt_first_subnode(fdt, parent); + + dn.full_name = name; + dn.offset = node; + + for (cpu = 0; cpu < cpu_count; ++cpu) { + struct bm_portal_config *pcfg = &bman_configs[cpu]; + struct bman_portal *portal; + struct resource res; + + if (node < 0) + panic("bman: missing portal in FDT"); + + ret = of_address_to_resource(&dn, 0, &res); + if (ret != 0) + panic("bman: no portal CE address"); + pcfg->addr_virt[0] = (__iomem void *) + ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start); + BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >= + (uintptr_t)&qoriq_bman_portal[0][0]); + BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] < + (uintptr_t)&qoriq_bman_portal[1][0]); + + ret = of_address_to_resource(&dn, 1, &res); + if (ret != 0) + panic("bman: no portal CI address"); + pcfg->addr_virt[1] = (__iomem void *) + ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start); + BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >= + (uintptr_t)&qoriq_bman_portal[1][0]); + BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] < + (uintptr_t)&qoriq_bman_portal[2][0]); + + pcfg->irq = of_irq_to_resource(&dn, 0, NULL); + if (pcfg->irq == NO_IRQ) + panic("bman: no portal interrupt"); + + pcfg->cpu = cpu; + + portal = bman_create_affine_portal(pcfg); + if (portal == NULL) + panic("bman: cannot create 
portal"); + + bman_p_irqsource_add(portal, BM_PIRQ_RCRI); + + node = fdt_next_subnode(fdt, node); + dn.offset = node; + } +} +SYSINIT(bman, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL); +#endif /* __rtems__ */ diff --git a/linux/drivers/soc/fsl/qbman/bman_portal.c b/linux/drivers/soc/fsl/qbman/bman_portal.c deleted file mode 100644 index f9fd022c..00000000 --- a/linux/drivers/soc/fsl/qbman/bman_portal.c +++ /dev/null @@ -1,399 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "bman_priv.h" - -/* - * Global variables of the max portal/pool number this BMan version supported - */ -u16 bman_ip_rev; -EXPORT_SYMBOL(bman_ip_rev); - -u16 bman_pool_max; -EXPORT_SYMBOL(bman_pool_max); - -/* After initialising cpus that own shared portal configs, we cache the - * resulting portals (ie. not just the configs) in this array. Then we - * initialise slave cpus that don't have their own portals, redirecting them to - * portals from this cache in a round-robin assignment. 
*/ -static struct bman_portal *shared_portals[NR_CPUS] __initdata; -static int num_shared_portals __initdata; -static int shared_portals_idx __initdata; - -static LIST_HEAD(unused_pcfgs); -static void *affine_bportals[NR_CPUS]; - -#ifndef __rtems__ -static const int flags[] = {0, _PAGE_GUARDED | _PAGE_NO_CACHE}; -#else /* __rtems__ */ -static const int flags[] = {0, 0}; -#endif /* __rtems__ */ - -static struct bm_portal_config * __init get_pcfg(struct list_head *list) -{ - struct bm_portal_config *pcfg; - - if (list_empty(list)) - return NULL; - pcfg = list_entry(list->prev, struct bm_portal_config, list); - list_del(&pcfg->list); - - return pcfg; -} - -static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg) -{ - struct bman_portal *p = bman_create_affine_portal(pcfg); - - if (p) { -#ifdef CONFIG_FSL_DPA_PIRQ_SLOW - bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN); -#endif - pr_info("Portal %sinitialised, cpu %d\n", - pcfg->public_cfg.is_shared ? "(shared) " : "", - pcfg->public_cfg.cpu); - affine_bportals[pcfg->public_cfg.cpu] = p; - } else - pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu); - - return p; -} - -static void __init init_slave(int cpu) -{ - struct bman_portal *p; - - p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); - if (!p) - pr_err("Slave portal failure on cpu %d\n", cpu); - else - pr_info("Portal %s initialised, cpu %d\n", "(slave) ", cpu); - if (shared_portals_idx >= num_shared_portals) - shared_portals_idx = 0; - affine_bportals[cpu] = p; -} - -/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the - * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes - * and/or ranges of indexes, with each being optionally prefixed by "s" to - * explicitly mark it or them for sharing. - * Eg; - * bportals=s0,1-3,s4 - * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared" - * portals, and any remaining cpus share the portals that are assigned to cpus 0 - * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share - * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu - * 0's portal.) 
*/ -static struct cpumask want_unshared __initdata; /* cpus requested without "s" */ -static struct cpumask want_shared __initdata; /* cpus requested with "s" */ - -static int __init parse_bportals(char *str) -{ - return parse_portals_bootarg(str, &want_shared, &want_unshared, - "bportals"); -} -__setup("bportals=", parse_bportals); - -static void __cold bman_offline_cpu(unsigned int cpu) -{ - struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu]; - const struct bm_portal_config *pcfg; - - if (p) { - pcfg = bman_get_bm_portal_config(p); - if (pcfg) - irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); - } -} - -#ifdef CONFIG_HOTPLUG_CPU -static void __cold bman_online_cpu(unsigned int cpu) -{ - struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu]; - const struct bm_portal_config *pcfg; - - if (p) { - pcfg = bman_get_bm_portal_config(p); - if (pcfg) - irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); - } -} - -static int __cold bman_hotplug_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - bman_online_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - bman_offline_cpu(cpu); - } - - return NOTIFY_OK; -} - -static struct notifier_block bman_hotplug_cpu_notifier = { - .notifier_call = bman_hotplug_cpu_callback, -}; -#endif /* CONFIG_HOTPLUG_CPU */ - -static int __cold bman_portal_probe(struct platform_device *of_dev) -{ - struct device *dev = &of_dev->dev; - struct device_node *node = dev->of_node; - struct bm_portal_config *pcfg; - int i, irq, ret; - - if (!of_device_is_available(node)) - return -ENODEV; - - if (of_device_is_compatible(node, "fsl,bman-portal-1.0") || - of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) { - bman_ip_rev = BMAN_REV10; - bman_pool_max = 64; - } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") || - of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) { - bman_ip_rev = BMAN_REV20; - bman_pool_max = 8; - } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") || - of_device_is_compatible(node, "fsl,bman-portal-2.1.1") || - of_device_is_compatible(node, "fsl,bman-portal-2.1.2") || - of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) { - bman_ip_rev = BMAN_REV21; - bman_pool_max = 64; - } - - pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); - if (!pcfg) { - dev_err(dev, "Can't allocate portal config\n"); - return -ENOMEM; - } - - for (i = DPA_PORTAL_CE; i <= DPA_PORTAL_CI; i++) { - ret = of_address_to_resource(node, i, pcfg->addr_phys + i); - if (ret < 0) { - dev_err(dev, "Can't get %s property 'reg::%d'\n", - node->full_name, i); - return ret; - } - ret = devm_request_resource(dev, &iomem_resource, - pcfg->addr_phys + i); - if (ret < 0) - return ret; - pcfg->addr_virt[i] = devm_ioremap_prot(dev, - pcfg->addr_phys[i].start, - resource_size(pcfg->addr_phys + i), - flags[i]); - if (!pcfg->addr_virt[i]) - return -ENXIO; - } - - pcfg->public_cfg.cpu = -1; - - irq = irq_of_parse_and_map(node, 0); - if (irq == NO_IRQ) { - dev_err(dev, "Can't get %s property 'interrupts'\n", - node->full_name); - return -ENXIO; - } - pcfg->public_cfg.irq = irq; - - bman_depletion_fill(&pcfg->public_cfg.mask); - - list_add_tail(&pcfg->list, &unused_pcfgs); - - return 0; -}; - -static int __cold bman_portal_remove(struct platform_device *of_dev) -{ - return 0; -}; - -static const struct of_device_id bman_portal_ids[] = { - { - .compatible = 
"fsl,bman-portal", - }, - {} -}; -MODULE_DEVICE_TABLE(of, bman_portal_ids); - -static struct platform_driver bman_portal_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = bman_portal_ids, - }, - .probe = bman_portal_probe, - .remove = bman_portal_remove, -}; - -static int __init bman_portal_driver_register(struct platform_driver *drv) -{ - int _errno; - struct cpumask slave_cpus; - struct cpumask unshared_cpus = *cpu_none_mask; - struct cpumask shared_cpus = *cpu_none_mask; - LIST_HEAD(unshared_pcfgs); - LIST_HEAD(shared_pcfgs); - struct bm_portal_config *pcfg; - struct bman_portal *p; - int cpu; - struct cpumask offline_cpus; - - _errno = platform_driver_register(drv); - if (_errno < 0) - return _errno; - -/* Initialise the BMan driver. The meat of this function deals with portals. The - * following describes the flow of portal-handling, the code "steps" refer to - * this description; - * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with - * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not - * bound). - * 2. The "want_shared" and "want_unshared" lists (as filled by the - * "bportals=[...]" bootarg) are processed, allocating portals and assigning - * them to cpus, placing them in the relevant list and setting ::cpu as - * appropriate. If no "bportals" bootarg was present, the defaut is to try to - * assign portals to all online cpus at the time of driver initialisation. - * Any failure to allocate portals (when parsing the "want" lists or when - * using default behaviour) will be silently tolerated (the "fixup" logic in - * step 3 will determine what happens in this case). - * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for - * sharing and sharing is required (because not all cpus have been assigned - * portals), then one portal will marked for sharing. Conversely if no - * sharing is required, any portals marked for sharing will not be shared. It - * may be that sharing occurs when it wasn't expected, if portal allocation - * failed to honour all the requested assignments (including the default - * assignments if no bootarg is present). - * 4. Unshared portals are initialised on their respective cpus. - * 5. Shared portals are initialised on their respective cpus. - * 6. Each remaining cpu is initialised to slave to one of the shared portals, - * which are selected in a round-robin fashion. - */ - /* Step 2. */ - for_each_possible_cpu(cpu) { - if (cpumask_test_cpu(cpu, &want_shared)) { - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &shared_pcfgs); - cpumask_set_cpu(cpu, &shared_cpus); - } - if (cpumask_test_cpu(cpu, &want_unshared)) { - if (cpumask_test_cpu(cpu, &shared_cpus)) - continue; - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &unshared_pcfgs); - cpumask_set_cpu(cpu, &unshared_cpus); - } - } - if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { - /* Default, give an unshared portal to each online cpu */ - for_each_possible_cpu(cpu) { - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &unshared_pcfgs); - cpumask_set_cpu(cpu, &unshared_cpus); - } - } - /* Step 3. 
*/ - cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); - cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); - if (cpumask_empty(&slave_cpus)) { - /* No sharing required */ - if (!list_empty(&shared_pcfgs)) { - /* Migrate "shared" to "unshared" */ - cpumask_or(&unshared_cpus, &unshared_cpus, - &shared_cpus); - cpumask_clear(&shared_cpus); - list_splice_tail(&shared_pcfgs, &unshared_pcfgs); - INIT_LIST_HEAD(&shared_pcfgs); - } - } else { - /* Sharing required */ - if (list_empty(&shared_pcfgs)) { - /* Migrate one "unshared" to "shared" */ - pcfg = get_pcfg(&unshared_pcfgs); - if (!pcfg) { - pr_crit("No portals available!\n"); - return 0; - } - cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); - cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); - list_add_tail(&pcfg->list, &shared_pcfgs); - } - } - /* Step 4. */ - list_for_each_entry(pcfg, &unshared_pcfgs, list) { - pcfg->public_cfg.is_shared = 0; - p = init_pcfg(pcfg); - } - /* Step 5. */ - list_for_each_entry(pcfg, &shared_pcfgs, list) { - pcfg->public_cfg.is_shared = 1; - p = init_pcfg(pcfg); - if (p) - shared_portals[num_shared_portals++] = p; - } - /* Step 6. */ - if (!cpumask_empty(&slave_cpus)) - for_each_cpu(cpu, &slave_cpus) - init_slave(cpu); - pr_info("Portals initialised\n"); - cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); - for_each_cpu(cpu, &offline_cpus) - bman_offline_cpu(cpu); - -#ifdef CONFIG_HOTPLUG_CPU - register_hotcpu_notifier(&bman_hotplug_cpu_notifier); -#endif - - bman_seed_bpid_range(0, bman_pool_max); - - return 0; -} - -module_driver(bman_portal_driver, - bman_portal_driver_register, platform_driver_unregister); diff --git a/linux/drivers/soc/fsl/qbman/bman_priv.h b/linux/drivers/soc/fsl/qbman/bman_priv.h index e87f17a3..e8ba0be5 100644 --- a/linux/drivers/soc/fsl/qbman/bman_priv.h +++ b/linux/drivers/soc/fsl/qbman/bman_priv.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,103 +34,49 @@ #include <soc/fsl/bman.h> -/* used by CCSR and portal interrupt code */ -enum bm_isr_reg { - bm_isr_status = 0, - bm_isr_enable = 1, - bm_isr_disable = 2, - bm_isr_inhibit = 3 -}; - -#ifdef CONFIG_FSL_BMAN -/* Set depletion thresholds associated with a buffer pool. Requires that the - * operating system have access to BMan CCSR (ie. compiled in support and - * run-time access courtesy of the device-tree). */ -int bm_pool_set(u32 bpid, const u32 *thresholds); -#define BM_POOL_THRESH_SW_ENTER 0 -#define BM_POOL_THRESH_SW_EXIT 1 -#define BM_POOL_THRESH_HW_ENTER 2 -#define BM_POOL_THRESH_HW_EXIT 3 - -/* Read the free buffer count for a given buffer */ -u32 bm_pool_free_buffers(u32 bpid); +/* Portal processing (interrupt) sources */ +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ -#endif /* CONFIG_FSL_BMAN */ - -#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE) /* Revision info (for errata and feature handling) */ #define BMAN_REV10 0x0100 #define BMAN_REV20 0x0200 #define BMAN_REV21 0x0201 extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */ +extern struct gen_pool *bm_bpalloc; + struct bm_portal_config { - /* Corenet portal addresses; - * [0]==cache-enabled, [1]==cache-inhibited. */ - __iomem void *addr_virt[2]; + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. 
+ */
+	void __iomem *addr_virt[2];
 #ifndef __rtems__
-	struct resource addr_phys[2];
 	/* Allow these to be joined in lists */
 	struct list_head list;
 #endif /* __rtems__ */
+	struct device *dev;
 	/* User-visible portal configuration settings */
-	struct bman_portal_config public_cfg;
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
 };
 
-/* Hooks from bman_driver.c in to bman_high.c */
-struct bman_portal *bman_create_portal(
-					struct bman_portal *portal,
-					const struct bm_portal_config *config);
 struct bman_portal *bman_create_affine_portal(
 			const struct bm_portal_config *config);
-struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
-								int cpu);
-void bman_destroy_portal(struct bman_portal *bm);
-
-const struct bm_portal_config *bman_destroy_affine_portal(void);
-
-/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
- * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
- * might fail (if the buffer pool is depleted). So this value provides some
- * "stagger" in that the bman_acquire() function will only fail if lots of bufs
- * are requested at once or if h/w has been tested a couple of times without
- * luck. The _HIGH value: when bman_release() is called and the stockpile
- * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
- * the release ring is full). So this value provides some "stagger" so that
- * ring-access is retried a couple of times prior to the API returning a
- * failure. The following *must* be true;
- * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
- * (to avoid thrashing)
- * BMAN_STOCKPILE_SZ >= 16
- * (as the release logic expects to either send 8 buffers to hw prior to
- * adding the given buffers to the stockpile or add the buffers to the
- * stockpile before sending 8 to hw, as the API must be an all-or-nothing
- * success/fail.)
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which
+ * the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
 */
-#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
-#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
-#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
 
-/*************************************************/
-/* BMan s/w corenet portal, low-level i/face */
-/*************************************************/
-
-/* Used by all portal interrupt registers except 'inhibit'
+/*
+ * Used by all portal interrupt registers except 'inhibit'.
 * This mask contains all the "irqsource" bits visible to API users
 */
-#define BM_PIRQ_VISIBLE	(BM_PIRQ_RCRI | BM_PIRQ_BSCN)
-
-/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
- * the disable register" rather than "disable the ability to write". 
*/ -#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status) -#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m) -#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable) -#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v) -#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable) -#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v) -#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1) -#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0) +#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI const struct bm_portal_config * bman_get_bm_portal_config(const struct bman_portal *portal); -#endif /* CONFIG_FSL_BMAN_PORTAL* */ diff --git a/linux/drivers/soc/fsl/qbman/bman_test.c b/linux/drivers/soc/fsl/qbman/bman_test.c deleted file mode 100644 index 154b7374..00000000 --- a/linux/drivers/soc/fsl/qbman/bman_test.c +++ /dev/null @@ -1,60 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "bman_test.h" - -MODULE_AUTHOR("Geoff Thorpe"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("BMan testing"); - -static int test_init(void) -{ -#ifdef CONFIG_FSL_BMAN_TEST_API - int loop = 1; - - while (loop--) - bman_test_api(); -#endif -#ifdef CONFIG_FSL_BMAN_TEST_THRESH - bman_test_thresh(); -#endif - return 0; -} - -static void test_exit(void) -{ -} - -module_init(test_init); -module_exit(test_exit); diff --git a/linux/drivers/soc/fsl/qbman/bman_test.h b/linux/drivers/soc/fsl/qbman/bman_test.h index 9c51c38b..037ed342 100644 --- a/linux/drivers/soc/fsl/qbman/bman_test.h +++ b/linux/drivers/soc/fsl/qbman/bman_test.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. 
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -30,5 +30,6 @@
 
 #include "bman_priv.h"
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 void bman_test_api(void);
-void bman_test_thresh(void);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_api.c b/linux/drivers/soc/fsl/qbman/bman_test_api.c
index 5585bdf6..f3e5ec03 100644
--- a/linux/drivers/soc/fsl/qbman/bman_test_api.c
+++ b/linux/drivers/soc/fsl/qbman/bman_test_api.c
@@ -2,7 +2,7 @@
 
 #include <rtems/bsd/local/opt_dpaa.h>
 
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -34,33 +34,15 @@
 
 #include "bman_test.h"
 
-/*************/
-/* constants */
-/*************/
-
-#define PORTAL_OPAQUE	((void *)0xf00dbeef)
-#define POOL_OPAQUE	((void *)0xdeadabba)
 #define NUM_BUFS	93
 #define LOOPS	3
 #define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
 
-/***************/
-/* global vars */
-/***************/
-
 static struct bman_pool *pool;
-static int depleted;
 static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
 static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
 static int bufs_received;
 
-/* Predeclare the callback so we can instantiate pool parameters */
-static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
-
-/**********************/
-/* internal functions */
-/******************/
-
 static void bufs_init(void)
 {
 	int i;
@@ -72,9 +54,10 @@ static void bufs_init(void)
 
 static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
 {
-	if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
+	if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
 
-		/* On SoCs with BMan revison 2.0, BMan only respects the 40
+		/*
+		 * On SoCs with BMan revision 2.0, BMan only respects the 40
 		 * LS-bits of buffer addresses, masking off the upper 8-bits on
 		 * release commands. The API provides for 48-bit addresses
 		 * because some SoCs support all 48-bits. When generating
@@ -84,11 +67,11 @@ static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
 		 * don't match), or we need to mask the upper 8-bits off when
 		 * comparing. We do the latter.
 		 */
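
The comment above is the key to reading the comparison that follows: on rev 2.x parts only the low 40 address bits survive a release/acquire round trip, so comparing full 48-bit values would report false mismatches. A small stand-alone sketch of the masked comparison; cmp40() is an illustrative name, not driver API:

#include <stdint.h>
#include <stdio.h>

#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFULL	/* low 40 bits survive */

/* Compare two buffer addresses the way bufs_cmp() does on BMan 2.x:
 * mask off the top 8 of the 48 address bits before comparing.
 * Illustrative only.
 */
static int cmp40(uint64_t a, uint64_t b)
{
	a &= BMAN_TOKEN_MASK;
	b &= BMAN_TOKEN_MASK;
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}

int main(void)
{
	/* 0xAB0000001000 comes back from rev 2.0 hardware as 0x000000001000 */
	printf("%d\n", cmp40(0xAB0000001000ULL, 0x000000001000ULL)); /* 0 */
	return 0;
}

Masking both operands keeps the ordering consistent with the unmasked branch used on pre-2.0 hardware, which is why bufs_cmp() only takes this path for BMAN_REV20/BMAN_REV21.
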
-		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
-				< (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
 			return -1;
-		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
-				> (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
 			return 1;
 	} else {
 		if (bm_buffer_get64(a) < bm_buffer_get64(b))
@@ -110,79 +93,63 @@ static void bufs_confirm(void)
 		for (j = 0; j < NUM_BUFS; j++)
 			if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
 				matches++;
-		BUG_ON(matches != 1);
+		WARN_ON(matches != 1);
 	}
 }
 
-/********/
 /* test */
-/********/
-
-static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
-			void *pool_ctx, int __depleted)
-{
-	BUG_ON(__pool != pool);
-	BUG_ON(pool_ctx != POOL_OPAQUE);
-	depleted = __depleted;
-}
-
 void bman_test_api(void)
 {
-	struct bman_pool_params pparams = {
-		.flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
-		.cb = depletion_cb,
-		.cb_ctx = POOL_OPAQUE,
-	};
 	int i, loops = LOOPS;
 
 	bufs_init();
 
-	pr_info(" --- Starting high-level test ---\n");
+	pr_info("%s(): Starting\n", __func__);
 
-	pool = bman_new_pool(&pparams);
-	BUG_ON(!pool);
+	pool = bman_new_pool();
+	if (!pool) {
+		pr_crit("bman_new_pool() failed\n");
+		goto failed;
+	}
 
-	/*******************/
 	/* Release buffers */
-	/*******************/
do_loop:
 	i = 0;
 	while (i < NUM_BUFS) {
-		u32 flags = BMAN_RELEASE_FLAG_WAIT;
 		int num = 8;
 
-		if ((i + num) > NUM_BUFS)
+		if (i + num > NUM_BUFS)
 			num = NUM_BUFS - i;
-		if ((i + num) == NUM_BUFS)
-			flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
-		if (bman_release(pool, bufs_in + i, num, flags))
-			panic("bman_release() failed\n");
+		if (bman_release(pool, bufs_in + i, num)) {
+			pr_crit("bman_release() failed\n");
+			goto failed;
+		}
 		i += num;
 	}
 
-	/*******************/
 	/* Acquire buffers */
-	/*******************/
 	while (i > 0) {
 		int tmp, num = 8;
 
 		if (num > i)
 			num = i;
-		tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
-		BUG_ON(tmp != num);
+		tmp = bman_acquire(pool, bufs_out + i - num, num);
+		WARN_ON(tmp != num);
 		i -= num;
 	}
-	i = bman_acquire(pool, NULL, 1, 0);
-	BUG_ON(i > 0);
+	i = bman_acquire(pool, NULL, 1);
+	WARN_ON(i > 0);
 
 	bufs_confirm();
 
 	if (--loops)
 		goto do_loop;
 
-	/************/
 	/* Clean up */
-	/************/
 	bman_free_pool(pool);
-	pr_info(" --- Finished high-level test ---\n");
+	pr_info("%s(): Finished\n", __func__);
+	return;
+
+failed:
+	WARN_ON(1);
 }
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
deleted file mode 100644
index c0f045be..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
+++ /dev/null
@@ -1,216 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "bman_test.h" - -/* Test constants */ -#define TEST_NUMBUFS 129728 -#define TEST_EXIT 129536 -#define TEST_ENTRY 129024 - -struct affine_test_data { - struct task_struct *t; - int cpu; -#ifndef __rtems__ - int expect_affinity; -#endif /* __rtems__ */ - int drain; - int num_enter; - int num_exit; - struct list_head node; - struct completion wakethread; - struct completion wakeparent; -}; - -static void cb_depletion(struct bman_portal *portal, - struct bman_pool *pool, - void *opaque, - int depleted) -{ - struct affine_test_data *data = opaque; - int c = smp_processor_id(); - - pr_info("%s(): bpid=%d, depleted=%d, cpu=%d, original=%d\n", __func__, - bman_get_params(pool)->bpid, !!depleted, c, data->cpu); - /* We should be executing on the CPU of the thread that owns the pool if - * and that CPU has an affine portal (ie. it isn't slaved). 
*/ -#ifndef __rtems__ - BUG_ON((c != data->cpu) && data->expect_affinity); - BUG_ON((c == data->cpu) && !data->expect_affinity); -#endif /* __rtems__ */ - if (depleted) - data->num_enter++; - else - data->num_exit++; -} - -/* Params used to set up a pool, this also dynamically allocates a BPID */ -static const struct bman_pool_params params_nocb = { - .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH, - .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 } -}; - -/* Params used to set up each cpu's pool with callbacks enabled */ -static struct bman_pool_params params_cb = { - .bpid = 0, /* will be replaced to match pool_nocb */ - .flags = BMAN_POOL_FLAG_DEPLETION, - .cb = cb_depletion -}; - -static struct bman_pool *pool_nocb; -static LIST_HEAD(threads); - -static int affine_test(void *__data) -{ - struct bman_pool *pool; - struct affine_test_data *data = __data; - struct bman_pool_params my_params = params_cb; - - pr_info("Thread %d: starting\n", data->cpu); - /* create the pool */ - my_params.cb_ctx = data; - pool = bman_new_pool(&my_params); - BUG_ON(!pool); - complete(&data->wakeparent); - wait_for_completion(&data->wakethread); - init_completion(&data->wakethread); - - /* if we're the drainer, we get signalled for that */ - if (data->drain) { - struct bm_buffer buf; - int ret; - - pr_info("Thread %d: draining...\n", data->cpu); - do { - ret = bman_acquire(pool, &buf, 1, 0); - } while (ret > 0); - pr_info("Thread %d: draining done.\n", data->cpu); - complete(&data->wakeparent); - wait_for_completion(&data->wakethread); - init_completion(&data->wakethread); - } - - /* cleanup */ - bman_free_pool(pool); - while (!kthread_should_stop()) - cpu_relax(); - pr_info("Thread %d: exiting\n", data->cpu); - return 0; -} - -static struct affine_test_data *start_affine_test(int cpu, int drain) -{ - struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL); - - if (!data) - return NULL; - data->cpu = cpu; -#ifndef __rtems__ - data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus()); -#endif /* __rtems__ */ - data->drain = drain; - data->num_enter = 0; - data->num_exit = 0; - init_completion(&data->wakethread); - init_completion(&data->wakeparent); - list_add_tail(&data->node, &threads); - data->t = kthread_create(affine_test, data, "threshtest%d", cpu); -#ifndef __rtems__ - BUG_ON(IS_ERR(data->t)); -#else /* __rtems__ */ - BUG_ON(data->t == NULL); -#endif /* __rtems__ */ - kthread_bind(data->t, cpu); - wake_up_process(data->t); - return data; -} - -void bman_test_thresh(void) -{ - int loop = TEST_NUMBUFS; - int ret, num_cpus = 0; - struct affine_test_data *data, *drainer = NULL; - - pr_info("%s(): Start\n", __func__); - - /* allocate a BPID and seed it */ - pool_nocb = bman_new_pool(¶ms_nocb); - BUG_ON(!pool_nocb); - while (loop--) { - struct bm_buffer buf; - - bm_buffer_set64(&buf, 0x0badbeef + loop); - ret = bman_release(pool_nocb, &buf, 1, - BMAN_RELEASE_FLAG_WAIT); - BUG_ON(ret); - } - while (!bman_rcr_is_empty()) - cpu_relax(); - pr_info("%s(): Buffers are in\n", __func__); - - /* create threads and wait for them to create pools */ - params_cb.bpid = bman_get_params(pool_nocb)->bpid; -#ifndef __rtems__ - for_each_cpu(loop, cpu_online_mask) { -#else /* __rtems__ */ - for (loop = 0; loop < rtems_get_processor_count(); ++loop) { -#endif /* __rtems__ */ - data = start_affine_test(loop, drainer ? 
0 : 1); - BUG_ON(!data); - if (!drainer) - drainer = data; - num_cpus++; - wait_for_completion(&data->wakeparent); - } - - /* signal the drainer to start draining */ - complete(&drainer->wakethread); - wait_for_completion(&drainer->wakeparent); - init_completion(&drainer->wakeparent); - - /* tear down */ - list_for_each_entry_safe(data, drainer, &threads, node) { - complete(&data->wakethread); - ret = kthread_stop(data->t); - BUG_ON(ret); - list_del(&data->node); - /* check that we get the expected callbacks (and no others) */ - BUG_ON(data->num_enter != 1); - BUG_ON(data->num_exit != 0); - kfree(data); - } - bman_free_pool(pool_nocb); - - pr_info("%s(): Done\n", __func__); -} diff --git a/linux/drivers/soc/fsl/qbman/bman_utils.c b/linux/drivers/soc/fsl/qbman/bman_utils.c deleted file mode 100644 index c6fa0b33..00000000 --- a/linux/drivers/soc/fsl/qbman/bman_utils.c +++ /dev/null @@ -1,76 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2009 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "dpaa_sys.h" - -#include <soc/fsl/bman.h> - -/* BMan APIs are front-ends to the common code */ - -static DECLARE_DPAA_RESOURCE(bpalloc); /* BPID allocator */ - -/* BPID allocator front-end */ - -int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial) -{ - return dpaa_resource_new(&bpalloc, result, count, align, partial); -} -EXPORT_SYMBOL(bman_alloc_bpid_range); - -static int bp_cleanup(u32 bpid) -{ - return bman_shutdown_pool(bpid) == 0; -} -void bman_release_bpid_range(u32 bpid, u32 count) -{ - u32 total_invalid = dpaa_resource_release(&bpalloc, - bpid, count, bp_cleanup); - - if (total_invalid) - pr_err("BPID range [%d..%d] (%d) had %d leaks\n", - bpid, bpid + count - 1, count, total_invalid); -} -EXPORT_SYMBOL(bman_release_bpid_range); - -void bman_seed_bpid_range(u32 bpid, u32 count) -{ - dpaa_resource_seed(&bpalloc, bpid, count); -} -EXPORT_SYMBOL(bman_seed_bpid_range); - -int bman_reserve_bpid_range(u32 bpid, u32 count) -{ - return dpaa_resource_reserve(&bpalloc, bpid, count); -} -EXPORT_SYMBOL(bman_reserve_bpid_range); diff --git a/linux/drivers/soc/fsl/qbman/dpaa_resource.c b/linux/drivers/soc/fsl/qbman/dpaa_resource.c deleted file mode 100644 index 3f7b74bf..00000000 --- a/linux/drivers/soc/fsl/qbman/dpaa_resource.c +++ /dev/null @@ -1,363 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2009 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#if defined(CONFIG_FSL_BMAN_PORTAL) || \ - defined(CONFIG_FSL_BMAN_PORTAL_MODULE) || \ - defined(CONFIG_FSL_QMAN_PORTAL) || \ - defined(CONFIG_FSL_QMAN_PORTAL_MODULE) -#include "dpaa_sys.h" - -/* The allocator is a (possibly-empty) list of these */ -struct dpaa_resource_node { - struct list_head list; - u32 base; - u32 num; - /* refcount and is_alloced are only set - when the node is in the used list */ - unsigned int refcount; - int is_alloced; -}; - -#ifdef DPAA_RESOURCE_DEBUG -#define DPRINT pr_info -static void DUMP(struct dpaa_resource *alloc) -{ - int off = 0; - char buf[256]; - struct dpaa_resource_node *p; - - pr_info("Free Nodes\n"); - list_for_each_entry(p, &alloc->free, list) { - if (off < 255) - off += snprintf(buf + off, 255-off, "{%d,%d}", - p->base, p->base + p->num - 1); - } - pr_info("%s\n", buf); - - off = 0; - pr_info("Used Nodes\n"); - list_for_each_entry(p, &alloc->used, list) { - if (off < 255) - off += snprintf(buf + off, 255-off, "{%d,%d}", - p->base, p->base + p->num - 1); - } - pr_info("%s\n", buf); -} -#else -#define DPRINT(x...) -#define DUMP(a) -#endif - -int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result, - u32 count, u32 align, int partial) -{ - struct dpaa_resource_node *i = NULL, *next_best = NULL, - *used_node = NULL; - u32 base, next_best_base = 0, num = 0, next_best_num = 0; - struct dpaa_resource_node *margin_left, *margin_right; - - *result = (u32)-1; - DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial); - DUMP(alloc); - /* If 'align' is 0, it should behave as though it was 1 */ - if (!align) - align = 1; - margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL); - if (!margin_left) - goto err; - margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL); - if (!margin_right) { - kfree(margin_left); - goto err; - } - spin_lock_irq(&alloc->lock); - list_for_each_entry(i, &alloc->free, list) { - base = (i->base + align - 1) / align; - base *= align; - if ((base - i->base) >= i->num) - /* alignment is impossible, regardless of count */ - continue; - num = i->num - (base - i->base); - if (num >= count) { - /* this one will do nicely */ - num = count; - goto done; - } - if (num > next_best_num) { - next_best = i; - next_best_base = base; - next_best_num = num; - } - } - if (partial && next_best) { - i = next_best; - base = next_best_base; - num = next_best_num; - } else - i = NULL; -done: - if (i) { - if (base != i->base) { - margin_left->base = i->base; - margin_left->num = base - i->base; - list_add_tail(&margin_left->list, &i->list); - } else - kfree(margin_left); - if ((base + num) < (i->base + i->num)) { - margin_right->base = base + num; - margin_right->num = (i->base + i->num) - - (base + num); - list_add(&margin_right->list, &i->list); - } else - kfree(margin_right); - list_del(&i->list); - kfree(i); - *result = base; - } - spin_unlock_irq(&alloc->lock); -err: - DPRINT("returning %d\n", i ? num : -ENOMEM); - DUMP(alloc); - if (!i) - return -ENOMEM; - - /* Add the allocation to the used list with a refcount of 1 */ - used_node = kmalloc(sizeof(*used_node), GFP_KERNEL); - if (!used_node) - return -ENOMEM; - used_node->base = *result; - used_node->num = num; - used_node->refcount = 1; - used_node->is_alloced = 1; - list_add_tail(&used_node->list, &alloc->used); - return (int)num; -} -EXPORT_SYMBOL(dpaa_resource_new); - -/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid - * forcing error-handling on to users in the deallocation path. 
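
dpaa_resource_new() above carves an allocation out of the first free node that can hold it: round the node's base up to the requested alignment, take 'count' IDs from there, and return any unused margin on either side to the free list. A minimal standalone sketch of just that arithmetic (illustrative only, not driver code):

#include <stdio.h>

/* align-up as in dpaa_resource_new(): round 'base' up to 'align' */
static unsigned int align_up(unsigned int base, unsigned int align)
{
	return (base + align - 1) / align * align;
}

int main(void)
{
	/* free node [5..20), i.e. base 5, num 15; want 4 IDs, align 8 */
	unsigned int nbase = 5, nnum = 15, align = 8, count = 4;
	unsigned int base = align_up(nbase, align);	/* -> 8 */

	if (base - nbase >= nnum)
		return 1;	/* alignment impossible in this node */
	printf("alloc [%u..%u), left margin %u, right margin %u\n",
	       base, base + count,
	       base - nbase, nbase + nnum - (base + count));
	return 0;
}

The two margins printed here are what the real function reinserts as 'margin_left' and 'margin_right' before moving the carved-out range onto the used list.
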
*/ -static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id, - u32 count) -{ - struct dpaa_resource_node *i, - *node = kmalloc(sizeof(*node), GFP_ATOMIC); - - BUG_ON(!node); - DPRINT("release_range(%d,%d)\n", base_id, count); - DUMP(alloc); - BUG_ON(!count); - spin_lock_irq(&alloc->lock); - - node->base = base_id; - node->num = count; - list_for_each_entry(i, &alloc->free, list) { - if (i->base >= node->base) { - /* BUG_ON(any overlapping) */ - BUG_ON(i->base < (node->base + node->num)); - list_add_tail(&node->list, &i->list); - goto done; - } - } - list_add_tail(&node->list, &alloc->free); -done: - /* Merge to the left */ - i = list_entry(node->list.prev, struct dpaa_resource_node, list); - if (node->list.prev != &alloc->free) { - BUG_ON((i->base + i->num) > node->base); - if ((i->base + i->num) == node->base) { - node->base = i->base; - node->num += i->num; - list_del(&i->list); - kfree(i); - } - } - /* Merge to the right */ - i = list_entry(node->list.next, struct dpaa_resource_node, list); - if (node->list.next != &alloc->free) { - BUG_ON((node->base + node->num) > i->base); - if ((node->base + node->num) == i->base) { - node->num += i->num; - list_del(&i->list); - kfree(i); - } - } - spin_unlock_irq(&alloc->lock); - DUMP(alloc); -} - -static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id, - u32 count) -{ - struct dpaa_resource_node *i = NULL; - - spin_lock_irq(&alloc->lock); - - /* First find the node in the used list and decrement its ref count */ - list_for_each_entry(i, &alloc->used, list) { - if (i->base == base_id && i->num == count) { - --i->refcount; - if (i->refcount == 0) { - list_del(&i->list); - spin_unlock_irq(&alloc->lock); - if (i->is_alloced) - _dpaa_resource_free(alloc, base_id, - count); - kfree(i); - return; - } - spin_unlock_irq(&alloc->lock); - return; - } - } - /* Couldn't find the allocation */ - pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n", - base_id, count); - spin_unlock_irq(&alloc->lock); -} - -/* Same as free but no previous allocation checking is needed */ -void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count) -{ - _dpaa_resource_free(alloc, base_id, count); -} -EXPORT_SYMBOL(dpaa_resource_seed); - -/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire - * desired range is not available, or 0 for success - */ -int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num) -{ - struct dpaa_resource_node *i = NULL, *used_node; - - DPRINT("alloc_reserve(%d,%d)\n", base, num); - DUMP(alloc); - - spin_lock_irq(&alloc->lock); - - /* Check for the node in the used list. 
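
The free path above keeps the free list sorted by base and coalesces the returned range with its neighbour on either side. The two merge tests reduce to this (minimal standalone sketch of the same checks):

#include <stdio.h>

struct range { unsigned int base, num; };

/* mirrors the merge-left/merge-right checks in _dpaa_resource_free() */
static int merge(struct range *a, struct range *n)
{
	if (a->base + a->num == n->base) {	/* 'a' ends where 'n' starts */
		a->num += n->num;
		return 1;
	}
	if (n->base + n->num == a->base) {	/* 'n' ends where 'a' starts */
		a->base = n->base;
		a->num += n->num;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct range left = { 0, 8 }, freed = { 8, 4 };

	if (merge(&left, &freed))
		printf("coalesced to [%u..%u)\n",
		       left.base, left.base + left.num);
	return 0;
}
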
- If found, increase it's refcount */ - list_for_each_entry(i, &alloc->used, list) { - if ((i->base == base) && (i->num == num)) { - ++i->refcount; - spin_unlock_irq(&alloc->lock); - return 0; - } - if ((base >= i->base) && (base < (i->base + i->num))) { - /* This is an attempt to reserve a region that was - already reserved or alloced with a different - base or num */ - pr_err("Cannot reserve %d - %d, it overlaps with" - " existing reservation from %d - %d\n", - base, base + num - 1, i->base, - i->base + i->num - 1); - spin_unlock_irq(&alloc->lock); - return -1; - } - } - /* Check to make sure this ID isn't in the free list */ - list_for_each_entry(i, &alloc->free, list) { - if ((base >= i->base) && (base < (i->base + i->num))) { - /* yep, the reservation is within this node */ - pr_err("Cannot reserve %d - %d, it overlaps with" - " free range %d - %d and must be alloced\n", - base, base + num - 1, - i->base, i->base + i->num - 1); - spin_unlock_irq(&alloc->lock); - return -1; - } - } - /* Add the allocation to the used list with a refcount of 1 */ - used_node = kmalloc(sizeof(*used_node), GFP_KERNEL); - if (!used_node) { - spin_unlock_irq(&alloc->lock); - return -ENOMEM; - - } - used_node->base = base; - used_node->num = num; - used_node->refcount = 1; - used_node->is_alloced = 0; - list_add_tail(&used_node->list, &alloc->used); - spin_unlock_irq(&alloc->lock); - return 0; -} -EXPORT_SYMBOL(dpaa_resource_reserve); - -/* This is a sort-of-conditional dpaa_resource_free() routine. Eg. when - * releasing FQIDs (probably from user-space), it can filter out those - * that aren't in the OOS state (better to leak a h/w resource than to - * crash). This function returns the number of invalid IDs that were not - * released. -*/ -u32 dpaa_resource_release(struct dpaa_resource *alloc, - u32 id, u32 count, int (*is_valid)(u32 id)) -{ - int valid_mode = 0; - u32 loop = id, total_invalid = 0; - - while (loop < (id + count)) { - int isvalid = is_valid ? is_valid(loop) : 1; - - if (!valid_mode) { - /* We're looking for a valid ID to terminate an invalid - * range */ - if (isvalid) { - /* We finished a range of invalid IDs, a valid - * range is now underway */ - valid_mode = 1; - count -= (loop - id); - id = loop; - } else - total_invalid++; - } else { - /* We're looking for an invalid ID to terminate a - * valid range */ - if (!isvalid) { - /* Release the range of valid IDs, an unvalid - * range is now underway */ - if (loop > id) - dpaa_resource_free(alloc, id, - loop - id); - valid_mode = 0; - } - } - loop++; - } - /* Release any unterminated range of valid IDs */ - if (valid_mode && count) - dpaa_resource_free(alloc, id, count); - return total_invalid; -} -EXPORT_SYMBOL(dpaa_resource_release); -#endif /* CONFIG_FSL_*MAN_PORTAL* */ diff --git a/linux/drivers/soc/fsl/qbman/dpaa_sys.h b/linux/drivers/soc/fsl/qbman/dpaa_sys.h index 85f87800..0e897026 100644 --- a/linux/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/linux/drivers/soc/fsl/qbman/dpaa_sys.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
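
dpaa_resource_release() at the end of dpaa_resource.c above partitions [id..id+count) into maximal runs: runs of valid IDs are freed as a block, invalid IDs are counted and deliberately leaked. A standalone sketch of that scan, with a stand-in is_valid() (the real caller would pass e.g. an OOS check for FQIDs):

#include <stdio.h>

static int is_valid(unsigned int id)
{
	return id & 1;		/* stand-in: pretend odd IDs are releasable */
}

int main(void)
{
	unsigned int id = 10, count = 6, loop, run_start = 0, invalid = 0;
	int in_run = 0;

	for (loop = id; loop < id + count; loop++) {
		if (is_valid(loop)) {
			if (!in_run) {
				in_run = 1;
				run_start = loop;
			}
		} else {
			if (in_run)	/* a valid run just ended */
				printf("release [%u..%u)\n", run_start, loop);
			in_run = 0;
			invalid++;
		}
	}
	if (in_run)			/* unterminated trailing run */
		printf("release [%u..%u)\n", run_start, id + count);
	printf("%u invalid IDs not released\n", invalid);
	return 0;
}
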
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,23 +31,19 @@ #ifndef __DPAA_SYS_H #define __DPAA_SYS_H +#include <linux/cpu.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/of_reserved_mem.h> #include <linux/kthread.h> -#include <linux/uaccess.h> -#include <linux/debugfs.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> -#include <linux/ctype.h> -#ifdef CONFIG_HOTPLUG_CPU -#include <linux/cpu.h> -#endif - -#include <asm/pgtable.h> +#include <linux/of.h> +#include <linux/of_reserved_mem.h> +#include <linux/prefetch.h> +#include <linux/genalloc.h> +#include <asm/cacheflush.h> #ifdef __rtems__ #include <asm/cache.h> #include <asm/mpc85xx.h> @@ -55,238 +51,91 @@ #include <linux/io.h> #include <linux/rbtree.h> #include <bsp/linker-symbols.h> + #define DPAA_NOCACHENOLOAD_ALIGNED_REGION(designator, size) \ BSP_NOCACHENOLOAD_SUBSECTION(designator) __aligned(size) \ uint8_t designator[size] -#endif /* __rtems__ */ - -struct dpaa_resource { - struct list_head free; - spinlock_t lock; - struct list_head used; -}; -#define DECLARE_DPAA_RESOURCE(name) \ -struct dpaa_resource name = { \ - .free = { \ - .prev = &name.free, \ - .next = &name.free \ - }, \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .used = { \ - .prev = &name.used, \ - .next = &name.used \ - } \ -} - -int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result, - u32 count, u32 align, int partial); -u32 dpaa_resource_release(struct dpaa_resource *alloc, - u32 id, u32 count, int (*is_valid)(u32 id)); -void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count); -int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num); +#ifdef __PPC_CPU_E6500__ +#define dma_wmb() ppc_light_weight_synchronize() +#else +#define dma_wmb() ppc_enforce_in_order_execution_of_io() +#endif -/* When copying aligned words or shorts, try to avoid memcpy() */ -#define CONFIG_TRY_BETTER_MEMCPY +#define prefetch(x) ppc_data_cache_block_touch(x) +#endif /* __rtems__ */ /* For 2-element tables related to cache-inhibited and cache-enabled mappings */ -#define DPA_PORTAL_CE 0 -#define DPA_PORTAL_CI 1 +#define DPAA_PORTAL_CE 0 +#define DPAA_PORTAL_CI 1 -/* Misc inline assists */ +#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64) +#error "Unsupported Cacheline Size" +#endif -/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler - * barriers and that dcb*() won't fall victim to compiler or execution - * reordering with respect to other code/instructions that manipulate the same - * cacheline. 
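
The dma_wmb() mapping added above exists for one pattern: the stores that fill a ring entry must become visible before the store that publishes the entry. A user-space analogy using C11 atomics (illustrative only; memory_order_release plays the role dma_wmb() plays in the portal code):

#include <stdatomic.h>
#include <stdint.h>

struct entry { _Atomic uint8_t verb; uint8_t payload[63]; };

static void publish(struct entry *e, uint8_t verb_with_vbit)
{
	for (int i = 0; i < 63; i++)
		e->payload[i] = (uint8_t)i;	/* 1: fill the entry */
	atomic_store_explicit(&e->verb, verb_with_vbit,
			      memory_order_release); /* 2: then the verb */
}

A consumer that reads the verb with memory_order_acquire is then guaranteed to observe the payload; the portal code gets the equivalent ordering from dma_wmb() plus the device's own access rules.
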
*/ -#define hwsync() __asm__ __volatile__ ("sync" : : : "memory") +static inline void dpaa_flush(void *p) +{ #ifndef __rtems__ -#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory") +#ifdef CONFIG_PPC + flush_dcache_range((unsigned long)p, (unsigned long)p+64); +#elif defined(CONFIG_ARM32) + __cpuc_flush_dcache_area(p, 64); +#elif defined(CONFIG_ARM64) + __flush_dcache_area(p, 64); +#endif #else /* __rtems__ */ - #ifdef __PPC_CPU_E6500__ - #define lwsync() ppc_light_weight_synchronize() - #else - #define lwsync() ppc_synchronize_data() - #endif -#endif /* __rtems__ */ -#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory") -#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)) -#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)) -#define dcbi(p) dcbf(p) -#ifdef CONFIG_PPC_E500MC -#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p)) -#define dcbz_64(p) dcbzl(p) -#define dcbf_64(p) dcbf(p) -/* Commonly used combo */ -#define dcbit_ro(p) \ - do { \ - dcbi(p); \ - dcbt_ro(p); \ - } while (0) +#ifdef __PPC_CPU_E6500__ + ppc_data_cache_block_flush(p); #else -#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p)) -#define dcbz_64(p) \ - do { \ - dcbz((u32)p + 32); \ - dcbz(p); \ - } while (0) -#define dcbf_64(p) \ - do { \ - dcbf((u32)p + 32); \ - dcbf(p); \ - } while (0) -/* Commonly used combo */ -#define dcbit_ro(p) \ - do { \ - dcbi(p); \ - dcbi((u32)p + 32); \ - dcbt_ro(p); \ - dcbt_ro((u32)p + 32); \ - } while (0) -#endif /* CONFIG_PPC_E500MC */ - -static inline u64 mfatb(void) -{ - u32 hi, lo, chk; - - do { - hi = mfspr(SPRN_ATBU); - lo = mfspr(SPRN_ATBL); - chk = mfspr(SPRN_ATBU); - } while (unlikely(hi != chk)); - return ((u64)hi << 32) | (u64)lo; +#error "Unsupported platform" +#endif +#endif /* __rtems__ */ } -#ifdef CONFIG_FSL_DPA_CHECKING -#define DPA_ASSERT(x) WARN_ON(!(x)) +#define dpaa_invalidate(p) dpaa_flush(p) + +#ifndef __rtems__ +#define dpaa_zero(p) memset(p, 0, 64) +#else /* __rtems__ */ +#ifdef __PPC_CPU_E6500__ +#define dpaa_zero(p) ppc_data_cache_block_clear_to_zero(p) #else -#define DPA_ASSERT(x) +#define dpaa_zero(p) memset(p, 0, 64) #endif +#endif /* __rtems__ */ -#ifdef CONFIG_TRY_BETTER_MEMCPY -static inline void copy_words(void *dest, const void *src, size_t sz) +static inline void dpaa_touch_ro(void *p) { - u32 *__dest = dest; - const u32 *__src = src; - size_t __sz = sz >> 2; - - BUG_ON((unsigned long)dest & 0x3); - BUG_ON((unsigned long)src & 0x3); - BUG_ON(sz & 0x3); - while (__sz--) - *(__dest++) = *(__src++); -} -#else -#define copy_words memcpy +#if (L1_CACHE_BYTES == 32) + prefetch(p+32); #endif + prefetch(p); +} -/* RB-trees */ - -/* We encapsulate RB-trees so that its easier to use non-linux forms in - * non-linux systems. This also encapsulates the extra plumbing that linux code - * usually provides when using RB-trees. This encapsulation assumes that the - * data type held by the tree is u32. 
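
Taken together, dpaa_zero()/dpaa_flush() and dpaa_invalidate()/dpaa_touch_ro() above implement a fixed produce/poll discipline on 64-byte ring entries. A sketch of the intended usage, with a hypothetical entry type (the real users are the EQCR and MC commit paths later in this diff):

struct hw_entry { u8 verb; u8 body[63]; };	/* stand-in, one cacheline */

static void produce(struct hw_entry *e, u8 verb)
{
	dpaa_zero(e);		/* clear the line (dcbz-style on e6500) */
	e->body[0] = 0x2a;	/* ... fill in the command ... */
	dma_wmb();		/* payload strictly before the verb */
	e->verb = verb;
	dpaa_flush(e);		/* push the line towards the device */
}

static void poll_prep(const struct hw_entry *e)
{
	dpaa_invalidate((void *)e);	/* drop any stale cached copy */
	dpaa_touch_ro((void *)e);	/* prefetch read-only for the poll */
}
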
*/ - -struct dpa_rbtree { - struct rb_root root; -}; -#define DPA_RBTREE { .root = RB_ROOT } - -static inline void dpa_rbtree_init(struct dpa_rbtree *tree) +/* Commonly used combo */ +static inline void dpaa_invalidate_touch_ro(void *p) { - tree->root = RB_ROOT; + dpaa_invalidate(p); + dpaa_touch_ro(p); } -#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \ -static inline int name##_push(struct dpa_rbtree *tree, type *obj) \ -{ \ - struct rb_node *parent = NULL, **p = &tree->root.rb_node; \ - while (*p) { \ - u32 item; \ - parent = *p; \ - item = rb_entry(parent, type, node_field)->val_field; \ - if (obj->val_field < item) \ - p = &parent->rb_left; \ - else if (obj->val_field > item) \ - p = &parent->rb_right; \ - else \ - return -EBUSY; \ - } \ - rb_link_node(&obj->node_field, parent, p); \ - rb_insert_color(&obj->node_field, &tree->root); \ - return 0; \ -} \ -static inline void name##_del(struct dpa_rbtree *tree, type *obj) \ -{ \ - rb_erase(&obj->node_field, &tree->root); \ -} \ -static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \ -{ \ - type *ret; \ - struct rb_node *p = tree->root.rb_node; \ - while (p) { \ - ret = rb_entry(p, type, node_field); \ - if (val < ret->val_field) \ - p = p->rb_left; \ - else if (val > ret->val_field) \ - p = p->rb_right; \ - else \ - return ret; \ - } \ - return NULL; \ -} -#ifndef __rtems__ -/* Bootargs */ +#ifdef CONFIG_FSL_DPAA_CHECKING +#define DPAA_ASSERT(x) WARN_ON(!(x)) +#else +#define DPAA_ASSERT(x) +#endif -/* QMan has "qportals=" and BMan has "bportals=", they use the same syntax - * though; a comma-separated list of items, each item being a cpu index and/or a - * range of cpu indices, and each item optionally be prefixed by "s" to indicate - * that the portal associated with that cpu should be shared. See bman_driver.c - * for more specifics. */ -static int __parse_portals_cpu(const char **s, unsigned int *cpu) +/* cyclic helper for rings */ +static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) { - *cpu = 0; - if (!isdigit(**s)) - return -EINVAL; - while (isdigit(**s)) - *cpu = *cpu * 10 + (*((*s)++) - '0'); - return 0; + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; } -static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared, - struct cpumask *want_unshared, - const char *argname) -{ - const char *s = str; - unsigned int shared, cpu1, cpu2, loop; -keep_going: - if (*s == 's') { - shared = 1; - s++; - } else - shared = 0; - if (__parse_portals_cpu(&s, &cpu1)) - goto err; - if (*s == '-') { - s++; - if (__parse_portals_cpu(&s, &cpu2)) - goto err; - if (cpu2 < cpu1) - goto err; - } else - cpu2 = cpu1; - for (loop = cpu1; loop <= cpu2; loop++) - cpumask_set_cpu(loop, shared ? want_shared : want_unshared); - if (*s == ',') { - s++; - goto keep_going; - } else if ((*s == '\0') || isspace(*s)) - return 0; -err: - pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str, - (unsigned long)s - (unsigned long)str); - return -EINVAL; -} -#endif /* __rtems__ */ +/* Offset applied to genalloc pools due to zero being an error return */ +#define DPAA_GENALLOC_OFF 0x80000000 + #endif /* __DPAA_SYS_H */ diff --git a/linux/drivers/soc/fsl/qbman/qman.c b/linux/drivers/soc/fsl/qbman/qman.c index aa188888..244cf164 100644 --- a/linux/drivers/soc/fsl/qbman/qman.c +++ b/linux/drivers/soc/fsl/qbman/qman.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. 
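
DPAA_GENALLOC_OFF, defined at the end of dpaa_sys.h above, compensates for gen_pool_alloc() using 0 as its failure return: IDs are seeded into the pool biased by the offset and un-biased again on allocation, so a legitimate ID 0 never collides with the error value. A hedged sketch of how an ID allocator sits on top of genalloc (the concrete BPID/FQID users live elsewhere in the tree; the sketch_* names are made up):

static int sketch_alloc_id(struct gen_pool *pool, u32 *id)
{
	unsigned long addr = gen_pool_alloc(pool, 1);

	if (!addr)
		return -ENOMEM;	/* 0 is never a valid biased ID */
	*id = addr - DPAA_GENALLOC_OFF;
	return 0;
}

static void sketch_seed_id(struct gen_pool *pool, u32 id)
{
	gen_pool_free(pool, id + DPAA_GENALLOC_OFF, 1);
}
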
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,1074 +33,2875 @@ */ #include "qman_priv.h" +#ifdef __rtems__ +#undef dev_crit +#undef dev_dbg +#undef dev_err +#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__) +#define dev_dbg dev_crit +#define dev_err dev_crit +#endif /* __rtems__ */ + +#define DQRR_MAXFILL 15 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ +#define IRQNAME "QMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ +#define QMAN_POLL_LIMIT 32 +#define QMAN_PIRQ_DQRR_ITHRESH 12 +#define QMAN_PIRQ_MR_ITHRESH 4 +#define QMAN_PIRQ_IPERIOD 100 + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define QM_REG_EQCR_PI_CINH 0x0000 +#define QM_REG_EQCR_CI_CINH 0x0004 +#define QM_REG_EQCR_ITR 0x0008 +#define QM_REG_DQRR_PI_CINH 0x0040 +#define QM_REG_DQRR_CI_CINH 0x0044 +#define QM_REG_DQRR_ITR 0x0048 +#define QM_REG_DQRR_DCAP 0x0050 +#define QM_REG_DQRR_SDQCR 0x0054 +#define QM_REG_DQRR_VDQCR 0x0058 +#define QM_REG_DQRR_PDQCR 0x005c +#define QM_REG_MR_PI_CINH 0x0080 +#define QM_REG_MR_CI_CINH 0x0084 +#define QM_REG_MR_ITR 0x0088 +#define QM_REG_CFG 0x0100 +#define QM_REG_ISR 0x0e00 +#define QM_REG_IER 0x0e04 +#define QM_REG_ISDR 0x0e08 +#define QM_REG_IIR 0x0e0c +#define QM_REG_ITPR 0x0e14 + +/* Cache-enabled register offsets */ +#define QM_CL_EQCR 0x0000 +#define QM_CL_DQRR 0x1000 +#define QM_CL_MR 0x2000 +#define QM_CL_EQCR_PI_CENA 0x3000 +#define QM_CL_EQCR_CI_CENA 0x3100 +#define QM_CL_DQRR_PI_CENA 0x3200 +#define QM_CL_DQRR_CI_CENA 0x3300 +#define QM_CL_MR_PI_CENA 0x3400 +#define QM_CL_MR_CI_CENA 0x3500 +#define QM_CL_CR 0x3800 +#define QM_CL_RR0 0x3900 +#define QM_CL_RR1 0x3940 + +/* + * BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses and data-dependencies. Use of barrier()s + * or other order-preserving primitives simply degrade performance. 
Hence the + * use of the __raw_*() interfaces, which simply ensure that the compiler treats + * the portal registers as volatile + */ -#include <asm/cacheflush.h> - -/* Last updated for v00.800 of the BG */ - -/* Register offsets */ -#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) -#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) -#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) -#define REG_DD_CFG 0x0200 -#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) -#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) -#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) -#define REG_PFDR_FPC 0x0400 -#define REG_PFDR_FP_HEAD 0x0404 -#define REG_PFDR_FP_TAIL 0x0408 -#define REG_PFDR_FP_LWIT 0x0410 -#define REG_PFDR_CFG 0x0414 -#define REG_SFDR_CFG 0x0500 -#define REG_SFDR_IN_USE 0x0504 -#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) -#define REG_WQ_DEF_ENC_WQID 0x0630 -#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) -#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) -#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) -#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) -#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ -#define REG_CM_CFG 0x0800 -#define REG_ECSR 0x0a00 -#define REG_ECIR 0x0a04 -#define REG_EADR 0x0a08 -#define REG_ECIR2 0x0a0c -#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) -#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) -#define REG_MCR 0x0b00 -#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) -#define REG_MISC_CFG 0x0be0 -#define REG_HID_CFG 0x0bf0 -#define REG_IDLE_STAT 0x0bf4 -#define REG_IP_REV_1 0x0bf8 -#define REG_IP_REV_2 0x0bfc -#define REG_FQD_BARE 0x0c00 -#define REG_PFDR_BARE 0x0c20 -#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ -#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ -#define REG_QCSP_BARE 0x0c80 -#define REG_QCSP_BAR 0x0c84 -#define REG_CI_SCHED_CFG 0x0d00 -#define REG_SRCIDR 0x0d04 -#define REG_LIODNR 0x0d08 -#define REG_CI_RLM_AVG 0x0d14 -#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */ -#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) -#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) -#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) - -/* Assists for QMAN_MCR */ -#define MCR_INIT_PFDR 0x01000000 -#define MCR_get_rslt(v) (u8)((v) >> 24) -#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0)) -#define MCR_rslt_ok(r) (rslt == 0xf0) -#define MCR_rslt_eaccess(r) (rslt == 0xf8) -#define MCR_rslt_inval(r) (rslt == 0xff) - -/* Corenet initiator settings. Stash request queues are 4-deep to match cores - ability to snarf. Stash priority is 3, other priorities are 2. */ -#define FSL_QMAN_CI_SCHED_CFG_SRCCIV 4 -#define FSL_QMAN_CI_SCHED_CFG_SRQ_W 3 -#define FSL_QMAN_CI_SCHED_CFG_RW_W 2 -#define FSL_QMAN_CI_SCHED_CFG_BMAN_W 2 - -struct qman; - -/* Follows WQ_CS_CFG0-5 */ -enum qm_wq_class { - qm_wq_portal = 0, - qm_wq_pool = 1, - qm_wq_fman0 = 2, - qm_wq_fman1 = 3, - qm_wq_caam = 4, - qm_wq_pme = 5, - qm_wq_first = qm_wq_portal, - qm_wq_last = qm_wq_pme +/* Cache-enabled ring access */ +#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) + +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * dmode == h/w dequeue mode. + * Enum values use 3 letter codes. 
First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only + * As for "enum qm_dqrr_dmode", it should be self-explanatory. + */ +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ + qm_eqcr_pci = 0, /* PI index, cache-inhibited */ + qm_eqcr_pce = 1, /* PI index, cache-enabled */ + qm_eqcr_pvb = 2 /* valid-bit */ +}; +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ + qm_dqrr_dpull = 1 /* PDQCR */ +}; +enum qm_dqrr_pmode { /* s/w-only */ + qm_dqrr_pci, /* reads DQRR_PI_CINH */ + qm_dqrr_pce, /* reads DQRR_PI_CENA */ + qm_dqrr_pvb /* reads valid-bit */ +}; +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ + qm_dqrr_cci = 0, /* CI index, cache-inhibited */ + qm_dqrr_cce = 1, /* CI index, cache-enabled */ + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ +}; +enum qm_mr_pmode { /* s/w-only */ + qm_mr_pci, /* reads MR_PI_CINH */ + qm_mr_pce, /* reads MR_PI_CENA */ + qm_mr_pvb /* reads valid-bit */ +}; +enum qm_mr_cmode { /* matches QCSP_CFG::MM */ + qm_mr_cci = 0, /* CI index, cache-inhibited */ + qm_mr_cce = 1 /* CI index, cache-enabled */ }; -/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ -enum qm_memory { - qm_memory_fqd, - qm_memory_pfdr +/* --- Portal structures --- */ + +#define QM_EQCR_SIZE 8 +#define QM_DQRR_SIZE 16 +#define QM_MR_SIZE 8 + +/* "Enqueue Command" */ +struct qm_eqcr_entry { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 dca; + __be16 seqnum; + u8 __reserved[4]; + __be32 fqid; /* 24-bit */ + __be32 tag; + struct qm_fd fd; + u8 __reserved3[32]; +} __packed; +#define QM_EQCR_VERB_VBIT 0x80 +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ + +struct qm_eqcr { + struct qm_eqcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum qm_eqcr_pmode pmode; +#endif }; -/* Used by all error interrupt registers except 'inhibit' */ -#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ -#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ -#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ -#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ -#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ -#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ -#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ -#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ -#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ -#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ -#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ -#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ -#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ -#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ -#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ -#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ -#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ -#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ - -/* QMAN_ECIR valid error bit */ -#define PORTAL_ECSR_ERR 
(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ - QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ - QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) -#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ - QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ - QM_EIRQ_IFSI) - -union qman_ecir { - u32 ecir_raw; - struct { - u32 __reserved:2; - u32 portal_type:1; - u32 portal_num:5; - u32 fqid:24; - } __packed info; +struct qm_dqrr { + const struct qm_dqrr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_dqrr_dmode dmode; + enum qm_dqrr_pmode pmode; + enum qm_dqrr_cmode cmode; +#endif }; -union qman_ecir2 { - u32 ecir2_raw; - struct { - u32 portal_type:1; - u32 __reserved:21; - u32 portal_num:10; - } __packed info; +struct qm_mr { + union qm_mr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_mr_pmode pmode; + enum qm_mr_cmode cmode; +#endif }; -union qman_eadr { - u32 eadr_raw; - struct { - u32 __reserved1:4; - u32 memid:4; - u32 __reserved2:12; - u32 eadr:12; - } __packed info; - struct { - u32 __reserved1:3; - u32 memid:5; - u32 __reserved:8; - u32 eadr:16; - } __packed info_rev3; +/* MC (Management Command) command */ +/* "FQ" command layout */ +struct qm_mcc_fq { + u8 _ncw_verb; + u8 __reserved1[3]; + __be32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; + +/* "CGR" command layout */ +struct qm_mcc_cgr { + u8 _ncw_verb; + u8 __reserved1[30]; + u8 cgid; + u8 __reserved2[32]; }; -struct qman_hwerr_txt { - u32 mask; - const char *txt; +#define QM_MCC_VERB_VBIT 0x80 +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_MCC_VERB_INITFQ_PARKED 0x40 +#define QM_MCC_VERB_INITFQ_SCHED 0x41 +#define QM_MCC_VERB_QUERYFQ 0x44 +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ +#define QM_MCC_VERB_QUERYWQ 0x46 +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ +#define QM_MCC_VERB_INITCGR 0x50 +#define QM_MCC_VERB_MODIFYCGR 0x51 +#define QM_MCC_VERB_CGRTESTWRITE 0x52 +#define QM_MCC_VERB_QUERYCGR 0x58 +#define QM_MCC_VERB_QUERYCONGESTION 0x59 +union qm_mc_command { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 __reserved[63]; + }; + struct qm_mcc_initfq initfq; + struct qm_mcc_initcgr initcgr; + struct qm_mcc_fq fq; + struct qm_mcc_cgr cgr; }; -#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b } - -static const struct qman_hwerr_txt qman_hwerr_txts[] = { - QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"), - QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"), - QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"), - QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"), - QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), - QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), - QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"), - QMAN_HWE_TXT(ICVI, "Invalid Command Verb"), - QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"), - QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"), - QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"), - QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"), - QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"), - QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"), - QMAN_HWE_TXT(IEOI, "Invalid Enqueue 
Overflow"), - QMAN_HWE_TXT(IESI, "Invalid Enqueue State"), - QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"), - QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue") +/* MC (Management Command) result */ +/* "Query FQ" */ +struct qm_mcr_queryfq { + u8 verb; + u8 result; + u8 __reserved1[8]; + struct qm_fqd fqd; /* the FQD fields are here */ + u8 __reserved2[30]; +} __packed; + +/* "Alter FQ State Commands" */ +struct qm_mcr_alterfq { + u8 verb; + u8 result; + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[61]; +}; +#define QM_MCR_VERB_RRID 0x80 +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS +#define QM_MCR_RESULT_NULL 0x00 +#define QM_MCR_RESULT_OK 0xf0 +#define QM_MCR_RESULT_ERR_FQID 0xf1 +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 +#define QM_MCR_RESULT_PENDING 0xf8 +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ +#define QM_MCR_TIMEOUT 10000 /* us */ +union qm_mc_result { + struct { + u8 verb; + u8 result; + u8 __reserved1[62]; + }; + struct qm_mcr_queryfq queryfq; + struct qm_mcr_alterfq alterfq; + struct qm_mcr_querycgr querycgr; + struct qm_mcr_querycongestion querycongestion; + struct qm_mcr_querywq querywq; + struct qm_mcr_queryfq_np queryfq_np; }; -#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt)) -struct qman_error_info_mdata { - u16 addr_mask; - u16 bits; - const char *txt; +struct qm_mc { + union qm_mc_command *cr; + union qm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can be _mc_start()ed */ + qman_mc_idle, + /* Can be _mc_commit()ed or _mc_abort()ed */ + qman_mc_user, + /* Can only be _mc_retry()ed */ + qman_mc_hw + } state; +#endif }; -#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} -static const struct qman_error_info_mdata error_mdata[] = { - QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"), - QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"), - QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"), - QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"), - QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"), - QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"), - QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"), - QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"), - QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"), - QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"), +struct qm_addr { + void __iomem *ce; /* cache-enabled */ + void __iomem *ci; /* cache-inhibited */ }; -#define QMAN_ERR_MDATA_COUNT \ - (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata)) -/* Add this in Kconfig */ -#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) +struct qm_portal { + /* + * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to + * and including 'mc' fits within a cacheline (yay!). 
The 'config' part + * is setup-only, so isn't a cause for a concern. In other words, don't + * rearrange this structure on a whim, there be dragons ... + */ + struct qm_addr addr; + struct qm_eqcr eqcr; + struct qm_dqrr dqrr; + struct qm_mr mr; + struct qm_mc mc; +} ____cacheline_aligned; + +/* Cache-inhibited register access. */ +static inline u32 qm_in(struct qm_portal *p, u32 offset) +{ + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); +} -/** - * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers - * @v: for accessors that write values, this is the 32-bit value - * - * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All - * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of - * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means - * "write the enable register" rather than "enable the write register"! - */ -#define qm_err_isr_status_read(qm) \ - __qm_err_isr_read(qm, qm_isr_status) -#define qm_err_isr_status_clear(qm, m) \ - __qm_err_isr_write(qm, qm_isr_status, m) -#define qm_err_isr_enable_read(qm) \ - __qm_err_isr_read(qm, qm_isr_enable) -#define qm_err_isr_enable_write(qm, v) \ - __qm_err_isr_write(qm, qm_isr_enable, v) -#define qm_err_isr_disable_read(qm) \ - __qm_err_isr_read(qm, qm_isr_disable) -#define qm_err_isr_disable_write(qm, v) \ - __qm_err_isr_write(qm, qm_isr_disable, v) -#define qm_err_isr_inhibit(qm) \ - __qm_err_isr_write(qm, qm_isr_inhibit, 1) -#define qm_err_isr_uninhibit(qm) \ - __qm_err_isr_write(qm, qm_isr_inhibit, 0) +static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) +{ + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); +} -/* - * TODO: unimplemented registers - * - * Keeping a list here of QMan registers I have not yet covered; - * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, - * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, - * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 - */ +/* Cache Enabled Portal Access */ +static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) +{ + dpaa_invalidate(p->addr.ce + offset); +} -/* Encapsulate "struct qman *" as a cast of the register space address. 
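
The layout warning in struct qm_portal above can be expressed as a compile-time check. Illustrative only, the driver does not carry this assert, and the constant 64 assumes the 32-bit Power pointer sizes this import targets, where the five hot sub-structs add up to exactly one line:

#ifndef CONFIG_FSL_DPAA_CHECKING
_Static_assert(offsetof(struct qm_portal, mc) + sizeof(struct qm_mc) <= 64,
	       "hot qm_portal state no longer fits one cacheline");
#endif
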
*/ +static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) +{ + dpaa_touch_ro(p->addr.ce + offset); +} -static struct qman *qm_create(void *regs) +static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) { - return (struct qman *)regs; + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); } -static inline u32 __qm_in(struct qman *qm, u32 offset) +/* --- EQCR API --- */ + +#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry)) +#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT) + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) { - return ioread32be((void *)qm + offset); + uintptr_t addr = (uintptr_t)p; + + addr &= ~EQCR_CARRY; + + return (struct qm_eqcr_entry *)addr; } -static inline void __qm_out(struct qman *qm, u32 offset, u32 val) + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static int eqcr_ptr2idx(struct qm_eqcr_entry *e) { - iowrite32be(val, (void *)qm + offset); + return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); } -#define qm_in(reg) __qm_in(qm, REG_##reg) -#define qm_out(reg, val) __qm_out(qm, REG_##reg, val) -static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n) +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void eqcr_inc(struct qm_eqcr *eqcr) { - return __qm_in(qm, REG_ERR_ISR + (n << 2)); + /* increment to the next EQCR pointer and handle overflow and 'vbit' */ + struct qm_eqcr_entry *partial = eqcr->cursor + 1; + + eqcr->cursor = eqcr_carryclear(partial); + if (partial != eqcr->cursor) + eqcr->vbit ^= QM_EQCR_VERB_VBIT; } -static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val) +static inline int qm_eqcr_init(struct qm_portal *portal, + enum qm_eqcr_pmode pmode, + unsigned int eq_stash_thresh, + int eq_stash_prio) { - __qm_out(qm, REG_ERR_ISR + (n << 2), val); + struct qm_eqcr *eqcr = &portal->eqcr; + u32 cfg; + u8 pi; + + eqcr->ring = portal->addr.ce + QM_CL_EQCR; + eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); + pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + eqcr->cursor = eqcr->ring + pi; + eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ? + QM_EQCR_VERB_VBIT : 0; + eqcr->available = QM_EQCR_SIZE - 1 - + dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); + eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 0; + eqcr->pmode = pmode; +#endif + cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) | + (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ + (eq_stash_prio << 26) | /* QCSP_CFG: EP */ + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ + qm_out(portal, QM_REG_CFG, cfg); + return 0; } -static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal, - int ed, u8 sernd) +static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) { - DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) || - (portal == qm_dc_portal_fman1)); - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff)); - else - qm_out(DCP_CFG(portal), (ed ? 
0x100 : 0) | (sernd & 0x1f)); + return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7; } -static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class, - u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5, - u8 csw6, u8 csw7) +static inline void qm_eqcr_finish(struct qm_portal *portal) { - qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | - ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | - ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | - ((csw6 & 0x7) << 4) | (csw7 & 0x7)); + struct qm_eqcr *eqcr = &portal->eqcr; + u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + + DPAA_ASSERT(!eqcr->busy); + if (pi != eqcr_ptr2idx(eqcr->cursor)) + pr_crit("losing uncommitted EQCR entries\n"); + if (ci != eqcr->ci) + pr_crit("missing existing EQCR completions\n"); + if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) + pr_crit("EQCR destroyed unquiesced\n"); } -static void qm_set_hid(struct qman *qm) +static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal + *portal) { - qm_out(HID_CFG, 0); + struct qm_eqcr *eqcr = &portal->eqcr; + + DPAA_ASSERT(!eqcr->busy); + if (!eqcr->available) + return NULL; + +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 1; +#endif + dpaa_zero(eqcr->cursor); + return eqcr->cursor; } -static void qm_set_corenet_initiator(struct qman *qm) +static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal + *portal) { - qm_out(CI_SCHED_CFG, - 0x80000000 | /* write srcciv enable */ - (FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) | - (FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) | - (FSL_QMAN_CI_SCHED_CFG_RW_W << 4) | - FSL_QMAN_CI_SCHED_CFG_BMAN_W); + struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci; + + DPAA_ASSERT(!eqcr->busy); + if (!eqcr->available) { + old_ci = eqcr->ci; + eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & + (QM_EQCR_SIZE - 1); + diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + if (!diff) + return NULL; + } +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 1; +#endif + dpaa_zero(eqcr->cursor); + return eqcr->cursor; } -static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor) +static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) { - u32 v = qm_in(IP_REV_1); - *id = (v >> 16); - *major = (v >> 8) & 0xff; - *minor = v & 0xff; + DPAA_ASSERT(eqcr->busy); + DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); + DPAA_ASSERT(eqcr->available >= 1); } -static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba, - int enable, int prio, int stash, u32 size) +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) { - u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; - u32 exp = ilog2(size); - /* choke if size isn't within range */ - DPA_ASSERT((size >= 4096) && (size <= 1073741824) && - is_power_of_2(size)); - /* choke if 'ba' has lower-alignment than 'size' */ - DPA_ASSERT(!(ba & (size - 1))); - __qm_out(qm, offset, upper_32_bits(ba)); - __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba)); - __qm_out(qm, offset + REG_offset_AR, - (enable ? 0x80000000 : 0) | - (prio ? 0x40000000 : 0) | - (stash ? 
0x20000000 : 0) | - (exp - 1)); + struct qm_eqcr *eqcr = &portal->eqcr; + struct qm_eqcr_entry *eqcursor; + + eqcr_commit_checks(eqcr); + DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb); + dma_wmb(); + eqcursor = eqcr->cursor; + eqcursor->_ncw_verb = myverb | eqcr->vbit; + dpaa_flush(eqcursor); + eqcr_inc(eqcr); + eqcr->available--; +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 0; +#endif } -static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k) +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) { - qm_out(PFDR_FP_LWIT, th & 0xffffff); - qm_out(PFDR_CFG, k); + qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA); } -static void qm_set_sfdr_threshold(struct qman *qm, u16 th) +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) { - qm_out(SFDR_CFG, th & 0x3ff); + struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + + eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); + diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; } -static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num) +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) { - u8 rslt = MCR_get_rslt(qm_in(MCR)); + struct qm_eqcr *eqcr = &portal->eqcr; + + eqcr->ithresh = ithresh; + qm_out(portal, QM_REG_EQCR_ITR, ithresh); +} - DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); - /* Make sure the command interface is 'idle' */ - if (!MCR_rslt_idle(rslt)) - panic("QMAN_MCR isn't idle"); +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; - /* Write the MCR command params then the verb */ - qm_out(MCP(0), pfdr_start); - /* TODO: remove this - it's a workaround for a model bug that is - * corrected in more recent versions. We use the workaround until - * everyone has upgraded. */ - qm_out(MCP(1), (pfdr_start + num - 16)); - lwsync(); - qm_out(MCR, MCR_INIT_PFDR); - /* Poll for the result */ - do { - rslt = MCR_get_rslt(qm_in(MCR)); - } while (!MCR_rslt_idle(rslt)); - if (MCR_rslt_ok(rslt)) - return 0; - if (MCR_rslt_eaccess(rslt)) - return -EACCES; - if (MCR_rslt_inval(rslt)) - return -EINVAL; - pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); - return -ENODEV; + return eqcr->available; } -/*****************/ -/* Config driver */ -/*****************/ +static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; -/* We support only one of these */ -static struct qman *qm; -#ifndef __rtems__ -static struct device_node *qm_node; -#endif /* __rtems__ */ + return QM_EQCR_SIZE - 1 - eqcr->available; +} -/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used - * during qman_init_ccsr(). 
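
The 'available' bookkeeping in qm_eqcr_init()/qm_eqcr_cce_update() above leans entirely on dpaa_cyc_diff() from dpaa_sys.h: the fill level is the cyclic distance from the cached CI to PI, and the EQCR always keeps one slot in reserve (available = size - 1 - fill). A standalone worked example:

#include <stdio.h>

/* dpaa_cyc_diff(): entries from 'first' (included) to 'last' (excluded) */
static unsigned int cyc_diff(unsigned int size, unsigned int first,
			     unsigned int last)
{
	return first <= last ? last - first : size + last - first;
}

int main(void)
{
	unsigned int size = 8, ci = 6, pi = 2;	/* producer wrapped past 7 */

	/* slots 6, 7, 0, 1 are in flight */
	printf("fill=%u available=%u\n", cyc_diff(size, ci, pi),
	       size - 1 - cyc_diff(size, ci, pi));	/* fill=4 available=3 */
	return 0;
}
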
*/ -#ifndef __rtems__ -static dma_addr_t fqd_a, pfdr_a; -static size_t fqd_sz, pfdr_sz; +/* --- DQRR API --- */ -static int qman_fqd(struct reserved_mem *rmem) +#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) +#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) + +static const struct qm_dqrr_entry *dqrr_carryclear( + const struct qm_dqrr_entry *p) { - fqd_a = rmem->base; - fqd_sz = rmem->size; + uintptr_t addr = (uintptr_t)p; - WARN_ON(!(fqd_a && fqd_sz)); + addr &= ~DQRR_CARRY; - return 0; + return (const struct qm_dqrr_entry *)addr; } -RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); -#else /* __rtems__ */ -static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304); -#define fqd_a ((uintptr_t)&fqd[0]) -#define fqd_sz sizeof(fqd) -static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432); -#define pfdr_a ((uintptr_t)&pfdr[0]) -#define pfdr_sz sizeof(pfdr) -#endif /* __rtems__ */ -size_t qman_fqd_size(void) +static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) { - return fqd_sz; + return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); } -#ifndef __rtems__ -static int qman_pfdr(struct reserved_mem *rmem) +static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) { - pfdr_a = rmem->base; - pfdr_sz = rmem->size; + return dqrr_carryclear(e + 1); +} - WARN_ON(!(pfdr_a && pfdr_sz)); +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) +{ + qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | + ((mf & (QM_DQRR_SIZE - 1)) << 20)); +} +static inline int qm_dqrr_init(struct qm_portal *portal, + const struct qm_portal_config *config, + enum qm_dqrr_dmode dmode, + enum qm_dqrr_pmode pmode, + enum qm_dqrr_cmode cmode, u8 max_fill) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + u32 cfg; + + /* Make sure the DQRR will be idle when we enable */ + qm_out(portal, QM_REG_DQRR_SDQCR, 0); + qm_out(portal, QM_REG_DQRR_VDQCR, 0); + qm_out(portal, QM_REG_DQRR_PDQCR, 0); + dqrr->ring = portal->addr.ce + QM_CL_DQRR; + dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->cursor = dqrr->ring + dqrr->ci; + dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); + dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ? + QM_DQRR_VERB_VBIT : 0; + dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + dqrr->dmode = dmode; + dqrr->pmode = pmode; + dqrr->cmode = cmode; +#endif + /* Invalidate every ring entry before beginning */ + for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) + dpaa_invalidate(qm_cl(dqrr->ring, cfg)); + cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) | + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ + ((dmode & 1) << 18) | /* DP */ + ((cmode & 3) << 16) | /* DCM */ + 0xa0 | /* RE+SE */ + (0 ? 0x40 : 0) | /* Ignore RP */ + (0 ? 0x10 : 0); /* Ignore SP */ + qm_out(portal, QM_REG_CFG, cfg); + qm_dqrr_set_maxfill(portal, max_fill); return 0; } -RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr); -/* Parse the <name> property to extract the memory location and size and - * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default - * size. Also flush this memory range from data cache so that QMAN originated - * transactions for this memory region could be marked non-coherent. 
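
The carryclear trick above works because each ring is naturally aligned to (ring size * entry size), so incrementing a cursor off the end of the ring carries into exactly one known address bit, which is then masked away. A standalone numeric example with the DQRR constants (the base address is made up but has the required alignment):

#include <stdint.h>
#include <stdio.h>

#define ENTRY	64u	/* sizeof(struct qm_dqrr_entry) */
#define RING	16u	/* QM_DQRR_SIZE */
#define CARRY	((uintptr_t)(RING * ENTRY))	/* 0x400 */

int main(void)
{
	uintptr_t ring = 0x10000000, cursor = ring + 15 * ENTRY;

	cursor += ENTRY;	/* step past the last entry: 0x10000400 */
	cursor &= ~CARRY;	/* carry bit cleared: back to 0x10000000 */
	printf("wrapped to index %u\n",
	       (unsigned int)((cursor >> 6) & (RING - 1)));	/* -> 0 */
	return 0;
}
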
- */ -static __init int parse_mem_property(struct device_node *node, - dma_addr_t *addr, size_t *sz, int zero) +static inline void qm_dqrr_finish(struct qm_portal *portal) { - int ret; +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_dqrr *dqrr = &portal->dqrr; + + if (dqrr->cmode != qm_dqrr_cdc && + dqrr->ci != dqrr_ptr2idx(dqrr->cursor)) + pr_crit("Ignoring completed DQRR entries\n"); +#endif +} + +static inline const struct qm_dqrr_entry *qm_dqrr_current( + struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + if (!dqrr->fill) + return NULL; + return dqrr->cursor; +} + +static inline u8 qm_dqrr_next(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->fill); + dqrr->cursor = dqrr_inc(dqrr->cursor); + return --dqrr->fill; +} - /* If using a "zero-pma", don't try to zero it, even if you asked */ - if (zero && of_find_property(node, "zero-pma", &ret)) { - pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); - zero = 0; +static inline void qm_dqrr_pvb_update(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); + + DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); +#ifndef CONFIG_FSL_PAMU + /* + * If PAMU is not available we need to invalidate the cache. + * When PAMU is available the cache is updated by stash + */ + dpaa_invalidate_touch_ro(res); +#endif + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". + */ + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); + if (!dqrr->pi) + dqrr->vbit ^= QM_DQRR_VERB_VBIT; + dqrr->fill++; } +} + +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, + const struct qm_dqrr_entry *dq, + int park) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + int idx = dqrr_ptr2idx(dq); + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPAA_ASSERT((dqrr->ring + idx) == dq); + DPAA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ + ((park ? 
1 : 0) << 6) | /* DQRR_DCAP::PK */ + idx); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ + (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) +{ + qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr); +} + +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) +{ + qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr); +} + +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_DQRR_ITR, ithresh); +} + +/* --- MR API --- */ + +#define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) +#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) + +static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~MR_CARRY; + + return (union qm_mr_entry *)addr; +} + +static inline int mr_ptr2idx(const union qm_mr_entry *e) +{ + return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); +} + +static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) +{ + return mr_carryclear(e + 1); +} + +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, + enum qm_mr_cmode cmode) +{ + struct qm_mr *mr = &portal->mr; + u32 cfg; + + mr->ring = portal->addr.ce + QM_CL_MR; + mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1); + mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1); + mr->cursor = mr->ring + mr->ci; + mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); + mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE) + ? QM_MR_VERB_VBIT : 0; + mr->ithresh = qm_in(portal, QM_REG_MR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + mr->pmode = pmode; + mr->cmode = cmode; +#endif + cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) | + ((cmode & 1) << 8); /* QCSP_CFG:MM */ + qm_out(portal, QM_REG_CFG, cfg); + return 0; +} + +static inline void qm_mr_finish(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (mr->ci != mr_ptr2idx(mr->cursor)) + pr_crit("Ignoring completed MR entries\n"); +} + +static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (!mr->fill) + return NULL; + return mr->cursor; +} - if (zero) { - /* map as cacheable, non-guarded */ - void __iomem *tmpp = ioremap_prot(*addr, *sz, 0); +static inline int qm_mr_next(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; - memset_io(tmpp, 0, *sz); - flush_dcache_range((unsigned long)tmpp, - (unsigned long)tmpp + *sz); - iounmap(tmpp); + DPAA_ASSERT(mr->fill); + mr->cursor = mr_inc(mr->cursor); + return --mr->fill; +} + +static inline void qm_mr_pvb_update(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); + + DPAA_ASSERT(mr->pmode == qm_mr_pvb); + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". 
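
Both pvb_update routines (DQRR above, MR here) recognise a new entry by its verb's valid bit matching the phase the software expects, and the expected phase flips each time the producer index wraps. A simplified standalone model of that protocol (no volatile or barriers, unlike the real code, which is exactly why it reads the verb via __raw_readb()):

#include <stdio.h>

#define RING	8
#define VBIT	0x80

int main(void)
{
	unsigned char ring[RING] = { 0 };
	unsigned int pi = 0;
	unsigned char vbit = VBIT;	/* expected phase for lap 0 */

	/* producer publishes three entries during lap 0 */
	ring[0] = VBIT | 1;
	ring[1] = VBIT | 1;
	ring[2] = VBIT | 1;

	/* consumer: an entry is new iff its valid bit matches 'vbit' */
	while ((ring[pi] & VBIT) == vbit) {
		printf("entry %u is new\n", pi);
		pi = (pi + 1) & (RING - 1);
		if (!pi)
			vbit ^= VBIT;	/* next lap expects the other phase */
	}
	return 0;
}
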
+ */ + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); + if (!mr->pi) + mr->vbit ^= QM_MR_VERB_VBIT; + mr->fill++; + res = mr_inc(res); } + dpaa_invalidate_touch_ro(res); +} + +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = mr_ptr2idx(mr->cursor); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_MR_ITR, ithresh); +} + +/* --- Management command API --- */ +static inline int qm_mc_init(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + mc->cr = portal->addr.ce + QM_CL_CR; + mc->rr = portal->addr.ce + QM_CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) + ? 0 : 1; + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif return 0; } -#else /* __rtems__ */ -#include <bsp/qoriq.h> -#endif /* __rtems__ */ -/* TODO: - * - there is obviously no handling of errors, - * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for - * both memory resources to zero. - */ -static int __init fsl_qman_init(struct device_node *node) +static inline void qm_mc_finish(struct qm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); + if (mc->state != qman_mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_user; +#endif + dpaa_zero(mc->cr); + return mc->cr; +} + +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_user); + dma_wmb(); + mc->cr->_ncw_verb = myverb | mc->vbit; + dpaa_flush(mc->cr); + dpaa_invalidate_touch_ro(rr); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_hw; +#endif +} + +static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_hw); + /* + * The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... 
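+ * A zero verb therefore doubles as a "response not ready yet"
+ * indication, which is why callers poll this function (see
+ * qm_mc_result_timeout() below).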
+ */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif + return rr; +} + +static inline int qm_mc_result_timeout(struct qm_portal *portal, + union qm_mc_result **mcr) +{ + int timeout = QM_MCR_TIMEOUT; + + do { + *mcr = qm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); + + return timeout; +} + +static inline void fq_set(struct qman_fq *fq, u32 mask) +{ + set_bits(mask, &fq->flags); +} + +static inline void fq_clear(struct qman_fq *fq, u32 mask) { + clear_bits(mask, &fq->flags); +} + +static inline int fq_isset(struct qman_fq *fq, u32 mask) +{ + return fq->flags & mask; +} + +static inline int fq_isclear(struct qman_fq *fq, u32 mask) +{ + return !(fq->flags & mask); +} + +struct qman_portal { + struct qm_portal p; + /* PORTAL_BITS_*** - dynamic, strictly internal */ + unsigned long bits; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + u32 use_eqcr_ci_stashing; + /* only 1 volatile dequeue at a time */ + struct qman_fq *vdqcr_owned; + u32 sdqcr; + /* probing time config params for cpu-affine portals */ + const struct qm_portal_config *config; + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ + struct qman_cgrs *cgrs; + /* linked-list of CSCN handlers. */ + struct list_head cgr_cbs; + /* list lock */ + spinlock_t cgr_lock; + struct work_struct congestion_work; + struct work_struct mr_work; + char irqname[MAX_IRQNAME]; +}; + #ifndef __rtems__ - struct resource res; +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static u16 affine_channels[NR_CPUS]; #endif /* __rtems__ */ - u32 __iomem *regs; +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); +struct qman_portal *affine_portals[NR_CPUS]; + +static inline struct qman_portal *get_affine_portal(void) +{ + return &get_cpu_var(qman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(qman_affine_portal); +} + +static struct workqueue_struct *qm_portal_wq; + +int qman_wq_alloc(void) +{ + qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1); + if (!qm_portal_wq) + return -ENOMEM; + return 0; +} + +/* + * This is what everything can wait on, even if it migrates to a different cpu + * to the one whose affine portal it is waiting on. + */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static struct qman_fq **fq_table; +static u32 num_fqids; + +int qman_alloc_fq_table(u32 _num_fqids) +{ + num_fqids = _num_fqids; + + fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *)); + if (!fq_table) + return -ENOMEM; + + pr_debug("Allocated fq lookup table at %p, entry count %u\n", + fq_table, num_fqids * 2); + return 0; +} + +static struct qman_fq *idx_to_fq(u32 idx) +{ + struct qman_fq *fq; + +#ifdef CONFIG_FSL_DPAA_CHECKING + if (WARN_ON(idx >= num_fqids * 2)) + return NULL; +#endif + fq = fq_table[idx]; + DPAA_ASSERT(!fq || idx == fq->idx); + + return fq; +} + +/* + * Only returns full-service fq objects, not enqueue-only + * references (QMAN_FQ_FLAG_NO_MODIFY). 
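+ * Each FQID owns two consecutive slots in fq_table: the full-service
+ * object at the even index (fqid * 2) and any enqueue-only reference
+ * at the odd one; see qman_create_fq(), which assigns fq->idx that way.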
+ */ +static struct qman_fq *fqid_to_fq(u32 fqid) +{ + return idx_to_fq(fqid * 2); +} + +static struct qman_fq *tag_to_fq(u32 tag) +{ +#if BITS_PER_LONG == 64 + return idx_to_fq(tag); +#else + return (struct qman_fq *)tag; +#endif +} + +static u32 fq_to_tag(struct qman_fq *fq) +{ +#if BITS_PER_LONG == 64 + return fq->idx; +#else + return (u32)fq; +#endif +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is); +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit); +static void qm_congestion_task(struct work_struct *work); +static void qm_mr_process_task(struct work_struct *work); + +static irqreturn_t portal_isr(int irq, void *ptr) +{ + struct qman_portal *p = ptr; + + u32 clear = QM_DQAVAIL_MASK | p->irq_sources; + u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + /* DQRR-handling if it's interrupt-driven */ + if (is & QM_PIRQ_DQRI) + __poll_portal_fast(p, QMAN_POLL_LIMIT); + /* Handling of anything else that's interrupt-driven */ + clear |= __poll_portal_slow(p, is); + qm_out(&p->p, QM_REG_ISR, clear); + return IRQ_HANDLED; +} + +static int drain_mr_fqrni(struct qm_portal *p) +{ + const union qm_mr_entry *msg; +loop: + msg = qm_mr_current(p); + if (!msg) { + /* + * if MR was full and h/w had other FQRNI entries to produce, we + * need to allow it time to produce those entries once the + * existing entries are consumed. A worst-case situation + * (fully-loaded system) means h/w sequencers may have to do 3-4 + * other things before servicing the portal's MR pump, each of + * which (if slow) may take ~50 qman cycles (which is ~200 + * processor cycles). So rounding up and then multiplying this + * worst-case estimate by a factor of 10, just to be + * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume + * one entry at a time, so h/w has an opportunity to produce new + * entries well before the ring has been fully consumed, so + * we're being *really* paranoid here. + */ + u64 now, then = jiffies; + + do { + now = jiffies; + } while ((then + 10000) > now); + msg = qm_mr_current(p); + if (!msg) + return 0; + } + if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { + /* We aren't draining anything but FQRNIs */ + pr_err("Found verb 0x%x in MR\n", msg->verb); + return -1; + } + qm_mr_next(p); + qm_mr_cci_consume(p, 1); + goto loop; +} + +static int qman_create_portal(struct qman_portal *portal, + const struct qm_portal_config *c, + const struct qman_cgrs *cgrs) +{ + struct qm_portal *p; + int ret; + u32 isdr; + + p = &portal->p; + +#ifdef CONFIG_FSL_PAMU + /* PAMU is required for stashing */ + portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0); +#else + portal->use_eqcr_ci_stashing = 0; +#endif + /* + * prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference + */ + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; + /* + * If CI-stashing is used, the current defaults use a threshold of 3, + * and stash with high-than-DQRR priority. + */ + if (qm_eqcr_init(p, qm_eqcr_pvb, + portal->use_eqcr_ci_stashing ? 
3 : 0, 1)) { + dev_err(c->dev, "EQCR initialisation failed\n"); + goto fail_eqcr; + } + if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb, + qm_dqrr_cdc, DQRR_MAXFILL)) { + dev_err(c->dev, "DQRR initialisation failed\n"); + goto fail_dqrr; + } + if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) { + dev_err(c->dev, "MR initialisation failed\n"); + goto fail_mr; + } + if (qm_mc_init(p)) { + dev_err(c->dev, "MC initialisation failed\n"); + goto fail_mc; + } + /* static interrupt-gating controls */ + qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH); + qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH); + qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD); + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); + if (!portal->cgrs) + goto fail_cgrs; + /* initial snapshot is no-depletion */ + qman_cgrs_init(&portal->cgrs[1]); + if (cgrs) + portal->cgrs[0] = *cgrs; + else + /* if the given mask is NULL, assume all CGRs can be seen */ + qman_cgrs_fill(&portal->cgrs[0]); + INIT_LIST_HEAD(&portal->cgr_cbs); + spin_lock_init(&portal->cgr_lock); + INIT_WORK(&portal->congestion_work, qm_congestion_task); + INIT_WORK(&portal->mr_work, qm_mr_process_task); + portal->bits = 0; + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; + isdr = 0xffffffff; + qm_out(p, QM_REG_ISDR, isdr); + portal->irq_sources = 0; + qm_out(p, QM_REG_IER, 0); + qm_out(p, QM_REG_ISR, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { + dev_err(c->dev, "request_irq() failed\n"); + goto fail_irq; + } #ifndef __rtems__ - const char *s; - int ret, standby = 0; + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { + dev_err(c->dev, "irq_set_affinity() failed\n"); + goto fail_affinity; + } #endif /* __rtems__ */ - u16 id; - u8 major, minor; -#ifndef __rtems__ - ret = of_address_to_resource(node, 0, &res); + /* Need EQCR to be empty before continuing */ + isdr &= ~QM_PIRQ_EQCI; + qm_out(p, QM_REG_ISDR, isdr); + ret = qm_eqcr_get_fill(p); if (ret) { - pr_err("Can't get %s property 'reg'\n", node->full_name); - return ret; + dev_err(c->dev, "EQCR unclean\n"); + goto fail_eqcr_empty; } - s = of_get_property(node, "fsl,hv-claimable", &ret); - if (s && !strcmp(s, "standby")) - standby = 1; - if (!standby) { - ret = parse_mem_property(node, &fqd_a, &fqd_sz, 1); - BUG_ON(ret); - ret = parse_mem_property(node, &pfdr_a, &pfdr_sz, 0); - BUG_ON(ret); + isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); + qm_out(p, QM_REG_ISDR, isdr); + if (qm_dqrr_current(p)) { + dev_err(c->dev, "DQRR unclean\n"); + qm_dqrr_cdc_consume_n(p, 0xffff); } -#else /* __rtems__ */ - memset((void *)fqd_a, 0, fqd_sz); + if (qm_mr_current(p) && drain_mr_fqrni(p)) { + /* special handling, drain just in case it's a few FQRNIs */ + const union qm_mr_entry *e = qm_mr_current(p); + + dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n", + e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd)); + goto fail_dqrr_mr_empty; + } + /* Success */ + portal->config = c; + qm_out(p, QM_REG_ISDR, 0); + qm_out(p, QM_REG_IIR, 0); + /* Write a sane SDQCR */ + qm_dqrr_sdqcr_set(p, portal->sdqcr); + return 0; + +fail_dqrr_mr_empty: +fail_eqcr_empty: +#ifndef __rtems__ +fail_affinity: #endif /* __rtems__ */ - /* Global configuration */ + free_irq(c->irq, portal); +fail_irq: + kfree(portal->cgrs); +fail_cgrs: + qm_mc_finish(p); +fail_mc: + qm_mr_finish(p); +fail_mr: 
+ qm_dqrr_finish(p); +fail_dqrr: + qm_eqcr_finish(p); +fail_eqcr: + return -EIO; +} + +struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c, + const struct qman_cgrs *cgrs) +{ + struct qman_portal *portal; + int err; + + portal = &per_cpu(qman_affine_portal, c->cpu); + err = qman_create_portal(portal, c, cgrs); + if (err) + return NULL; + #ifndef __rtems__ - regs = ioremap(res.start, res.end - res.start + 1); -#else /* __rtems__ */ - regs = (u32 __iomem *)&qoriq.qman; + spin_lock(&affine_mask_lock); + cpumask_set_cpu(c->cpu, &affine_mask); + affine_channels[c->cpu] = c->channel; #endif /* __rtems__ */ - qm = qm_create(regs); + affine_portals[c->cpu] = portal; #ifndef __rtems__ - qm_node = node; + spin_unlock(&affine_mask_lock); #endif /* __rtems__ */ - qm_get_version(qm, &id, &major, &minor); - pr_info("Ver: %04x,%02x,%02x\n", id, major, minor); - if (!qman_ip_rev) { - if ((major == 1) && (minor == 0)) { - pr_err("Rev1.0 on P4080 rev1 is not supported!\n"); + + return portal; +} + +static void qman_destroy_portal(struct qman_portal *qm) +{ + const struct qm_portal_config *pcfg; + + /* Stop dequeues on the portal */ + qm_dqrr_sdqcr_set(&qm->p, 0); + + /* + * NB we do this to "quiesce" EQCR. If we add enqueue-completions or + * something related to QM_PIRQ_EQCI, this may need fixing. + * Also, due to the prefetching model used for CI updates in the enqueue + * path, this update will only invalidate the CI cacheline *after* + * working on it, so we need to call this twice to ensure a full update + * irrespective of where the enqueue processing was at when the teardown + * began. + */ + qm_eqcr_cce_update(&qm->p); + qm_eqcr_cce_update(&qm->p); + pcfg = qm->config; + + free_irq(pcfg->irq, qm); + + kfree(qm->cgrs); + qm_mc_finish(&qm->p); + qm_mr_finish(&qm->p); + qm_dqrr_finish(&qm->p); + qm_eqcr_finish(&qm->p); + + qm->config = NULL; +} + +const struct qm_portal_config *qman_destroy_affine_portal(void) +{ + struct qman_portal *qm = get_affine_portal(); + const struct qm_portal_config *pcfg; + int cpu; + + pcfg = qm->config; + cpu = pcfg->cpu; + + qman_destroy_portal(qm); + #ifndef __rtems__ - iounmap(regs); + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(cpu, &affine_mask); + spin_unlock(&affine_mask_lock); +#else /* __rtems__ */ + (void)cpu; #endif /* __rtems__ */ - return -ENODEV; - } else if ((major == 1) && (minor == 1)) - qman_ip_rev = QMAN_REV11; - else if ((major == 1) && (minor == 2)) - qman_ip_rev = QMAN_REV12; - else if ((major == 2) && (minor == 0)) - qman_ip_rev = QMAN_REV20; - else if ((major == 3) && (minor == 0)) - qman_ip_rev = QMAN_REV30; - else if ((major == 3) && (minor == 1)) - qman_ip_rev = QMAN_REV31; - else { - pr_warn("Unknown version, default to rev1.1\n"); - qman_ip_rev = QMAN_REV11; - } + put_affine_portal(); + return pcfg; +} + +/* Inline helper to reduce nesting in __poll_portal_slow() */ +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg, u8 verb) +{ + switch (verb) { + case QM_MR_VERB_FQRL: + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); + fq_clear(fq, QMAN_FQ_STATE_ORL); + break; + case QM_MR_VERB_FQRN: + DPAA_ASSERT(fq->state == qman_fq_state_parked || + fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); + fq_clear(fq, QMAN_FQ_STATE_CHANGING); + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + fq->state = qman_fq_state_retired; + break; + 
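+ /*
+ * FQPN: park notification. No flags to adjust; just record the
+ * scheduled -> parked transition.
+ */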
case QM_MR_VERB_FQPN: + DPAA_ASSERT(fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); + fq->state = qman_fq_state_parked; } +} -#ifndef __rtems__ - if (standby) { - pr_info(" -> in standby mode\n"); - return 0; +static void qm_congestion_task(struct work_struct *work) +{ + struct qman_portal *p = container_of(work, struct qman_portal, + congestion_work); + struct qman_cgrs rr, c; + union qm_mc_result *mcr; + struct qman_cgr *cgr; + + spin_lock(&p->cgr_lock); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + spin_unlock(&p->cgr_lock); + dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); + return; } -#endif /* __rtems__ */ - return 0; + /* mask out the ones I'm not interested in */ + qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state, + &p->cgrs[0]); + /* check previous snapshot for delta, enter/exit congestion */ + qman_cgrs_xor(&c, &rr, &p->cgrs[1]); + /* update snapshot */ + qman_cgrs_cp(&p->cgrs[1], &rr); + /* Invoke callback */ + list_for_each_entry(cgr, &p->cgr_cbs, node) + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); + spin_unlock(&p->cgr_lock); } -int qman_have_ccsr(void) +static void qm_mr_process_task(struct work_struct *work) { - return qm ? 1 : 0; + struct qman_portal *p = container_of(work, struct qman_portal, + mr_work); + const union qm_mr_entry *msg; + struct qman_fq *fq; + u8 verb, num = 0; + + preempt_disable(); + + while (1) { + qm_mr_pvb_update(&p->p); + msg = qm_mr_current(&p->p); + if (!msg) + break; + + verb = msg->verb & QM_MR_VERB_TYPE_MASK; + /* The message is a software ERN iff the 0x20 bit is clear */ + if (verb & 0x20) { + switch (verb) { + case QM_MR_VERB_FQRNI: + /* nada, we drop FQRNIs on the floor */ + break; + case QM_MR_VERB_FQRN: + case QM_MR_VERB_FQRL: + /* Lookup in the retirement table */ + fq = fqid_to_fq(qm_fqid_get(&msg->fq)); + if (WARN_ON(!fq)) + break; + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case QM_MR_VERB_FQPN: + /* Parked */ + fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case QM_MR_VERB_DC_ERN: + /* DCP ERN */ + pr_crit_once("Leaking DCP ERNs!\n"); + break; + default: + pr_crit("Invalid MR verb 0x%02x\n", verb); + } + } else { + /* Its a software ERN */ + fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); + fq->cb.ern(p, fq, msg); + } + num++; + qm_mr_next(&p->p); + } + + qm_mr_cci_consume(&p->p, num); + preempt_enable(); } -#ifndef __rtems__ -__init void qman_init_early(void) +static u32 __poll_portal_slow(struct qman_portal *p, u32 is) { - struct device_node *dn; - int ret; + if (is & QM_PIRQ_CSCI) { + queue_work_on(smp_processor_id(), qm_portal_wq, + &p->congestion_work); + } - for_each_compatible_node(dn, NULL, "fsl,qman") { - if (qm) - pr_err("%s: only one 'fsl,qman' allowed\n", - dn->full_name); - else { - if (!of_device_is_available(dn)) - continue; + if (is & QM_PIRQ_EQRI) { + qm_eqcr_cce_update(&p->p); + qm_eqcr_set_ithresh(&p->p, 0); + wake_up(&affine_queue); + } - ret = fsl_qman_init(dn); - BUG_ON(ret); - } + if (is & QM_PIRQ_MRI) { + queue_work_on(smp_processor_id(), qm_portal_wq, + &p->mr_work); } + + return is; } -#else /* __rtems__ */ -#include <bsp/fdt.h> -static void -qman_sysinit(void) +/* + * remove some slowish-path stuff from the "fast path" and make sure it isn't + * inlined. 
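+ * VDQCR completion is rare relative to ordinary dequeues, so keeping
+ * this out of line keeps the hot loop in __poll_portal_fast() small.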
+ */ +static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) { - const char *fdt = bsp_fdt_get(); - struct device_node dn; - const char *name; - int node; - int ret; + p->vdqcr_owned = NULL; + fq_clear(fq, QMAN_FQ_STATE_VDQCR); + wake_up(&affine_queue); +} + +/* + * The only states that would conflict with other things if they ran at the + * same time on the same cpu are: + * + * (i) setting/clearing vdqcr_owned, and + * (ii) clearing the NE (Not Empty) flag. + * + * Both are safe. Because; + * + * (i) this clearing can only occur after qman_volatile_dequeue() has set the + * vdqcr_owned field (which it does before setting VDQCR), and + * qman_volatile_dequeue() blocks interrupts and preemption while this is + * done so that we can't interfere. + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as + * with (i) that API prevents us from interfering until it's safe. + * + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett + * advantage comes from this function not having to "lock" anything at all. + * + * Note also that the callbacks are invoked at points which are safe against the + * above potential conflicts, but that this function itself is not re-entrant + * (this is because the function tracks one end of each FIFO in the portal and + * we do *not* want to lock that). So the consequence is that it is safe for + * user callbacks to call into any QMan API. + */ +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit) +{ + const struct qm_dqrr_entry *dq; + struct qman_fq *fq; + enum qman_cb_dqrr_result res; + unsigned int limit = 0; - memset(&dn, 0, sizeof(dn)); + do { + qm_dqrr_pvb_update(&p->p); + dq = qm_dqrr_current(&p->p); + if (!dq) + break; + + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { + /* + * VDQCR: don't trust context_b as the FQ may have + * been configured for h/w consumption and we're + * draining it post-retirement. + */ + fq = p->vdqcr_owned; + /* + * We only set QMAN_FQ_STATE_NE when retiring, so we + * only need to check for clearing it when doing + * volatile dequeues. It's one less thing to check + * in the critical path (SDQCR). + */ + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) + fq_clear(fq, QMAN_FQ_STATE_NE); + /* + * This is duplicated from the SDQCR code, but we + * have stuff to do before *and* after this callback, + * and we don't want multiple if()s in the critical + * path (SDQCR). + */ + res = fq->cb.dqrr(p, fq, dq); + if (res == qman_cb_dqrr_stop) + break; + /* Check for VDQCR completion */ + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) + clear_vdqcr(p, fq); + } else { + /* SDQCR: context_b points to the FQ */ + fq = tag_to_fq(be32_to_cpu(dq->context_b)); + /* Now let the callback do its stuff */ + res = fq->cb.dqrr(p, fq, dq); + /* + * The callback can request that we exit without + * consuming this entry nor advancing; + */ + if (res == qman_cb_dqrr_stop) + break; + } + /* Interpret 'dq' from a driver perspective. */ + /* + * Parking isn't possible unless HELDACTIVE was set. NB, + * FORCEELIGIBLE implies HELDACTIVE, so we only need to + * check for HELDACTIVE to cover both. 
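+ * Put differently: a dqrr callback may only return qman_cb_dqrr_park
+ * for an entry dequeued from a held-active FQ.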
+ */ + DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || + (res != qman_cb_dqrr_park)); + /* just means "skip it, I'll consume it myself later on" */ + if (res != qman_cb_dqrr_defer) + qm_dqrr_cdc_consume_1ptr(&p->p, dq, + res == qman_cb_dqrr_park); + /* Move forward */ + qm_dqrr_next(&p->p); + /* + * Entry processed and consumed, increment our counter. The + * callback can request that we exit after consuming the + * entry, and we also exit if we reach our processing limit, + * so loop back only if neither of these conditions is met. + */ + } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); + + return limit; +} - name = "fsl,qman"; - node = fdt_node_offset_by_compatible(fdt, 0, name); - if (node < 0) - panic("qman: no qman in FDT"); +void qman_p_irqsource_add(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; - dn.full_name = name; - dn.offset = node; + local_irq_save(irqflags); + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_add); - ret = fsl_qman_init(&dn); - if (ret != 0) - panic("qman: init 1 failed"); +void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; + u32 ier; + + /* + * Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. + */ + local_irq_save(irqflags); + bits &= QM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + ier = qm_in(&p->p, QM_REG_IER); + /* + * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
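+ * The value stored to ISR is computed from the IER read-back, so the
+ * store cannot be issued until that read has completed.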
+ */ + qm_out(&p->p, QM_REG_ISR, ~ier); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_remove); - ret = qman_init_ccsr(&dn); - if (ret != 0) - panic("qman: init CCSR failed"); +#ifndef __rtems__ +const cpumask_t *qman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(qman_affine_cpus); - ret = qman_init(&dn); - if (ret != 0) - panic("qman: init 2 failed"); +u16 qman_affine_channel(int cpu) +{ + if (cpu < 0) { + struct qman_portal *portal = get_affine_portal(); - ret = qman_resource_init(); - if (ret != 0) - panic("qman: resource init failed"); + cpu = portal->config->cpu; + put_affine_portal(); + } + WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); + return affine_channels[cpu]; } -SYSINIT(qman_sysinit, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL); +EXPORT_SYMBOL(qman_affine_channel); #endif /* __rtems__ */ -static void log_edata_bits(u32 bit_count) +struct qman_portal *qman_get_affine_portal(int cpu) +{ + return affine_portals[cpu]; +} +EXPORT_SYMBOL(qman_get_affine_portal); + +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) { - u32 i, j, mask = 0xffffffff; + return __poll_portal_fast(p, limit); +} +EXPORT_SYMBOL(qman_p_poll_dqrr); - pr_warn("ErrInt, EDATA:\n"); - i = bit_count/32; - if (bit_count%32) { - i++; - mask = ~(mask << bit_count%32); +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + pools &= p->config->pools; + p->sdqcr |= pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_static_dequeue_add); + +/* Frame queue API */ + +static const char *mcr_result_str(u8 result) +{ + switch (result) { + case QM_MCR_RESULT_NULL: + return "QM_MCR_RESULT_NULL"; + case QM_MCR_RESULT_OK: + return "QM_MCR_RESULT_OK"; + case QM_MCR_RESULT_ERR_FQID: + return "QM_MCR_RESULT_ERR_FQID"; + case QM_MCR_RESULT_ERR_FQSTATE: + return "QM_MCR_RESULT_ERR_FQSTATE"; + case QM_MCR_RESULT_ERR_NOTEMPTY: + return "QM_MCR_RESULT_ERR_NOTEMPTY"; + case QM_MCR_RESULT_PENDING: + return "QM_MCR_RESULT_PENDING"; + case QM_MCR_RESULT_ERR_BADCOMMAND: + return "QM_MCR_RESULT_ERR_BADCOMMAND"; } - j = 16-i; - pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask); - j++; - for (; j < 16; j++) - pr_warn(" 0x%08x\n", qm_in(EDATA(j))); + return "<unknown MCR result>"; } -static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) { - union qman_ecir ecir_val; - union qman_eadr eadr_val; + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { + int ret = qman_alloc_fqid(&fqid); + + if (ret) + return ret; + } + fq->fqid = fqid; + fq->flags = flags; + fq->state = qman_fq_state_oos; + fq->cgr_groupid = 0; + + /* A context_b of 0 is allegedly special, so don't use that fqid */ + if (fqid == 0 || fqid >= num_fqids) { + WARN(1, "bad fqid %d\n", fqid); + return -EINVAL; + } - ecir_val.ecir_raw = qm_in(ECIR); - /* Is portal info valid */ - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { - union qman_ecir2 ecir2_val; + fq->idx = fqid * 2; + if (flags & QMAN_FQ_FLAG_NO_MODIFY) + fq->idx++; - ecir2_val.ecir2_raw = qm_in(ECIR2); - if (ecsr_val & PORTAL_ECSR_ERR) { - pr_warn("ErrInt: %s id %d\n", - ecir2_val.info.portal_type ? 
"DCP" : "SWP", - ecir2_val.info.portal_num); - } - if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) - pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid); - - if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { - eadr_val.eadr_raw = qm_in(EADR); - pr_warn("ErrInt: EADR Memory: %s, 0x%x\n", - error_mdata[eadr_val.info_rev3.memid].txt, - error_mdata[eadr_val.info_rev3.memid].addr_mask - & eadr_val.info_rev3.eadr); - log_edata_bits( - error_mdata[eadr_val.info_rev3.memid].bits); - } - } else { - if (ecsr_val & PORTAL_ECSR_ERR) { - pr_warn("ErrInt: %s id %d\n", - ecir_val.info.portal_type ? "DCP" : "SWP", - ecir_val.info.portal_num); - } - if (ecsr_val & FQID_ECSR_ERR) - pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid); - - if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { - eadr_val.eadr_raw = qm_in(EADR); - pr_warn("ErrInt: EADR Memory: %s, 0x%x\n", - error_mdata[eadr_val.info.memid].txt, - error_mdata[eadr_val.info.memid].addr_mask - & eadr_val.info.eadr); - log_edata_bits(error_mdata[eadr_val.info.memid].bits); - } + WARN_ON(fq_table[fq->idx]); + fq_table[fq->idx] = fq; + + return 0; +} +EXPORT_SYMBOL(qman_create_fq); + +void qman_destroy_fq(struct qman_fq *fq) +{ + /* + * We don't need to lock the FQ as it is a pre-condition that the FQ be + * quiesced. Instead, run some checks. + */ + switch (fq->state) { + case qman_fq_state_parked: + case qman_fq_state_oos: + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) + qman_release_fqid(fq->fqid); + + DPAA_ASSERT(fq_table[fq->idx]); + fq_table[fq->idx] = NULL; + return; + default: + break; } + DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); } +EXPORT_SYMBOL(qman_destroy_fq); -/* QMan interrupt handler */ -static irqreturn_t qman_isr(int irq, void *ptr) +u32 qman_fq_fqid(struct qman_fq *fq) { - u32 isr_val, ier_val, ecsr_val, isr_mask, i; + return fq->fqid; +} +EXPORT_SYMBOL(qman_fq_fqid); - ier_val = qm_err_isr_enable_read(qm); - isr_val = qm_err_isr_status_read(qm); - ecsr_val = qm_in(ECSR); - isr_mask = isr_val & ier_val; +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + u8 res, myverb; + int ret = 0; - if (!isr_mask) - return IRQ_NONE; - for (i = 0; i < QMAN_HWE_COUNT; i++) { - if (qman_hwerr_txts[i].mask & isr_mask) { - pr_warn("ErrInt: %s\n", qman_hwerr_txts[i].txt); - if (qman_hwerr_txts[i].mask & ecsr_val) { - log_additional_error_info(isr_mask, ecsr_val); - /* Re-arm error capture registers */ - qm_out(ECSR, ecsr_val); - } - if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) { - pr_devel("Un-enabling error 0x%x\n", - qman_hwerr_txts[i].mask); - ier_val &= ~qman_hwerr_txts[i].mask; - qm_err_isr_enable_write(qm, ier_val); + myverb = (flags & QMAN_INITFQ_FLAG_SCHED) + ? 
QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; + + if (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { + /* And can't be set at the same time as TDTHRESH */ + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) + return -EINVAL; + } + /* Issue an INITFQ_[PARKED|SCHED] management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked)) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initfq = *opts; + qm_fqid_set(&mcc->fq, fq->fqid); + mcc->initfq.count = 0; + /* + * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a + * demux pointer. Otherwise, the caller-provided value is allowed to + * stand, don't overwrite it. + */ + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { + dma_addr_t phys_fq; + + mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB); + mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); + /* + * and the physical address - NB, if the user wasn't trying to + * set CONTEXTA, clear the stashing settings. + */ + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_CONTEXTA); + memset(&mcc->initfq.fqd.context_a, 0, + sizeof(mcc->initfq.fqd.context_a)); + } else { +#ifndef __rtems__ + struct qman_portal *p = qman_dma_portal; + + phys_fq = dma_map_single(p->config->dev, fq, + sizeof(*fq), DMA_TO_DEVICE); + if (dma_mapping_error(p->config->dev, phys_fq)) { + dev_err(p->config->dev, "dma_mapping failed\n"); + ret = -EIO; + goto out; } +#else /* __rtems__ */ + phys_fq = (dma_addr_t)fq; +#endif /* __rtems__ */ + + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); } } - qm_err_isr_status_clear(qm, isr_val); - return IRQ_HANDLED; + if (flags & QMAN_INITFQ_FLAG_LOCAL) { + int wq = 0; + + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_DESTWQ); + wq = 4; + } + qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); + } + qm_mc_commit(&p->p, myverb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "MCR timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + if (opts) { + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) { + if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE) + fq_set(fq, QMAN_FQ_STATE_CGR_EN); + else + fq_clear(fq, QMAN_FQ_STATE_CGR_EN); + } + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID) + fq->cgr_groupid = opts->fqd.cgid; + } + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
+ qman_fq_state_sched : qman_fq_state_parked; + +out: + put_affine_portal(); + return ret; } +EXPORT_SYMBOL(qman_init_fq); -static int __bind_irq(struct device_node *node) +int qman_schedule_fq(struct qman_fq *fq) { - int ret, err_irq; + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; - err_irq = of_irq_to_resource(node, 0, NULL); - if (err_irq == NO_IRQ) { - pr_info("Can't get %s property 'interrupts'\n", - node->full_name); - return -ENODEV; + if (fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + /* Issue a ALTERFQ_SCHED management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state != qman_fq_state_parked) { + ret = -EBUSY; + goto out; } - ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", node); - if (ret) { - pr_err("request_irq() failed %d for '%s'\n", - ret, node->full_name); - return -ENODEV; + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fq->fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "ALTER_SCHED timeout\n"); + ret = -ETIMEDOUT; + goto out; } - /* Write-to-clear any stale bits, (eg. starvation being asserted prior - * to resource allocation during driver init). */ - qm_err_isr_status_clear(qm, 0xffffffff); - /* Enable Error Interrupts */ - qm_err_isr_enable_write(qm, 0xffffffff); - return 0; + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_sched; +out: + put_affine_portal(); + return ret; } +EXPORT_SYMBOL(qman_schedule_fq); -int qman_init_ccsr(struct device_node *node) +int qman_retire_fq(struct qman_fq *fq, u32 *flags) { + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; int ret; + u8 res; -#ifndef __rtems__ - if (!qman_have_ccsr()) - return 0; - if (node != qm_node) + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_sched) return -EINVAL; -#endif /* __rtems__ */ - /* FQD memory */ - qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz); - /* PFDR memory */ - qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz); - qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8); - /* thresholds */ - qm_set_pfdr_threshold(qm, 512, 64); - qm_set_sfdr_threshold(qm, 128); - /* clear stale PEBI bit from interrupt status register */ - qm_err_isr_status_clear(qm, QM_EIRQ_PEBI); - /* corenet initiator settings */ - qm_set_corenet_initiator(qm); - /* HID settings */ - qm_set_hid(qm); - /* Set scheduling weights to defaults */ - for (ret = qm_wq_first; ret <= qm_wq_last; ret++) - qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0); - /* We are not prepared to accept ERNs for hardware enqueues */ - qm_set_dc(qm, qm_dc_portal_fman0, 1, 0); - qm_set_dc(qm, qm_dc_portal_fman1, 1, 0); - /* Initialise Error Interrupt Handler */ - ret = __bind_irq(node); - if (ret) - return ret; - return 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state == qman_fq_state_retired || + fq->state == qman_fq_state_oos) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fq->fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_crit(p->config->dev, 
"ALTER_RETIRE timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; + /* + * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, + * and defer the flags until FQRNI or FQRN (respectively) show up. But + * "Friendly" is to process OK immediately, and not set CHANGING. We do + * friendly, otherwise the caller doesn't necessarily have a fully + * "retired" FQ on return even if the retirement was immediate. However + * this does mean some code duplication between here and + * fq_state_change(). + */ + if (res == QM_MCR_RESULT_OK) { + ret = 0; + /* Process 'fq' right away, we'll ignore FQRNI */ + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + if (flags) + *flags = fq->flags; + fq->state = qman_fq_state_retired; + if (fq->cb.fqs) { + /* + * Another issue with supporting "immediate" retirement + * is that we're forced to drop FQRNIs, because by the + * time they're seen it may already be "too late" (the + * fq may have been OOS'd and free()'d already). But if + * the upper layer wants a callback whether it's + * immediate or not, we have to fake a "MR" entry to + * look like an FQRNI... + */ + union qm_mr_entry msg; + + msg.verb = QM_MR_VERB_FQRNI; + msg.fq.fqs = mcr->alterfq.fqs; + qm_fqid_set(&msg.fq, fq->fqid); + msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); + fq->cb.fqs(p, fq, &msg); + } + } else if (res == QM_MCR_RESULT_PENDING) { + ret = 1; + fq_set(fq, QMAN_FQ_STATE_CHANGING); + } else { + ret = -EIO; + } +out: + put_affine_portal(); + return ret; } +EXPORT_SYMBOL(qman_retire_fq); -#define LIO_CFG_LIODN_MASK 0x0fff0000 -void qman_liodn_fixup(u16 channel) +int qman_oos_fq(struct qman_fq *fq) { - static int done; - static u32 liodn_offset; - u32 before, after; - int idx = channel - QM_CHANNEL_SWPORTAL0; + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; - if (!qman_have_ccsr()) - return; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - before = qm_in(REV3_QCSP_LIO_CFG(idx)); - else - before = qm_in(QCSP_LIO_CFG(idx)); - if (!done) { - liodn_offset = before & LIO_CFG_LIODN_MASK; - done = 1; - return; + if (fq->state != qman_fq_state_retired) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || + fq->state != qman_fq_state_retired) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fq->fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; } - after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - qm_out(REV3_QCSP_LIO_CFG(idx), after); + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_oos; +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_oos_fq); + +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fq->fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } 
+ + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + if (mcr->result == QM_MCR_RESULT_OK) + *fqd = mcr->queryfq.fqd; else - qm_out(QCSP_LIO_CFG(idx), after); + ret = -EIO; +out: + put_affine_portal(); + return ret; } -#define IO_CFG_SDEST_MASK 0x00ff0000 -int qman_set_sdest(u16 channel, unsigned int cpu_idx) +static int qman_query_fq_np(struct qman_fq *fq, + struct qm_mcr_queryfq_np *np) { - int idx = channel - QM_CHANNEL_SWPORTAL0; - u32 before, after; + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fq->fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } - if (!qman_have_ccsr()) - return -ENODEV; + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + if (mcr->result == QM_MCR_RESULT_OK) + *np = mcr->queryfq_np; + else if (mcr->result == QM_MCR_RESULT_ERR_FQID) + ret = -ERANGE; + else + ret = -EIO; +out: + put_affine_portal(); + return ret; +} - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { - before = qm_in(REV3_QCSP_IO_CFG(idx)); - /* Each pair of vcpu share the same SRQ(SDEST) */ - cpu_idx /= 2; - after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); - qm_out(REV3_QCSP_IO_CFG(idx), after); - } else { - before = qm_in(QCSP_IO_CFG(idx)); - after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); - qm_out(QCSP_IO_CFG(idx), after); +static int qman_query_cgr(struct qman_cgr *cgr, + struct qm_mcr_querycgr *cgrd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->cgr.cgid = cgr->cgrid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); + if (mcr->result == QM_MCR_RESULT_OK) + *cgrd = mcr->querycgr; + else { + dev_err(p->config->dev, "QUERY_CGR failed: %s\n", + mcr_result_str(mcr->result)); + ret = -EIO; + } +out: + put_affine_portal(); + return ret; +} + +int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) +{ + struct qm_mcr_querycgr query_cgr; + int err; + + err = qman_query_cgr(cgr, &query_cgr); + if (err) + return err; + + *result = !!query_cgr.cgr.cs; return 0; } +EXPORT_SYMBOL(qman_query_cgr_congested); + +/* internal function used as a wait_event() expression */ +static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) +{ + unsigned long irqflags; + int ret = -EBUSY; + + local_irq_save(irqflags); + if (p->vdqcr_owned) + goto out; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + goto out; + + fq_set(fq, QMAN_FQ_STATE_VDQCR); + p->vdqcr_owned = fq; + qm_dqrr_vdqcr_set(&p->p, vdqcr); + ret = 0; +out: + local_irq_restore(irqflags); + return ret; +} + +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) +{ + int ret; + + *p = get_affine_portal(); + ret = set_p_vdqcr(*p, fq, vdqcr); + put_affine_portal(); + return ret; +} -#define MISC_CFG_WPM_MASK 0x00000002 -int qm_set_wpm(int wpm) +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, + u32 vdqcr, u32 flags) { - u32 before; - u32 after; + int ret = 0; - if (!qman_have_ccsr()) - return -ENODEV; + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + ret = wait_event_interruptible(affine_queue, + !set_vdqcr(p, fq, vdqcr)); + else + wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); + return 
ret; +} + +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) +{ + struct qman_portal *p; + int ret; - before = qm_in(MISC_CFG); - after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1); - qm_out(MISC_CFG, after); + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_retired) + return -EINVAL; + if (vdqcr & QM_VDQCR_FQID_MASK) + return -EINVAL; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + return -EBUSY; + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; + if (flags & QMAN_VOLATILE_FLAG_WAIT) + ret = wait_vdqcr_start(&p, fq, vdqcr, flags); + else + ret = set_vdqcr(&p, fq, vdqcr); + if (ret) + return ret; + /* VDQCR is set */ + if (flags & QMAN_VOLATILE_FLAG_FINISH) { + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + /* + * NB: don't propagate any error - the caller wouldn't + * know whether the VDQCR was issued or not. A signal + * could arrive after returning anyway, so the caller + * can check signal_pending() if that's an issue. + */ + wait_event_interruptible(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + else + wait_event(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + } return 0; } +EXPORT_SYMBOL(qman_volatile_dequeue); + +static void update_eqcr_ci(struct qman_portal *p, u8 avail) +{ + if (avail) + qm_eqcr_cce_prefetch(&p->p); + else + qm_eqcr_cce_update(&p->p); +} -int qm_get_wpm(int *wpm) +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) { - u32 before; + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags; + u8 avail; + + p = get_affine_portal(); + local_irq_save(irqflags); + + if (p->use_eqcr_ci_stashing) { + /* + * The stashing case is easy, only update if we need to in + * order to try and liberate ring entries. + */ + eq = qm_eqcr_start_stash(&p->p); + } else { + /* + * The non-stashing case is harder, need to prefetch ahead of + * time. 
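+ * we only pay for an EQCR consumer-index update (or prefetch) when
+ * the ring looks nearly full, ie. fewer than two free entries.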
+ */ + avail = qm_eqcr_get_avail(&p->p); + if (avail < 2) + update_eqcr_ci(p, avail); + eq = qm_eqcr_start_no_stash(&p->p); + } + + if (unlikely(!eq)) + goto out; - if (!qman_have_ccsr()) - return -ENODEV; + qm_fqid_set(eq, fq->fqid); + eq->tag = cpu_to_be32(fq_to_tag(fq)); + eq->fd = *fd; - before = qm_in(MISC_CFG); - *wpm = (before & MISC_CFG_WPM_MASK) >> 1; + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); +out: + local_irq_restore(irqflags); + put_affine_portal(); return 0; } +EXPORT_SYMBOL(qman_enqueue); + +static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + u8 verb = QM_MCC_VERB_MODIFYCGR; + int ret = 0; + + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initcgr = *opts; + mcc->initcgr.cgid = cgr->cgrid; + if (flags & QMAN_CGR_FLAG_USE_INIT) + verb = QM_MCC_VERB_INITCGR; + qm_mc_commit(&p->p, verb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); + if (mcr->result != QM_MCR_RESULT_OK) + ret = -EIO; -#ifdef CONFIG_SYSFS +out: + put_affine_portal(); + return ret; +} -#define DRV_NAME "fsl-qman" +#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) -static ssize_t show_pfdr_fpc(struct device *dev, - struct device_attribute *dev_attr, char *buf) +/* congestion state change notification target update control */ +static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) { - return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC)); -}; + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi | + QM_CGR_TARG_UDP_CTRL_WRITE_BIT); + else + cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi)); +} -static ssize_t show_dlm_avg(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) { - u32 data; - int i; + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi); + else + cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi)); +} - if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1) - return -EINVAL; - data = qm_in(DCP_DLM_AVG(i)); - return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, - (data & 0x000000ff)*390625); -}; +static u8 qman_cgr_cpus[CGR_NUM]; -static ssize_t set_dlm_avg(struct device *dev, - struct device_attribute *dev_attr, const char *buf, size_t count) +void qman_init_cgr_all(void) { - unsigned long val; - int i; + struct qman_cgr cgr; + int err_cnt = 0; - if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1) - return -EINVAL; - if (kstrtoul(buf, 0, &val)) { - dev_dbg(dev, "invalid input %s\n", buf); + for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { + if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) + err_cnt++; + } + + if (err_cnt) + pr_err("Warning: %d error%s while initialising CGR h/w\n", + err_cnt, (err_cnt > 1) ? "s" : ""); +} + +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + struct qm_mcr_querycgr cgr_state; + int ret; + struct qman_portal *p; + + /* + * We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
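+ * ie. cgrid is bounds-checked against CGR_NUM here; an id the SoC
+ * doesn't implement will simply fail the management command below.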
+ */ + if (cgr->cgrid >= CGR_NUM) return -EINVAL; + + preempt_disable(); + p = get_affine_portal(); + qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); + preempt_enable(); + + cgr->chan = p->config->channel; + spin_lock(&p->cgr_lock); + + if (opts) { + struct qm_mcc_initcgr local_opts = *opts; + + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) + goto out; + + qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), + be32_to_cpu(cgr_state.cgr.cscn_targ)); + local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG); + + /* send init if flags indicate so */ + if (flags & QMAN_CGR_FLAG_USE_INIT) + ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, + &local_opts); + else + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + goto out; } - qm_out(DCP_DLM_AVG(i), val); - return count; -}; -static ssize_t show_pfdr_cfg(struct device *dev, - struct device_attribute *dev_attr, char *buf) + list_add(&cgr->node, &p->cgr_cbs); + + /* Determine if newly added object requires its callback to be called */ + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* we can't go back, so proceed and return success */ + dev_err(p->config->dev, "CGR HW state partially modified\n"); + ret = 0; + goto out; + } + if (cgr->cb && cgr_state.cgr.cscn_en && + qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) + cgr->cb(p, cgr, 1); +out: + spin_unlock(&p->cgr_lock); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_create_cgr); + +int qman_delete_cgr(struct qman_cgr *cgr) { - return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG)); + unsigned long irqflags; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret = 0; + struct qman_cgr *i; + struct qman_portal *p = get_affine_portal(); + + if (cgr->chan != p->config->channel) { + /* attempt to delete from other portal than creator */ + dev_err(p->config->dev, "CGR not owned by current portal"); + dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n", + cgr->chan, p->config->channel); + + ret = -EINVAL; + goto put_portal; + } + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + spin_lock_irqsave(&p->cgr_lock, irqflags); + list_del(&cgr->node); + /* + * If there are no other CGR objects for this CGRID in the list, + * update CSCN_TARG accordingly + */ + list_for_each_entry(i, &p->cgr_cbs, node) + if (i->cgrid == cgr->cgrid && i->cb) + goto release_lock; + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); + goto release_lock; + } + + local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG); + qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), + be32_to_cpu(cgr_state.cgr.cscn_targ)); + + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); +put_portal: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_delete_cgr); + +struct cgr_comp { + struct qman_cgr *cgr; + struct completion completion; }; -static ssize_t set_pfdr_cfg(struct device *dev, - struct device_attribute *dev_attr, const char *buf, size_t count) +static int qman_delete_cgr_thread(void *p) { - unsigned long val; + struct cgr_comp *cgr_comp = (struct cgr_comp *)p; + int ret; - if (kstrtoul(buf, 0, &val)) { - dev_dbg(dev, "invalid input %s\n", buf); - return -EINVAL; - } - qm_out(PFDR_CFG, val); - return count; -}; + ret = qman_delete_cgr(cgr_comp->cgr); + complete(&cgr_comp->completion); -static ssize_t show_sfdr_in_use(struct device *dev, - struct device_attribute *dev_attr, char 
*buf) + return ret; +} + +void qman_delete_cgr_safe(struct qman_cgr *cgr) { - return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE)); -}; + struct task_struct *thread; + struct cgr_comp cgr_comp; + + preempt_disable(); + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { + init_completion(&cgr_comp.completion); + cgr_comp.cgr = cgr; + thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, + "cgr_del"); + + if (IS_ERR(thread)) + goto out; + + kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); + wake_up_process(thread); + wait_for_completion(&cgr_comp.completion); + preempt_enable(); + return; + } +out: + qman_delete_cgr(cgr); + preempt_enable(); +} +EXPORT_SYMBOL(qman_delete_cgr_safe); + +/* Cleanup FQs */ -static ssize_t show_idle_stat(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) { - return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT)); -}; + const union qm_mr_entry *msg; + int found = 0; + + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + while (msg) { + if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) + found = 1; + qm_mr_next(p); + qm_mr_cci_consume_to_current(p); + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + } + return found; +} -static ssize_t show_ci_rlm_avg(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, + bool wait) { - u32 data = qm_in(CI_RLM_AVG); + const struct qm_dqrr_entry *dqrr; + int found = 0; - return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, - (data & 0x000000ff)*390625); -}; + do { + qm_dqrr_pvb_update(p); + dqrr = qm_dqrr_current(p); + if (!dqrr) + cpu_relax(); + } while (wait && !dqrr); + + while (dqrr) { + if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) + found = 1; + qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); + qm_dqrr_pvb_update(p); + qm_dqrr_next(p); + dqrr = qm_dqrr_current(p); + } + return found; +} + +#define qm_mr_drain(p, V) \ + _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) + +#define qm_dqrr_drain(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) -static ssize_t set_ci_rlm_avg(struct device *dev, - struct device_attribute *dev_attr, const char *buf, size_t count) +#define qm_dqrr_drain_wait(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) + +#define qm_dqrr_drain_nomatch(p) \ + _qm_dqrr_consume_and_match(p, 0, 0, false) + +static int qman_shutdown_fq(u32 fqid) { - unsigned long val; + struct qman_portal *p; +#ifndef __rtems__ + struct device *dev; +#endif /* __rtems__ */ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + int orl_empty, drain = 0, ret = 0; + u32 channel, wq, res; + u8 state; - if (kstrtoul(buf, 0, &val)) { - dev_dbg(dev, "invalid input %s\n", buf); - return -EINVAL; + p = get_affine_portal(); +#ifndef __rtems__ + dev = p->config->dev; +#endif /* __rtems__ */ + /* Determine the state of the FQID */ + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ_NP timeout\n"); + ret = -ETIMEDOUT; + goto out; } - qm_out(CI_RLM_AVG, val); - return count; -}; -static ssize_t show_err_isr(struct device *dev, - struct device_attribute *dev_attr, char *buf) + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; + if (state == QM_MCR_NP_STATE_OOS) + goto out; /* Already OOS, no need to do anymore checks */ + + /* 
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	/* Need to store these since the MCR gets reused */
+	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(&p->p);
+		qm_fqid_set(&mcc->fq, fqid);
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			dev_err(dev, "ALTER_RETIRE timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_RETIRE);
+		res = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (res == QM_MCR_RESULT_PENDING) {
+			/*
+			 * Need to wait for the FQRN in the message ring, which
+			 * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain the portal needs to be set
+			 * to dequeue from the channel the FQ is scheduled on
+			 */
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < qm_channel_pool1 + 15) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1)<<4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+					fqid, channel);
+				ret = -EBUSY;
+				goto out;
+			}
+#ifdef __rtems__
+			(void)dequeue_wq;
+#endif /* __rtems__ */
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_POOL_CONV
+						  (channel));
+			do {
+				/* Keep draining DQRR while checking the MR */
+				qm_dqrr_drain_nomatch(&p->p);
+				/* Process message ring too */
+				found_fqrn = qm_mr_drain(&p->p, FQRN);
+				cpu_relax();
+			} while (!found_fqrn);
+
+		}
+		if (res != QM_MCR_RESULT_OK &&
+		    res != QM_MCR_RESULT_PENDING) {
+			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+				fqid, res);
+			ret = -EIO;
+			goto out;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/*
+			 * ORL had no entries, no need to wait until the
+			 * ERNs come in
+			 */
+			orl_empty = 1;
+		}
+		/*
+		 * Retirement succeeded, check to see if FQ needs
+		 * to be drained
+		 */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			do {
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+				qm_dqrr_vdqcr_set(&p->p, vdqcr);
+				/*
+				 * Wait for a dequeue and process the dequeues,
+				 * making sure to empty the ring completely
+				 */
+			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+		}
+		qm_dqrr_sdqcr_set(&p->p, 0);
+
+		while (!orl_empty) {
+			/* Wait for the ORL to have been completely drained */
+			orl_empty = qm_mr_drain(&p->p, FQRL);
+			cpu_relax();
+		}
+		mcc = qm_mc_start(&p->p);
+		qm_fqid_set(&mcc->fq, fqid);
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			dev_err(dev,
"OOS after drain fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_RETIRED: + /* Send OOS Command */ + mcc = qm_mc_start(&p->p); + qm_fqid_set(&mcc->fq, fqid); + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_OOS); + if (mcr->result) { + dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_OOS: + /* Done */ + break; + + default: + ret = -EIO; + } + +out: + put_affine_portal(); + return ret; +} + +const struct qm_portal_config *qman_get_qm_portal_config( + struct qman_portal *portal) { - return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); -}; + return portal->config; +} +EXPORT_SYMBOL(qman_get_qm_portal_config); +struct gen_pool *qm_fqalloc; /* FQID allocator */ +struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +struct gen_pool *qm_cgralloc; /* CGR ID allocator */ -static ssize_t show_sbec(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) { - int i; + unsigned long addr; - if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1) - return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); -}; + addr = gen_pool_alloc(p, cnt); + if (!addr) + return -ENOMEM; -static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL); -static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg); -static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL); -static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR), - show_ci_rlm_avg, set_ci_rlm_avg); -static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); -static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL); - -static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); -static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg); - -static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL); -static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL); - -static struct attribute *qman_dev_attributes[] = { - &dev_attr_pfdr_fpc.attr, - &dev_attr_pfdr_cfg.attr, - &dev_attr_idle_stat.attr, - &dev_attr_ci_rlm_avg.attr, - &dev_attr_err_isr.attr, - &dev_attr_dcp0_dlm_avg.attr, - &dev_attr_dcp1_dlm_avg.attr, - &dev_attr_dcp2_dlm_avg.attr, - &dev_attr_dcp3_dlm_avg.attr, - /* sfdr_in_use will be added if necessary */ - NULL -}; + *result = addr & ~DPAA_GENALLOC_OFF; -static struct attribute *qman_dev_ecr_attributes[] = { - &dev_attr_sbec_0.attr, - 
&dev_attr_sbec_1.attr, - &dev_attr_sbec_2.attr, - &dev_attr_sbec_3.attr, - &dev_attr_sbec_4.attr, - &dev_attr_sbec_5.attr, - &dev_attr_sbec_6.attr, - &dev_attr_sbec_7.attr, - &dev_attr_sbec_8.attr, - &dev_attr_sbec_9.attr, - &dev_attr_sbec_10.attr, - &dev_attr_sbec_11.attr, - &dev_attr_sbec_12.attr, - &dev_attr_sbec_13.attr, - &dev_attr_sbec_14.attr, - NULL -}; + return 0; +} -/* root level */ -static const struct attribute_group qman_dev_attr_grp = { - .name = NULL, - .attrs = qman_dev_attributes -}; -static const struct attribute_group qman_dev_ecr_grp = { - .name = "error_capture", - .attrs = qman_dev_ecr_attributes -}; +int qman_alloc_fqid_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_fqalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_fqid_range); -static int of_fsl_qman_remove(struct platform_device *ofdev) +int qman_alloc_pool_range(u32 *result, u32 count) { - sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); + return qman_alloc_range(qm_qpalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_pool_range); + +int qman_alloc_cgrid_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_cgralloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_cgrid_range); + +int qman_release_fqid(u32 fqid) +{ + int ret = qman_shutdown_fq(fqid); + + if (ret) { + pr_debug("FQID %d leaked\n", fqid); + return ret; + } + + gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); return 0; -}; +} +EXPORT_SYMBOL(qman_release_fqid); + +static int qpool_cleanup(u32 qp) +{ + /* + * We query all FQDs starting from + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs + * whose destination channel is the pool-channel being released. + * When a non-OOS FQD is found we attempt to clean it up + */ + struct qman_fq fq = { + .fqid = QM_FQID_RANGE_START + }; + int err; + + do { + struct qm_mcr_queryfq_np np; + + err = qman_query_fq_np(&fq, &np); + if (err == -ERANGE) + /* FQID range exceeded, found no problems */ + return 0; + else if (WARN_ON(err)) + return err; + + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + + err = qman_query_fq(&fq, &fqd); + if (WARN_ON(err)) + return err; + if (qm_fqd_get_chan(&fqd) == qp) { + /* The channel is the FQ's target, clean it */ + err = qman_shutdown_fq(fq.fqid); + if (err) + /* + * Couldn't shut down the FQ + * so the pool must be leaked + */ + return err; + } + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} -static int of_fsl_qman_probe(struct platform_device *ofdev) +int qman_release_pool(u32 qp) { int ret; - struct device *dev = &ofdev->dev; - ret = sysfs_create_group(&dev->kobj, &qman_dev_attr_grp); - if (ret) - goto done; - ret = sysfs_add_file_to_group(&dev->kobj, - &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name); - if (ret) - goto del_group_0; - ret = sysfs_create_group(&dev->kobj, &qman_dev_ecr_grp); - if (ret) - goto del_group_0; + ret = qpool_cleanup(qp); + if (ret) { + pr_debug("CHID %d leaked\n", qp); + return ret; + } - goto done; + gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_pool); -del_group_0: - sysfs_remove_group(&dev->kobj, &qman_dev_attr_grp); -done: - if (ret) - dev_err(dev, "Cannot create dev attributes ret=%d\n", ret); - return ret; -}; +static int cgr_cleanup(u32 cgrid) +{ + /* + * query all FQDs starting from FQID 1 until we get an "invalid FQID" + * error, looking for non-OOS FQDs whose CGR is the CGR being released + */ + struct qman_fq fq = { + .fqid = QM_FQID_RANGE_START + }; + int err; -static 
const struct of_device_id of_fsl_qman_ids[] = {
-	{
-		.compatible = "fsl,qman",
-	},
-	{}
-};
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err == -ERANGE)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		else if (WARN_ON(err))
+			return err;
+
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return err;
+			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+			    fqd.cgid == cgrid) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return -EIO;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
 
-static struct platform_driver of_fsl_qman_driver = {
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = of_fsl_qman_ids,
-	},
-	.probe = of_fsl_qman_probe,
-	.remove = of_fsl_qman_remove,
-};
+int qman_release_cgrid(u32 cgrid)
+{
+	int ret;
 
-builtin_platform_driver(of_fsl_qman_driver);
+	ret = cgr_cleanup(cgrid);
+	if (ret) {
+		pr_debug("CGRID %d leaked\n", cgrid);
+		return ret;
+	}
 
-#endif /* CONFIG_SYSFS */
+	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/linux/drivers/soc/fsl/qbman/qman.h b/linux/drivers/soc/fsl/qbman/qman.h
deleted file mode 100644
index 331db7c7..00000000
--- a/linux/drivers/soc/fsl/qbman/qman.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *	 notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *	 notice, this list of conditions and the following disclaimer in the
- *	 documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *	 names of its contributors may be used to endorse or promote products
- *	 derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#include "qman_priv.h" - -/* Portal register assists */ - -/* Cache-inhibited register offsets */ -#define QM_REG_EQCR_PI_CINH 0x0000 -#define QM_REG_EQCR_CI_CINH 0x0004 -#define QM_REG_EQCR_ITR 0x0008 -#define QM_REG_DQRR_PI_CINH 0x0040 -#define QM_REG_DQRR_CI_CINH 0x0044 -#define QM_REG_DQRR_ITR 0x0048 -#define QM_REG_DQRR_DCAP 0x0050 -#define QM_REG_DQRR_SDQCR 0x0054 -#define QM_REG_DQRR_VDQCR 0x0058 -#define QM_REG_DQRR_PDQCR 0x005c -#define QM_REG_MR_PI_CINH 0x0080 -#define QM_REG_MR_CI_CINH 0x0084 -#define QM_REG_MR_ITR 0x0088 -#define QM_REG_CFG 0x0100 -#define QM_REG_ISR 0x0e00 -#define QM_REG_IIR 0x0e0c -#define QM_REG_ITPR 0x0e14 - -/* Cache-enabled register offsets */ -#define QM_CL_EQCR 0x0000 -#define QM_CL_DQRR 0x1000 -#define QM_CL_MR 0x2000 -#define QM_CL_EQCR_PI_CENA 0x3000 -#define QM_CL_EQCR_CI_CENA 0x3100 -#define QM_CL_DQRR_PI_CENA 0x3200 -#define QM_CL_DQRR_CI_CENA 0x3300 -#define QM_CL_MR_PI_CENA 0x3400 -#define QM_CL_MR_CI_CENA 0x3500 -#define QM_CL_CR 0x3800 -#define QM_CL_RR0 0x3900 -#define QM_CL_RR1 0x3940 - -/* BTW, the drivers (and h/w programming model) already obtain the required - * synchronisation for portal accesses via lwsync(), hwsync(), and - * data-dependencies. Use of barrier()s or other order-preserving primitives - * simply degrade performance. Hence the use of the __raw_*() interfaces, which - * simply ensure that the compiler treats the portal registers as volatile (ie. - * non-coherent). */ - -/* Cache-inhibited register access. */ -#define __qm_in(qm, o) __raw_readl((qm)->addr_ci + (o)) -#define __qm_out(qm, o, val) __raw_writel((val), (qm)->addr_ci + (o)) -#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg) -#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val) - -/* Cache-enabled (index) register access */ -#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o)) -#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o)) -#define __qm_cl_in(qm, o) __raw_readl((qm)->addr_ce + (o)) -#define __qm_cl_out(qm, o, val) \ - do { \ - u32 *__tmpclout = (qm)->addr_ce + (o); \ - __raw_writel((val), __tmpclout); \ - dcbf(__tmpclout); \ - } while (0) -#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o)) -#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA) -#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA) -#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA) -#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val) -#define qm_cl_invalidate(reg)\ - __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA) - -/* Cache-enabled ring access */ -#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) - -/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf - * analysis, look at using the "extra" bit in the ring index registers to avoid - * cyclic issues. */ -static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) -{ - /* 'first' is included, 'last' is excluded */ - if (first <= last) - return last - first; - return ringsize + last - first; -} - -/* Portal modes. - * Enum types; - * pmode == production mode - * cmode == consumption mode, - * dmode == h/w dequeue mode. - * Enum values use 3 letter codes. 
First letter matches the portal mode, - * remaining two letters indicate; - * ci == cache-inhibited portal register - * ce == cache-enabled portal register - * vb == in-band valid-bit (cache-enabled) - * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only - * As for "enum qm_dqrr_dmode", it should be self-explanatory. - */ -enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ - qm_eqcr_pci = 0, /* PI index, cache-inhibited */ - qm_eqcr_pce = 1, /* PI index, cache-enabled */ - qm_eqcr_pvb = 2 /* valid-bit */ -}; -enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ - qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ - qm_dqrr_dpull = 1 /* PDQCR */ -}; -enum qm_dqrr_pmode { /* s/w-only */ - qm_dqrr_pci, /* reads DQRR_PI_CINH */ - qm_dqrr_pce, /* reads DQRR_PI_CENA */ - qm_dqrr_pvb /* reads valid-bit */ -}; -enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ - qm_dqrr_cci = 0, /* CI index, cache-inhibited */ - qm_dqrr_cce = 1, /* CI index, cache-enabled */ - qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ -}; -enum qm_mr_pmode { /* s/w-only */ - qm_mr_pci, /* reads MR_PI_CINH */ - qm_mr_pce, /* reads MR_PI_CENA */ - qm_mr_pvb /* reads valid-bit */ -}; -enum qm_mr_cmode { /* matches QCSP_CFG::MM */ - qm_mr_cci = 0, /* CI index, cache-inhibited */ - qm_mr_cce = 1 /* CI index, cache-enabled */ -}; - -/* --- Portal structures --- */ - -#define QM_EQCR_SIZE 8 -#define QM_DQRR_SIZE 16 -#define QM_MR_SIZE 8 - -struct qm_eqcr { - struct qm_eqcr_entry *ring, *cursor; - u8 ci, available, ithresh, vbit; -#ifdef CONFIG_FSL_DPA_CHECKING - u32 busy; - enum qm_eqcr_pmode pmode; -#endif -}; - -struct qm_dqrr { - const struct qm_dqrr_entry *ring, *cursor; - u8 pi, ci, fill, ithresh, vbit; -#ifdef CONFIG_FSL_DPA_CHECKING - enum qm_dqrr_dmode dmode; - enum qm_dqrr_pmode pmode; - enum qm_dqrr_cmode cmode; -#endif -}; - -struct qm_mr { - const struct qm_mr_entry *ring, *cursor; - u8 pi, ci, fill, ithresh, vbit; -#ifdef CONFIG_FSL_DPA_CHECKING - enum qm_mr_pmode pmode; - enum qm_mr_cmode cmode; -#endif -}; - -struct qm_mc { - struct qm_mc_command *cr; - struct qm_mc_result *rr; - u8 rridx, vbit; -#ifdef CONFIG_FSL_DPA_CHECKING - enum { - /* Can be _mc_start()ed */ - qman_mc_idle, - /* Can be _mc_commit()ed or _mc_abort()ed */ - qman_mc_user, - /* Can only be _mc_retry()ed */ - qman_mc_hw - } state; -#endif -}; - -#define QM_PORTAL_ALIGNMENT ____cacheline_aligned - -struct qm_addr { - void __iomem *addr_ce; /* cache-enabled */ - void __iomem *addr_ci; /* cache-inhibited */ -}; - -struct qm_portal { - /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to - * and including 'mc' fits within a cacheline (yay!). The 'config' part - * is setup-only, so isn't a cause for a concern. In other words, don't - * rearrange this structure on a whim, there be dragons ... 
*/ - struct qm_addr addr; - struct qm_eqcr eqcr; - struct qm_dqrr dqrr; - struct qm_mr mr; - struct qm_mc mc; -} QM_PORTAL_ALIGNMENT; - -/* --- EQCR API --- */ - -/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ -#define EQCR_CARRYCLEAR(p) \ - (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6))) - -/* Bit-wise logic to convert a ring pointer to a ring index */ -static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e) -{ - return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1); -} - -/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ -static inline void EQCR_INC(struct qm_eqcr *eqcr) -{ - /* NB: this is odd-looking, but experiments show that it generates fast - * code with essentially no branching overheads. We increment to the - * next EQCR pointer and handle overflow and 'vbit'. */ - struct qm_eqcr_entry *partial = eqcr->cursor + 1; - - eqcr->cursor = EQCR_CARRYCLEAR(partial); - if (partial != eqcr->cursor) - eqcr->vbit ^= QM_EQCR_VERB_VBIT; -} - -static inline int qm_eqcr_init(struct qm_portal *portal, - enum qm_eqcr_pmode pmode, - unsigned int eq_stash_thresh, - int eq_stash_prio) -{ - /* This use of 'register', as well as all other occurrences, is because - * it has been observed to generate much faster code with gcc than is - * otherwise the case. */ - register struct qm_eqcr *eqcr = &portal->eqcr; - u32 cfg; - u8 pi; - - eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR; - eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); - qm_cl_invalidate(EQCR_CI); - pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); - eqcr->cursor = eqcr->ring + pi; - eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? - QM_EQCR_VERB_VBIT : 0; - eqcr->available = QM_EQCR_SIZE - 1 - - qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); - eqcr->ithresh = qm_in(EQCR_ITR); -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 0; - eqcr->pmode = pmode; -#endif - cfg = (qm_in(CFG) & 0x00ffffff) | - (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ - (eq_stash_prio << 26) | /* QCSP_CFG: EP */ - ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ - qm_out(CFG, cfg); - return 0; -} - -static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) -{ - return (qm_in(CFG) >> 28) & 0x7; -} - -static inline void qm_eqcr_finish(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); - u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); - - DPA_ASSERT(!eqcr->busy); - if (pi != EQCR_PTR2IDX(eqcr->cursor)) - pr_crit("losing uncommited EQCR entries\n"); - if (ci != eqcr->ci) - pr_crit("missing existing EQCR completions\n"); - if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor)) - pr_crit("EQCR destroyed unquiesced\n"); -} - -static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal - *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - DPA_ASSERT(!eqcr->busy); - if (!eqcr->available) - return NULL; - - -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 1; -#endif - dcbz_64(eqcr->cursor); - return eqcr->cursor; -} - -static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal - *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - u8 diff, old_ci; - - DPA_ASSERT(!eqcr->busy); - if (!eqcr->available) { - old_ci = eqcr->ci; - eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); - diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); - eqcr->available += diff; - if (!diff) - return NULL; - } -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 1; -#endif - dcbz_64(eqcr->cursor); - return eqcr->cursor; -} - 
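The EQCR inlines in this deleted header are used in start/commit pairs: qm_eqcr_start_no_stash() (or the stashing variant above) reserves and zeroes the next ring entry, the caller fills it in, and one of the commit helpers just below writes the verb byte last so the hardware's valid-bit handshake publishes the entry atomically. A minimal sketch of that pairing, assuming the qm_eqcr_entry layout (fqid, fd) and the QM_EQCR_VERB_CMD_ENQUEUE verb from the era's public qman.h; the driver's real enqueue path (qman_enqueue) adds locking, flags and wait semantics on top of this:

static inline int example_enqueue(struct qm_portal *portal,
				  const struct qm_fd *fd, u32 fqid)
{
	/* Reserve and zero the next EQCR entry; NULL means the ring is full */
	struct qm_eqcr_entry *eq = qm_eqcr_start_no_stash(portal);

	if (!eq)
		return -EBUSY;	/* retry after a cci/cce consumer update */
	eq->fqid = fqid;	/* assumes the entry's raw fqid field */
	eq->fd = *fd;		/* frame descriptor payload */
	/* Commit writes the verb (with the valid bit) and flushes the line */
	qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
	return 0;
}
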
-static inline void qm_eqcr_abort(struct qm_portal *portal) -{ - __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; - - DPA_ASSERT(eqcr->busy); -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 0; -#endif -} - -static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next( - struct qm_portal *portal, u8 myverb) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - DPA_ASSERT(eqcr->busy); - DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb); - if (eqcr->available == 1) - return NULL; - eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; - dcbf(eqcr->cursor); - EQCR_INC(eqcr); - eqcr->available--; - dcbz_64(eqcr->cursor); - return eqcr->cursor; -} - -#define EQCR_COMMIT_CHECKS(eqcr) \ -do { \ - DPA_ASSERT(eqcr->busy); \ - DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \ - DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \ -} while (0) - -static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - EQCR_COMMIT_CHECKS(eqcr); - DPA_ASSERT(eqcr->pmode == qm_eqcr_pci); - eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; - EQCR_INC(eqcr); - eqcr->available--; - dcbf(eqcr->cursor); - hwsync(); - qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor)); -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 0; -#endif -} - -static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; - - DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); - qm_cl_invalidate(EQCR_PI); - qm_cl_touch_rw(EQCR_PI); -} - -static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - EQCR_COMMIT_CHECKS(eqcr); - DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); - eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; - EQCR_INC(eqcr); - eqcr->available--; - dcbf(eqcr->cursor); - lwsync(); - qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor)); -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 0; -#endif -} - -static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - struct qm_eqcr_entry *eqcursor; - - EQCR_COMMIT_CHECKS(eqcr); - DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb); - lwsync(); - eqcursor = eqcr->cursor; - eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit; - dcbf(eqcursor); - EQCR_INC(eqcr); - eqcr->available--; -#ifdef CONFIG_FSL_DPA_CHECKING - eqcr->busy = 0; -#endif -} - -static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - u8 diff, old_ci = eqcr->ci; - - eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); - diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); - eqcr->available += diff; - return diff; -} - -static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; - - qm_cl_touch_ro(EQCR_CI); -} - -static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - u8 diff, old_ci = eqcr->ci; - - eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); - qm_cl_invalidate(EQCR_CI); - diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); - eqcr->available += diff; - return diff; -} - -static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - return eqcr->ithresh; -} - -static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) -{ - register struct qm_eqcr 
*eqcr = &portal->eqcr; - - eqcr->ithresh = ithresh; - qm_out(EQCR_ITR, ithresh); -} - -static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - return eqcr->available; -} - -static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) -{ - register struct qm_eqcr *eqcr = &portal->eqcr; - - return QM_EQCR_SIZE - 1 - eqcr->available; -} - -/* --- DQRR API --- */ - -/* FIXME: many possible improvements; - * - look at changing the API to use pointer rather than index parameters now - * that 'cursor' is a pointer, - * - consider moving other parameters to pointer if it could help (ci) - */ - -#define DQRR_CARRYCLEAR(p) \ - (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6))) - -static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e) -{ - return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1); -} - -static inline const struct qm_dqrr_entry *DQRR_INC( - const struct qm_dqrr_entry *e) -{ - return DQRR_CARRYCLEAR(e + 1); -} - -static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) -{ - qm_out(CFG, (qm_in(CFG) & 0xff0fffff) | - ((mf & (QM_DQRR_SIZE - 1)) << 20)); -} - -static inline int qm_dqrr_init(struct qm_portal *portal, - const struct qm_portal_config *config, - enum qm_dqrr_dmode dmode, - __maybe_unused enum qm_dqrr_pmode pmode, - enum qm_dqrr_cmode cmode, u8 max_fill) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - u32 cfg; - - /* Make sure the DQRR will be idle when we enable */ - qm_out(DQRR_SDQCR, 0); - qm_out(DQRR_VDQCR, 0); - qm_out(DQRR_PDQCR, 0); - dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR; - dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); - dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); - dqrr->cursor = dqrr->ring + dqrr->ci; - dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); - dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? - QM_DQRR_VERB_VBIT : 0; - dqrr->ithresh = qm_in(DQRR_ITR); -#ifdef CONFIG_FSL_DPA_CHECKING - dqrr->dmode = dmode; - dqrr->pmode = pmode; - dqrr->cmode = cmode; -#endif - /* Invalidate every ring entry before beginning */ - for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) - dcbi(qm_cl(dqrr->ring, cfg)); - cfg = (qm_in(CFG) & 0xff000f00) | - ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ - ((dmode & 1) << 18) | /* DP */ - ((cmode & 3) << 16) | /* DCM */ -#ifndef __rtems__ - 0xa0 | /* RE+SE */ -#endif /* __rtems__ */ - (0 ? 0x40 : 0) | /* Ignore RP */ - (0 ? 
0x10 : 0); /* Ignore SP */ - qm_out(CFG, cfg); - qm_dqrr_set_maxfill(portal, max_fill); - return 0; -} - -static inline void qm_dqrr_finish(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; -#ifdef CONFIG_FSL_DPA_CHECKING - if ((dqrr->cmode != qm_dqrr_cdc) && - (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) - pr_crit("Ignoring completed DQRR entries\n"); -#endif -} - -static inline const struct qm_dqrr_entry *qm_dqrr_current( - struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - if (!dqrr->fill) - return NULL; - return dqrr->cursor; -} - -static inline u8 qm_dqrr_cursor(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - return DQRR_PTR2IDX(dqrr->cursor); -} - -static inline u8 qm_dqrr_next(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->fill); - dqrr->cursor = DQRR_INC(dqrr->cursor); - return --dqrr->fill; -} - -static inline u8 qm_dqrr_pci_update(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - u8 diff, old_pi = dqrr->pi; - - DPA_ASSERT(dqrr->pmode == qm_dqrr_pci); - dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); - diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); - dqrr->fill += diff; - return diff; -} - -static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); - qm_cl_invalidate(DQRR_PI); - qm_cl_touch_ro(DQRR_PI); -} - -static inline u8 qm_dqrr_pce_update(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - u8 diff, old_pi = dqrr->pi; - - DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); - dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1); - diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); - dqrr->fill += diff; - return diff; -} - -static inline void qm_dqrr_pvb_update(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); - - DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb); - /* when accessing 'verb', use __raw_readb() to ensure that compiler - * inlining doesn't try to optimise out "excess reads". 
*/ -#ifdef __rtems__ - dcbi(res); -#endif /* __rtems__ */ - if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { - dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); - if (!dqrr->pi) - dqrr->vbit ^= QM_DQRR_VERB_VBIT; - dqrr->fill++; - } -} - -static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); - dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); - qm_out(DQRR_CI_CINH, dqrr->ci); -} - -static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); - dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); - qm_out(DQRR_CI_CINH, dqrr->ci); -} - -static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); - qm_cl_invalidate(DQRR_CI); - qm_cl_touch_rw(DQRR_CI); -} - -static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); - dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); - qm_cl_out(DQRR_CI, dqrr->ci); -} - -static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); - dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); - qm_cl_out(DQRR_CI, dqrr->ci); -} - -static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx, - int park) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - DPA_ASSERT(idx < QM_DQRR_SIZE); - qm_out(DQRR_DCAP, (0 << 8) | /* S */ - ((park ? 1 : 0) << 6) | /* PK */ - idx); /* DCAP_CI */ -} - -static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, - const struct qm_dqrr_entry *dq, - int park) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - u8 idx = DQRR_PTR2IDX(dq); - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - DPA_ASSERT((dqrr->ring + idx) == dq); - DPA_ASSERT(idx < QM_DQRR_SIZE); - qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ - ((park ? 
1 : 0) << 6) | /* DQRR_DCAP::PK */ - idx); /* DQRR_DCAP::DCAP_CI */ -} - -static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ - ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ -} - -static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); -} - -static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - qm_cl_invalidate(DQRR_CI); - qm_cl_touch_ro(DQRR_CI); -} - -static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); - return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1); -} - -static inline u8 qm_dqrr_get_ci(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); - return dqrr->ci; -} - -static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx) -{ - __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); - qm_out(DQRR_DCAP, (0 << 8) | /* S */ - (1 << 6) | /* PK */ - (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */ -} - -static inline void qm_dqrr_park_current(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); - qm_out(DQRR_DCAP, (0 << 8) | /* S */ - (1 << 6) | /* PK */ - DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */ -} - -static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) -{ - qm_out(DQRR_SDQCR, sdqcr); -} - -static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal) -{ - return qm_in(DQRR_SDQCR); -} - -static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) -{ - qm_out(DQRR_VDQCR, vdqcr); -} - -static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal) -{ - return qm_in(DQRR_VDQCR); -} - -static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr) -{ - qm_out(DQRR_PDQCR, pdqcr); -} - -static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal) -{ - return qm_in(DQRR_PDQCR); -} - -static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal) -{ - register struct qm_dqrr *dqrr = &portal->dqrr; - - return dqrr->ithresh; -} - -static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) -{ - qm_out(DQRR_ITR, ithresh); -} - -static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal) -{ - return (qm_in(CFG) & 0x00f00000) >> 20; -} - -/* --- MR API --- */ - -#define MR_CARRYCLEAR(p) \ - (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6))) - -static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e) -{ - return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1); -} - -static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e) -{ - return MR_CARRYCLEAR(e + 1); -} - -static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, - enum qm_mr_cmode cmode) -{ - register struct qm_mr *mr = &portal->mr; - u32 cfg; - - mr->ring = portal->addr.addr_ce + QM_CL_MR; - mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); - mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); - mr->cursor = mr->ring + mr->ci; - mr->fill = qm_cyc_diff(QM_MR_SIZE, 
mr->ci, mr->pi); - mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0; - mr->ithresh = qm_in(MR_ITR); -#ifdef CONFIG_FSL_DPA_CHECKING - mr->pmode = pmode; - mr->cmode = cmode; -#endif - cfg = (qm_in(CFG) & 0xfffff0ff) | - ((cmode & 1) << 8); /* QCSP_CFG:MM */ - qm_out(CFG, cfg); - return 0; -} - -static inline void qm_mr_finish(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - if (mr->ci != MR_PTR2IDX(mr->cursor)) - pr_crit("Ignoring completed MR entries\n"); -} - -static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - if (!mr->fill) - return NULL; - return mr->cursor; -} - -static inline u8 qm_mr_cursor(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - return MR_PTR2IDX(mr->cursor); -} - -static inline u8 qm_mr_next(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->fill); - mr->cursor = MR_INC(mr->cursor); - return --mr->fill; -} - -static inline u8 qm_mr_pci_update(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - u8 diff, old_pi = mr->pi; - - DPA_ASSERT(mr->pmode == qm_mr_pci); - mr->pi = qm_in(MR_PI_CINH); - diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); - mr->fill += diff; - return diff; -} - -static inline void qm_mr_pce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->pmode == qm_mr_pce); - qm_cl_invalidate(MR_PI); - qm_cl_touch_ro(MR_PI); -} - -static inline u8 qm_mr_pce_update(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - u8 diff, old_pi = mr->pi; - - DPA_ASSERT(mr->pmode == qm_mr_pce); - mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1); - diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi); - mr->fill += diff; - return diff; -} - -static inline void qm_mr_pvb_update(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); - - DPA_ASSERT(mr->pmode == qm_mr_pvb); - /* when accessing 'verb', use __raw_readb() to ensure that compiler - * inlining doesn't try to optimise out "excess reads". 
*/ - if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { - mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); - if (!mr->pi) - mr->vbit ^= QM_MR_VERB_VBIT; - mr->fill++; - res = MR_INC(res); - } - dcbit_ro(res); -} - -static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) -{ - register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->cmode == qm_mr_cci); - mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); - qm_out(MR_CI_CINH, mr->ci); -} - -static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->cmode == qm_mr_cci); - mr->ci = MR_PTR2IDX(mr->cursor); - qm_out(MR_CI_CINH, mr->ci); -} - -static inline void qm_mr_cce_prefetch(struct qm_portal *portal) -{ - __maybe_unused register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->cmode == qm_mr_cce); - qm_cl_invalidate(MR_CI); - qm_cl_touch_rw(MR_CI); -} - -static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num) -{ - register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->cmode == qm_mr_cce); - mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); - qm_cl_out(MR_CI, mr->ci); -} - -static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - DPA_ASSERT(mr->cmode == qm_mr_cce); - mr->ci = MR_PTR2IDX(mr->cursor); - qm_cl_out(MR_CI, mr->ci); -} - -static inline u8 qm_mr_get_ci(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - return mr->ci; -} - -static inline u8 qm_mr_get_ithresh(struct qm_portal *portal) -{ - register struct qm_mr *mr = &portal->mr; - - return mr->ithresh; -} - -static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) -{ - qm_out(MR_ITR, ithresh); -} - -/* --- Management command API --- */ - -static inline int qm_mc_init(struct qm_portal *portal) -{ - register struct qm_mc *mc = &portal->mc; - - mc->cr = portal->addr.addr_ce + QM_CL_CR; - mc->rr = portal->addr.addr_ce + QM_CL_RR0; - mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & - QM_MCC_VERB_VBIT) ? 0 : 1; - mc->vbit = mc->rridx ? 
QM_MCC_VERB_VBIT : 0; -#ifdef CONFIG_FSL_DPA_CHECKING - mc->state = qman_mc_idle; -#endif - return 0; -} - -static inline void qm_mc_finish(struct qm_portal *portal) -{ - __maybe_unused register struct qm_mc *mc = &portal->mc; - - DPA_ASSERT(mc->state == qman_mc_idle); -#ifdef CONFIG_FSL_DPA_CHECKING - if (mc->state != qman_mc_idle) - pr_crit("Losing incomplete MC command\n"); -#endif -} - -static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal) -{ - register struct qm_mc *mc = &portal->mc; - - DPA_ASSERT(mc->state == qman_mc_idle); -#ifdef CONFIG_FSL_DPA_CHECKING - mc->state = qman_mc_user; -#endif - dcbz_64(mc->cr); - return mc->cr; -} - -static inline void qm_mc_abort(struct qm_portal *portal) -{ - __maybe_unused register struct qm_mc *mc = &portal->mc; - - DPA_ASSERT(mc->state == qman_mc_user); -#ifdef CONFIG_FSL_DPA_CHECKING - mc->state = qman_mc_idle; -#endif -} - -static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) -{ - register struct qm_mc *mc = &portal->mc; - struct qm_mc_result *rr = mc->rr + mc->rridx; - - DPA_ASSERT(mc->state == qman_mc_user); - lwsync(); - mc->cr->__dont_write_directly__verb = myverb | mc->vbit; - dcbf(mc->cr); - dcbit_ro(rr); -#ifdef CONFIG_FSL_DPA_CHECKING - mc->state = qman_mc_hw; -#endif -} - -static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal) -{ - register struct qm_mc *mc = &portal->mc; - struct qm_mc_result *rr = mc->rr + mc->rridx; - - DPA_ASSERT(mc->state == qman_mc_hw); - /* The inactive response register's verb byte always returns zero until - * its command is submitted and completed. This includes the valid-bit, - * in case you were wondering... */ - if (!__raw_readb(&rr->verb)) { - dcbit_ro(rr); - return NULL; - } - mc->rridx ^= 1; - mc->vbit ^= QM_MCC_VERB_VBIT; -#ifdef CONFIG_FSL_DPA_CHECKING - mc->state = qman_mc_idle; -#endif - return rr; -} - -/* --- Portal interrupt register API --- */ - -static inline int qm_isr_init(__always_unused struct qm_portal *portal) -{ - return 0; -} - -static inline void qm_isr_finish(__always_unused struct qm_portal *portal) -{ -} - -static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod) -{ - qm_out(ITPR, iperiod); -} - -static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n) -{ - return __qm_in(&portal->addr, QM_REG_ISR + (n << 2)); -} - -static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, - u32 val) -{ - __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val); -} diff --git a/linux/drivers/soc/fsl/qbman/qman_api.c b/linux/drivers/soc/fsl/qbman/qman_api.c deleted file mode 100644 index e838d08f..00000000 --- a/linux/drivers/soc/fsl/qbman/qman_api.c +++ /dev/null @@ -1,3026 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "qman.h" - -/* Compilation constants */ -#define DQRR_MAXFILL 15 -#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ -#define IRQNAME "QMan portal %d" -#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ -#define QMAN_POLL_LIMIT 32 -#define QMAN_PIRQ_DQRR_ITHRESH 12 -#define QMAN_PIRQ_MR_ITHRESH 4 -#define QMAN_PIRQ_IPERIOD 100 -#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */ -/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's - * positive, and rounding to the closest value if it's zero. NB, this macro - * implicitly upgrades parameters to unsigned 64-bit, so feed it with types - * that are compatible with this. NB, these arguments should not be expressions - * unless it is safe for them to be evaluated multiple times. Eg. do not pass - * in "some_value++" as a parameter to the macro! */ -#define ROUNDING(n, d, r) \ - (((r) < 0) ? div64_u64((n), (d)) : \ - (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \ - div64_u64(((n) + ((d) / 2)), (d)))) - -/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about - * inter-processor locking only. Note, FQLOCK() is always called either under a - * local_irq_save() or from interrupt context - hence there's no need for irq - * protection (and indeed, attempting to nest irq-protection doesn't work, as - * the "irq en/disable" machinery isn't recursive...). 
*/ -#define FQLOCK(fq) \ - do { \ - struct qman_fq *__fq478 = (fq); \ - if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ - spin_lock(&__fq478->fqlock); \ - } while (0) -#define FQUNLOCK(fq) \ - do { \ - struct qman_fq *__fq478 = (fq); \ - if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ - spin_unlock(&__fq478->fqlock); \ - } while (0) - -static inline void fq_set(struct qman_fq *fq, u32 mask) -{ - set_bits(mask, &fq->flags); -} -static inline void fq_clear(struct qman_fq *fq, u32 mask) -{ - clear_bits(mask, &fq->flags); -} -static inline int fq_isset(struct qman_fq *fq, u32 mask) -{ - return fq->flags & mask; -} -static inline int fq_isclear(struct qman_fq *fq, u32 mask) -{ - return !(fq->flags & mask); -} - -struct qman_portal { - struct qm_portal p; - unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */ - unsigned long irq_sources; - u32 use_eqcr_ci_stashing; - u32 slowpoll; /* only used when interrupts are off */ - struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */ -#ifdef FSL_DPA_CAN_WAIT_SYNC - struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */ -#endif -#ifdef FSL_DPA_PORTAL_SHARE - raw_spinlock_t sharing_lock; /* only used if is_shared */ -#ifndef __rtems__ - int is_shared; - struct qman_portal *sharing_redirect; -#endif /* __rtems__ */ -#endif - u32 sdqcr; - int dqrr_disable_ref; - /* A portal-specific handler for DCP ERNs. If this is NULL, the global - * handler is called instead. */ - qman_cb_dc_ern cb_dc_ern; - /* When the cpu-affine portal is activated, this is non-NULL */ - const struct qm_portal_config *config; -#ifndef __rtems__ - /* This is needed for providing a non-NULL device to dma_map_***() */ - struct platform_device *pdev; -#endif /* __rtems__ */ - struct dpa_rbtree retire_table; - char irqname[MAX_IRQNAME]; - /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ - struct qman_cgrs *cgrs; - /* linked-list of CSCN handlers. */ - struct list_head cgr_cbs; - /* list lock */ - spinlock_t cgr_lock; - /* track if memory was allocated by the driver */ - u8 alloced; -}; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ -#define PORTAL_IRQ_LOCK(p, irqflags) \ - do { \ - if ((p)->is_shared) \ - raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ - else \ - local_irq_save(irqflags); \ - } while (0) -#define PORTAL_IRQ_UNLOCK(p, irqflags) \ - do { \ - if ((p)->is_shared) \ - raw_spin_unlock_irqrestore(&(p)->sharing_lock, \ - irqflags); \ - else \ - local_irq_restore(irqflags); \ - } while (0) -#else /* __rtems__ */ -#define PORTAL_IRQ_LOCK(p, irqflags) \ - raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags) -#define PORTAL_IRQ_UNLOCK(p, irqflags) \ - raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags) -#endif /* __rtems__ */ -#else -#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) -#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) -#endif - -/* Global handler for DCP ERNs. Used when the portal receiving the message does - * not have a portal-specific handler. */ -static qman_cb_dc_ern cb_dc_ern; - -#ifndef __rtems__ -static cpumask_t affine_mask; -static DEFINE_SPINLOCK(affine_mask_lock); -static u16 affine_channels[NR_CPUS]; -#endif /* __rtems__ */ -static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); -#ifndef __rtems__ -void *affine_portals[NR_CPUS]; -#endif /* __rtems__ */ - -/* "raw" gets the cpu-local struct whether it's a redirect or not. 
*/ -static inline struct qman_portal *get_raw_affine_portal(void) -{ - return &get_cpu_var(qman_affine_portal); -} -/* For ops that can redirect, this obtains the portal to use */ -#ifdef FSL_DPA_PORTAL_SHARE -static inline struct qman_portal *get_affine_portal(void) -{ - struct qman_portal *p = get_raw_affine_portal(); - -#ifndef __rtems__ - if (p->sharing_redirect) - return p->sharing_redirect; -#endif /* __rtems__ */ - return p; -} -#else -#define get_affine_portal() get_raw_affine_portal() -#endif -/* For every "get", there must be a "put" */ -static inline void put_affine_portal(void) -{ - put_cpu_var(qman_affine_portal); -} -/* Exception: poll functions assume the caller is cpu-affine and in no risk of - * re-entrance, which are the two reasons we usually use the get/put_cpu_var() - * semantic - ie. to disable pre-emption. Some use-cases expect the execution - * context to remain as non-atomic during poll-triggered callbacks as it was - * when the poll API was first called (eg. NAPI), so we go out of our way in - * this case to not disable pre-emption. */ -static inline struct qman_portal *get_poll_portal(void) -{ - return this_cpu_ptr(&qman_affine_portal); -} -#define put_poll_portal() - -/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux - * retirement notifications (the fact they are sometimes h/w-consumed means that - * contextB isn't always a s/w demux - and as we can't know which case it is - * when looking at the notification, we have to use the slow lookup for all of - * them). NB, it's possible to have multiple FQ objects refer to the same FQID - * (though at most one of them should be the consumer), so this table isn't for - * all FQs - FQs are added when retirement commands are issued, and removed when - * they complete, which also massively reduces the size of this table. */ -IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid); - -/* This is what everything can wait on, even if it migrates to a different cpu - * to the one whose affine portal it is waiting on. */ -static DECLARE_WAIT_QUEUE_HEAD(affine_queue); - -static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) -{ - int ret = fqtree_push(&p->retire_table, fq); - - if (ret) - pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); - return ret; -} - -static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) -{ - fqtree_del(&p->retire_table, fq); -} - -static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) -{ - return fqtree_find(&p->retire_table, fqid); -} - -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -static void **qman_fq_lookup_table; -static size_t qman_fq_lookup_table_size; - -int qman_setup_fq_lookup_table(size_t num_entries) -{ - num_entries++; - /* Allocate 1 more entry since the first entry is not used */ - qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *))); - if (!qman_fq_lookup_table) - return -ENOMEM; - qman_fq_lookup_table_size = num_entries; - pr_info("Allocated lookup table at %p, entry count %lu\n", - qman_fq_lookup_table, (unsigned long)qman_fq_lookup_table_size); - return 0; -} - -/* global structure that maintains fq object mapping */ -static DEFINE_SPINLOCK(fq_hash_table_lock); - -static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) -{ - u32 i; - - spin_lock(&fq_hash_table_lock); - /* Can't use index zero because this has special meaning - * in context_b field. 
*/ - for (i = 1; i < qman_fq_lookup_table_size; i++) { - if (qman_fq_lookup_table[i] == NULL) { - *entry = i; - qman_fq_lookup_table[i] = fq; - spin_unlock(&fq_hash_table_lock); - return 0; - } - } - spin_unlock(&fq_hash_table_lock); - return -ENOMEM; -} - -static void clear_fq_table_entry(u32 entry) -{ - spin_lock(&fq_hash_table_lock); - BUG_ON(entry >= qman_fq_lookup_table_size); - qman_fq_lookup_table[entry] = NULL; - spin_unlock(&fq_hash_table_lock); -} - -static inline struct qman_fq *get_fq_table_entry(u32 entry) -{ - BUG_ON(entry >= qman_fq_lookup_table_size); - return qman_fq_lookup_table[entry]; -} -#endif - -/* In the case that slow- and fast-path handling are both done by qman_poll() - * (ie. because there is no interrupt handling), we ought to balance how often - * we do the fast-path poll versus the slow-path poll. We'll use two decrementer - * sources, so we call the fast poll 'n' times before calling the slow poll - * once. The idle decrementer constant is used when the last slow-poll detected - * no work to do, and the busy decrementer constant when the last slow-poll had - * work to do. */ -#define SLOW_POLL_IDLE 1000 -#define SLOW_POLL_BUSY 10 -static u32 __poll_portal_slow(struct qman_portal *p, u32 is); -static inline unsigned int __poll_portal_fast(struct qman_portal *p, - unsigned int poll_limit); - -/* Portal interrupt handler */ -static irqreturn_t portal_isr(__always_unused int irq, void *ptr) -{ - struct qman_portal *p = ptr; - /* - * The CSCI source is cleared inside __poll_portal_slow(), because - * it could race against a Query Congestion State command also given - * as part of the handling of this interrupt source. We mustn't - * clear it a second time in this top-level function. - */ - u32 clear = QM_DQAVAIL_MASK; - u32 is = qm_isr_status_read(&p->p) & p->irq_sources; - /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) { - clear |= QM_PIRQ_DQRI; - __poll_portal_fast(p, QMAN_POLL_LIMIT); - } - /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); - qm_isr_status_clear(&p->p, clear); - return IRQ_HANDLED; -} - -/* This inner version is used privately by qman_create_affine_portal(), as well - * as by the exported qman_stop_dequeues(). */ -static inline void qman_stop_dequeues_ex(struct qman_portal *p) -{ - unsigned long irqflags __maybe_unused; - PORTAL_IRQ_LOCK(p, irqflags); - if (!(p->dqrr_disable_ref++)) - qm_dqrr_set_maxfill(&p->p, 0); - PORTAL_IRQ_UNLOCK(p, irqflags); -} - -static int drain_mr_fqrni(struct qm_portal *p) -{ - const struct qm_mr_entry *msg; -loop: - msg = qm_mr_current(p); - if (!msg) { - /* if MR was full and h/w had other FQRNI entries to produce, we - * need to allow it time to produce those entries once the - * existing entries are consumed. A worst-case situation - * (fully-loaded system) means h/w sequencers may have to do 3-4 - * other things before servicing the portal's MR pump, each of - * which (if slow) may take ~50 qman cycles (which is ~200 - * processor cycles). So rounding up and then multiplying this - * worst-case estimate by a factor of 10, just to be - * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume - * one entry at a time, so h/w has an opportunity to produce new - * entries well before the ring has been fully consumed, so - * we're being *really* paranoid here. 
*/ - u64 now, then = mfatb(); - - do { - now = mfatb(); - } while ((then + 10000) > now); - msg = qm_mr_current(p); - if (!msg) - return 0; - } - if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { - /* We aren't draining anything but FQRNIs */ - pr_err("Found verb 0x%x in MR\n", msg->verb); - return -1; - } - qm_mr_next(p); - qm_mr_cci_consume(p, 1); - goto loop; -} - -struct qman_portal *qman_create_portal( - struct qman_portal *portal, - const struct qm_portal_config *config, - const struct qman_cgrs *cgrs) -{ - struct qm_portal *__p; -#ifndef __rtems__ - char buf[16]; -#endif /* __rtems__ */ - int ret; - u32 isdr; - - if (!portal) { - portal = kmalloc(sizeof(*portal), GFP_KERNEL); - if (!portal) - return portal; - portal->alloced = 1; - } else - portal->alloced = 0; - - __p = &portal->p; - -#ifndef __rtems__ - portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? - 1 : 0); -#else /* __rtems__ */ - portal->use_eqcr_ci_stashing = 0; -#endif /* __rtems__ */ - - /* prep the low-level portal struct with the mapped addresses from the - * config, everything that follows depends on it and "config" is more - * for (de)reference... */ - __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; - __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; - /* - * If CI-stashing is used, the current defaults use a threshold of 3, - * and stash with high-than-DQRR priority. - */ - if (qm_eqcr_init(__p, qm_eqcr_pvb, - portal->use_eqcr_ci_stashing ? 3 : 0, 1)) { - pr_err("EQCR initialisation failed\n"); - goto fail_eqcr; - } - if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb, - qm_dqrr_cdc, DQRR_MAXFILL)) { - pr_err("DQRR initialisation failed\n"); - goto fail_dqrr; - } - if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) { - pr_err("MR initialisation failed\n"); - goto fail_mr; - } - if (qm_mc_init(__p)) { - pr_err("MC initialisation failed\n"); - goto fail_mc; - } - if (qm_isr_init(__p)) { - pr_err("ISR initialisation failed\n"); - goto fail_isr; - } - /* static interrupt-gating controls */ - qm_dqrr_set_ithresh(__p, QMAN_PIRQ_DQRR_ITHRESH); - qm_mr_set_ithresh(__p, QMAN_PIRQ_MR_ITHRESH); - qm_isr_set_iperiod(__p, QMAN_PIRQ_IPERIOD); - portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); - if (!portal->cgrs) - goto fail_cgrs; - /* initial snapshot is no-depletion */ - qman_cgrs_init(&portal->cgrs[1]); - if (cgrs) - portal->cgrs[0] = *cgrs; - else - /* if the given mask is NULL, assume all CGRs can be seen */ - qman_cgrs_fill(&portal->cgrs[0]); - INIT_LIST_HEAD(&portal->cgr_cbs); - spin_lock_init(&portal->cgr_lock); - portal->bits = 0; - portal->slowpoll = 0; -#ifdef FSL_DPA_CAN_WAIT_SYNC - portal->eqci_owned = NULL; -#endif -#ifdef FSL_DPA_PORTAL_SHARE - raw_spin_lock_init(&portal->sharing_lock); -#ifndef __rtems__ - portal->is_shared = config->public_cfg.is_shared; - portal->sharing_redirect = NULL; -#endif /* __rtems__ */ -#endif - portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | - QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | - QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; - portal->dqrr_disable_ref = 0; - portal->cb_dc_ern = NULL; -#ifndef __rtems__ - sprintf(buf, "qportal-%d", config->public_cfg.channel); - portal->pdev = platform_device_alloc(buf, -1); - if (!portal->pdev) - goto fail_devalloc; - if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) - goto fail_devadd; - ret = platform_device_add(portal->pdev); - if (ret) - goto fail_devadd; -#endif /* __rtems__ */ - dpa_rbtree_init(&portal->retire_table); - isdr = 0xffffffff; - 
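/*
 * The bring-up below is deliberately "quiesce first": ISDR is written
 * with all ones so every interrupt source is disabled while the rings
 * are probed, and bits are then XORed back out of the mask one group
 * at a time as each ring is proven clean. The same pattern in
 * miniature (register names abstracted; illustrative sketch, not
 * driver API):
 *
 *	u32 mask = 0xffffffff;		// everything masked
 *	isr_disable_write(p, mask);
 *	mask ^= IRQ_EQCI;		// expose one source...
 *	isr_disable_write(p, mask);
 *	if (eqcr_fill(p))		// ...only once its ring is clean
 *		goto fail;
 *
 * Nothing is un-inhibited until the portal is known to be sane.
 */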
qm_isr_disable_write(__p, isdr); - portal->irq_sources = 0; - qm_isr_enable_write(__p, portal->irq_sources); - qm_isr_status_clear(__p, 0xffffffff); - snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); - if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname, - portal)) { - pr_err("request_irq() failed\n"); - goto fail_irq; - } -#ifndef __rtems__ - if ((config->public_cfg.cpu != -1) && - irq_can_set_affinity(config->public_cfg.irq) && - irq_set_affinity(config->public_cfg.irq, - cpumask_of(config->public_cfg.cpu))) { - pr_err("irq_set_affinity() failed\n"); - goto fail_affinity; - } -#endif /* __rtems__ */ - - /* Need EQCR to be empty before continuing */ - isdr ^= QM_PIRQ_EQCI; - qm_isr_disable_write(__p, isdr); - ret = qm_eqcr_get_fill(__p); - if (ret) { - pr_err("EQCR unclean\n"); - goto fail_eqcr_empty; - } - isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); - qm_isr_disable_write(__p, isdr); - if (qm_dqrr_current(__p) != NULL) { - pr_err("DQRR unclean\n"); - qm_dqrr_cdc_consume_n(__p, 0xffff); - } - if (qm_mr_current(__p) != NULL) { - /* special handling, drain just in case it's a few FQRNIs */ - if (drain_mr_fqrni(__p)) { - const struct qm_mr_entry *e = qm_mr_current(__p); - - pr_err("MR unclean, MR VERB 0x%x, rc 0x%x\n, addr 0x%x", - e->verb, e->ern.rc, e->ern.fd.addr_lo); - goto fail_dqrr_mr_empty; - } - } - /* Success */ - portal->config = config; - qm_isr_disable_write(__p, 0); - qm_isr_uninhibit(__p); - /* Write a sane SDQCR */ - qm_dqrr_sdqcr_set(__p, portal->sdqcr); - return portal; -fail_dqrr_mr_empty: -fail_eqcr_empty: -#ifndef __rtems__ -fail_affinity: -#endif /* __rtems__ */ - free_irq(config->public_cfg.irq, portal); -fail_irq: -#ifndef __rtems__ - platform_device_del(portal->pdev); -fail_devadd: - platform_device_put(portal->pdev); -fail_devalloc: -#endif /* __rtems__ */ - kfree(portal->cgrs); -fail_cgrs: - qm_isr_finish(__p); -fail_isr: - qm_mc_finish(__p); -fail_mc: - qm_mr_finish(__p); -fail_mr: - qm_dqrr_finish(__p); -fail_dqrr: - qm_eqcr_finish(__p); -fail_eqcr: - return NULL; -} - -struct qman_portal *qman_create_affine_portal( - const struct qm_portal_config *config, - const struct qman_cgrs *cgrs) -{ - struct qman_portal *res; - struct qman_portal *portal; - - portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu); - res = qman_create_portal(portal, config, cgrs); - if (res) { -#ifndef __rtems__ - spin_lock(&affine_mask_lock); - cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); - affine_channels[config->public_cfg.cpu] = - config->public_cfg.channel; - affine_portals[config->public_cfg.cpu] = portal; - spin_unlock(&affine_mask_lock); -#endif /* __rtems__ */ - } - return res; -} - -#ifndef __rtems__ -/* These checks are BUG_ON()s because the driver is already supposed to avoid - * these cases. 
*/ -struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect, - int cpu) -{ -#ifdef FSL_DPA_PORTAL_SHARE - struct qman_portal *p = &per_cpu(qman_affine_portal, cpu); - - /* Check that we don't already have our own portal */ - BUG_ON(p->config); - /* Check that we aren't already slaving to another portal */ - BUG_ON(p->is_shared); - /* Check that 'redirect' is prepared to have us */ - BUG_ON(!redirect->config->public_cfg.is_shared); - /* These are the only elements to initialise when redirecting */ - p->irq_sources = 0; - p->sharing_redirect = redirect; - affine_portals[cpu] = p; - return p; -#else - BUG(); - return NULL; -#endif -} -#endif /* __rtems__ */ - -void qman_destroy_portal(struct qman_portal *qm) -{ - const struct qm_portal_config *pcfg; - - /* Stop dequeues on the portal */ - qm_dqrr_sdqcr_set(&qm->p, 0); - - /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or - * something related to QM_PIRQ_EQCI, this may need fixing. - * Also, due to the prefetching model used for CI updates in the enqueue - * path, this update will only invalidate the CI cacheline *after* - * working on it, so we need to call this twice to ensure a full update - * irrespective of where the enqueue processing was at when the teardown - * began. */ - qm_eqcr_cce_update(&qm->p); - qm_eqcr_cce_update(&qm->p); - pcfg = qm->config; - - free_irq(pcfg->public_cfg.irq, qm); - - kfree(qm->cgrs); - qm_isr_finish(&qm->p); - qm_mc_finish(&qm->p); - qm_mr_finish(&qm->p); - qm_dqrr_finish(&qm->p); - qm_eqcr_finish(&qm->p); - -#ifndef __rtems__ - platform_device_del(qm->pdev); - platform_device_put(qm->pdev); -#endif /* __rtems__ */ - - qm->config = NULL; - if (qm->alloced) - kfree(qm); -} - -const struct qm_portal_config *qman_destroy_affine_portal(void) -{ - /* We don't want to redirect if we're a slave, use "raw" */ - struct qman_portal *qm = get_raw_affine_portal(); - const struct qm_portal_config *pcfg; -#ifndef __rtems__ - int cpu; -#endif /* __rtems__ */ - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (qm->sharing_redirect) { - qm->sharing_redirect = NULL; - put_affine_portal(); - return NULL; - } - qm->is_shared = 0; -#endif /* __rtems__ */ -#endif - pcfg = qm->config; -#ifndef __rtems__ - cpu = pcfg->public_cfg.cpu; -#endif /* __rtems__ */ - - qman_destroy_portal(qm); - -#ifndef __rtems__ - spin_lock(&affine_mask_lock); - cpumask_clear_cpu(cpu, &affine_mask); - spin_unlock(&affine_mask_lock); -#endif /* __rtems__ */ - put_affine_portal(); - return pcfg; -} - -const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p) -{ - return &p->config->public_cfg; -} -EXPORT_SYMBOL(qman_p_get_portal_config); - -const struct qman_portal_config *qman_get_portal_config(void) -{ - struct qman_portal *p = get_affine_portal(); - const struct qman_portal_config *ret = qman_p_get_portal_config(p); - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_get_portal_config); - -/* Inline helper to reduce nesting in __poll_portal_slow() */ -static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, - const struct qm_mr_entry *msg, u8 verb) -{ - FQLOCK(fq); - switch (verb) { - case QM_MR_VERB_FQRL: - DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); - fq_clear(fq, QMAN_FQ_STATE_ORL); - table_del_fq(p, fq); - break; - case QM_MR_VERB_FQRN: - DPA_ASSERT((fq->state == qman_fq_state_parked) || - (fq->state == qman_fq_state_sched)); - DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); - fq_clear(fq, QMAN_FQ_STATE_CHANGING); - if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) 
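/*
 * The FQS byte of an FQRN latches two facts into the FQ object:
 * NOTEMPTY becomes QMAN_FQ_STATE_NE (frames remain, so the FQ must be
 * drained before it can go OOS) and ORLPRESENT becomes
 * QMAN_FQ_STATE_ORL (an FQRL will arrive once the ORP empties, so the
 * FQ stays in the retire table). Callers can observe the outcome via
 * qman_fq_state(), e.g. (sketch):
 *
 *	enum qman_fq_state state;
 *	u32 flags;
 *
 *	qman_fq_state(fq, &state, &flags);
 *	if (flags & QMAN_FQ_STATE_NE)
 *		;	// volatile-dequeue the residue before qman_oos_fq()
 */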
- fq_set(fq, QMAN_FQ_STATE_NE); - if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) - fq_set(fq, QMAN_FQ_STATE_ORL); - else - table_del_fq(p, fq); - fq->state = qman_fq_state_retired; - break; - case QM_MR_VERB_FQPN: - DPA_ASSERT(fq->state == qman_fq_state_sched); - DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); - fq->state = qman_fq_state_parked; - } - FQUNLOCK(fq); -} - -static u32 __poll_portal_slow(struct qman_portal *p, u32 is) -{ - const struct qm_mr_entry *msg; - - if (is & QM_PIRQ_CSCI) { - struct qman_cgrs rr, c; - struct qm_mc_result *mcr; - struct qman_cgr *cgr; - unsigned long irqflags __maybe_unused; - - spin_lock_irqsave(&p->cgr_lock, irqflags); - /* - * The CSCI bit must be cleared _before_ issuing the - * Query Congestion State command, to ensure that a long - * CGR State Change callback cannot miss an intervening - * state change. - */ - qm_isr_status_clear(&p->p, QM_PIRQ_CSCI); - qm_mc_start(&p->p); - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - /* mask out the ones I'm not interested in */ - qman_cgrs_and(&rr, (const struct qman_cgrs *) - &mcr->querycongestion.state, &p->cgrs[0]); - /* check previous snapshot for delta, enter/exit congestion */ - qman_cgrs_xor(&c, &rr, &p->cgrs[1]); - /* update snapshot */ - qman_cgrs_cp(&p->cgrs[1], &rr); - /* Invoke callback */ - list_for_each_entry(cgr, &p->cgr_cbs, node) - if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) - cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); - spin_unlock_irqrestore(&p->cgr_lock, irqflags); - } - -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (is & QM_PIRQ_EQCI) { - unsigned long irqflags; - - PORTAL_IRQ_LOCK(p, irqflags); - p->eqci_owned = NULL; - PORTAL_IRQ_UNLOCK(p, irqflags); - wake_up(&affine_queue); - } -#endif - - if (is & QM_PIRQ_EQRI) { - unsigned long irqflags __maybe_unused; - - PORTAL_IRQ_LOCK(p, irqflags); - qm_eqcr_cce_update(&p->p); - qm_eqcr_set_ithresh(&p->p, 0); - PORTAL_IRQ_UNLOCK(p, irqflags); - wake_up(&affine_queue); - } - - if (is & QM_PIRQ_MRI) { - struct qman_fq *fq; - u8 verb, num = 0; -mr_loop: - qm_mr_pvb_update(&p->p); - msg = qm_mr_current(&p->p); - if (!msg) - goto mr_done; - verb = msg->verb & QM_MR_VERB_TYPE_MASK; - /* The message is a software ERN iff the 0x20 bit is set */ - if (verb & 0x20) { - switch (verb) { - case QM_MR_VERB_FQRNI: - /* nada, we drop FQRNIs on the floor */ - break; - case QM_MR_VERB_FQRN: - case QM_MR_VERB_FQRL: - /* Lookup in the retirement table */ - fq = table_find_fq(p, msg->fq.fqid); - BUG_ON(!fq); - fq_state_change(p, fq, msg, verb); - if (fq->cb.fqs) - fq->cb.fqs(p, fq, msg); - break; - case QM_MR_VERB_FQPN: - /* Parked */ -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - fq = get_fq_table_entry(msg->fq.contextB); -#else - fq = (void *)(uintptr_t)msg->fq.contextB; -#endif - fq_state_change(p, fq, msg, verb); - if (fq->cb.fqs) - fq->cb.fqs(p, fq, msg); - break; - case QM_MR_VERB_DC_ERN: - /* DCP ERN */ - if (p->cb_dc_ern) - p->cb_dc_ern(p, msg); - else if (cb_dc_ern) - cb_dc_ern(p, msg); - else - pr_crit_once("Leaking DCP ERNs!\n"); - break; - default: - pr_crit("Invalid MR verb 0x%02x\n", verb); - } - } else { - /* Its a software ERN */ -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - fq = get_fq_table_entry(msg->ern.tag); -#else - fq = (void *)(uintptr_t)msg->ern.tag; -#endif - fq->cb.ern(p, fq, msg); - } - num++; - qm_mr_next(&p->p); - goto mr_loop; -mr_done: - qm_mr_cci_consume(&p->p, num); - } - /* - * QM_PIRQ_CSCI has already been cleared, as part of its specific - * processing. 
If that interrupt source has meanwhile been re-asserted, - * we mustn't clear it here (or in the top-level interrupt handler). - */ - return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); -} - -/* remove some slowish-path stuff from the "fast path" and make sure it isn't - * inlined. */ -static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) -{ - p->vdqcr_owned = NULL; - FQLOCK(fq); - fq_clear(fq, QMAN_FQ_STATE_VDQCR); - FQUNLOCK(fq); - wake_up(&affine_queue); -} - -/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states - * that would conflict with other things if they ran at the same time on the - * same cpu are; - * - * (i) setting/clearing vdqcr_owned, and - * (ii) clearing the NE (Not Empty) flag. - * - * Both are safe. Because; - * - * (i) this clearing can only occur after qman_volatile_dequeue() has set the - * vdqcr_owned field (which it does before setting VDQCR), and - * qman_volatile_dequeue() blocks interrupts and preemption while this is - * done so that we can't interfere. - * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as - * with (i) that API prevents us from interfering until it's safe. - * - * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far - * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett - * advantage comes from this function not having to "lock" anything at all. - * - * Note also that the callbacks are invoked at points which are safe against the - * above potential conflicts, but that this function itself is not re-entrant - * (this is because the function tracks one end of each FIFO in the portal and - * we do *not* want to lock that). So the consequence is that it is safe for - * user callbacks to call into any QMan API *except* qman_poll() (as that's the - * sole API that could be invoking the callback through this function). - */ -static inline unsigned int __poll_portal_fast(struct qman_portal *p, - unsigned int poll_limit) -{ - const struct qm_dqrr_entry *dq; - struct qman_fq *fq; - enum qman_cb_dqrr_result res; - unsigned int limit = 0; - -loop: - qm_dqrr_pvb_update(&p->p); - dq = qm_dqrr_current(&p->p); - if (!dq) - goto done; - if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { - /* VDQCR: don't trust contextB as the FQ may have been - * configured for h/w consumption and we're draining it - * post-retirement. */ - fq = p->vdqcr_owned; - /* We only set QMAN_FQ_STATE_NE when retiring, so we only need - * to check for clearing it when doing volatile dequeues. It's - * one less thing to check in the critical path (SDQCR). */ - if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) - fq_clear(fq, QMAN_FQ_STATE_NE); - /* this is duplicated from the SDQCR code, but we have stuff to - * do before *and* after this callback, and we don't want - * multiple if()s in the critical path (SDQCR). */ - res = fq->cb.dqrr(p, fq, dq); - if (res == qman_cb_dqrr_stop) - goto done; - /* Check for VDQCR completion */ - if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) - clear_vdqcr(p, fq); - } else { - /* SDQCR: contextB points to the FQ */ -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - fq = get_fq_table_entry(dq->contextB); -#else - fq = (void *)(uintptr_t)dq->contextB; -#endif - /* Now let the callback do its stuff */ - res = fq->cb.dqrr(p, fq, dq); - /* The callback can request that we exit without consuming this - * entry nor advancing; */ - if (res == qman_cb_dqrr_stop) - goto done; - } - /* Interpret 'dq' from a driver perspective. */ - /* Parking isn't possible unless HELDACTIVE was set. 
NB, - * FORCEELIGIBLE implies HELDACTIVE, so we only need to - * check for HELDACTIVE to cover both. */ - DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || - (res != qman_cb_dqrr_park)); - /* Defer just means "skip it, I'll consume it myself later on" */ - if (res != qman_cb_dqrr_defer) - qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park)); - /* Move forward */ - qm_dqrr_next(&p->p); - /* Entry processed and consumed, increment our counter. The callback can - * request that we exit after consuming the entry, and we also exit if - * we reach our processing limit, so loop back only if neither of these - * conditions is met. */ - if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop)) - goto loop; -done: - return limit; -} - -u32 qman_irqsource_get(void) -{ - /* "irqsource" and "poll" APIs mustn't redirect when sharing, they - * should shut the user out if they are not the primary CPU hosting the - * portal. That's why we use the "raw" interface. */ - struct qman_portal *p = get_raw_affine_portal(); - u32 ret = p->irq_sources & QM_PIRQ_VISIBLE; - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_irqsource_get); - -int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused) -{ - __maybe_unused unsigned long irqflags; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (p->sharing_redirect) - return -EINVAL; -#endif /* __rtems__ */ -#endif - PORTAL_IRQ_LOCK(p, irqflags); - set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); - qm_isr_enable_write(&p->p, p->irq_sources); - PORTAL_IRQ_UNLOCK(p, irqflags); - return 0; -} -EXPORT_SYMBOL(qman_p_irqsource_add); - -int qman_irqsource_add(u32 bits __maybe_unused) -{ - struct qman_portal *p = get_raw_affine_portal(); - int ret; - - ret = qman_p_irqsource_add(p, bits); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_irqsource_add); - -int qman_p_irqsource_remove(struct qman_portal *p, u32 bits) -{ - __maybe_unused unsigned long irqflags; - u32 ier; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (p->sharing_redirect) { - put_affine_portal(); - return -EINVAL; - } -#endif /* __rtems__ */ -#endif - /* Our interrupt handler only processes+clears status register bits that - * are in p->irq_sources. As we're trimming that mask, if one of them - * were to assert in the status register just before we remove it from - * the enable register, there would be an interrupt-storm when we - * release the IRQ lock. So we wait for the enable register update to - * take effect in h/w (by reading it back) and then clear all other bits - * in the status register. Ie. we clear them from ISR once it's certain - * IER won't allow them to reassert. */ - PORTAL_IRQ_LOCK(p, irqflags); - bits &= QM_PIRQ_VISIBLE; - clear_bits(bits, &p->irq_sources); - qm_isr_enable_write(&p->p, p->irq_sources); - ier = qm_isr_enable_read(&p->p); - /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a - * data-dependency, ie. to protect against re-ordering. 
*/ - qm_isr_status_clear(&p->p, ~ier); - PORTAL_IRQ_UNLOCK(p, irqflags); - return 0; -} -EXPORT_SYMBOL(qman_p_irqsource_remove); - -int qman_irqsource_remove(u32 bits) -{ - struct qman_portal *p = get_raw_affine_portal(); - int ret; - - ret = qman_p_irqsource_remove(p, bits); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_irqsource_remove); - -#ifndef __rtems__ -const cpumask_t *qman_affine_cpus(void) -{ - return &affine_mask; -} -EXPORT_SYMBOL(qman_affine_cpus); - -u16 qman_affine_channel(int cpu) -{ - if (cpu < 0) { - struct qman_portal *portal = get_raw_affine_portal(); - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - BUG_ON(portal->sharing_redirect); -#endif /* __rtems__ */ -#endif - cpu = portal->config->public_cfg.cpu; - put_affine_portal(); - } - BUG_ON(!cpumask_test_cpu(cpu, &affine_mask)); - return affine_channels[cpu]; -} -EXPORT_SYMBOL(qman_affine_channel); -#endif /* __rtems__ */ - -void *qman_get_affine_portal(int cpu) -{ -#ifndef __rtems__ - return affine_portals[cpu]; -#else /* __rtems__ */ - return &per_cpu(qman_affine_portal, cpu); -#endif /* __rtems__ */ -} -EXPORT_SYMBOL(qman_get_affine_portal); - -int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) -{ - int ret; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (unlikely(p->sharing_redirect)) - ret = -EINVAL; - else -#endif /* __rtems__ */ -#endif - { - BUG_ON(p->irq_sources & QM_PIRQ_DQRI); - ret = __poll_portal_fast(p, limit); - } - return ret; -} -EXPORT_SYMBOL(qman_p_poll_dqrr); - -int qman_poll_dqrr(unsigned int limit) -{ - struct qman_portal *p = get_poll_portal(); - int ret; - - ret = qman_p_poll_dqrr(p, limit); - put_poll_portal(); - return ret; -} -EXPORT_SYMBOL(qman_poll_dqrr); - -u32 qman_p_poll_slow(struct qman_portal *p) -{ - u32 ret; - -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (unlikely(p->sharing_redirect)) - ret = (u32)-1; - else -#endif /* __rtems__ */ -#endif - { - u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; - - ret = __poll_portal_slow(p, is); - qm_isr_status_clear(&p->p, ret); - } - return ret; -} -EXPORT_SYMBOL(qman_p_poll_slow); - -u32 qman_poll_slow(void) -{ - struct qman_portal *p = get_poll_portal(); - u32 ret; - - ret = qman_p_poll_slow(p); - put_poll_portal(); - return ret; -} -EXPORT_SYMBOL(qman_poll_slow); - -/* Legacy wrapper */ -void qman_p_poll(struct qman_portal *p) -{ -#ifdef FSL_DPA_PORTAL_SHARE -#ifndef __rtems__ - if (unlikely(p->sharing_redirect)) - return; -#endif /* __rtems__ */ -#endif - if ((~p->irq_sources) & QM_PIRQ_SLOW) { - if (!(p->slowpoll--)) { - u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; - u32 active = __poll_portal_slow(p, is); - - if (active) { - qm_isr_status_clear(&p->p, active); - p->slowpoll = SLOW_POLL_BUSY; - } else - p->slowpoll = SLOW_POLL_IDLE; - } - } - if ((~p->irq_sources) & QM_PIRQ_DQRI) - __poll_portal_fast(p, QMAN_POLL_LIMIT); -} -EXPORT_SYMBOL(qman_p_poll); - -void qman_poll(void) -{ - struct qman_portal *p = get_poll_portal(); - - qman_p_poll(p); - put_poll_portal(); -} -EXPORT_SYMBOL(qman_poll); - -void qman_p_stop_dequeues(struct qman_portal *p) -{ - qman_stop_dequeues_ex(p); -} -EXPORT_SYMBOL(qman_p_stop_dequeues); - -void qman_stop_dequeues(void) -{ - struct qman_portal *p = get_affine_portal(); - - qman_p_stop_dequeues(p); - put_affine_portal(); -} -EXPORT_SYMBOL(qman_stop_dequeues); - -void qman_p_start_dequeues(struct qman_portal *p) -{ - unsigned long irqflags __maybe_unused; - - PORTAL_IRQ_LOCK(p, irqflags); - DPA_ASSERT(p->dqrr_disable_ref > 0); - if (!(--p->dqrr_disable_ref)) - 
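/*
 * dqrr_disable_ref makes stop/start nestable across layers: MAXFILL is
 * zeroed on the first stop and only restored here when the last stop
 * is undone. Paired calls therefore compose safely (sketch):
 *
 *	qman_stop_dequeues();	// ref 0 -> 1, MAXFILL = 0
 *	qman_stop_dequeues();	// ref 1 -> 2, no h/w change
 *	qman_start_dequeues();	// ref 2 -> 1, no h/w change
 *	qman_start_dequeues();	// ref 1 -> 0, MAXFILL restored
 */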
qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL); - PORTAL_IRQ_UNLOCK(p, irqflags); -} -EXPORT_SYMBOL(qman_p_start_dequeues); - -void qman_start_dequeues(void) -{ - struct qman_portal *p = get_affine_portal(); - - qman_p_start_dequeues(p); - put_affine_portal(); -} -EXPORT_SYMBOL(qman_start_dequeues); - -void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) -{ - unsigned long irqflags __maybe_unused; - - PORTAL_IRQ_LOCK(p, irqflags); - pools &= p->config->public_cfg.pools; - p->sdqcr |= pools; - qm_dqrr_sdqcr_set(&p->p, p->sdqcr); - PORTAL_IRQ_UNLOCK(p, irqflags); -} -EXPORT_SYMBOL(qman_p_static_dequeue_add); - -void qman_static_dequeue_add(u32 pools) -{ - struct qman_portal *p = get_affine_portal(); - - qman_p_static_dequeue_add(p, pools); - put_affine_portal(); -} -EXPORT_SYMBOL(qman_static_dequeue_add); - -void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools) -{ - unsigned long irqflags __maybe_unused; - - PORTAL_IRQ_LOCK(p, irqflags); - pools &= p->config->public_cfg.pools; - p->sdqcr &= ~pools; - qm_dqrr_sdqcr_set(&p->p, p->sdqcr); - PORTAL_IRQ_UNLOCK(p, irqflags); -} -EXPORT_SYMBOL(qman_p_static_dequeue_del); - -void qman_static_dequeue_del(u32 pools) -{ - struct qman_portal *p = get_affine_portal(); - - qman_p_static_dequeue_del(p, pools); - put_affine_portal(); -} -EXPORT_SYMBOL(qman_static_dequeue_del); - -u32 qman_p_static_dequeue_get(struct qman_portal *p) -{ - return p->sdqcr; -} -EXPORT_SYMBOL(qman_p_static_dequeue_get); - -u32 qman_static_dequeue_get(void) -{ - struct qman_portal *p = get_affine_portal(); - u32 ret = qman_p_static_dequeue_get(p); - - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_static_dequeue_get); - -void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq, - int park_request) -{ - qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); -} -EXPORT_SYMBOL(qman_p_dca); - -void qman_dca(struct qm_dqrr_entry *dq, int park_request) -{ - struct qman_portal *p = get_affine_portal(); - - qman_p_dca(p, dq, park_request); - put_affine_portal(); -} -EXPORT_SYMBOL(qman_dca); - -/* Frame queue API */ - -static const char *mcr_result_str(u8 result) -{ - switch (result) { - case QM_MCR_RESULT_NULL: - return "QM_MCR_RESULT_NULL"; - case QM_MCR_RESULT_OK: - return "QM_MCR_RESULT_OK"; - case QM_MCR_RESULT_ERR_FQID: - return "QM_MCR_RESULT_ERR_FQID"; - case QM_MCR_RESULT_ERR_FQSTATE: - return "QM_MCR_RESULT_ERR_FQSTATE"; - case QM_MCR_RESULT_ERR_NOTEMPTY: - return "QM_MCR_RESULT_ERR_NOTEMPTY"; - case QM_MCR_RESULT_PENDING: - return "QM_MCR_RESULT_PENDING"; - case QM_MCR_RESULT_ERR_BADCOMMAND: - return "QM_MCR_RESULT_ERR_BADCOMMAND"; - } - return "<unknown MCR result>"; -} - -int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) -{ - struct qm_fqd fqd; - struct qm_mcr_queryfq_np np; - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - - if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { - int ret = qman_alloc_fqid(&fqid); - - if (ret) - return ret; - } - spin_lock_init(&fq->fqlock); - fq->fqid = fqid; - fq->flags = flags; - fq->state = qman_fq_state_oos; - fq->cgr_groupid = 0; -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) - return -ENOMEM; -#endif - if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) - return 0; - /* Everything else is AS_IS support */ - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); - while 
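/*
 * This poll is one instance of the management-command handshake used
 * throughout this file: claim the MC slot, fill in command-specific
 * fields, commit with a verb, then spin until the valid-bit flips and
 * qm_mc_result() yields the response. The generic shape (sketch):
 *
 *	mcc = qm_mc_start(&p->p);
 *	mcc->queryfq.fqid = fqid;		// command payload
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	while (!(mcr = qm_mc_result(&p->p)))
 *		cpu_relax();		// responses are polled, not IRQ-driven
 *	if (mcr->result != QM_MCR_RESULT_OK)
 *		;	// decode via mcr_result_str()
 */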
(!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); - if (mcr->result != QM_MCR_RESULT_OK) { - pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); - goto err; - } - fqd = mcr->queryfq.fqd; - mcc = qm_mc_start(&p->p); - mcc->queryfq_np.fqid = fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); - if (mcr->result != QM_MCR_RESULT_OK) { - pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); - goto err; - } - np = mcr->queryfq_np; - /* Phew, have queryfq and queryfq_np results, stitch together - * the FQ object from those. */ - fq->cgr_groupid = fqd.cgid; - switch (np.state & QM_MCR_NP_STATE_MASK) { - case QM_MCR_NP_STATE_OOS: - break; - case QM_MCR_NP_STATE_RETIRED: - fq->state = qman_fq_state_retired; - if (np.frm_cnt) - fq_set(fq, QMAN_FQ_STATE_NE); - break; - case QM_MCR_NP_STATE_TEN_SCHED: - case QM_MCR_NP_STATE_TRU_SCHED: - case QM_MCR_NP_STATE_ACTIVE: - fq->state = qman_fq_state_sched; - if (np.state & QM_MCR_NP_STATE_R) - fq_set(fq, QMAN_FQ_STATE_CHANGING); - break; - case QM_MCR_NP_STATE_PARKED: - fq->state = qman_fq_state_parked; - break; - default: - DPA_ASSERT(NULL == "invalid FQ state"); - } - if (fqd.fq_ctrl & QM_FQCTRL_CGE) - fq->state |= QMAN_FQ_STATE_CGR_EN; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return 0; -err: - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) - qman_release_fqid(fqid); - return -EIO; -} -EXPORT_SYMBOL(qman_create_fq); - -void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) -{ - - /* We don't need to lock the FQ as it is a pre-condition that the FQ be - * quiesced. Instead, run some checks. */ - switch (fq->state) { - case qman_fq_state_parked: - DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); - case qman_fq_state_oos: - if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) - qman_release_fqid(fq->fqid); -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - clear_fq_table_entry(fq->key); -#endif - return; - default: - break; - } - DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); -} -EXPORT_SYMBOL(qman_destroy_fq); - -u32 qman_fq_fqid(struct qman_fq *fq) -{ - return fq->fqid; -} -EXPORT_SYMBOL(qman_fq_fqid); - -void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) -{ - if (state) - *state = fq->state; - if (flags) - *flags = fq->flags; -} -EXPORT_SYMBOL(qman_fq_state); - -int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
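/*
 * qman_init_fq() is the second half of the usual two-step setup, the
 * first being qman_create_fq(). A typical caller looks like this
 * (sketch; my_dqrr_cb stands in for the user's DQRR handler and is not
 * driver API):
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *	struct qm_mcc_initfq opts;
 *	int err;
 *
 *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.wq = 4;
 *	if (!err)
 *		err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */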
- QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; - - if ((fq->state != qman_fq_state_oos) && - (fq->state != qman_fq_state_parked)) - return -EINVAL; -#ifdef CONFIG_FSL_DPA_CHECKING - if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) - return -EINVAL; -#endif - if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { - /* And can't be set at the same time as TDTHRESH */ - if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) - return -EINVAL; - } - /* Issue an INITFQ_[PARKED|SCHED] management command */ - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - FQLOCK(fq); - if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || - ((fq->state != qman_fq_state_oos) && - (fq->state != qman_fq_state_parked)))) { - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return -EBUSY; - } - mcc = qm_mc_start(&p->p); - if (opts) - mcc->initfq = *opts; - mcc->initfq.fqid = fq->fqid; - mcc->initfq.count = 0; - /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a - * demux pointer. Otherwise, the caller-provided value is allowed to - * stand, don't overwrite it. */ - if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { - dma_addr_t phys_fq; - - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - mcc->initfq.fqd.context_b = fq->key; -#else - mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq; -#endif - /* and the physical address - NB, if the user wasn't trying to - * set CONTEXTA, clear the stashing settings. */ - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; - memset(&mcc->initfq.fqd.context_a, 0, - sizeof(mcc->initfq.fqd.context_a)); - } else { -#ifndef __rtems__ - phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), - DMA_TO_DEVICE); -#else /* __rtems__ */ - phys_fq = (dma_addr_t)fq; -#endif /* __rtems__ */ - qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); - } - } - if (flags & QMAN_INITFQ_FLAG_LOCAL) { - mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel; - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; - mcc->initfq.fqd.dest.wq = 4; - } - } - qm_mc_commit(&p->p, myverb); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); - res = mcr->result; - if (res != QM_MCR_RESULT_OK) { - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return -EIO; - } - if (opts) { - if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { - if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) - fq_set(fq, QMAN_FQ_STATE_CGR_EN); - else - fq_clear(fq, QMAN_FQ_STATE_CGR_EN); - } - if (opts->we_mask & QM_INITFQ_WE_CGID) - fq->cgr_groupid = opts->fqd.cgid; - } - fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
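/*
 * Note the symmetry with the dequeue path: the context_b value written
 * by this function is exactly what __poll_portal_fast() reads back to
 * demux a DQRR entry to its FQ object:
 *
 *	#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 *	fq = get_fq_table_entry(dq->contextB);
 *	#else
 *	fq = (void *)(uintptr_t)dq->contextB;
 *	#endif
 *
 * which is why TO_DCPORTAL FQs keep the caller's context_b untouched -
 * their entries are consumed by hardware, so the s/w demux value would
 * never be read.
 */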
- qman_fq_state_sched : qman_fq_state_parked; - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return 0; -} -EXPORT_SYMBOL(qman_init_fq); - -int qman_schedule_fq(struct qman_fq *fq) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - int ret = 0; - u8 res; - - if (fq->state != qman_fq_state_parked) - return -EINVAL; -#ifdef CONFIG_FSL_DPA_CHECKING - if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) - return -EINVAL; -#endif - /* Issue a ALTERFQ_SCHED management command */ - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - FQLOCK(fq); - if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || - (fq->state != qman_fq_state_parked))) { - ret = -EBUSY; - goto out; - } - mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); - res = mcr->result; - if (res != QM_MCR_RESULT_OK) { - ret = -EIO; - goto out; - } - fq->state = qman_fq_state_sched; -out: - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_schedule_fq); - -int qman_retire_fq(struct qman_fq *fq, u32 *flags) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - int rval; - u8 res; - - if ((fq->state != qman_fq_state_parked) && - (fq->state != qman_fq_state_sched)) - return -EINVAL; -#ifdef CONFIG_FSL_DPA_CHECKING - if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) - return -EINVAL; -#endif - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - FQLOCK(fq); - if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || - (fq->state == qman_fq_state_retired) || - (fq->state == qman_fq_state_oos))) { - rval = -EBUSY; - goto out; - } - rval = table_push_fq(p, fq); - if (rval) - goto out; - mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); - res = mcr->result; - /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING, - * and defer the flags until FQRNI or FQRN (respectively) show up. But - * "Friendly" is to process OK immediately, and not set CHANGING. We do - * friendly, otherwise the caller doesn't necessarily have a fully - * "retired" FQ on return even if the retirement was immediate. However - * this does mean some code duplication between here and - * fq_state_change(). */ - if (likely(res == QM_MCR_RESULT_OK)) { - rval = 0; - /* Process 'fq' right away, we'll ignore FQRNI */ - if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) - fq_set(fq, QMAN_FQ_STATE_NE); - if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) - fq_set(fq, QMAN_FQ_STATE_ORL); - else - table_del_fq(p, fq); - if (flags) - *flags = fq->flags; - fq->state = qman_fq_state_retired; - if (fq->cb.fqs) { - /* Another issue with supporting "immediate" retirement - * is that we're forced to drop FQRNIs, because by the - * time they're seen it may already be "too late" (the - * fq may have been OOS'd and free()'d already). But if - * the upper layer wants a callback whether it's - * immediate or not, we have to fake a "MR" entry to - * look like an FQRNI... 
*/ - struct qm_mr_entry msg; - - msg.verb = QM_MR_VERB_FQRNI; - msg.fq.fqs = mcr->alterfq.fqs; - msg.fq.fqid = fq->fqid; -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - msg.fq.contextB = fq->key; -#else - msg.fq.contextB = (u32)(uintptr_t)fq; -#endif - fq->cb.fqs(p, fq, &msg); - } - } else if (res == QM_MCR_RESULT_PENDING) { - rval = 1; - fq_set(fq, QMAN_FQ_STATE_CHANGING); - } else { - rval = -EIO; - table_del_fq(p, fq); - } -out: - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return rval; -} -EXPORT_SYMBOL(qman_retire_fq); - -int qman_oos_fq(struct qman_fq *fq) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - int ret = 0; - u8 res; - - if (fq->state != qman_fq_state_retired) - return -EINVAL; -#ifdef CONFIG_FSL_DPA_CHECKING - if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) - return -EINVAL; -#endif - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - FQLOCK(fq); - if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || - (fq->state != qman_fq_state_retired))) { - ret = -EBUSY; - goto out; - } - mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); - res = mcr->result; - if (res != QM_MCR_RESULT_OK) { - ret = -EIO; - goto out; - } - fq->state = qman_fq_state_oos; -out: - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_oos_fq); - -int qman_fq_flow_control(struct qman_fq *fq, int xon) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - int ret = 0; - u8 res; - u8 myverb; - - if ((fq->state == qman_fq_state_oos) || - (fq->state == qman_fq_state_retired) || - (fq->state == qman_fq_state_parked)) - return -EINVAL; - -#ifdef CONFIG_FSL_DPA_CHECKING - if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) - return -EINVAL; -#endif - /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */ - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - FQLOCK(fq); - if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || - (fq->state == qman_fq_state_parked) || - (fq->state == qman_fq_state_oos) || - (fq->state == qman_fq_state_retired))) { - ret = -EBUSY; - goto out; - } - mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; - mcc->alterfq.count = 0; - myverb = xon ? 
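/*
 * XON/XOFF gates scheduling of an FQ that is already configured: per
 * the verb names, XOFF makes the FQ ineligible for dequeue scheduling
 * and XON re-enables it, with no retire/OOS transition involved.
 * Typical use (sketch):
 *
 *	qman_fq_flow_control(fq, 0);	// XOFF: pause dequeues from fq
 *	...				// e.g. reconfigure the consumer
 *	qman_fq_flow_control(fq, 1);	// XON: resume
 */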
QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF; - - qm_mc_commit(&p->p, myverb); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); - - res = mcr->result; - if (res != QM_MCR_RESULT_OK) { - ret = -EIO; - goto out; - } -out: - FQUNLOCK(fq); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_fq_flow_control); - -int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *fqd = mcr->queryfq.fqd; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res != QM_MCR_RESULT_OK) - return -EIO; - return 0; -} -EXPORT_SYMBOL(qman_query_fq); - -int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *np = mcr->queryfq_np; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res == QM_MCR_RESULT_ERR_FQID) - return -ERANGE; - else if (res != QM_MCR_RESULT_OK) - return -EIO; - return 0; -} -EXPORT_SYMBOL(qman_query_fq_np); - -int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res, myverb; - - PORTAL_IRQ_LOCK(p, irqflags); - myverb = (query_dedicated) ? 
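/*
 * The QUERY verbs are read-only and can be issued at any time. One
 * common pattern is polling a retired FQ for emptiness while its
 * residue is being drained, before taking it OOS (sketch):
 *
 *	struct qm_mcr_queryfq_np np;
 *	int err;
 *
 *	do {
 *		err = qman_query_fq_np(fq, &np);
 *	} while (!err && np.frm_cnt);	// residue still draining
 *	if (!err)
 *		err = qman_oos_fq(fq);
 */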
QM_MCR_VERB_QUERYWQ_DEDICATED : - QM_MCR_VERB_QUERYWQ; - mcc = qm_mc_start(&p->p); - mcc->querywq.channel.id = wq->channel.id; - qm_mc_commit(&p->p, myverb); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *wq = mcr->querywq; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res != QM_MCR_RESULT_OK) { - pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); - return -EIO; - } - return 0; -} -EXPORT_SYMBOL(qman_query_wq); - -int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, - struct qm_mcr_cgrtestwrite *result) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - mcc->cgrtestwrite.cgid = cgr->cgrid; - mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); - mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; - qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *result = mcr->cgrtestwrite; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res != QM_MCR_RESULT_OK) { - pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); - return -EIO; - } - return 0; -} -EXPORT_SYMBOL(qman_testwrite_cgr); - -int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - mcc->querycgr.cgid = cgr->cgrid; - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *cgrd = mcr->querycgr; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res != QM_MCR_RESULT_OK) { - pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); - return -EIO; - } - return 0; -} -EXPORT_SYMBOL(qman_query_cgr); - -int qman_query_congestion(struct qm_mcr_querycongestion *congestion) -{ - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - - PORTAL_IRQ_LOCK(p, irqflags); - qm_mc_start(&p->p); - qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == - QM_MCC_VERB_QUERYCONGESTION); - res = mcr->result; - if (res == QM_MCR_RESULT_OK) - *congestion = mcr->querycongestion; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - if (res != QM_MCR_RESULT_OK) { - pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); - return -EIO; - } - return 0; -} -EXPORT_SYMBOL(qman_query_congestion); - -/* internal function used as a wait_event() expression */ -static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) -{ - unsigned long irqflags __maybe_unused; - int ret = -EBUSY; - - PORTAL_IRQ_LOCK(p, irqflags); - if (!p->vdqcr_owned) { - FQLOCK(fq); - if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) - goto escape; - fq_set(fq, QMAN_FQ_STATE_VDQCR); - FQUNLOCK(fq); - p->vdqcr_owned = fq; - ret = 0; - } -escape: - PORTAL_IRQ_UNLOCK(p, irqflags); - if (!ret) - qm_dqrr_vdqcr_set(&p->p, vdqcr); - return 
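/*
 * Only one volatile dequeue may be outstanding per portal: vdqcr_owned
 * is the portal-side claim and QMAN_FQ_STATE_VDQCR the FQ-side claim,
 * both released by clear_vdqcr() when the DQCR_EXPIRED entry shows up
 * in the fast path. The usual blocking caller (sketch; the NUMFRAMES
 * encoding for the vdqcr word is assumed from the public header):
 *
 *	err = qman_volatile_dequeue(fq,
 *			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
 *			vdqcr);		// e.g. a "till empty" encoding
 *
 * which returns only once the command has been issued and completed.
 */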
ret; -} - -static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) -{ - int ret; - - *p = get_affine_portal(); - ret = set_p_vdqcr(*p, fq, vdqcr); - put_affine_portal(); - return ret; -} - -#ifdef FSL_DPA_CAN_WAIT -static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq, - u32 vdqcr, u32 flags) -{ - int ret = 0; - -#ifndef __rtems__ - if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) - ret = wait_event_interruptible(affine_queue, - !(ret = set_p_vdqcr(p, fq, vdqcr))); - else -#endif /* __rtems__ */ - wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr))); - return ret; -} - -static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, - u32 vdqcr, u32 flags) -{ - int ret = 0; - -#ifndef __rtems__ - if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) - ret = wait_event_interruptible(affine_queue, - !(ret = set_vdqcr(p, fq, vdqcr))); - else -#endif /* __rtems__ */ - wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr))); - return ret; -} -#endif - -int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq, - u32 flags __maybe_unused, u32 vdqcr) -{ - int ret; - - if ((fq->state != qman_fq_state_parked) && - (fq->state != qman_fq_state_retired)) - return -EINVAL; - if (vdqcr & QM_VDQCR_FQID_MASK) - return -EINVAL; - if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) - return -EBUSY; - vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_VOLATILE_FLAG_WAIT) - ret = wait_p_vdqcr_start(p, fq, vdqcr, flags); - else -#endif - ret = set_p_vdqcr(p, fq, vdqcr); - if (ret) - return ret; - /* VDQCR is set */ -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_VOLATILE_FLAG_FINISH) { -#ifndef __rtems__ - if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) - /* NB: don't propagate any error - the caller wouldn't - * know whether the VDQCR was issued or not. A signal - * could arrive after returning anyway, so the caller - * can check signal_pending() if that's an issue. */ - wait_event_interruptible(affine_queue, - !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, - !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_p_volatile_dequeue); - -int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, - u32 vdqcr) -{ - struct qman_portal *p; - int ret; - - if ((fq->state != qman_fq_state_parked) && - (fq->state != qman_fq_state_retired)) - return -EINVAL; - if (vdqcr & QM_VDQCR_FQID_MASK) - return -EINVAL; - if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) - return -EBUSY; - vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_VOLATILE_FLAG_WAIT) - ret = wait_vdqcr_start(&p, fq, vdqcr, flags); - else -#endif - ret = set_vdqcr(&p, fq, vdqcr); - if (ret) - return ret; - /* VDQCR is set */ -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_VOLATILE_FLAG_FINISH) { -#ifndef __rtems__ - if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) - /* NB: don't propagate any error - the caller wouldn't - * know whether the VDQCR was issued or not. A signal - * could arrive after returning anyway, so the caller - * can check signal_pending() if that's an issue. 
*/ - wait_event_interruptible(affine_queue, - !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, - !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_volatile_dequeue); - -static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) -{ - if (avail) - qm_eqcr_cce_prefetch(&p->p); - else - qm_eqcr_cce_update(&p->p); -} - -int qman_eqcr_is_empty(void) -{ - unsigned long irqflags __maybe_unused; - struct qman_portal *p = get_affine_portal(); - u8 avail; - - PORTAL_IRQ_LOCK(p, irqflags); - update_eqcr_ci(p, 0); - avail = qm_eqcr_get_fill(&p->p); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return avail == 0; -} -EXPORT_SYMBOL(qman_eqcr_is_empty); - -void qman_set_dc_ern(qman_cb_dc_ern handler, int affine) -{ - if (affine) { - unsigned long irqflags __maybe_unused; - struct qman_portal *p = get_affine_portal(); - - PORTAL_IRQ_LOCK(p, irqflags); - p->cb_dc_ern = handler; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - } else - cb_dc_ern = handler; -} -EXPORT_SYMBOL(qman_set_dc_ern); - -static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq; - u8 avail; - - PORTAL_IRQ_LOCK(p, (*irqflags)); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { - if (p->eqci_owned) { - PORTAL_IRQ_UNLOCK(p, (*irqflags)); - return NULL; - } - p->eqci_owned = fq; - } -#endif - if (p->use_eqcr_ci_stashing) { - /* - * The stashing case is easy, only update if we need to in - * order to try and liberate ring entries. - */ - eq = qm_eqcr_start_stash(&p->p); - } else { - /* - * The non-stashing case is harder, need to prefetch ahead of - * time. - */ - avail = qm_eqcr_get_avail(&p->p); - if (avail < 2) - update_eqcr_ci(p, avail); - eq = qm_eqcr_start_no_stash(&p->p); - } - - if (unlikely(!eq)) { -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) - p->eqci_owned = NULL; -#endif - PORTAL_IRQ_UNLOCK(p, (*irqflags)); - return NULL; - } - if (flags & QMAN_ENQUEUE_FLAG_DCA) - eq->dca = QM_EQCR_DCA_ENABLE | - ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? 
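/*
 * For DCA (discrete consumption acknowledgement) the caller packs the
 * DQRR index of the entry being acknowledged into bits 8 and up of the
 * enqueue flags; the shift below unpacks it into the EQCR entry's DCA
 * field. Caller-side sketch (the explicit shift stands in for whatever
 * encoding helper the public header provides):
 *
 *	u32 flags = QMAN_ENQUEUE_FLAG_DCA | (dqrr_idx << 8);
 *
 *	err = qman_enqueue(fq, &fd, flags);	// enqueue + ack in one shot
 */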
- QM_EQCR_DCA_PARK : 0) | - ((flags >> 8) & QM_EQCR_DCA_IDXMASK); - eq->fqid = fq->fqid; -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - eq->tag = fq->key; -#else - eq->tag = (u32)(uintptr_t)fq; -#endif - eq->fd = *fd; - return eq; -} - -static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq; - - *p = get_affine_portal(); - eq = try_p_eq_start(*p, irqflags, fq, fd, flags); - if (!eq) - put_affine_portal(); - return eq; -} - -#ifdef FSL_DPA_CAN_WAIT -static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags); - - if (!eq) - qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH); - return eq; -} -static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq; - -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); - else -#endif /* __rtems__ */ - wait_event(affine_queue, - (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); - return eq; -} -static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags); - - if (!eq) - qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH); - return eq; -} -static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p, - unsigned long *irqflags __maybe_unused, - struct qman_fq *fq, - const struct qm_fd *fd, - u32 flags) -{ - struct qm_eqcr_entry *eq; - -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); - else -#endif /* __rtems__ */ - wait_event(affine_queue, - (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); - return eq; -} -#endif - -int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, - const struct qm_fd *fd, u32 flags) -{ - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); - else -#endif - eq = try_p_eq_start(p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - /* Factor the below out, it's used from qman_enqueue_orp() too */ - PORTAL_IRQ_UNLOCK(p, irqflags); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_p_enqueue); - -int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) -{ - struct qman_portal *p; - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & 
QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_eq_start(&p, &irqflags, fq, fd, flags); - else -#endif - eq = try_eq_start(&p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - /* Factor the below out, it's used from qman_enqueue_orp() too */ - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_enqueue); - -int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, - const struct qm_fd *fd, u32 flags, - struct qman_fq *orp, u16 orp_seqnum) -{ - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); - else -#endif - eq = try_p_eq_start(p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* Process ORP-specifics here */ - if (flags & QMAN_ENQUEUE_FLAG_NLIS) - orp_seqnum |= QM_EQCR_SEQNUM_NLIS; - else { - orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; - if (flags & QMAN_ENQUEUE_FLAG_NESN) - orp_seqnum |= QM_EQCR_SEQNUM_NESN; - else - /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ - orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; - } - eq->seqnum = orp_seqnum; - eq->orp = orp->fqid; - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | - ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? - 0 : QM_EQCR_VERB_CMD_ENQUEUE) | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - PORTAL_IRQ_UNLOCK(p, irqflags); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_p_enqueue_orp); - -int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, - struct qman_fq *orp, u16 orp_seqnum) -{ - struct qman_portal *p; - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_eq_start(&p, &irqflags, fq, fd, flags); - else -#endif - eq = try_eq_start(&p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* Process ORP-specifics here */ - if (flags & QMAN_ENQUEUE_FLAG_NLIS) - orp_seqnum |= QM_EQCR_SEQNUM_NLIS; - else { - orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; - if (flags & QMAN_ENQUEUE_FLAG_NESN) - orp_seqnum |= QM_EQCR_SEQNUM_NESN; - else - /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ - orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; - } - eq->seqnum = orp_seqnum; - eq->orp = orp->fqid; - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | - ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? 
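/*
 * The verb arithmetic here selects among the three ORP cases: a plain
 * in-order enqueue carries CMD_ENQUEUE, while HOLE (skip a sequence
 * number) and NESN (advance the expected sequence number) suppress the
 * enqueue and send the ORP update alone. Caller-side sketch, where
 * seqnum was captured by the caller from the out-of-order source:
 *
 *	// forward in order through orp_fq's order-restoration point:
 *	err = qman_enqueue_orp(fq, &fd, 0, orp_fq, seqnum);
 *	// drop this frame but keep the sequence window moving:
 *	err = qman_enqueue_orp(fq, &fd, QMAN_ENQUEUE_FLAG_HOLE,
 *			       orp_fq, seqnum);
 */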
- 0 : QM_EQCR_VERB_CMD_ENQUEUE) | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_enqueue_orp); - -int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, - const struct qm_fd *fd, u32 flags, - qman_cb_precommit cb, void *cb_arg) -{ - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_p_eq_start(p, &irqflags, fq, fd, flags); - else -#endif - eq = try_p_eq_start(p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* invoke user supplied callback function before writing commit verb */ - if (cb(cb_arg)) { - PORTAL_IRQ_UNLOCK(p, irqflags); - return -EINVAL; - } - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - /* Factor the below out, it's used from qman_enqueue_orp() too */ - PORTAL_IRQ_UNLOCK(p, irqflags); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_p_enqueue_precommit); - -int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd, - u32 flags, qman_cb_precommit cb, void *cb_arg) -{ - struct qman_portal *p; - struct qm_eqcr_entry *eq; - unsigned long irqflags __maybe_unused; - -#ifdef FSL_DPA_CAN_WAIT - if (flags & QMAN_ENQUEUE_FLAG_WAIT) - eq = wait_eq_start(&p, &irqflags, fq, fd, flags); - else -#endif - eq = try_eq_start(&p, &irqflags, fq, fd, flags); - if (!eq) - return -EBUSY; - /* invoke user supplied callback function before writing commit verb */ - if (cb(cb_arg)) { - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return -EINVAL; - } - /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ - qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | - (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); - /* Factor the below out, it's used from qman_enqueue_orp() too */ - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); -#ifdef FSL_DPA_CAN_WAIT_SYNC - if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && - (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { -#ifndef __rtems__ - if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) - wait_event_interruptible(affine_queue, - (p->eqci_owned != fq)); - else -#endif /* __rtems__ */ - wait_event(affine_queue, (p->eqci_owned != fq)); - } -#endif - return 0; -} -EXPORT_SYMBOL(qman_enqueue_precommit); - -int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, - struct qm_mcc_initcgr *opts) -{ - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - struct qman_portal *p = get_affine_portal(); - unsigned long irqflags __maybe_unused; - u8 res; - u8 verb = QM_MCC_VERB_MODIFYCGR; - - PORTAL_IRQ_LOCK(p, irqflags); - mcc = qm_mc_start(&p->p); - if (opts) - mcc->initcgr = *opts; - mcc->initcgr.cgid = 
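/*
 * One command slot serves both verbs: QMAN_CGR_FLAG_USE_INIT upgrades
 * the MODIFYCGR below to an INITCGR. Most users reach this through
 * qman_create_cgr(), which also hooks the congestion callback (sketch;
 * my_cscn_cb is a placeholder for the user's handler, and the CSCN_EN
 * write-enable bit name is assumed from the public header):
 *
 *	struct qman_cgr cgr = { .cgrid = id, .cb = my_cscn_cb };
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CSCN_EN;
 *	opts.cgr.cscn_en = 1;
 *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */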
cgr->cgrid; - if (flags & QMAN_CGR_FLAG_USE_INIT) - verb = QM_MCC_VERB_INITCGR; - qm_mc_commit(&p->p, verb); - while (!(mcr = qm_mc_result(&p->p))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); - res = mcr->result; - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return (res == QM_MCR_RESULT_OK) ? 0 : -EIO; -} -EXPORT_SYMBOL(qman_modify_cgr); - -#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \ - QM_CHANNEL_SWPORTAL0)) -#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0) - -static u8 qman_cgr_cpus[__CGR_NUM]; - -int qman_create_cgr(struct qman_cgr *cgr, u32 flags, - struct qm_mcc_initcgr *opts) -{ - unsigned long irqflags __maybe_unused; - struct qm_mcr_querycgr cgr_state; - struct qm_mcc_initcgr local_opts; - int ret; - struct qman_portal *p; - - /* We have to check that the provided CGRID is within the limits of the - * data-structures, for obvious reasons. However we'll let h/w take - * care of determining whether it's within the limits of what exists on - * the SoC. */ - if (cgr->cgrid >= __CGR_NUM) - return -EINVAL; - - preempt_disable(); - p = get_affine_portal(); - qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); - preempt_enable(); - - memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); - cgr->chan = p->config->public_cfg.channel; - spin_lock_irqsave(&p->cgr_lock, irqflags); - - /* if no opts specified, just add it to the list */ - if (!opts) - goto add_list; - - ret = qman_query_cgr(cgr, &cgr_state); - if (ret) - goto release_lock; - if (opts) - local_opts = *opts; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = - QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); - else - /* Overwrite TARG */ - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | - TARG_MASK(p); - local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; - - /* send init if flags indicate so */ - if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) - ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); - else - ret = qman_modify_cgr(cgr, 0, &local_opts); - if (ret) - goto release_lock; -add_list: - list_add(&cgr->node, &p->cgr_cbs); - - /* Determine if newly added object requires its callback to be called */ - ret = qman_query_cgr(cgr, &cgr_state); - if (ret) { - /* we can't go back, so proceed and return success, but screen - * and wail to the log file */ - pr_crit("CGR HW state partially modified\n"); - ret = 0; - goto release_lock; - } - if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1], - cgr->cgrid)) - cgr->cb(p, cgr, 1); -release_lock: - spin_unlock_irqrestore(&p->cgr_lock, irqflags); - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_create_cgr); - -int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal, - struct qm_mcc_initcgr *opts) -{ - unsigned long irqflags __maybe_unused; - struct qm_mcc_initcgr local_opts; - int ret; - - if ((qman_ip_rev & 0xFF00) < QMAN_REV30) { - pr_warn("This version doesn't support to send CSCN to DCP portal\n"); - return -EINVAL; - } - /* We have to check that the provided CGRID is within the limits of the - * data-structures, for obvious reasons. However we'll let h/w take - * care of determining whether it's within the limits of what exists on - * the SoC. 
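
/*
 * (Editor's aside; illustrative only, not part of the patch.) TARG_MASK()
 * above maps a software portal to its bit in the CGR's CSCN_TARG field:
 * portal 0 owns the most-significant bit. The same arithmetic in isolation:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t cscn_targ_bit(unsigned int portal_idx)
{
	assert(portal_idx < 32);
	return UINT32_C(0x80000000) >> portal_idx;
}
/* e.g. targ |= cscn_targ_bit(2) sets 0x20000000 for SW portal 2 */
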
- */ - if (cgr->cgrid >= __CGR_NUM) - return -EINVAL; - - memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); - if (opts) - local_opts = *opts; - - local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT | - QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal; - local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; - - /* send init if flags indicate so */ - if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) - ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, - &local_opts); - else - ret = qman_modify_cgr(cgr, 0, &local_opts); - - return ret; -} -EXPORT_SYMBOL(qman_create_cgr_to_dcp); - -int qman_delete_cgr(struct qman_cgr *cgr) -{ - unsigned long irqflags __maybe_unused; - struct qm_mcr_querycgr cgr_state; - struct qm_mcc_initcgr local_opts; - int ret = 0; - struct qman_cgr *i; - struct qman_portal *p = get_affine_portal(); - - if (cgr->chan != p->config->public_cfg.channel) { - pr_crit("Attempting to delete cgr from different portal " - "than it was create: create 0x%x, delete 0x%x\n", - cgr->chan, p->config->public_cfg.channel); - ret = -EINVAL; - goto put_portal; - } - memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); - spin_lock_irqsave(&p->cgr_lock, irqflags); - list_del(&cgr->node); - /* - * If there are no other CGR objects for this CGRID in the list, update - * CSCN_TARG accordingly - */ - list_for_each_entry(i, &p->cgr_cbs, node) - if ((i->cgrid == cgr->cgrid) && i->cb) - goto release_lock; - ret = qman_query_cgr(cgr, &cgr_state); - if (ret) { - /* add back to the list */ - list_add(&cgr->node, &p->cgr_cbs); - goto release_lock; - } - /* Overwrite TARG */ - local_opts.we_mask = QM_CGR_WE_CSCN_TARG; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); - else - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & - ~(TARG_MASK(p)); - ret = qman_modify_cgr(cgr, 0, &local_opts); - if (ret) - /* add back to the list */ - list_add(&cgr->node, &p->cgr_cbs); -release_lock: - spin_unlock_irqrestore(&p->cgr_lock, irqflags); -put_portal: - put_affine_portal(); - return ret; -} -EXPORT_SYMBOL(qman_delete_cgr); - -#ifndef __rtems__ -struct cgr_comp { - struct qman_cgr *cgr; - struct completion completion; -}; - -static int qman_delete_cgr_thread(void *p) -{ - struct cgr_comp *cgr_comp = (struct cgr_comp *)p; - int res; - - res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr); - complete(&cgr_comp->completion); - - return res; -} - -void qman_delete_cgr_safe(struct qman_cgr *cgr) -{ - struct task_struct *thread; - struct cgr_comp cgr_comp; - - preempt_disable(); - if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { - init_completion(&cgr_comp.completion); - cgr_comp.cgr = cgr; - thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, - "cgr_del"); - - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); - wake_up_process(thread); - wait_for_completion(&cgr_comp.completion); - preempt_enable(); - return; - } - } - qman_delete_cgr(cgr); - preempt_enable(); -} -EXPORT_SYMBOL(qman_delete_cgr_safe); -#endif /* __rtems__ */ - -int qman_set_wpm(int wpm_enable) -{ - return qm_set_wpm(wpm_enable); -} -EXPORT_SYMBOL(qman_set_wpm); - -int qman_get_wpm(int *wpm_enable) -{ - return qm_get_wpm(wpm_enable); -} -EXPORT_SYMBOL(qman_get_wpm); - - -/* Cleanup FQs */ -static int qm_shutdown_fq(struct qm_portal **portal, int portal_count, - u32 fqid) -{ - - struct qm_mc_command *mcc; - struct qm_mc_result *mcr; - u8 state; - int orl_empty, fq_empty, i, drain = 0; - u32 result; - u32 channel, wq; - - /* Determine the state of the FQID */ - mcc = 
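
/*
 * (Editor's aside; illustrative only, not part of the patch.)
 * qman_delete_cgr_safe() above must issue the delete from the CPU whose
 * portal created the CGR, so it spawns a kthread, binds it to that CPU and
 * blocks on a completion. The same run-on-a-given-CPU idiom, sketched with
 * stock Linux kernel APIs; fn() is assumed to call complete(done) when it
 * finishes:
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

static int run_on_cpu(int cpu, int (*fn)(void *), void *arg,
		      struct completion *done)
{
	struct task_struct *t = kthread_create(fn, arg, "cgr_del");

	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);		/* pin before the first wakeup */
	wake_up_process(t);
	wait_for_completion(done);
	return 0;
}
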
qm_mc_start(portal[0]); - mcc->queryfq_np.fqid = fqid; - qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP); - while (!(mcr = qm_mc_result(portal[0]))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); - state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; - if (state == QM_MCR_NP_STATE_OOS) - return 0; /* Already OOS, no need to do anymore checks */ - - /* Query which channel the FQ is using */ - mcc = qm_mc_start(portal[0]); - mcc->queryfq.fqid = fqid; - qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ); - while (!(mcr = qm_mc_result(portal[0]))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); - - /* Need to store these since the MCR gets reused */ - channel = mcr->queryfq.fqd.dest.channel; - wq = mcr->queryfq.fqd.dest.wq; - - switch (state) { - case QM_MCR_NP_STATE_TEN_SCHED: - case QM_MCR_NP_STATE_TRU_SCHED: - case QM_MCR_NP_STATE_ACTIVE: - case QM_MCR_NP_STATE_PARKED: - orl_empty = 0; - mcc = qm_mc_start(portal[0]); - mcc->alterfq.fqid = fqid; - qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE); - while (!(mcr = qm_mc_result(portal[0]))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == - QM_MCR_VERB_ALTER_RETIRE); - result = mcr->result; /* Make a copy as we reuse MCR below */ - - if (result == QM_MCR_RESULT_PENDING) { - /* Need to wait for the FQRN in the message ring, which - will only occur once the FQ has been drained. In - order for the FQ to drain the portal needs to be set - to dequeue from the channel the FQ is scheduled on */ - const struct qm_mr_entry *msg; - const struct qm_dqrr_entry *dqrr = NULL; - int found_fqrn = 0; - u16 dequeue_wq = 0; - - /* Flag that we need to drain FQ */ - drain = 1; - - if (channel >= qm_channel_pool1 && - channel < (qm_channel_pool1 + 15)) { - /* Pool channel, enable the bit in the portal */ - dequeue_wq = (channel - - qm_channel_pool1 + 1)<<4 | wq; - } else if (channel < qm_channel_pool1) { - /* Dedicated channel */ - dequeue_wq = wq; - } else { - pr_info("Cannot recover FQ 0x%x, it is " - "scheduled on channel 0x%x", - fqid, channel); - return -EBUSY; - } - /* Set the sdqcr to drain this channel */ - if (channel < qm_channel_pool1) - for (i = 0; i < portal_count; i++) - qm_dqrr_sdqcr_set(portal[i], - QM_SDQCR_TYPE_ACTIVE | - QM_SDQCR_CHANNELS_DEDICATED); - else - for (i = 0; i < portal_count; i++) - qm_dqrr_sdqcr_set( - portal[i], - QM_SDQCR_TYPE_ACTIVE | - QM_SDQCR_CHANNELS_POOL_CONV - (channel)); - while (!found_fqrn) { - /* Keep draining DQRR while checking the MR*/ - for (i = 0; i < portal_count; i++) { - qm_dqrr_pvb_update(portal[i]); - dqrr = qm_dqrr_current(portal[i]); - while (dqrr) { - qm_dqrr_cdc_consume_1ptr( - portal[i], dqrr, 0); - qm_dqrr_pvb_update(portal[i]); - qm_dqrr_next(portal[i]); - dqrr = qm_dqrr_current( - portal[i]); - } - /* Process message ring too */ - qm_mr_pvb_update(portal[i]); - msg = qm_mr_current(portal[i]); - while (msg) { - if ((msg->verb & - QM_MR_VERB_TYPE_MASK) - == QM_MR_VERB_FQRN) - found_fqrn = 1; - qm_mr_next(portal[i]); - qm_mr_cci_consume_to_current( - portal[i]); - qm_mr_pvb_update(portal[i]); - msg = qm_mr_current(portal[i]); - } - cpu_relax(); - } - } - } - if (result != QM_MCR_RESULT_OK && - result != QM_MCR_RESULT_PENDING) { - /* error */ - pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n", - fqid, result); - return -1; - } - if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { - /* ORL had no entries, no need to wait until the - ERNs come in */ - orl_empty = 1; - } - /* Retirement succeeded, check to see if FQ 
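
/*
 * (Editor's aside; illustrative only, not part of the patch.) Every
 * management command in qm_shutdown_fq() follows the same
 * start/fill/commit/poll shape. Factored out as a sketch, assuming the
 * qm_mc_* helpers and verb constants visible in this file:
 */
static u8 query_fq_np_state(struct qm_portal *portal, u32 fqid)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;

	mcc = qm_mc_start(portal);		/* claim the command slot */
	mcc->queryfq_np.fqid = fqid;
	qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(portal)))	/* spin until HW answers */
		cpu_relax();
	return mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
}
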
needs - to be drained */ - if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { - /* FQ is Not Empty, drain using volatile DQ commands */ - fq_empty = 0; - do { - const struct qm_dqrr_entry *dqrr = NULL; - u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); - qm_dqrr_vdqcr_set(portal[0], vdqcr); - - /* Wait for a dequeue to occur */ - while (dqrr == NULL) { - qm_dqrr_pvb_update(portal[0]); - dqrr = qm_dqrr_current(portal[0]); - if (!dqrr) - cpu_relax(); - } - /* Process the dequeues, making sure to - empty the ring completely */ - while (dqrr) { - if (dqrr->fqid == fqid && - dqrr->stat & QM_DQRR_STAT_FQ_EMPTY) - fq_empty = 1; - qm_dqrr_cdc_consume_1ptr(portal[0], - dqrr, 0); - qm_dqrr_pvb_update(portal[0]); - qm_dqrr_next(portal[0]); - dqrr = qm_dqrr_current(portal[0]); - } - } while (fq_empty == 0); - } - for (i = 0; i < portal_count; i++) - qm_dqrr_sdqcr_set(portal[i], 0); - - /* Wait for the ORL to have been completely drained */ - while (orl_empty == 0) { - const struct qm_mr_entry *msg; - - qm_mr_pvb_update(portal[0]); - msg = qm_mr_current(portal[0]); - while (msg) { - if ((msg->verb & QM_MR_VERB_TYPE_MASK) == - QM_MR_VERB_FQRL) - orl_empty = 1; - qm_mr_next(portal[0]); - qm_mr_cci_consume_to_current(portal[0]); - qm_mr_pvb_update(portal[0]); - msg = qm_mr_current(portal[0]); - } - cpu_relax(); - } - mcc = qm_mc_start(portal[0]); - mcc->alterfq.fqid = fqid; - qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS); - while (!(mcr = qm_mc_result(portal[0]))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == - QM_MCR_VERB_ALTER_OOS); - if (mcr->result != QM_MCR_RESULT_OK) { - pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n", - fqid, mcr->result); - return -1; - } - return 0; - case QM_MCR_NP_STATE_RETIRED: - /* Send OOS Command */ - mcc = qm_mc_start(portal[0]); - mcc->alterfq.fqid = fqid; - qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS); - while (!(mcr = qm_mc_result(portal[0]))) - cpu_relax(); - DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == - QM_MCR_VERB_ALTER_OOS); - if (mcr->result) { - pr_err("OOS Failed on FQID 0x%x\n", fqid); - return -1; - } - return 0; - case QM_MCR_NP_STATE_OOS: - /* Done */ - return 0; - } - return -1; -} - -int qman_shutdown_fq(u32 fqid) -{ - struct qman_portal *p; - unsigned long irqflags __maybe_unused; - int ret; - struct qm_portal *low_p; - - p = get_affine_portal(); - PORTAL_IRQ_LOCK(p, irqflags); - low_p = &p->p; - ret = qm_shutdown_fq(&low_p, 1, fqid); - PORTAL_IRQ_UNLOCK(p, irqflags); - put_affine_portal(); - return ret; -} - -const struct qm_portal_config *qman_get_qm_portal_config( - struct qman_portal *portal) -{ -#ifndef __rtems__ - return portal->sharing_redirect ? NULL : portal->config; -#else /* __rtems__ */ - return portal->config; -#endif /* __rtems__ */ -} diff --git a/linux/drivers/soc/fsl/qbman/qman_ccsr.c b/linux/drivers/soc/fsl/qbman/qman_ccsr.c new file mode 100644 index 00000000..7def3431 --- /dev/null +++ b/linux/drivers/soc/fsl/qbman/qman_ccsr.c @@ -0,0 +1,883 @@ +#include <machine/rtems-bsd-kernel-space.h> + +#include <rtems/bsd/local/opt_dpaa.h> + +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_priv.h" +#ifdef __rtems__ +#undef dev_crit +#undef dev_dbg +#undef dev_err +#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__) +#define dev_dbg dev_crit +#define dev_err dev_crit +#endif /* __rtems__ */ + +u16 qman_ip_rev; +EXPORT_SYMBOL(qman_ip_rev); +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; +EXPORT_SYMBOL(qm_channel_pool1); + +/* Register offsets */ +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) +#define REG_DD_CFG 0x0200 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) +#define REG_PFDR_FPC 0x0400 +#define REG_PFDR_FP_HEAD 0x0404 +#define REG_PFDR_FP_TAIL 0x0408 +#define REG_PFDR_FP_LWIT 0x0410 +#define REG_PFDR_CFG 0x0414 +#define REG_SFDR_CFG 0x0500 +#define REG_SFDR_IN_USE 0x0504 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) +#define REG_WQ_DEF_ENC_WQID 0x0630 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ +#define REG_CM_CFG 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_ECIR2 0x0a0c +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_MCR 0x0b00 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) +#define REG_MISC_CFG 0x0be0 +#define REG_HID_CFG 0x0bf0 +#define REG_IDLE_STAT 0x0bf4 +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FQD_BARE 0x0c00 +#define REG_PFDR_BARE 0x0c20 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_QCSP_BARE 0x0c80 +#define REG_QCSP_BAR 0x0c84 +#define REG_CI_SCHED_CFG 0x0d00 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_CI_RLM_AVG 0x0d14 +#define 
REG_ERR_ISR 0x0e00 +#define REG_ERR_IER 0x0e04 +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) +#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) + +/* Assists for QMAN_MCR */ +#define MCR_INIT_PFDR 0x01000000 +#define MCR_get_rslt(v) (u8)((v) >> 24) +#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0)) +#define MCR_rslt_ok(r) ((r) == 0xf0) +#define MCR_rslt_eaccess(r) ((r) == 0xf8) +#define MCR_rslt_inval(r) ((r) == 0xff) + +/* + * Corenet initiator settings. Stash request queues are 4-deep to match cores + * ability to snarf. Stash priority is 3, other priorities are 2. + */ +#define QM_CI_SCHED_CFG_SRCCIV 4 +#define QM_CI_SCHED_CFG_SRQ_W 3 +#define QM_CI_SCHED_CFG_RW_W 2 +#define QM_CI_SCHED_CFG_BMAN_W 2 +/* write SRCCIV enable */ +#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31) + +/* Follows WQ_CS_CFG0-5 */ +enum qm_wq_class { + qm_wq_portal = 0, + qm_wq_pool = 1, + qm_wq_fman0 = 2, + qm_wq_fman1 = 3, + qm_wq_caam = 4, + qm_wq_pme = 5, + qm_wq_first = qm_wq_portal, + qm_wq_last = qm_wq_pme +}; + +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ +enum qm_memory { + qm_memory_fqd, + qm_memory_pfdr +}; + +/* Used by all error interrupt registers except 'inhibit' */ +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ + +/* QMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ + QM_EIRQ_IFSI) + +struct qm_ecir { + u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */ +}; + +static bool qm_ecir_is_dcp(const struct qm_ecir *p) +{ + return p->info & BIT(29); +} + +static int qm_ecir_get_pnum(const struct qm_ecir *p) +{ + return (p->info >> 24) & 0x1f; +} + +static int qm_ecir_get_fqid(const struct qm_ecir *p) +{ + return p->info & (BIT(24) - 1); +} + +struct qm_ecir2 { + u32 info; /* ptyp[31], res[10-30], pnum[0-9] */ +}; + +static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p) +{ + return p->info & BIT(31); +} + +static int qm_ecir2_get_pnum(const struct qm_ecir2 *p) +{ + return p->info & (BIT(10) - 1); +} + +struct qm_eadr { + u32 info; /* memid[24-27], eadr[0-11] */ + /* v3: memid[24-28], eadr[0-15] */ +}; + +static int qm_eadr_get_memid(const struct qm_eadr *p) +{ + return 
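
/*
 * (Editor's aside; illustrative only, not part of the patch.) The
 * qm_ecir/qm_eadr accessors above all extract fields by shift-and-mask,
 * where BIT(n) - 1 builds an n-bit mask. Stand-alone equivalent for field
 * widths below 32:
 */
#include <stdint.h>

static unsigned int field_get(uint32_t word, unsigned int shift,
			      unsigned int width)
{
	return (word >> shift) & ((UINT32_C(1) << width) - 1);
}
/* fqid = field_get(ecir, 0, 24);  pnum = field_get(ecir, 24, 5); */
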
(p->info >> 24) & 0xf; +} + +static int qm_eadr_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(12) - 1); +} + +static int qm_eadr_v3_get_memid(const struct qm_eadr *p) +{ + return (p->info >> 24) & 0x1f; +} + +static int qm_eadr_v3_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(16) - 1); +} + +struct qman_hwerr_txt { + u32 mask; + const char *txt; +}; + + +static const struct qman_hwerr_txt qman_hwerr_txts[] = { + { QM_EIRQ_CIDE, "Corenet Initiator Data Error" }, + { QM_EIRQ_CTDE, "Corenet Target Data Error" }, + { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" }, + { QM_EIRQ_PLWI, "PFDR Low Watermark" }, + { QM_EIRQ_MBEI, "Multi-bit ECC Error" }, + { QM_EIRQ_SBEI, "Single-bit ECC Error" }, + { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" }, + { QM_EIRQ_ICVI, "Invalid Command Verb" }, + { QM_EIRQ_IFSI, "Invalid Flow Control State" }, + { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" }, + { QM_EIRQ_IDFI, "Invalid Dequeue FQ" }, + { QM_EIRQ_IDSI, "Invalid Dequeue Source" }, + { QM_EIRQ_IDQI, "Invalid Dequeue Queue" }, + { QM_EIRQ_IECE, "Invalid Enqueue Configuration" }, + { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" }, + { QM_EIRQ_IESI, "Invalid Enqueue State" }, + { QM_EIRQ_IECI, "Invalid Enqueue Channel" }, + { QM_EIRQ_IEQI, "Invalid Enqueue Queue" }, +}; + +struct qman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +static const struct qman_error_info_mdata error_mdata[] = { + { 0x01FF, 24, "FQD cache tag memory 0" }, + { 0x01FF, 24, "FQD cache tag memory 1" }, + { 0x01FF, 24, "FQD cache tag memory 2" }, + { 0x01FF, 24, "FQD cache tag memory 3" }, + { 0x0FFF, 512, "FQD cache memory" }, + { 0x07FF, 128, "SFDR memory" }, + { 0x01FF, 72, "WQ context memory" }, + { 0x00FF, 240, "CGR memory" }, + { 0x00FF, 302, "Internal Order Restoration List memory" }, + { 0x01FF, 256, "SW portal ring memory" }, +}; + +#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) + +/* + * TODO: unimplemented registers + * + * Keeping a list here of QMan registers I have not yet covered; + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 + */ + +/* Pointer to the start of the QMan's CCSR space */ +static u32 __iomem *qm_ccsr_start; +/* A SDQCR mask comprising all the available/visible pool channels */ +static u32 qm_pools_sdqcr; + +static inline u32 qm_ccsr_in(u32 offset) +{ + return ioread32be(qm_ccsr_start + offset/4); +} + +static inline void qm_ccsr_out(u32 offset, u32 val) +{ + iowrite32be(val, qm_ccsr_start + offset/4); +} + +u32 qm_get_pools_sdqcr(void) +{ + return qm_pools_sdqcr; +} + +enum qm_dc_portal { + qm_dc_portal_fman0 = 0, + qm_dc_portal_fman1 = 1 +}; + +static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd) +{ + DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 || + portal == qm_dc_portal_fman1); + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 0x1000 : 0) | (sernd & 0x3ff)); + else + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 
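
/*
 * (Editor's aside; illustrative only, not part of the patch.) qm_ccsr_in()
 * and qm_ccsr_out() above divide the byte offset by 4 because qm_ccsr_start
 * is a u32 pointer, so pointer arithmetic already scales by sizeof(u32).
 * Plain-C model of the address calculation:
 */
#include <stdint.h>

static volatile uint32_t *ccsr_reg(volatile uint32_t *base, uint32_t byte_off)
{
	/* same address as (volatile uint8_t *)base + byte_off */
	return base + byte_off / 4;
}
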
0x100 : 0) | (sernd & 0x1f)); +} + +static void qm_set_wq_scheduling(enum qm_wq_class wq_class, + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, + u8 csw5, u8 csw6, u8 csw7) +{ + qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | + ((csw6 & 0x7) << 4) | (csw7 & 0x7)); +} + +static void qm_set_hid(void) +{ + qm_ccsr_out(REG_HID_CFG, 0); +} + +static void qm_set_corenet_initiator(void) +{ + qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN | + (QM_CI_SCHED_CFG_SRCCIV << 24) | + (QM_CI_SCHED_CFG_SRQ_W << 8) | + (QM_CI_SCHED_CFG_RW_W << 4) | + QM_CI_SCHED_CFG_BMAN_W); +} + +static void qm_get_version(u16 *id, u8 *major, u8 *minor) +{ + u32 v = qm_ccsr_in(REG_IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +#define PFDR_AR_EN BIT(31) +static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size) +{ + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; + u32 exp = ilog2(size); + + /* choke if size isn't within range */ + DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && + is_power_of_2(size)); + /* choke if 'ba' has lower-alignment than 'size' */ + DPAA_ASSERT(!(ba & (size - 1))); + qm_ccsr_out(offset, upper_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); +} + +static void qm_set_pfdr_threshold(u32 th, u8 k) +{ + qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff); + qm_ccsr_out(REG_PFDR_CFG, k); +} + +static void qm_set_sfdr_threshold(u16 th) +{ + qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff); +} + +static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num) +{ + u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + + DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); + /* Make sure the command interface is 'idle' */ + if (!MCR_rslt_idle(rslt)) { + dev_crit(dev, "QMAN_MCR isn't idle"); + WARN_ON(1); + } + + /* Write the MCR command params then the verb */ + qm_ccsr_out(REG_MCP(0), pfdr_start); + /* + * TODO: remove this - it's a workaround for a model bug that is + * corrected in more recent versions. We use the workaround until + * everyone has upgraded. + */ + qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16); + dma_wmb(); + qm_ccsr_out(REG_MCR, MCR_INIT_PFDR); + /* Poll for the result */ + do { + rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + } while (!MCR_rslt_idle(rslt)); + if (MCR_rslt_ok(rslt)) + return 0; + if (MCR_rslt_eaccess(rslt)) + return -EACCES; + if (MCR_rslt_inval(rslt)) + return -EINVAL; + dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); + return -ENODEV; +} + +/* + * Ideally we would use the DMA API to turn rmem->base into a DMA address + * (especially if iommu translations ever get involved). Unfortunately, the + * DMA API currently does not allow mapping anything that is not backed with + * a struct page. 
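
/*
 * (Editor's aside; illustrative only, not part of the patch.)
 * qm_set_memory() above encodes the window size in the AR register as a
 * power-of-two exponent minus one. A stand-alone check of that encoding,
 * using the GCC/Clang builtin in place of the kernel's ilog2():
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ar_encode(uint64_t ba, uint32_t size)
{
	unsigned int exp;

	assert(size >= 4096 && (size & (size - 1)) == 0);	/* 2^n only */
	assert((ba & (size - 1)) == 0);		/* base aligned to size */
	exp = 31 - __builtin_clz(size);		/* ilog2(size) */
	return (UINT32_C(1) << 31) | (exp - 1);	/* PFDR_AR_EN | (exp - 1) */
}
/* ar_encode(ba, 4096) == 0x8000000b: exp = 12, encoded field = 11 */
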
+ */ +#ifndef __rtems__ +static dma_addr_t fqd_a, pfdr_a; +static size_t fqd_sz, pfdr_sz; + +static int qman_fqd(struct reserved_mem *rmem) +{ + fqd_a = rmem->base; + fqd_sz = rmem->size; + + WARN_ON(!(fqd_a && fqd_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); + +static int qman_pfdr(struct reserved_mem *rmem) +{ + pfdr_a = rmem->base; + pfdr_sz = rmem->size; + + WARN_ON(!(pfdr_a && pfdr_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); +#else /* __rtems__ */ +static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304); +#define fqd_a ((uintptr_t)&fqd[0]) +#define fqd_sz sizeof(fqd) +static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432); +#define pfdr_a ((uintptr_t)&pfdr[0]) +#define pfdr_sz sizeof(pfdr) +#endif /* __rtems__ */ + +static unsigned int qm_get_fqid_maxcnt(void) +{ + return fqd_sz / 64; +} + +/* + * Flush this memory range from data cache so that QMAN originated + * transactions for this memory region could be marked non-coherent. + */ +static int zero_priv_mem(struct device *dev, struct device_node *node, + phys_addr_t addr, size_t sz) +{ +#ifndef __rtems__ + /* map as cacheable, non-guarded */ + void __iomem *tmpp = ioremap_prot(addr, sz, 0); + + if (!tmpp) + return -ENOMEM; + + memset_io(tmpp, 0, sz); + flush_dcache_range((unsigned long)tmpp, + (unsigned long)tmpp + sz); + iounmap(tmpp); + +#else /* __rtems__ */ + memset((void *)(uintptr_t)addr, 0, sz); +#endif /* __rtems__ */ + return 0; +} + +static void log_edata_bits(struct device *dev, u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + dev_warn(dev, "ErrInt, EDATA:\n"); + i = bit_count / 32; + if (bit_count % 32) { + i++; + mask = ~(mask << bit_count % 32); + } + j = 16 - i; + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask); + j++; + for (; j < 16; j++) + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j))); +} + +static void log_additional_error_info(struct device *dev, u32 isr_val, + u32 ecsr_val) +{ + struct qm_ecir ecir_val; + struct qm_eadr eadr_val; + int memid; + + ecir_val.info = qm_ccsr_in(REG_ECIR); + /* Is portal info valid */ + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + struct qm_ecir2 ecir2_val; + + ecir2_val.info = qm_ccsr_in(REG_ECIR2); + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP", + qm_ecir2_get_pnum(&ecir2_val)); + } + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_v3_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_v3_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } else { + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir_is_dcp(&ecir_val) ? 
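
/*
 * (Editor's aside; illustrative only, not part of the patch.) Each frame
 * queue descriptor occupies 64 bytes of FQD memory, which is all that
 * qm_get_fqid_maxcnt() above relies on; the 4 MiB RTEMS region therefore
 * yields 65536 usable FQIDs:
 */
#include <stddef.h>

static unsigned int fqid_maxcnt(size_t fqd_bytes)
{
	return fqd_bytes / 64;		/* 4194304 / 64 == 65536 */
}
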
"DCP" : "SWP", + qm_ecir_get_pnum(&ecir_val)); + } + if (ecsr_val & FQID_ECSR_ERR) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } +} + +static irqreturn_t qman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct device *dev = ptr; + + ier_val = qm_ccsr_in(REG_ERR_IER); + isr_val = qm_ccsr_in(REG_ERR_ISR); + ecsr_val = qm_ccsr_in(REG_ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) { + if (qman_hwerr_txts[i].mask & isr_mask) { +#ifndef __rtems__ + dev_err_ratelimited(dev, "ErrInt: %s\n", + qman_hwerr_txts[i].txt); +#endif /* __rtems__ */ + if (qman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(dev, isr_mask, + ecsr_val); + /* Re-arm error capture registers */ + qm_ccsr_out(REG_ECSR, ecsr_val); + } + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) { + dev_dbg(dev, "Disabling error 0x%x\n", + qman_hwerr_txts[i].mask); + ier_val &= ~qman_hwerr_txts[i].mask; + qm_ccsr_out(REG_ERR_IER, ier_val); + } + } + } + qm_ccsr_out(REG_ERR_ISR, isr_val); + + return IRQ_HANDLED; +} + +static int qman_init_ccsr(struct device *dev) +{ + int i, err; + + /* FQD memory */ + qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); + /* PFDR memory */ + qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); + err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); + if (err) + return err; + /* thresholds */ + qm_set_pfdr_threshold(512, 64); + qm_set_sfdr_threshold(128); + /* clear stale PEBI bit from interrupt status register */ + qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI); + /* corenet initiator settings */ + qm_set_corenet_initiator(); + /* HID settings */ + qm_set_hid(); + /* Set scheduling weights to defaults */ + for (i = qm_wq_first; i <= qm_wq_last; i++) + qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0); + /* We are not prepared to accept ERNs for hardware enqueues */ + qm_set_dc(qm_dc_portal_fman0, 1, 0); + qm_set_dc(qm_dc_portal_fman1, 1, 0); + return 0; +} + +#define LIO_CFG_LIODN_MASK 0x0fff0000 +void qman_liodn_fixup(u16 channel) +{ + static int done; + static u32 liodn_offset; + u32 before, after; + int idx = channel - QM_CHANNEL_SWPORTAL0; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx)); + else + before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx)); + if (!done) { + liodn_offset = before & LIO_CFG_LIODN_MASK; + done = 1; + return; + } + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after); + else + qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after); +} + +#define IO_CFG_SDEST_MASK 0x00ff0000 +void qman_set_sdest(u16 channel, unsigned int cpu_idx) +{ + int idx = channel - QM_CHANNEL_SWPORTAL0; + u32 before, after; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx)); + /* Each pair of vcpu share the same SRQ(SDEST) */ + cpu_idx /= 2; + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after); + } else { + before = qm_ccsr_in(REG_QCSP_IO_CFG(idx)); + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + 
qm_ccsr_out(REG_QCSP_IO_CFG(idx), after); + } +} + +static int qman_resource_init(struct device *dev) +{ + int pool_chan_num, cgrid_num; + int ret, i; + + switch (qman_ip_rev >> 8) { + case 1: + pool_chan_num = 15; + cgrid_num = 256; + break; + case 2: + pool_chan_num = 3; + cgrid_num = 64; + break; + case 3: + pool_chan_num = 15; + cgrid_num = 256; + break; + default: + return -ENODEV; + } + + ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF, + pool_chan_num, -1); + if (ret) { + dev_err(dev, "Failed to seed pool channels (%d)\n", ret); + return ret; + } + + ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1); + if (ret) { + dev_err(dev, "Failed to seed CGRID range (%d)\n", ret); + return ret; + } + + /* parse pool channels into the SDQCR mask */ + for (i = 0; i < cgrid_num; i++) + qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i); + + ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF, + qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1); + if (ret) { + dev_err(dev, "Failed to seed FQID range (%d)\n", ret); + return ret; + } + + return 0; +} + +static int fsl_qman_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; +#ifdef __rtems__ + struct resource res_storage; +#endif /* __rtems__ */ + struct resource *res; + int ret, err_irq; + u16 id; + u8 major, minor; + +#ifndef __rtems__ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +#else /* __rtems__ */ + res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0); +#endif /* __rtems__ */ + if (!res) { + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", + node->full_name); + return -ENXIO; + } + qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); + if (!qm_ccsr_start) + return -ENXIO; + + qm_get_version(&id, &major, &minor); + if (major == 1 && minor == 0) { + dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n"); + return -ENODEV; + } else if (major == 1 && minor == 1) + qman_ip_rev = QMAN_REV11; + else if (major == 1 && minor == 2) + qman_ip_rev = QMAN_REV12; + else if (major == 2 && minor == 0) + qman_ip_rev = QMAN_REV20; + else if (major == 3 && minor == 0) + qman_ip_rev = QMAN_REV30; + else if (major == 3 && minor == 1) + qman_ip_rev = QMAN_REV31; + else { + dev_err(dev, "Unknown QMan version\n"); + return -ENODEV; + } + + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; + + ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); + WARN_ON(ret); + if (ret) + return -ENODEV; + + ret = qman_init_ccsr(dev); + if (ret) { + dev_err(dev, "CCSR setup failed\n"); + return ret; + } + + err_irq = platform_get_irq(pdev, 0); + if (err_irq <= 0) { + dev_info(dev, "Can't get %s property 'interrupts'\n", + node->full_name); + return -ENODEV; + } + ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err", + dev); + if (ret) { + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", + ret, node->full_name); + return ret; + } + + /* + * Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
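
/*
 * (Editor's aside; illustrative only, not part of the patch.)
 * gen_pool_alloc() returns 0 on failure, so a raw ID of 0 would be
 * indistinguishable from "pool empty". Seeding every range with
 * DPAA_GENALLOC_OFF ORed in, as qman_resource_init() does above, keeps 0
 * free as the error value; allocation strips the offset again. Sketch of
 * the consuming side, mirroring the allocator helpers elsewhere in this
 * tree:
 */
#include <linux/genalloc.h>

static int alloc_fqid(struct gen_pool *pool, u32 *fqid)
{
	unsigned long addr = gen_pool_alloc(pool, 1);

	if (!addr)
		return -ENOMEM;			/* range exhausted */
	*fqid = addr & ~DPAA_GENALLOC_OFF;	/* recover the raw FQID */
	return 0;
}
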
+ */ + qm_ccsr_out(REG_ERR_ISR, 0xffffffff); + /* Enable Error Interrupts */ + qm_ccsr_out(REG_ERR_IER, 0xffffffff); + + qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc"); + if (IS_ERR(qm_fqalloc)) { + ret = PTR_ERR(qm_fqalloc); + dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc"); + if (IS_ERR(qm_qpalloc)) { + ret = PTR_ERR(qm_qpalloc); + dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc"); + if (IS_ERR(qm_cgralloc)) { + ret = PTR_ERR(qm_cgralloc); + dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret); + return ret; + } + + ret = qman_resource_init(dev); + if (ret) + return ret; + + ret = qman_alloc_fq_table(qm_get_fqid_maxcnt()); + if (ret) + return ret; + + ret = qman_wq_alloc(); + if (ret) + return ret; + + return 0; +} + +#ifndef __rtems__ +static const struct of_device_id fsl_qman_ids[] = { + { + .compatible = "fsl,qman", + }, + {} +}; + +static struct platform_driver fsl_qman_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = fsl_qman_ids, + .suppress_bind_attrs = true, + }, + .probe = fsl_qman_probe, +}; + +builtin_platform_driver(fsl_qman_driver); +#else /* __rtems__ */ +#include <bsp/fdt.h> +#include <bsp/qoriq.h> + +SYSINIT_REFERENCE(bman); + +static void +qman_sysinit(void) +{ + const char *fdt = bsp_fdt_get(); + struct { + struct platform_device pdev; + struct device_node of_node; + } dev; + const char *name; + int node; + int ret; + + name = "fsl,qman"; + node = fdt_node_offset_by_compatible(fdt, 0, name); + if (node < 0) + panic("qman: no qman in FDT"); + + memset(&dev, 0, sizeof(dev)); + dev.pdev.dev.of_node = &dev.of_node; + dev.pdev.dev.base = (uintptr_t)&qoriq; + dev.of_node.offset = node; + dev.of_node.full_name = name; + + ret = fsl_qman_probe(&dev.pdev); + if (ret != 0) + panic("qman: init failed"); + + qman_sysinit_portals(); +} +SYSINIT(qman, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL); +#endif /* __rtems__ */ diff --git a/linux/drivers/soc/fsl/qbman/qman_driver.c b/linux/drivers/soc/fsl/qbman/qman_driver.c deleted file mode 100644 index 6923504e..00000000 --- a/linux/drivers/soc/fsl/qbman/qman_driver.c +++ /dev/null @@ -1,87 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2013 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "qman_priv.h" - -#include <linux/time.h> - -static int __init early_qman_init(void) -{ - struct device_node *dn; - u32 is_portal_available; - - qman_init(); - - is_portal_available = 0; - for_each_compatible_node(dn, NULL, "fsl,qman-portal") { - if (of_device_is_available(dn)) { - is_portal_available = 1; - break; - } - } - - if (!qman_have_ccsr() && is_portal_available) { - struct qman_fq fq = {.fqid = 1}; - struct qm_mcr_queryfq_np np; - int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT; - struct timespec nowts, diffts, startts = current_kernel_time(); - - /* Loop while querying given fqid succeeds or time out */ - while (1) { - err = qman_query_fq_np(&fq, &np); - if (!err) { - /* success, control-plane has configured QMan */ - break; - } else if (err != -ERANGE) { - pr_err("I/O error, continuing anyway\n"); - break; - } - nowts = current_kernel_time(); - diffts = timespec_sub(nowts, startts); - if (diffts.tv_sec > 0) { - if (!retry--) { - pr_err("Time out, control-plane dead?\n"); - break; - } - pr_warn("Polling for the control-plane (%d)\n", - retry); - } - } - } - - qman_resource_init(); - - return 0; -} -subsys_initcall(early_qman_init); diff --git a/linux/drivers/soc/fsl/qbman/qman_portal.c b/linux/drivers/soc/fsl/qbman/qman_portal.c index c74059b1..640343ac 100644 --- a/linux/drivers/soc/fsl/qbman/qman_portal.c +++ b/linux/drivers/soc/fsl/qbman/qman_portal.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,304 +35,40 @@ #include "qman_priv.h" #ifdef __rtems__ #include <bsp/qoriq.h> +#undef dev_crit +#undef dev_info +#define dev_crit(dev, fmt, ...) 
printf(fmt, ##__VA_ARGS__)
+#define dev_info dev_crit
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
 #endif /* __rtems__ */
 
 /* Enable portal interrupts (as opposed to polling mode) */
 #define CONFIG_FSL_DPA_PIRQ_SLOW 1
 #define CONFIG_FSL_DPA_PIRQ_FAST 1
 
-/* Global variable containing revision id (even on non-control plane systems
- * where CCSR isn't available) */
-u16 qman_ip_rev;
-EXPORT_SYMBOL(qman_ip_rev);
-u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
-EXPORT_SYMBOL(qm_channel_pool1);
-u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
-EXPORT_SYMBOL(qm_channel_caam);
-u16 qm_channel_pme = QMAN_CHANNEL_PME;
-EXPORT_SYMBOL(qm_channel_pme);
-u16 qm_channel_dce = QMAN_CHANNEL_DCE;
-EXPORT_SYMBOL(qm_channel_dce);
-u16 qman_portal_max;
-EXPORT_SYMBOL(qman_portal_max);
-
 #ifndef __rtems__
-/* For these variables, and the portal-initialisation logic, the
- * comments in bman_driver.c apply here so won't be repeated. */
-static struct qman_portal *shared_portals[NR_CPUS];
-static int num_shared_portals;
-static int shared_portals_idx;
-static LIST_HEAD(unused_pcfgs);
-#endif /* __rtems__ */
-
-/* A SDQCR mask comprising all the available/visible pool channels */
-static u32 pools_sdqcr;
-
-#define STR_ERR_NOPROP "No '%s' property in node %s\n"
-#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
-#define STR_FQID_RANGE "fsl,fqid-range"
-#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
-#define STR_CGRID_RANGE "fsl,cgrid-range"
-
-/* A "fsl,fqid-range" node; release the given range to the allocator */
-static __init int fsl_fqid_range_init(struct device_node *node)
-{
-	int ret;
-	const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
-
-	if (!range) {
-		pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
-		return -EINVAL;
-	}
-	if (ret != 8) {
-		pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
-		return -EINVAL;
-	}
-	qman_seed_fqid_range(range[0], range[1]);
-	pr_info("FQID allocator includes range %d:%d\n",
-		range[0], range[1]);
-	return 0;
-}
-
-/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
-static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
-{
-	int ret;
-	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
-	if (!chanid) {
-		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
-		return -EINVAL;
-	}
-	if (ret != 8) {
-		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
-		return -EINVAL;
-	}
-	for (ret = 0; ret < chanid[1]; ret++)
-		pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
-	return 0;
-}
-
-/* A "fsl,pool-channel-range" node; release the given range to the allocator */
-static __init int fsl_pool_channel_range_init(struct device_node *node)
-{
-	int ret;
-	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
-	if (!chanid) {
-		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
-		return -EINVAL;
-	}
-	if (ret != 8) {
-		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
-		return -EINVAL;
-	}
-	qman_seed_pool_range(chanid[0], chanid[1]);
-	pr_info("Pool channel allocator includes range %d:%d\n",
-		chanid[0], chanid[1]);
-	return 0;
-}
-
-/* A "fsl,cgrid-range" node; release the given range to the allocator */
-static __init int fsl_cgrid_range_init(struct device_node *node)
-{
-	struct qman_cgr cgr;
-	int ret, errors = 0;
-	const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
-
-	if (!range) {
-		pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE,
node->full_name); - return -EINVAL; - } - if (ret != 8) { - pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name); - return -EINVAL; - } - qman_seed_cgrid_range(range[0], range[1]); - pr_info("CGRID allocator includes range %d:%d\n", - range[0], range[1]); - for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) { - ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL); - if (ret) - errors++; - } - if (errors) - pr_err("Warning: %d error%s while initialising CGRs %d:%d\n", - errors, (errors > 1) ? "s" : "", range[0], range[1]); - return 0; -} - -static void qman_get_ip_revision(struct device_node *dn) -{ -#ifdef __rtems__ - struct device_node of_dns; -#endif /* __rtems__ */ - u16 ip_rev = 0; - - for_each_compatible_node(dn, NULL, "fsl,qman-portal") { - if (!of_device_is_available(dn)) - continue; - if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") || - of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) { - pr_err("Rev1.0 on P4080 rev1 is not supported!\n"); - BUG_ON(1); - } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") || - of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) { - ip_rev = QMAN_REV11; - qman_portal_max = 10; - } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") || - of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) { - ip_rev = QMAN_REV12; - qman_portal_max = 10; - } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") || - of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) { - ip_rev = QMAN_REV20; - qman_portal_max = 3; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.0.0")) { - ip_rev = QMAN_REV30; - qman_portal_max = 50; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.0.1")) { - ip_rev = QMAN_REV30; - qman_portal_max = 25; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.1.0")) { - ip_rev = QMAN_REV31; - qman_portal_max = 50; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.1.1")) { - ip_rev = QMAN_REV31; - qman_portal_max = 25; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.1.2")) { - ip_rev = QMAN_REV31; - qman_portal_max = 18; - } else if (of_device_is_compatible(dn, - "fsl,qman-portal-3.1.3")) { - ip_rev = QMAN_REV31; - qman_portal_max = 10; - } else { - pr_warn("Unknown version in portal node, default to rev1.1\n"); - ip_rev = QMAN_REV11; - qman_portal_max = 10; - } - - if (!qman_ip_rev) { - if (ip_rev) { - qman_ip_rev = ip_rev; - } else { - pr_warn("Unknown version, default to rev1.1\n"); - qman_ip_rev = QMAN_REV11; - } - } else if (ip_rev && (qman_ip_rev != ip_rev)) - pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n", - qman_ip_rev, dn->full_name, ip_rev); - if (qman_ip_rev == ip_rev) - break; - } -} - -#ifndef __rtems__ -/* Parse a portal node, perform generic mapping duties and return the config. It - * is not known at this stage for what purpose (or even if) the portal will be - * used. 
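
/*
 * (Editor's aside; illustrative only, not part of the patch.) The removed
 * range parsers above check 'ret != 8' because every "fsl,*-range"
 * property is two u32 cells (base, count), i.e. 8 bytes. Generic shape of
 * that check; the old code used the raw cells directly, which is fine on
 * this big-endian PowerPC target, while portable code converts:
 */
#include <linux/of.h>

static int parse_range(struct device_node *node, const char *prop,
		       u32 *base, u32 *count)
{
	int len;
	const __be32 *range = of_get_property(node, prop, &len);

	if (!range || len != 2 * sizeof(u32))
		return -EINVAL;
	*base = be32_to_cpu(range[0]);
	*count = be32_to_cpu(range[1]);
	return 0;
}
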
*/ -static struct qm_portal_config * __init parse_pcfg(struct device_node *node) -{ - struct qm_portal_config *pcfg; - const u32 *channel; - int irq, ret; - struct resource res; - - pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL); - if (!pcfg) - return NULL; - - /* - * This is a *horrible hack*, but the IOMMU/PAMU driver needs a - * 'struct device' in order to get the PAMU stashing setup and the QMan - * portal [driver] won't function at all without ring stashing - * - * Making the QMan portal driver nice and proper is part of the - * upstreaming effort - */ - pcfg->dev.bus = &platform_bus_type; - pcfg->dev.of_node = node; -#ifdef CONFIG_IOMMU_API - pcfg->dev.archdata.iommu_domain = NULL; -#endif - - ret = of_address_to_resource(node, DPA_PORTAL_CE, - &pcfg->addr_phys[DPA_PORTAL_CE]); - if (ret) { - pr_err("Can't get %s property 'reg::CE'\n", node->full_name); - goto err; - } - ret = of_address_to_resource(node, DPA_PORTAL_CI, - &pcfg->addr_phys[DPA_PORTAL_CI]); - if (ret) { - pr_err("Can't get %s property 'reg::CI'\n", node->full_name); - goto err; - } - - channel = of_get_property(node, "fsl,qman-channel-id", &ret); - if (!channel || (ret != 4)) { - pr_err("Can't get %s property 'fsl,qman-channel-id'\n", - node->full_name); - goto err; - } - pcfg->public_cfg.channel = *channel; - pcfg->public_cfg.cpu = -1; - irq = irq_of_parse_and_map(node, 0); - if (irq == NO_IRQ) { - pr_err("Can't get %s property 'interrupts'\n", node->full_name); - goto err; - } - pcfg->public_cfg.irq = irq; -#ifdef CONFIG_FSL_QMAN_CONFIG - /* We need the same LIODN offset for all portals */ - qman_liodn_fixup(pcfg->public_cfg.channel); -#endif - - pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( - pcfg->addr_phys[DPA_PORTAL_CE].start, - resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]), - 0); - pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( - pcfg->addr_phys[DPA_PORTAL_CI].start, - resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]), - _PAGE_GUARDED | _PAGE_NO_CACHE); - - return pcfg; -err: - kfree(pcfg); - return NULL; -} - -static struct qm_portal_config *get_pcfg(struct list_head *list) -{ - struct qm_portal_config *pcfg; - - if (list_empty(list)) - return NULL; - pcfg = list_entry(list->prev, struct qm_portal_config, list); - list_del(&pcfg->list); - return pcfg; -} +static struct cpumask portal_cpus; +/* protect qman global registers and global data shared among portals */ +static DEFINE_SPINLOCK(qman_lock); #endif /* __rtems__ */ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) { #ifdef CONFIG_FSL_PAMU - int ret; + struct device *dev = pcfg->dev; int window_count = 1; struct iommu_domain_geometry geom_attr; struct pamu_stash_attribute stash_attr; + int ret; pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); if (!pcfg->iommu_domain) { - pr_err("%s(): iommu_domain_alloc() failed", __func__); - goto _no_iommu; + dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); + goto no_iommu; } geom_attr.aperture_start = 0; geom_attr.aperture_end = @@ -341,14 +77,16 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, &geom_attr); if (ret < 0) { - pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret); - goto _iommu_domain_free; + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; } ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, &window_count); if (ret < 0) { - pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret); - goto _iommu_domain_free; + 
dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; } stash_attr.cpu = cpu; stash_attr.cache = PAMU_ATTR_CACHE_L1; @@ -356,45 +94,42 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); if (ret < 0) { - pr_err("%s(): iommu_domain_set_attr() = %d", - __func__, ret); - goto _iommu_domain_free; + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", + __func__, ret); + goto out_domain_free; } ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, IOMMU_READ | IOMMU_WRITE); if (ret < 0) { - pr_err("%s(): iommu_domain_window_enable() = %d", - __func__, ret); - goto _iommu_domain_free; + dev_err(dev, "%s(): iommu_domain_window_enable() = %d", + __func__, ret); + goto out_domain_free; } - ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev); + ret = iommu_attach_device(pcfg->iommu_domain, dev); if (ret < 0) { - pr_err("%s(): iommu_device_attach() = %d", - __func__, ret); - goto _iommu_domain_free; + dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, + ret); + goto out_domain_free; } ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &window_count); if (ret < 0) { - pr_err("%s(): iommu_domain_set_attr() = %d", - __func__, ret); - goto _iommu_detach_device; + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_detach_device; } -_no_iommu: -#endif -#ifdef CONFIG_FSL_QMAN_CONFIG - if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) +no_iommu: #endif - pr_warn("Failed to set the stash request queue\n"); + qman_set_sdest(pcfg->channel, cpu); return; #ifdef CONFIG_FSL_PAMU -_iommu_detach_device: +out_detach_device: iommu_detach_device(pcfg->iommu_domain, NULL); -_iommu_domain_free: +out_domain_free: iommu_domain_free(pcfg->iommu_domain); pcfg->iommu_domain = NULL; #endif @@ -403,65 +138,54 @@ _iommu_domain_free: static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) { struct qman_portal *p; + u32 irq_sources = 0; + + /* We need the same LIODN offset for all portals */ + qman_liodn_fixup(pcfg->channel); #ifndef __rtems__ pcfg->iommu_domain = NULL; #endif /* __rtems__ */ - portal_set_cpu(pcfg, pcfg->public_cfg.cpu); + portal_set_cpu(pcfg, pcfg->cpu); p = qman_create_affine_portal(pcfg, NULL); - if (p) { - u32 irq_sources = 0; - /* Determine what should be interrupt-vs-poll driven */ + if (!p) { + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", + __func__, pcfg->cpu); + return NULL; + } + + /* Determine what should be interrupt-vs-poll driven */ #ifdef CONFIG_FSL_DPA_PIRQ_SLOW - irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | - QM_PIRQ_CSCI; + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | + QM_PIRQ_CSCI; #endif #ifdef CONFIG_FSL_DPA_PIRQ_FAST - irq_sources |= QM_PIRQ_DQRI; + irq_sources |= QM_PIRQ_DQRI; #endif - qman_p_irqsource_add(p, irq_sources); - pr_info("Portal %sinitialised, cpu %d\n", -#ifndef __rtems__ - pcfg->public_cfg.is_shared ? 
"(shared) " : "", -#else /* __rtems__ */ - "", -#endif /* __rtems__ */ - pcfg->public_cfg.cpu); - } else - pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu); - return p; -} + qman_p_irqsource_add(p, irq_sources); #ifndef __rtems__ -static void init_slave(int cpu) -{ - struct qman_portal *p; - struct cpumask oldmask = *tsk_cpus_allowed(current); - - set_cpus_allowed_ptr(current, get_cpu_mask(cpu)); - p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu); - if (!p) - pr_err("Slave portal failure on cpu %d\n", cpu); - else - pr_info("Portal (slave) initialised, cpu %d\n", cpu); - set_cpus_allowed_ptr(current, &oldmask); - if (shared_portals_idx >= num_shared_portals) - shared_portals_idx = 0; -} + spin_lock(&qman_lock); + if (cpumask_equal(&portal_cpus, cpu_possible_mask)) { + /* all assigned portals are initialized now */ + qman_init_cgr_all(); + } -static struct cpumask want_unshared __initdata; -static struct cpumask want_shared __initdata; + if (!qman_dma_portal) + qman_dma_portal = p; -static int __init parse_qportals(char *str) -{ - return parse_portals_bootarg(str, &want_shared, &want_unshared, - "qportals"); + spin_unlock(&qman_lock); +#endif /* __rtems__ */ + + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); + + return p; } -__setup("qportals=", parse_qportals); static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, unsigned int cpu) { +#ifdef CONFIG_FSL_PAMU /* TODO */ struct pamu_stash_attribute stash_attr; int ret; @@ -471,77 +195,196 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); if (ret < 0) { - pr_err("Failed to update pamu stash setting\n"); + dev_err(pcfg->dev, + "Failed to update pamu stash setting\n"); return; } } -#ifdef CONFIG_FSL_QMAN_CONFIG - if (qman_set_sdest(pcfg->public_cfg.channel, cpu)) #endif - pr_warn("Failed to update portal's stash request queue\n"); + qman_set_sdest(pcfg->channel, cpu); } -static void qman_offline_cpu(unsigned int cpu) +#ifndef __rtems__ +static int qman_offline_cpu(unsigned int cpu) { struct qman_portal *p; const struct qm_portal_config *pcfg; - p = (struct qman_portal *)affine_portals[cpu]; + p = affine_portals[cpu]; if (p) { pcfg = qman_get_qm_portal_config(p); if (pcfg) { - irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0)); + irq_set_affinity(pcfg->irq, cpumask_of(0)); qman_portal_update_sdest(pcfg, 0); } } + return 0; } -#endif /* __rtems__ */ -#ifdef CONFIG_HOTPLUG_CPU -static void qman_online_cpu(unsigned int cpu) +static int qman_online_cpu(unsigned int cpu) { struct qman_portal *p; const struct qm_portal_config *pcfg; - p = (struct qman_portal *)affine_portals[cpu]; + p = affine_portals[cpu]; if (p) { pcfg = qman_get_qm_portal_config(p); if (pcfg) { - irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu)); + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); qman_portal_update_sdest(pcfg, cpu); } } + return 0; } -static int qman_hotplug_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int qman_portal_probe(struct platform_device *pdev) { - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - qman_online_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - qman_offline_cpu(cpu); - default: - break; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct qm_portal_config *pcfg; + struct resource *addr_phys[2]; 
+ void __iomem *va; + int irq, cpu, err; + u32 val; + + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) + return -ENOMEM; + + pcfg->dev = dev; + + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CE); + if (!addr_phys[0]) { + dev_err(dev, "Can't get %s property 'reg::CE'\n", + node->full_name); + return -ENXIO; + } + + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CI); + if (!addr_phys[1]) { + dev_err(dev, "Can't get %s property 'reg::CI'\n", + node->full_name); + return -ENXIO; + } + + err = of_property_read_u32(node, "cell-index", &val); + if (err) { + dev_err(dev, "Can't get %s property 'cell-index'\n", + node->full_name); + return err; + } + pcfg->channel = val; + pcfg->cpu = -1; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Can't get %s IRQ\n", node->full_name); + return -ENXIO; + } + pcfg->irq = irq; + + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); + if (!va) { + dev_err(dev, "ioremap::CE failed\n"); + goto err_ioremap1; + } + + pcfg->addr_virt[DPAA_PORTAL_CE] = va; + + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + if (!va) { + dev_err(dev, "ioremap::CI failed\n"); + goto err_ioremap2; + } + + pcfg->addr_virt[DPAA_PORTAL_CI] = va; + + pcfg->pools = qm_get_pools_sdqcr(); + + spin_lock(&qman_lock); + cpu = cpumask_next_zero(-1, &portal_cpus); + if (cpu >= nr_cpu_ids) { + /* unassigned portal, skip init */ + spin_unlock(&qman_lock); + return 0; + } + + cpumask_set_cpu(cpu, &portal_cpus); + spin_unlock(&qman_lock); + pcfg->cpu = cpu; + + if (dma_set_mask(dev, DMA_BIT_MASK(40))) { + dev_err(dev, "dma_set_mask() failed\n"); + goto err_portal_init; + } + + if (!init_pcfg(pcfg)) { + dev_err(dev, "portal init failed\n"); + goto err_portal_init; } - return NOTIFY_OK; + + /* clear irq affinity if assigned cpu is offline */ + if (!cpu_online(cpu)) + qman_offline_cpu(cpu); + + return 0; + +err_portal_init: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); +err_ioremap2: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); +err_ioremap1: + return -ENXIO; } -static struct notifier_block qman_hotplug_cpu_notifier = { - .notifier_call = qman_hotplug_cpu_callback, +static const struct of_device_id qman_portal_ids[] = { + { + .compatible = "fsl,qman-portal", + }, + {} +}; +MODULE_DEVICE_TABLE(of, qman_portal_ids); + +static struct platform_driver qman_portal_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = qman_portal_ids, + }, + .probe = qman_portal_probe, }; -#endif /* CONFIG_HOTPLUG_CPU */ -#ifdef __rtems__ +static int __init qman_portal_driver_register(struct platform_driver *drv) +{ + int ret; + + ret = platform_driver_register(drv); + if (ret < 0) + return ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "soc/qman_portal:online", + qman_online_cpu, qman_offline_cpu); + if (ret < 0) { + pr_err("qman: failed to register hotplug callbacks.\n"); + platform_driver_unregister(drv); + return ret; + } + return 0; +} + +module_driver(qman_portal_driver, + qman_portal_driver_register, platform_driver_unregister); +#else /* __rtems__ */ #include <bsp/fdt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + static struct qm_portal_config qman_configs[NR_CPUS]; -static void -qman_init_portals(void) + +void +qman_sysinit_portals(void) { const char *fdt = bsp_fdt_get(); struct device_node dn; @@ -575,7 +418,7 @@ qman_init_portals(void) struct qm_portal_config *pcfg = &qman_configs[cpu]; struct qman_portal 
*portal; struct resource res; - const u32 *channel; + u32 val; if (node < 0) panic("qman: missing portal in FDT"); @@ -600,197 +443,29 @@ qman_init_portals(void) BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] < (uintptr_t)&qoriq_qman_portal[2][0]); - pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL); - if (pcfg->public_cfg.irq == NO_IRQ) - panic("qman: no portal interrupt"); + ret = of_property_read_u32(&dn, "cell-index", &val); + if (ret != 0) + panic("qman: no cell-index"); + pcfg->channel = val; - channel = of_get_property(&dn, "fsl,qman-channel-id", &ret); - if (channel == NULL || ret != 4) - panic("qman: no portal channel ID"); - pcfg->public_cfg.channel = *channel; + pcfg->irq = of_irq_to_resource(&dn, 0, NULL); + if (pcfg->irq == NO_IRQ) + panic("qman: no portal interrupt"); - pcfg->public_cfg.cpu = cpu; - pcfg->public_cfg.pools = pools_sdqcr; + pcfg->cpu = cpu; + pcfg->pools = qm_get_pools_sdqcr(); portal = init_pcfg(pcfg); if (portal == NULL) panic("qman: cannot create portal"); + qman_portal_update_sdest(pcfg, cpu); + node = fdt_next_subnode(fdt, node); dn.offset = node; } -} -#endif /* __rtems__ */ -#ifndef __rtems__ -__init int qman_init(void) -{ - struct cpumask slave_cpus; - struct cpumask unshared_cpus = *cpu_none_mask; - struct cpumask shared_cpus = *cpu_none_mask; - LIST_HEAD(unshared_pcfgs); - LIST_HEAD(shared_pcfgs); - struct device_node *dn; - struct qm_portal_config *pcfg; - struct qman_portal *p; - int cpu, ret; - struct cpumask offline_cpus; - - /* Initialise the QMan (CCSR) device */ - for_each_compatible_node(dn, NULL, "fsl,qman") { - if (!qman_init_ccsr(dn)) - pr_info("Err interrupt handler present\n"); - else - pr_err("CCSR setup failed\n"); - } -#else /* __rtems__ */ -int -qman_init(struct device_node *dn) -{ - struct device_node of_dns; - int ret; -#endif /* __rtems__ */ -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP - /* Setup lookup table for FQ demux */ - ret = qman_setup_fq_lookup_table(qman_fqd_size()/64); - if (ret) - return ret; -#endif - /* Get qman ip revision */ - qman_get_ip_revision(dn); - if ((qman_ip_rev & 0xff00) >= QMAN_REV30) { - qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; - qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; - qm_channel_pme = QMAN_CHANNEL_PME_REV3; - } - - /* Parse pool channels into the SDQCR mask. (Must happen before portals - * are initialised.) */ - for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { - ret = fsl_pool_channel_range_sdqcr(dn); - if (ret) - return ret; - } - -#ifndef __rtems__ - memset(affine_portals, 0, sizeof(void *) * num_possible_cpus()); - /* Initialise portals. 
See bman_driver.c for comments */ - for_each_compatible_node(dn, NULL, "fsl,qman-portal") { - if (!of_device_is_available(dn)) - continue; - pcfg = parse_pcfg(dn); - if (pcfg) { - pcfg->public_cfg.pools = pools_sdqcr; - list_add_tail(&pcfg->list, &unused_pcfgs); - } - } - for_each_possible_cpu(cpu) { - if (cpumask_test_cpu(cpu, &want_shared)) { - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &shared_pcfgs); - cpumask_set_cpu(cpu, &shared_cpus); - } - if (cpumask_test_cpu(cpu, &want_unshared)) { - if (cpumask_test_cpu(cpu, &shared_cpus)) - continue; - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &unshared_pcfgs); - cpumask_set_cpu(cpu, &unshared_cpus); - } - } - if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) { - for_each_possible_cpu(cpu) { - pcfg = get_pcfg(&unused_pcfgs); - if (!pcfg) - break; - pcfg->public_cfg.cpu = cpu; - list_add_tail(&pcfg->list, &unshared_pcfgs); - cpumask_set_cpu(cpu, &unshared_cpus); - } - } - cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus); - cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus); - if (cpumask_empty(&slave_cpus)) { - if (!list_empty(&shared_pcfgs)) { - cpumask_or(&unshared_cpus, &unshared_cpus, - &shared_cpus); - cpumask_clear(&shared_cpus); - list_splice_tail(&shared_pcfgs, &unshared_pcfgs); - INIT_LIST_HEAD(&shared_pcfgs); - } - } else { - if (list_empty(&shared_pcfgs)) { - pcfg = get_pcfg(&unshared_pcfgs); - if (!pcfg) { - pr_crit("No portals available!\n"); - return 0; - } - cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus); - cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus); - list_add_tail(&pcfg->list, &shared_pcfgs); - } - } - list_for_each_entry(pcfg, &unshared_pcfgs, list) { - pcfg->public_cfg.is_shared = 0; - p = init_pcfg(pcfg); - } - list_for_each_entry(pcfg, &shared_pcfgs, list) { - pcfg->public_cfg.is_shared = 1; - p = init_pcfg(pcfg); - if (p) - shared_portals[num_shared_portals++] = p; - } - if (!cpumask_empty(&slave_cpus)) - for_each_cpu(cpu, &slave_cpus) - init_slave(cpu); -#else /* __rtems__ */ - qman_init_portals(); -#endif /* __rtems__ */ - pr_info("Portals initialised\n"); -#ifndef __rtems__ - cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask); - for_each_cpu(cpu, &offline_cpus) - qman_offline_cpu(cpu); -#endif /* __rtems__ */ -#ifdef CONFIG_HOTPLUG_CPU - register_hotcpu_notifier(&qman_hotplug_cpu_notifier); -#endif - return 0; + /* all assigned portals are initialized now */ + qman_init_cgr_all(); } - -__init int qman_resource_init(void) -{ -#ifdef __rtems__ - struct device_node of_dns; #endif /* __rtems__ */ - struct device_node *dn; - int ret; - - /* Initialise FQID allocation ranges */ - for_each_compatible_node(dn, NULL, "fsl,fqid-range") { - ret = fsl_fqid_range_init(dn); - if (ret) - return ret; - } - /* Initialise CGRID allocation ranges */ - for_each_compatible_node(dn, NULL, "fsl,cgrid-range") { - ret = fsl_cgrid_range_init(dn); - if (ret) - return ret; - } - /* Parse pool channels into the allocator. (Must happen after portals - * are initialised.) 
*/ - for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") { - ret = fsl_pool_channel_range_init(dn); - if (ret) - return ret; - } - - return 0; -} diff --git a/linux/drivers/soc/fsl/qbman/qman_priv.h b/linux/drivers/soc/fsl/qbman/qman_priv.h index f04bd476..c03f1b5b 100644 --- a/linux/drivers/soc/fsl/qbman/qman_priv.h +++ b/linux/drivers/soc/fsl/qbman/qman_priv.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,93 +34,213 @@ #include <soc/fsl/qman.h> #include <linux/iommu.h> + +#if defined(CONFIG_FSL_PAMU) #include <asm/fsl_pamu_stash.h> +#endif + +struct qm_mcr_querywq { + u8 verb; + u8 result; + u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ + u8 __reserved[28]; + u32 wq_len[8]; +} __packed; + +static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq) +{ + return wq->channel_wq >> 3; +} + +struct __qm_mcr_querycongestion { + u32 state[8]; +}; + +/* "Query Congestion Group State" */ +struct qm_mcr_querycongestion { + u8 verb; + u8 result; + u8 __reserved[30]; + /* Access this struct using qman_cgrs_get() */ + struct __qm_mcr_querycongestion state; +} __packed; + +/* "Query CGR" */ +struct qm_mcr_querycgr { + u8 verb; + u8 result; + u16 __reserved1; + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[6]; + u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ + __be32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u8 __reserved3[3]; + u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ + __be32 a_bcnt_lo; /* low 32-bits of 40-bit */ + __be32 cscn_targ_swp[4]; +} __packed; + +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo); +} +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); +} + +/* "Query FQ Non-Programmable Fields" */ + +struct qm_mcr_queryfq_np { + u8 verb; + u8 result; + u8 __reserved1; + u8 state; /* QM_MCR_NP_STATE_*** */ + u32 fqd_link; /* 24-bit, _res2[24-31] */ + u16 odp_seq; /* 14-bit, _res3[14-15] */ + u16 orp_nesn; /* 14-bit, _res4[14-15] */ + u16 orp_ea_hseq; /* 15-bit, _res5[15] */ + u16 orp_ea_tseq; /* 15-bit, _res6[15] */ + u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */ + u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */ + u32 pfdr_hptr; /* 24-bit, _res9[24-31] */ + u32 pfdr_tptr; /* 24-bit, _res10[24-31] */ + u8 __reserved2[5]; + u8 is; /* 1-bit, _res12[1-7] */ + u16 ics_surp; + u32 byte_cnt; + u32 frm_cnt; /* 24-bit, _res13[24-31] */ + u32 __reserved3; + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ + u16 __reserved4; + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ +} __packed; + +#define QM_MCR_NP_STATE_FE 0x10 +#define QM_MCR_NP_STATE_R 0x08 +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ +#define QM_MCR_NP_STATE_OOS 0x00 +#define QM_MCR_NP_STATE_RETIRED 0x01 +#define QM_MCR_NP_STATE_TEN_SCHED 0x02 +#define QM_MCR_NP_STATE_TRU_SCHED 0x03 +#define QM_MCR_NP_STATE_PARKED 0x04 +#define QM_MCR_NP_STATE_ACTIVE 0x05 +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ +#define 
QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ + +enum qm_mcr_queryfq_np_masks { + qm_mcr_fqd_link_mask = BIT(24)-1, + qm_mcr_odp_seq_mask = BIT(14)-1, + qm_mcr_orp_nesn_mask = BIT(14)-1, + qm_mcr_orp_ea_hseq_mask = BIT(15)-1, + qm_mcr_orp_ea_tseq_mask = BIT(15)-1, + qm_mcr_orp_ea_hptr_mask = BIT(24)-1, + qm_mcr_orp_ea_tptr_mask = BIT(24)-1, + qm_mcr_pfdr_hptr_mask = BIT(24)-1, + qm_mcr_pfdr_tptr_mask = BIT(24)-1, + qm_mcr_is_mask = BIT(1)-1, + qm_mcr_frm_cnt_mask = BIT(24)-1, +}; +#define qm_mcr_np_get(np, field) \ + ((np)->field & (qm_mcr_##field##_mask)) /* Congestion Groups */ -/* This wrapper represents a bit-array for the state of the 256 QMan congestion +/* + * This wrapper represents a bit-array for the state of the 256 QMan congestion * groups. Is also used as a *mask* for congestion groups, eg. so we ignore * those that don't concern us. We harness the structure and accessor details * already used in the management command to query congestion groups. */ +#define CGR_BITS_PER_WORD 5 +#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD) +#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f)) +#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) + struct qman_cgrs { struct __qm_mcr_querycongestion q; }; + static inline void qman_cgrs_init(struct qman_cgrs *c) { memset(c, 0, sizeof(*c)); } + static inline void qman_cgrs_fill(struct qman_cgrs *c) { memset(c, 0xff, sizeof(*c)); } -static inline int qman_cgrs_get(struct qman_cgrs *c, int num) -{ - return QM_MCR_QUERYCONGESTION(&c->q, num); -} -static inline void qman_cgrs_set(struct qman_cgrs *c, int num) -{ - c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num)); -} -static inline void qman_cgrs_unset(struct qman_cgrs *c, int num) -{ - c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num)); -} -static inline int qman_cgrs_next(struct qman_cgrs *c, int num) + +static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr) { - while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num)) - ; - return num; + return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); } + static inline void qman_cgrs_cp(struct qman_cgrs *dest, const struct qman_cgrs *src) { *dest = *src; } + static inline void qman_cgrs_and(struct qman_cgrs *dest, const struct qman_cgrs *a, const struct qman_cgrs *b) { int ret; - u32 *_d = dest->q.__state; - const u32 *_a = a->q.__state; - const u32 *_b = b->q.__state; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; for (ret = 0; ret < 8; ret++) - *(_d++) = *(_a++) & *(_b++); + *_d++ = *_a++ & *_b++; } + static inline void qman_cgrs_xor(struct qman_cgrs *dest, const struct qman_cgrs *a, const struct qman_cgrs *b) { int ret; - u32 *_d = dest->q.__state; - const u32 *_a = a->q.__state; - const u32 *_b = b->q.__state; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; for (ret = 0; ret < 8; ret++) - *(_d++) = *(_a++) ^ *(_b++); + *_d++ = *_a++ ^ *_b++; } -/* used by CCSR and portal interrupt code */ -enum qm_isr_reg { - qm_isr_status = 0, - qm_isr_enable = 1, - qm_isr_disable = 2, - qm_isr_inhibit = 3 -}; +void qman_init_cgr_all(void); struct qm_portal_config { - /* Corenet portal addresses; - * [0]==cache-enabled, [1]==cache-inhibited. */ - __iomem void *addr_virt[2]; + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. 
+ */ + void __iomem *addr_virt[2]; #ifndef __rtems__ - struct resource addr_phys[2]; - struct device dev; + struct device *dev; struct iommu_domain *iommu_domain; /* Allow these to be joined in lists */ struct list_head list; #endif /* __rtems__ */ /* User-visible portal configuration settings */ - struct qman_portal_config public_cfg; + /* portal is affined to this cpu */ + int cpu; + /* portal interrupt line */ + int irq; + /* + * the portal's dedicated channel id, used initialising + * frame queues to target this portal when scheduled + */ + u16 channel; + /* + * mask of pool channels this portal has dequeue access to + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) + */ + u32 pools; }; /* Revision info (for errata and feature handling) */ @@ -131,57 +251,70 @@ struct qm_portal_config { #define QMAN_REV31 0x0301 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ -extern u16 qman_portal_max; +#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ +extern struct gen_pool *qm_fqalloc; /* FQID allocator */ +extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */ +u32 qm_get_pools_sdqcr(void); -#ifdef CONFIG_FSL_QMAN_CONFIG -/* Hooks from qman_driver.c to qman_config.c */ -int qman_init_ccsr(struct device_node *node); +int qman_wq_alloc(void); void qman_liodn_fixup(u16 channel); -int qman_set_sdest(u16 channel, unsigned int cpu_idx); -size_t qman_fqd_size(void); -#endif - -int qm_set_wpm(int wpm); -int qm_get_wpm(int *wpm); - -/* Hooks from qman_driver.c in to qman_high.c */ -struct qman_portal *qman_create_portal( - struct qman_portal *portal, - const struct qm_portal_config *config, - const struct qman_cgrs *cgrs); +void qman_set_sdest(u16 channel, unsigned int cpu_idx); struct qman_portal *qman_create_affine_portal( const struct qm_portal_config *config, const struct qman_cgrs *cgrs); -struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect, - int cpu); const struct qm_portal_config *qman_destroy_affine_portal(void); -void qman_destroy_portal(struct qman_portal *qm); - -/* This CGR feature is supported by h/w and required by unit-tests and the - * debugfs hooks, so is implemented in the driver. However it allows an explicit - * corruption of h/w fields by s/w that are usually incorruptible (because the - * counters are usually maintained entirely within h/w). As such, we declare - * this API internally. */ -int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, - struct qm_mcr_cgrtestwrite *result); - -#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP -/* If the fq object pointer is greater than the size of context_b field, - * than a lookup table is required. */ -int qman_setup_fq_lookup_table(size_t num_entries); -#endif +/* + * qman_query_fq - Queries FQD fields (via h/w query command) + * @fq: the frame queue object to be queried + * @fqd: storage for the queried FQD fields + */ +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); -/*************************************************/ -/* QMan s/w corenet portal, low-level i/face */ -/*************************************************/ +/* + * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. 
+ */ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) + +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ + +/* + * qman_volatile_dequeue - Issue a volatile dequeue command + * @fq: the frame queue object to dequeue from + * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options + * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set() + * + * Attempts to lock access to the portal's VDQCR volatile dequeue functionality. + * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and + * the VDQCR is already in use, otherwise returns non-zero for failure. If + * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once + * the VDQCR command has finished executing (ie. once the callback for the last + * DQRR entry resulting from the VDQCR command has been called). If not using + * the FINISH flag, completion can be determined either by detecting the + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits + * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting + * for the QMAN_FQ_STATE_VDQCR bit to disappear. + */ +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); -/* Note: most functions are only used by the high-level interface, so are - * inlined from qman.h. The stuff below is for use by other parts of the - * driver. */ +int qman_alloc_fq_table(u32 num_fqids); + +/* QMan s/w corenet portal, low-level i/face */ -/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one +/* + * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one * dequeue TYPE. Choose TOKEN (8-bit). * If SOURCE == CHANNELS, * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). @@ -216,42 +349,8 @@ int qman_setup_fq_lookup_table(size_t num_entries); #define QM_VDQCR_FQID_MASK 0x00ffffff #define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) -/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT. - * If MODE==SCHEDULED - * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE. - * If CHANNELS, - * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels. - * You can choose DEDICATED_PRECEDENCE if the portal channel should have - * priority. - * If SPECIFICWQ, - * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the - * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the - * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the - * same value. - * If MODE==UNSCHEDULED - * Choose FQID(). 
- */ -#define QM_PDQCR_MODE_SCHEDULED 0x0 -#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000 -#define QM_PDQCR_SCHEDULED_CHANNELS 0x0 -#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000 -#define QM_PDQCR_COUNT_EXACT1 0x0 -#define QM_PDQCR_COUNT_UPTO3 0x20000000 -#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000 -#define QM_PDQCR_TYPE_MASK 0x03000000 -#define QM_PDQCR_TYPE_NULL 0x0 -#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000 -#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000 -#define QM_PDQCR_TYPE_ACTIVE 0x03000000 -#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000 -#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) -#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7 -#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000 -#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4) -#define QM_PDQCR_SPECIFICWQ_WQ(n) (n) -#define QM_PDQCR_FQID(n) ((n) & 0xffffff) - -/* Used by all portal interrupt registers except 'inhibit' +/* + * Used by all portal interrupt registers except 'inhibit' * Channels with frame availability */ #define QM_PIRQ_DQAVAIL 0x0000ffff @@ -263,31 +362,10 @@ int qman_setup_fq_lookup_table(size_t num_entries); /* This mask contains all the "irqsource" bits visible to API users */ #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) -/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write - * the disable register" rather than "disable the ability to write". */ -#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status) -#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m) -#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable) -#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v) -#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable) -#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v) -/* TODO: unfortunate name-clash here, reword? */ -#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1) -#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0) - -#ifdef CONFIG_FSL_QMAN_CONFIG -int qman_have_ccsr(void); -#else -#define qman_have_ccsr 0 -#endif - -#ifndef __rtems__ -__init int qman_init(void); -#else /* __rtems__ */ -int qman_init(struct device_node *dn); -#endif /* __rtems__ */ -__init int qman_resource_init(void); - -extern void *affine_portals[NR_CPUS]; +extern struct qman_portal *affine_portals[NR_CPUS]; +extern struct qman_portal *qman_dma_portal; const struct qm_portal_config *qman_get_qm_portal_config( struct qman_portal *portal); +#ifdef __rtems__ +void qman_sysinit_portals(void); +#endif /* __rtems__ */ diff --git a/linux/drivers/soc/fsl/qbman/qman_test.c b/linux/drivers/soc/fsl/qbman/qman_test.c deleted file mode 100644 index 18c04482..00000000 --- a/linux/drivers/soc/fsl/qbman/qman_test.c +++ /dev/null @@ -1,61 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "qman_test.h" - -MODULE_AUTHOR("Geoff Thorpe"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("QMan testing"); - -static int test_init(void) -{ - int loop = 1; - - while (loop--) { -#ifdef CONFIG_FSL_QMAN_TEST_STASH - qman_test_stash(); -#endif -#ifdef CONFIG_FSL_QMAN_TEST_API - qman_test_api(); -#endif - } - return 0; -} - -static void test_exit(void) -{ -} - -module_init(test_init); -module_exit(test_exit); diff --git a/linux/drivers/soc/fsl/qbman/qman_test.h b/linux/drivers/soc/fsl/qbman/qman_test.h index 0b34a670..d5f8cb22 100644 --- a/linux/drivers/soc/fsl/qbman/qman_test.h +++ b/linux/drivers/soc/fsl/qbman/qman_test.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,17 +28,9 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/kthread.h> -#include <linux/platform_device.h> +#include "qman_priv.h" -#include <soc/fsl/qman.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -void qman_test_stash(void); -void qman_test_api(void); +int qman_test_stash(void); +int qman_test_api(void); diff --git a/linux/drivers/soc/fsl/qbman/qman_test_api.c b/linux/drivers/soc/fsl/qbman/qman_test_api.c index 63a6d11d..d5de4c9a 100644 --- a/linux/drivers/soc/fsl/qbman/qman_test_api.c +++ b/linux/drivers/soc/fsl/qbman/qman_test_api.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,10 +34,6 @@ #include "qman_test.h" -/*************/ -/* constants */ -/*************/ - #define CGR_ID 27 #define POOL_ID 2 #define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID @@ -51,21 +47,13 @@ #define PORTAL_OPAQUE ((void *)0xf00dbeef) #define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) -/*************************************/ -/* Predeclarations (eg. for fq_base) */ -/*************************************/ - static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, struct qman_fq *, const struct qm_dqrr_entry *); static void cb_ern(struct qman_portal *, struct qman_fq *, - const struct qm_mr_entry *); + const union qm_mr_entry *); static void cb_fqs(struct qman_portal *, struct qman_fq *, - const struct qm_mr_entry *); - -/***************/ -/* global vars */ -/***************/ + const union qm_mr_entry *); static struct qm_fd fd, fd_dq; static struct qman_fq fq_base = { @@ -76,67 +64,68 @@ static struct qman_fq fq_base = { static DECLARE_WAIT_QUEUE_HEAD(waitqueue); static int retire_complete, sdqcr_complete; -/**********************/ -/* internal functions */ -/**********************/ - /* Helpers for initialising and "incrementing" a frame descriptor */ -static void fd_init(struct qm_fd *__fd) +static void fd_init(struct qm_fd *fd) { - qm_fd_addr_set64(__fd, 0xabdeadbeefLLU); - __fd->format = qm_fd_contig_big; - __fd->length29 = 0x0000ffff; - __fd->cmd = 0xfeedf00d; + qm_fd_addr_set64(fd, 0xabdeadbeefLLU); + qm_fd_set_contig_big(fd, 0x0000ffff); + fd->cmd = cpu_to_be32(0xfeedf00d); } -static void fd_inc(struct qm_fd *__fd) +static void fd_inc(struct qm_fd *fd) { - u64 t = qm_fd_addr_get64(__fd); + u64 t = qm_fd_addr_get64(fd); int z = t >> 40; + unsigned int len, off; + enum qm_fd_format fmt; t <<= 1; if (z) t |= 1; - qm_fd_addr_set64(__fd, t); - __fd->length29--; - __fd->cmd++; + qm_fd_addr_set64(fd, t); + + fmt = qm_fd_get_format(fd); + off = qm_fd_get_offset(fd); + len = qm_fd_get_length(fd); + len--; + qm_fd_set_param(fd, fmt, off, len); + + fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1); } /* The only part of the 'fd' we can't memcmp() is the ppid */ -static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b) { - int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; - - if (!r) - r = a->format - b->format; - if (!r) - r = a->opaque - b->opaque; - if (!r) - r = a->cmd - b->cmd; - return r; + bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b); + + neq |= qm_fd_get_format(a) != qm_fd_get_format(b); + neq |= a->cfg != b->cfg; + neq |= a->cmd != b->cmd; + + return neq; } -/********/ /* test */ -/********/ - -static void do_enqueues(struct qman_fq *fq) +static int do_enqueues(struct qman_fq *fq) { unsigned int loop; + int err = 0; for (loop = 0; loop < NUM_ENQUEUES; loop++) { - if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT | - (((loop + 1) == NUM_ENQUEUES) ? 
- QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0))) - panic("qman_enqueue() failed\n"); + if (qman_enqueue(fq, &fd)) { + pr_crit("qman_enqueue() failed\n"); + err = -EIO; + } fd_inc(&fd); } + + return err; } -void qman_test_api(void) +int qman_test_api(void) { - u32 flags; - int res; + u32 flags, frmcnt; + int err; struct qman_fq *fq = &fq_base; pr_info("%s(): Starting\n", __func__); @@ -144,57 +133,93 @@ void qman_test_api(void) fd_init(&fd_dq); /* Initialise (parked) FQ */ - if (qman_create_fq(0, FQ_FLAGS, fq)) - panic("qman_create_fq() failed\n"); - if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) - panic("qman_init_fq() failed\n"); - + err = qman_create_fq(0, FQ_FLAGS, fq); + if (err) { + pr_crit("qman_create_fq() failed\n"); + goto failed; + } + err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); + if (err) { + pr_crit("qman_init_fq() failed\n"); + goto failed; + } /* Do enqueues + VDQCR, twice. (Parked FQ) */ - do_enqueues(fq); + err = do_enqueues(fq); + if (err) + goto failed; pr_info("VDQCR (till-empty);\n"); - if (qman_volatile_dequeue(fq, VDQCR_FLAGS, - QM_VDQCR_NUMFRAMES_TILLEMPTY)) - panic("qman_volatile_dequeue() failed\n"); - do_enqueues(fq); + frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY; + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } + err = do_enqueues(fq); + if (err) + goto failed; pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); - if (qman_volatile_dequeue(fq, VDQCR_FLAGS, - QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) - panic("qman_volatile_dequeue() failed\n"); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, - NUM_ENQUEUES); - if (qman_volatile_dequeue(fq, VDQCR_FLAGS, - QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) - panic("qman_volatile_dequeue() failed\n"); + NUM_ENQUEUES); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_err("qman_volatile_dequeue() failed\n"); + goto failed; + } - do_enqueues(fq); + err = do_enqueues(fq); + if (err) + goto failed; pr_info("scheduled dequeue (till-empty)\n"); - if (qman_schedule_fq(fq)) - panic("qman_schedule_fq() failed\n"); + err = qman_schedule_fq(fq); + if (err) { + pr_crit("qman_schedule_fq() failed\n"); + goto failed; + } wait_event(waitqueue, sdqcr_complete); /* Retire and OOS the FQ */ - res = qman_retire_fq(fq, &flags); - if (res < 0) - panic("qman_retire_fq() failed\n"); + err = qman_retire_fq(fq, &flags); + if (err < 0) { + pr_crit("qman_retire_fq() failed\n"); + goto failed; + } wait_event(waitqueue, retire_complete); - if (flags & QMAN_FQ_STATE_BLOCKOOS) - panic("leaking frames\n"); - if (qman_oos_fq(fq)) - panic("qman_oos_fq() failed\n"); - qman_destroy_fq(fq, 0); + if (flags & QMAN_FQ_STATE_BLOCKOOS) { + err = -EIO; + pr_crit("leaking frames\n"); + goto failed; + } + err = qman_oos_fq(fq); + if (err) { + pr_crit("qman_oos_fq() failed\n"); + goto failed; + } + qman_destroy_fq(fq); pr_info("%s(): Finished\n", __func__); + return 0; + +failed: + WARN_ON(1); + return err; } static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { - if (fd_cmp(&fd_dq, &dq->fd)) { + if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) { pr_err("BADNESS: dequeued frame doesn't match;\n"); - BUG(); + return qman_cb_dqrr_consume; } 
fd_inc(&fd_dq); - if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) { sdqcr_complete = 1; wake_up(&waitqueue); } @@ -202,18 +227,22 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, } static void cb_ern(struct qman_portal *p, struct qman_fq *fq, - const struct qm_mr_entry *msg) + const union qm_mr_entry *msg) { - panic("cb_ern() unimplemented"); + pr_crit("cb_ern() unimplemented"); + WARN_ON(1); } static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, - const struct qm_mr_entry *msg) + const union qm_mr_entry *msg) { u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); - if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) - panic("unexpected FQS message"); + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) { + pr_crit("unexpected FQS message"); + WARN_ON(1); + return; + } #ifndef __rtems__ pr_info("Retirement message received\n"); #endif /* __rtems__ */ diff --git a/linux/drivers/soc/fsl/qbman/qman_test_stash.c b/linux/drivers/soc/fsl/qbman/qman_test_stash.c index a3ca6603..9c3a135f 100644 --- a/linux/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/linux/drivers/soc/fsl/qbman/qman_test_stash.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright 2009 - 2015 Freescale Semiconductor, Inc. +/* Copyright 2009 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,14 +35,15 @@ #include "qman_test.h" #include <linux/dma-mapping.h> +#include <linux/delay.h> #ifdef __rtems__ #include <rtems/malloc.h> #undef msleep #define msleep(x) usleep((x) * 1000) -#define L1_CACHE_BYTES 64 #endif /* __rtems__ */ -/* Algorithm: +/* + * Algorithm: * * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The @@ -86,23 +87,28 @@ * initialisation targets the correct cpu. */ -/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes - * the fn from irq context, which is too restrictive). */ +/* + * helper to run something on all cpus (can't use on_each_cpu(), as that invokes + * the fn from irq context, which is too restrictive). + */ struct bstrap { - void (*fn)(void); + int (*fn)(void); atomic_t started; }; -static int bstrap_fn(void *__bstrap) +static int bstrap_fn(void *bs) { - struct bstrap *bstrap = __bstrap; + struct bstrap *bstrap = bs; + int err; atomic_inc(&bstrap->started); - bstrap->fn(); + err = bstrap->fn(); + if (err) + return err; while (!kthread_should_stop()) - msleep(1); + msleep(20); return 0; } -static int on_all_cpus(void (*fn)(void)) +static int on_all_cpus(int (*fn)(void)) { int cpu; @@ -127,12 +133,14 @@ static int on_all_cpus(void (*fn)(void)) return -ENOMEM; kthread_bind(k, cpu); wake_up_process(k); - /* If we call kthread_stop() before the "wake up" has had an + /* + * If we call kthread_stop() before the "wake up" has had an * effect, then the thread may exit with -EINTR without ever * running the function. So poll until it's started before - * requesting it to stop. */ + * requesting it to stop. 
+ */ while (!atomic_read(&bstrap.started)) - msleep(10); + msleep(20); ret = kthread_stop(k); if (ret) return ret; @@ -172,8 +180,10 @@ struct hp_cpu { struct list_head handlers; /* list node for linking us into 'hp_cpu_list' */ struct list_head node; - /* when repeatedly scanning 'hp_list', each time linking the n'th - * handlers together, this is used as per-cpu iterator state */ + /* + * when repeatedly scanning 'hp_list', each time linking the n'th + * handlers together, this is used as per-cpu iterator state + */ struct hp_handler *iterator; }; @@ -182,7 +192,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); /* links together the hp_cpu structs, in first-come first-serve order. */ static LIST_HEAD(hp_cpu_list); -static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); +static DEFINE_SPINLOCK(hp_lock); static unsigned int hp_cpu_list_length; @@ -202,6 +212,9 @@ static u32 *frame_ptr; static dma_addr_t frame_dma; #endif /* __rtems__ */ +/* needed for dma_map*() */ +static const struct qm_portal_config *pcfg; + /* the main function waits on this */ static DECLARE_WAIT_QUEUE_HEAD(queue); @@ -217,22 +230,28 @@ static inline u32 do_lfsr(u32 prev) return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); } -static void allocate_frame_data(void) +static int allocate_frame_data(void) { u32 lfsr = HP_FIRST_WORD; int loop; + #ifndef __rtems__ - struct platform_device *pdev = platform_device_alloc("foobar", -1); + if (!qman_dma_portal) { + pr_crit("portal not available\n"); + return -EIO; + } + + pcfg = qman_get_qm_portal_config(qman_dma_portal); +#else /* __rtems__ */ + pcfg = qman_get_qm_portal_config(qman_get_affine_portal(0)); +#endif /* __rtems__ */ - if (!pdev) - panic("platform_device_alloc() failed"); - if (platform_device_add(pdev)) - panic("platform_device_add() failed"); +#ifndef __rtems__ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); if (!__frame_ptr) - panic("kmalloc() failed"); - frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) & - ~(unsigned long)63); + return -ENOMEM; + + frame_ptr = PTR_ALIGN(__frame_ptr, 64); #else /* __rtems__ */ frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0); if (frame_ptr == NULL) @@ -242,73 +261,96 @@ static void allocate_frame_data(void) frame_ptr[loop] = lfsr; lfsr = do_lfsr(lfsr); } + #ifndef __rtems__ - frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, - DMA_BIDIRECTIONAL); - platform_device_del(pdev); - platform_device_put(pdev); + frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(pcfg->dev, frame_dma)) { + pr_crit("dma mapping failure\n"); + kfree(__frame_ptr); + return -EIO; + } + #endif /* __rtems__ */ + return 0; } static void deallocate_frame_data(void) { #ifndef __rtems__ - kfree(__frame_ptr); + dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); #endif /* __rtems__ */ } -static inline void process_frame_data(struct hp_handler *handler, - const struct qm_fd *fd) +static inline int process_frame_data(struct hp_handler *handler, + const struct qm_fd *fd) { u32 *p = handler->frame_ptr; u32 lfsr = HP_FIRST_WORD; int loop; - if (qm_fd_addr_get64(fd) != handler->addr) - panic("bad frame address"); + if (qm_fd_addr_get64(fd) != handler->addr) { + pr_crit("bad frame address, [%llX != %llX]\n", + qm_fd_addr_get64(fd), handler->addr); + return -EIO; + } for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { *p ^= handler->rx_mixer; - if (*p != lfsr) - panic("corrupt frame data"); + if (*p != lfsr) { + 
pr_crit("corrupt frame data"); + return -EIO; + } *p ^= handler->tx_mixer; lfsr = do_lfsr(lfsr); } + return 0; } static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dqrr) + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) { struct hp_handler *handler = (struct hp_handler *)fq; - process_frame_data(handler, &dqrr->fd); - if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) - panic("qman_enqueue() failed"); + if (process_frame_data(handler, &dqrr->fd)) { + WARN_ON(1); + goto skip; + } + if (qman_enqueue(&handler->tx, &dqrr->fd)) { + pr_crit("qman_enqueue() failed"); + WARN_ON(1); + } +skip: return qman_cb_dqrr_consume; } static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dqrr) + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) { struct hp_handler *handler = (struct hp_handler *)fq; process_frame_data(handler, &dqrr->fd); if (++loop_counter < HP_LOOPS) { - if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) - panic("qman_enqueue() failed"); + if (qman_enqueue(&handler->tx, &dqrr->fd)) { + pr_crit("qman_enqueue() failed"); + WARN_ON(1); + goto skip; + } } else { pr_info("Received final (%dth) frame\n", loop_counter); wake_up(&queue); } +skip: return qman_cb_dqrr_consume; } -static void create_per_cpu_handlers(void) +static int create_per_cpu_handlers(void) { struct hp_handler *handler; int loop; - struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); + struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus); hp_cpu->processor_id = smp_processor_id(); spin_lock(&hp_lock); @@ -318,8 +360,11 @@ static void create_per_cpu_handlers(void) INIT_LIST_HEAD(&hp_cpu->handlers); for (loop = 0; loop < HP_PER_CPU; loop++) { handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); - if (!handler) - panic("kmem_cache_alloc() failed"); + if (!handler) { + pr_crit("kmem_cache_alloc() failed"); + WARN_ON(1); + return -EIO; + } handler->processor_id = hp_cpu->processor_id; #ifndef __rtems__ handler->addr = frame_dma; @@ -329,31 +374,39 @@ static void create_per_cpu_handlers(void) handler->frame_ptr = frame_ptr; list_add_tail(&handler->node, &hp_cpu->handlers); } + return 0; } -static void destroy_per_cpu_handlers(void) +static int destroy_per_cpu_handlers(void) { struct list_head *loop, *tmp; - struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); + struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus); spin_lock(&hp_lock); list_del(&hp_cpu->node); spin_unlock(&hp_lock); list_for_each_safe(loop, tmp, &hp_cpu->handlers) { - u32 flags; + u32 flags = 0; struct hp_handler *handler = list_entry(loop, struct hp_handler, node); - if (qman_retire_fq(&handler->rx, &flags)) - panic("qman_retire_fq(rx) failed"); - BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); - if (qman_oos_fq(&handler->rx)) - panic("qman_oos_fq(rx) failed"); - qman_destroy_fq(&handler->rx, 0); - qman_destroy_fq(&handler->tx, 0); + if (qman_retire_fq(&handler->rx, &flags) || + (flags & QMAN_FQ_STATE_BLOCKOOS)) { + pr_crit("qman_retire_fq(rx) failed, flags: %x", flags); + WARN_ON(1); + return -EIO; + } + if (qman_oos_fq(&handler->rx)) { + pr_crit("qman_oos_fq(rx) failed"); + WARN_ON(1); + return -EIO; + } + qman_destroy_fq(&handler->rx); + qman_destroy_fq(&handler->tx); qman_release_fqid(handler->fqid_rx); list_del(&handler->node); kmem_cache_free(hp_handler_slab, handler); } + return 0; } static inline u8 num_cachelines(u32 offset) @@ -369,36 +422,59 @@ static inline u8 num_cachelines(u32 offset) #define STASH_CTX_CL \ 
num_cachelines(offsetof(struct hp_handler, fqid_rx)) -static void init_handler(void *__handler) +static int init_handler(void *h) { struct qm_mcc_initfq opts; - struct hp_handler *handler = __handler; + struct hp_handler *handler = h; + int err; - BUG_ON(handler->processor_id != smp_processor_id()); + if (handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } /* Set up rx */ memset(&handler->rx, 0, sizeof(handler->rx)); if (handler == special_handler) handler->rx.cb.dqrr = special_dqrr; else handler->rx.cb.dqrr = normal_dqrr; - if (qman_create_fq(handler->fqid_rx, 0, &handler->rx)) - panic("qman_create_fq(rx) failed"); + err = qman_create_fq(handler->fqid_rx, 0, &handler->rx); + if (err) { + pr_crit("qman_create_fq(rx) failed"); + goto failed; + } memset(&opts, 0, sizeof(opts)); - opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; - opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; - opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL; - opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL; - if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | - QMAN_INITFQ_FLAG_LOCAL, &opts)) - panic("qman_init_fq(rx) failed"); + opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | + QM_INITFQ_WE_CONTEXTA); + opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING); + qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); + err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | + QMAN_INITFQ_FLAG_LOCAL, &opts); + if (err) { + pr_crit("qman_init_fq(rx) failed"); + goto failed; + } /* Set up tx */ memset(&handler->tx, 0, sizeof(handler->tx)); - if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, - &handler->tx)) - panic("qman_create_fq(tx) failed"); + err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, + &handler->tx); + if (err) { + pr_crit("qman_create_fq(tx) failed"); + goto failed; + } + + return 0; +failed: + return err; } -static void init_phase2(void) +static void init_handler_cb(void *h) +{ + if (init_handler(h)) + WARN_ON(1); +} + +static int init_phase2(void) { int loop; u32 fqid = 0; @@ -408,7 +484,7 @@ static void init_phase2(void) for (loop = 0; loop < HP_PER_CPU; loop++) { list_for_each_entry(hp_cpu, &hp_cpu_list, node) { - int ret; + int err; if (!loop) hp_cpu->iterator = list_first_entry( @@ -421,9 +497,11 @@ static void init_phase2(void) /* Rx FQID is the previous handler's Tx FQID */ hp_cpu->iterator->fqid_rx = fqid; /* Allocate new FQID for Tx */ - ret = qman_alloc_fqid(&fqid); - if (ret) - panic("qman_alloc_fqid() failed"); + err = qman_alloc_fqid(&fqid); + if (err) { + pr_crit("qman_alloc_fqid() failed"); + return err; + } hp_cpu->iterator->fqid_tx = fqid; /* Rx mixer is the previous handler's Tx mixer */ hp_cpu->iterator->rx_mixer = lfsr; @@ -435,16 +513,18 @@ static void init_phase2(void) /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); - BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef)); + if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef) + return 1; handler->fqid_rx = fqid; handler->rx_mixer = lfsr; /* and tag it as our "special" handler */ special_handler = handler; + return 0; } -static void init_phase3(void) +static int init_phase3(void) { - int loop; + int loop, err; struct hp_cpu *hp_cpu; for (loop = 0; loop < HP_PER_CPU; loop++) { @@ -458,45 +538,69 @@ static void init_phase3(void) hp_cpu->iterator->node.next, struct hp_handler, node); 
preempt_disable(); - if (hp_cpu->processor_id == smp_processor_id()) - init_handler(hp_cpu->iterator); - else + if (hp_cpu->processor_id == smp_processor_id()) { + err = init_handler(hp_cpu->iterator); + if (err) + return err; + } else { smp_call_function_single(hp_cpu->processor_id, - init_handler, hp_cpu->iterator, 1); + init_handler_cb, hp_cpu->iterator, 1); + } preempt_enable(); } } + return 0; } -static void send_first_frame(void *ignore) +static int send_first_frame(void *ignore) { u32 *p = special_handler->frame_ptr; u32 lfsr = HP_FIRST_WORD; - int loop; + int loop, err; struct qm_fd fd; - BUG_ON(special_handler->processor_id != smp_processor_id()); + if (special_handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } memset(&fd, 0, sizeof(fd)); qm_fd_addr_set64(&fd, special_handler->addr); - fd.format = qm_fd_contig_big; - fd.length29 = HP_NUM_WORDS * 4; + qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4); for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { - if (*p != lfsr) - panic("corrupt frame data"); + if (*p != lfsr) { + err = -EIO; + pr_crit("corrupt frame data"); + goto failed; + } *p ^= special_handler->tx_mixer; lfsr = do_lfsr(lfsr); } pr_info("Sending first frame\n"); - if (qman_enqueue(&special_handler->tx, &fd, 0)) - panic("qman_enqueue() failed"); + err = qman_enqueue(&special_handler->tx, &fd); + if (err) { + pr_crit("qman_enqueue() failed"); + goto failed; + } + + return 0; +failed: + return err; } -void qman_test_stash(void) +static void send_first_frame_cb(void *ignore) { + if (send_first_frame(NULL)) + WARN_ON(1); +} + +int qman_test_stash(void) +{ + int err; + #ifndef __rtems__ if (cpumask_weight(cpu_online_mask) < 2) { pr_info("%s(): skip - only 1 CPU\n", __func__); - return; + return 0; } #endif /* __rtems__ */ @@ -507,34 +611,57 @@ void qman_test_stash(void) hp_handler_slab = kmem_cache_create("hp_handler_slab", sizeof(struct hp_handler), L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL); - if (!hp_handler_slab) - panic("kmem_cache_create() failed"); + if (!hp_handler_slab) { + err = -EIO; + pr_crit("kmem_cache_create() failed"); + goto failed; + } - allocate_frame_data(); + err = allocate_frame_data(); + if (err) + goto failed; /* Init phase 1 */ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); - if (on_all_cpus(create_per_cpu_handlers)) - panic("on_each_cpu() failed"); + if (on_all_cpus(create_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } pr_info("Number of cpus: %d, total of %d handlers\n", hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); - init_phase2(); + err = init_phase2(); + if (err) + goto failed; - init_phase3(); + err = init_phase3(); + if (err) + goto failed; preempt_disable(); - if (special_handler->processor_id == smp_processor_id()) - send_first_frame(NULL); - else + if (special_handler->processor_id == smp_processor_id()) { + err = send_first_frame(NULL); + if (err) + goto failed; + } else { smp_call_function_single(special_handler->processor_id, - send_first_frame, NULL, 1); + send_first_frame_cb, NULL, 1); + } preempt_enable(); wait_event(queue, loop_counter == HP_LOOPS); deallocate_frame_data(); - if (on_all_cpus(destroy_per_cpu_handlers)) - panic("on_each_cpu() failed"); + if (on_all_cpus(destroy_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } kmem_cache_destroy(hp_handler_slab); pr_info("%s(): Finished\n", __func__); + + return 0; +failed: + WARN_ON(1); + return err; } diff --git a/linux/drivers/soc/fsl/qbman/qman_utils.c 
b/linux/drivers/soc/fsl/qbman/qman_utils.c deleted file mode 100644 index 5b85f037..00000000 --- a/linux/drivers/soc/fsl/qbman/qman_utils.c +++ /dev/null @@ -1,309 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "qman_priv.h" - -/* --- FQID Pool --- */ - -struct qman_fqid_pool { - /* Base and size of the FQID range */ - u32 fqid_base; - u32 total; - /* Number of FQIDs currently "allocated" */ - u32 used; - /* Allocation optimisation. When 'used<total', it is the index of an - * available FQID. Otherwise there are no available FQIDs, and this - * will be set when the next deallocation occurs. */ - u32 next; - /* A bit-field representation of the FQID range. */ - unsigned long *bits; -}; - -#define QLONG_BYTES sizeof(unsigned long) -#define QLONG_BITS (QLONG_BYTES * 8) -/* Number of 'longs' required for the given number of bits */ -#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS) -/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */ -#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES) -/* And in bits */ -#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS) - -struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num) -{ - struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); - unsigned int i; - - BUG_ON(!num); - if (!pool) - return NULL; - pool->fqid_base = fqid_start; - pool->total = num; - pool->used = 0; - pool->next = 0; - pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL); - if (!pool->bits) { - kfree(pool); - return NULL; - } - /* If num is not an even multiple of QLONG_BITS (or even 8, for - * byte-oriented searching) then we fill the trailing bits with 1, to - * make them look allocated (permanently). 
*/ - for (i = num + 1; i < QNUM_BITS(num); i++) - set_bit(i, pool->bits); - return pool; -} -EXPORT_SYMBOL(qman_fqid_pool_create); - -int qman_fqid_pool_destroy(struct qman_fqid_pool *pool) -{ - int ret = pool->used; - - kfree(pool->bits); - kfree(pool); - return ret; -} -EXPORT_SYMBOL(qman_fqid_pool_destroy); - -int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid) -{ - int ret; - - if (pool->used == pool->total) - return -ENOMEM; - *fqid = pool->fqid_base + pool->next; - ret = test_and_set_bit(pool->next, pool->bits); - BUG_ON(ret); - if (++pool->used == pool->total) - return 0; - pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next); - if (pool->next >= pool->total) - pool->next = find_first_zero_bit(pool->bits, pool->total); - BUG_ON(pool->next >= pool->total); - return 0; -} -EXPORT_SYMBOL(qman_fqid_pool_alloc); - -void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid) -{ - int ret; - - fqid -= pool->fqid_base; - ret = test_and_clear_bit(fqid, pool->bits); - BUG_ON(!ret); - if (pool->used-- == pool->total) - pool->next = fqid; -} -EXPORT_SYMBOL(qman_fqid_pool_free); - -u32 qman_fqid_pool_used(struct qman_fqid_pool *pool) -{ - return pool->used; -} -EXPORT_SYMBOL(qman_fqid_pool_used); - -static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */ -static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */ -static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */ - -/* FQID allocator front-end */ - -int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial) -{ - return dpaa_resource_new(&fqalloc, result, count, align, partial); -} -EXPORT_SYMBOL(qman_alloc_fqid_range); - -static int fq_cleanup(u32 fqid) -{ - return qman_shutdown_fq(fqid) == 0; -} - -void qman_release_fqid_range(u32 fqid, u32 count) -{ - u32 total_invalid = dpaa_resource_release(&fqalloc, - fqid, count, fq_cleanup); - - if (total_invalid) - pr_err("FQID range [%d..%d] (%d) had %d leaks\n", - fqid, fqid + count - 1, count, total_invalid); -} -EXPORT_SYMBOL(qman_release_fqid_range); - -int qman_reserve_fqid_range(u32 fqid, u32 count) -{ - return dpaa_resource_reserve(&fqalloc, fqid, count); -} -EXPORT_SYMBOL(qman_reserve_fqid_range); - -void qman_seed_fqid_range(u32 fqid, u32 count) -{ - dpaa_resource_seed(&fqalloc, fqid, count); -} -EXPORT_SYMBOL(qman_seed_fqid_range); - -/* Pool-channel allocator front-end */ - -int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial) -{ - return dpaa_resource_new(&qpalloc, result, count, align, partial); -} -EXPORT_SYMBOL(qman_alloc_pool_range); - -static int qpool_cleanup(u32 qp) -{ - /* We query all FQDs starting from - * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs - * whose destination channel is the pool-channel being released. 
- * When a non-OOS FQD is found we attempt to clean it up */ - struct qman_fq fq = { - .fqid = 1 - }; - int err; - - do { - struct qm_mcr_queryfq_np np; - - err = qman_query_fq_np(&fq, &np); - if (err) - /* FQID range exceeded, found no problems */ - return 1; - if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { - struct qm_fqd fqd; - - err = qman_query_fq(&fq, &fqd); - BUG_ON(err); - if (fqd.dest.channel == qp) { - /* The channel is the FQ's target, clean it */ - if (qman_shutdown_fq(fq.fqid) != 0) - /* Couldn't shut down the FQ - so the pool must be leaked */ - return 0; - } - } - /* Move to the next FQID */ - fq.fqid++; - } while (1); -} - -void qman_release_pool_range(u32 qp, u32 count) -{ - u32 total_invalid = dpaa_resource_release(&qpalloc, - qp, count, qpool_cleanup); - - if (total_invalid) { - /* Pool channels are almost always used individually */ - if (count == 1) - pr_err("Pool channel 0x%x had %d leaks\n", - qp, total_invalid); - else - pr_err("Pool channels [%d..%d] (%d) had %d leaks\n", - qp, qp + count - 1, count, total_invalid); - } -} -EXPORT_SYMBOL(qman_release_pool_range); - -void qman_seed_pool_range(u32 poolid, u32 count) -{ - dpaa_resource_seed(&qpalloc, poolid, count); - -} -EXPORT_SYMBOL(qman_seed_pool_range); - -int qman_reserve_pool_range(u32 poolid, u32 count) -{ - return dpaa_resource_reserve(&qpalloc, poolid, count); -} -EXPORT_SYMBOL(qman_reserve_pool_range); - - -/* CGR ID allocator front-end */ - -int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial) -{ - return dpaa_resource_new(&cgralloc, result, count, align, partial); -} -EXPORT_SYMBOL(qman_alloc_cgrid_range); - -static int cqr_cleanup(u32 cgrid) -{ - /* We query all FQDs starting from - * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs - * whose CGR is the CGR being released. - */ - struct qman_fq fq = { - .fqid = 1 - }; - int err; - - do { - struct qm_mcr_queryfq_np np; - - err = qman_query_fq_np(&fq, &np); - if (err) - /* FQID range exceeded, found no problems */ - return 1; - if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { - struct qm_fqd fqd; - - err = qman_query_fq(&fq, &fqd); - BUG_ON(err); - if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && - (fqd.cgid == cgrid)) { - pr_err("CRGID 0x%x is being used by FQID 0x%x," - " CGR will be leaked\n", - cgrid, fq.fqid); - return 1; - } - } - /* Move to the next FQID */ - fq.fqid++; - } while (1); -} - -void qman_release_cgrid_range(u32 cgrid, u32 count) -{ - u32 total_invalid = dpaa_resource_release(&cgralloc, - cgrid, count, cqr_cleanup); - if (total_invalid) - pr_err("CGRID range [%d..%d] (%d) had %d leaks\n", - cgrid, cgrid + count - 1, count, total_invalid); -} -EXPORT_SYMBOL(qman_release_cgrid_range); - -void qman_seed_cgrid_range(u32 cgrid, u32 count) -{ - dpaa_resource_seed(&cgralloc, cgrid, count); - -} -EXPORT_SYMBOL(qman_seed_cgrid_range);
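The standalone C sketches below illustrate a few of the mechanisms this patch introduces; they are illustrative assumptions layered on the code above, not part of the commit itself.

The kernel-doc for qman_volatile_dequeue(), moved into qman_priv.h above, explains how the WAIT and FINISH flags interact with the portal's VDQCR register; qman_test_api.c exercises exactly that combination. A minimal hypothetical caller (drain_parked_fq is invented for the example; it assumes 'fq' was already set up with qman_create_fq()/qman_init_fq() as in the test):

/*
 * Drain a parked FQ via a volatile dequeue: WAIT blocks until the
 * portal's VDQCR is free, FINISH makes the call return only after the
 * callback for the last resulting DQRR entry has run.
 */
static int drain_parked_fq(struct qman_fq *fq)
{
	return qman_volatile_dequeue(fq,
			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
			QM_VDQCR_NUMFRAMES_TILLEMPTY);
}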
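The qm_mcr_np_get() macro added to qman_priv.h token-pastes a field name into its mask from enum qm_mcr_queryfq_np_masks, so the reserved upper bits of each "Query FQ Non-Programmable Fields" word are stripped consistently. A runnable userspace demonstration of the pattern (struct np_stub and its initialiser are invented stand-ins, not the real qm_mcr_queryfq_np):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define qm_mcr_fqd_link_mask (BIT(24) - 1)	/* mirrors the enum entry */

struct np_stub {
	uint32_t fqd_link;	/* 24-bit field; bits 24-31 are reserved */
};

/* same token-pasting accessor as the patch */
#define qm_mcr_np_get(np, field) ((np)->field & qm_mcr_##field##_mask)

int main(void)
{
	struct np_stub np = { .fqd_link = 0xff123456 };

	/* reserved bits stripped: prints "fqd_link = 0x123456" */
	printf("fqd_link = 0x%06x\n", (unsigned)qm_mcr_np_get(&np, fqd_link));
	return 0;
}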
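CGR_WORD() and CGR_BIT() index the 256-bit congestion-group state as eight 32-bit words in MSB-first bit order: CGR n lives in word n/32 at bit 31 - (n % 32). A runnable check of the indexing that qman_cgrs_get() relies on (the "set" step is written out by hand purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define CGR_BITS_PER_WORD 5
#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)			/* x / 32 */
#define CGR_BIT(x) (UINT32_C(0x80000000) >> ((x) & 0x1f))	/* MSB-first */

int main(void)
{
	uint32_t state[8] = { 0 };
	unsigned int cgr = 33;	/* word 1, second-most-significant bit */

	state[CGR_WORD(cgr)] |= CGR_BIT(cgr);
	printf("word %u = 0x%08x\n", CGR_WORD(cgr),
	       (unsigned)state[CGR_WORD(cgr)]);	/* word 1 = 0x40000000 */
	/* qman_cgrs_get() returns the raw masked value; test it as a bool */
	printf("get(%u) -> %d\n", cgr,
	       !!(state[CGR_WORD(cgr)] & CGR_BIT(cgr)));
	return 0;
}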
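qm_mcr_querycgr_i_get64() and qm_mcr_querycgr_a_get64() splice the 40-bit instantaneous and average byte counts back together from an 8-bit high part and a big-endian 32-bit low part. The arithmetic, checked standalone (the be32_to_cpu() step is omitted; the low word is assumed already in CPU order):

#include <stdint.h>
#include <stdio.h>

/* reassemble a 40-bit count from its split hi/lo fields */
static uint64_t get40(uint8_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* 0x12 : 0x34567890 -> 0x1234567890 */
	printf("0x%llx\n", (unsigned long long)get40(0x12, 0x34567890));
	return 0;
}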
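qman_test_stash.c fills and verifies its frame payloads with do_lfsr(), a Galois LFSR that shifts right and XORs in the tap mask 0xd0000001 whenever the bit shifted out was 1, giving a cheap, reproducible pseudo-random word sequence. A standalone copy of the generator (the seed below is arbitrary; the test seeds it with HP_FIRST_WORD):

#include <stdint.h>
#include <stdio.h>

static inline uint32_t do_lfsr(uint32_t prev)
{
	/* -(prev & 1) is all-ones when bit 0 was set, selecting the taps */
	return (prev >> 1) ^ (-(prev & UINT32_C(1)) & UINT32_C(0xd0000001));
}

int main(void)
{
	uint32_t v = 0xabbaf00d;	/* arbitrary nonzero seed */
	int i;

	for (i = 0; i < 4; i++) {
		printf("0x%08x\n", (unsigned)v);
		v = do_lfsr(v);
	}
	return 0;
}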