Diffstat (limited to 'linux/drivers/soc/fsl/qbman/qman_priv.h')
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_priv.h  |  360
1 file changed, 219 insertions, 141 deletions
diff --git a/linux/drivers/soc/fsl/qbman/qman_priv.h b/linux/drivers/soc/fsl/qbman/qman_priv.h
index f04bd476..c03f1b5b 100644
--- a/linux/drivers/soc/fsl/qbman/qman_priv.h
+++ b/linux/drivers/soc/fsl/qbman/qman_priv.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,93 +34,213 @@
#include <soc/fsl/qman.h>
#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
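Usage sketch (not part of the patch; the response struct here is hand-filled, where a real one comes back from the management-command interface). The channel ID occupies the upper 13 bits of channel_wq, so the accessor just drops the 3 work-queue bits:

	struct qm_mcr_querywq wq = { .channel_wq = (0x21 << 3) | 2 };
	u16 channel = qm_mcr_querywq_get_chan(&wq);	/* 0x21; wq bits dropped */
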
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
+
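Sketch of the 40-bit reassembly (not part of the patch; the response is hand-filled for illustration). The high byte supplies bits 39:32 and the big-endian low word bits 31:0:

	struct qm_mcr_querycgr q = {
		.i_bcnt_hi = 0x01,
		.i_bcnt_lo = cpu_to_be32(0x23456789),
	};
	u64 bytes = qm_mcr_querycgr_i_get64(&q);	/* 0x0123456789 */
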
+/* "Query FQ Non-Programmable Fields" */
+
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24)-1,
+ qm_mcr_odp_seq_mask = BIT(14)-1,
+ qm_mcr_orp_nesn_mask = BIT(14)-1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+ qm_mcr_is_mask = BIT(1)-1,
+ qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
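Usage sketch (not part of the patch; assume np was filled in by a QUERYFQ_NP management command). qm_mcr_np_get() pairs each field with its mask from the enum above, stripping the reserved upper bits noted in the struct comments:

	struct qm_mcr_queryfq_np np;
	u32 link = qm_mcr_np_get(&np, fqd_link);	/* 24-bit link, _res2 masked */
	u32 frames = qm_mcr_np_get(&np, frm_cnt);	/* 24-bit frame count */
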
/* Congestion Groups */
-/* This wrapper represents a bit-array for the state of the 256 QMan congestion
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
 * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
* those that don't concern us. We harness the structure and accessor details
* already used in the management command to query congestion groups.
*/
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+
struct qman_cgrs {
struct __qm_mcr_querycongestion q;
};
+
static inline void qman_cgrs_init(struct qman_cgrs *c)
{
memset(c, 0, sizeof(*c));
}
+
static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
memset(c, 0xff, sizeof(*c));
}
-static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
-{
- return QM_MCR_QUERYCONGESTION(&c->q, num);
-}
-static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
-{
- c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
-}
-static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
-{
- c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
-}
-static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
- while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
- ;
- return num;
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}
+
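Worked example of the indexing (not part of the patch; the state words are poked directly here, where the driver normally fills them from a QUERYCONGESTION response). Bits are numbered MSB-first within each 32-bit word, so CGR 37 sits in word 37 >> 5 = 1 under mask BIT(31) >> (37 & 0x1f) = BIT(26):

	struct qman_cgrs cgrs;

	qman_cgrs_init(&cgrs);				/* all groups clear */
	cgrs.q.state[CGR_WORD(37)] |= CGR_BIT(37);	/* word 1, BIT(26) */
	WARN_ON(!qman_cgrs_get(&cgrs, 37));		/* now reads back nonzero */
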
static inline void qman_cgrs_cp(struct qman_cgrs *dest,
const struct qman_cgrs *src)
{
*dest = *src;
}
+
static inline void qman_cgrs_and(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
- u32 *_d = dest->q.__state;
- const u32 *_a = a->q.__state;
- const u32 *_b = b->q.__state;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
for (ret = 0; ret < 8; ret++)
- *(_d++) = *(_a++) & *(_b++);
+ *_d++ = *_a++ & *_b++;
}
+
static inline void qman_cgrs_xor(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
- u32 *_d = dest->q.__state;
- const u32 *_a = a->q.__state;
- const u32 *_b = b->q.__state;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
for (ret = 0; ret < 8; ret++)
- *(_d++) = *(_a++) ^ *(_b++);
+ *_d++ = *_a++ ^ *_b++;
}
-/* used by CCSR and portal interrupt code */
-enum qm_isr_reg {
- qm_isr_status = 0,
- qm_isr_enable = 1,
- qm_isr_disable = 2,
- qm_isr_inhibit = 3
-};
+void qman_init_cgr_all(void);
struct qm_portal_config {
- /* Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited. */
- __iomem void *addr_virt[2];
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
#ifndef __rtems__
- struct resource addr_phys[2];
- struct device dev;
+ struct device *dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
struct list_head list;
#endif /* __rtems__ */
/* User-visible portal configuration settings */
- struct qman_portal_config public_cfg;
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
};
/* Revision info (for errata and feature handling) */
@@ -131,57 +251,70 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-extern u16 qman_portal_max;
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
-#ifdef CONFIG_FSL_QMAN_CONFIG
-/* Hooks from qman_driver.c to qman_config.c */
-int qman_init_ccsr(struct device_node *node);
+int qman_wq_alloc(void);
void qman_liodn_fixup(u16 channel);
-int qman_set_sdest(u16 channel, unsigned int cpu_idx);
-size_t qman_fqd_size(void);
-#endif
-
-int qm_set_wpm(int wpm);
-int qm_get_wpm(int *wpm);
-
-/* Hooks from qman_driver.c in to qman_high.c */
-struct qman_portal *qman_create_portal(
- struct qman_portal *portal,
- const struct qm_portal_config *config,
- const struct qman_cgrs *cgrs);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
-struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
- int cpu);
const struct qm_portal_config *qman_destroy_affine_portal(void);
-void qman_destroy_portal(struct qman_portal *qm);
-
-/* This CGR feature is supported by h/w and required by unit-tests and the
- * debugfs hooks, so is implemented in the driver. However it allows an explicit
- * corruption of h/w fields by s/w that are usually incorruptible (because the
- * counters are usually maintained entirely within h/w). As such, we declare
- * this API internally. */
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
- struct qm_mcr_cgrtestwrite *result);
-
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-/* If the fq object pointer is greater than the size of context_b field,
- * than a lookup table is required. */
-int qman_setup_fq_lookup_table(size_t num_entries);
-#endif
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-/*************************************************/
-/* QMan s/w corenet portal, low-level i/face */
-/*************************************************/
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
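Usage sketch (not part of the patch; fq is assumed to be an already-initialised frame queue object). Request up to 4 frames, sleeping if the VDQCR is busy and returning only once the command has completed:

	int err = qman_volatile_dequeue(fq,
					QMAN_VOLATILE_FLAG_WAIT |
					QMAN_VOLATILE_FLAG_FINISH,
					QM_VDQCR_PRECEDENCE_VDQCR |
					QM_VDQCR_NUMFRAMES_SET(4));
	if (err)
		pr_err("VDQCR busy or failed: %d\n", err);
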
-/* Note: most functions are only used by the high-level interface, so are
- * inlined from qman.h. The stuff below is for use by other parts of the
- * driver. */
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
-/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
* dequeue TYPE. Choose TOKEN (8-bit).
* If SOURCE == CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
@@ -216,42 +349,8 @@ int qman_setup_fq_lookup_table(size_t num_entries);
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
-/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
- * If MODE==SCHEDULED
- * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
- * If CHANNELS,
- * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
- * You can choose DEDICATED_PRECEDENCE if the portal channel should have
- * priority.
- * If SPECIFICWQ,
- * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
- * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
- * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
- * same value.
- * If MODE==UNSCHEDULED
- * Choose FQID().
- */
-#define QM_PDQCR_MODE_SCHEDULED 0x0
-#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
-#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
-#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
-#define QM_PDQCR_COUNT_EXACT1 0x0
-#define QM_PDQCR_COUNT_UPTO3 0x20000000
-#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
-#define QM_PDQCR_TYPE_MASK 0x03000000
-#define QM_PDQCR_TYPE_NULL 0x0
-#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
-#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
-#define QM_PDQCR_TYPE_ACTIVE 0x03000000
-#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
-#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
-#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
-#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
-#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
-#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
-#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
-
-/* Used by all portal interrupt registers except 'inhibit'
+/*
+ * Used by all portal interrupt registers except 'inhibit'
* Channels with frame availability
*/
#define QM_PIRQ_DQAVAIL 0x0000ffff
@@ -263,31 +362,10 @@ int qman_setup_fq_lookup_table(size_t num_entries);
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
-/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
- * the disable register" rather than "disable the ability to write". */
-#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
-#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
-#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
-#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
-#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
-#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
-/* TODO: unfortunate name-clash here, reword? */
-#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
-#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
-
-#ifdef CONFIG_FSL_QMAN_CONFIG
-int qman_have_ccsr(void);
-#else
-#define qman_have_ccsr 0
-#endif
-
-#ifndef __rtems__
-__init int qman_init(void);
-#else /* __rtems__ */
-int qman_init(struct device_node *dn);
-#endif /* __rtems__ */
-__init int qman_resource_init(void);
-
-extern void *affine_portals[NR_CPUS];
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
+#ifdef __rtems__
+void qman_sysinit_portals(void);
+#endif /* __rtems__ */