path: root/linux/drivers/soc/fsl/qbman/qman_test_stash.c
Diffstat (limited to 'linux/drivers/soc/fsl/qbman/qman_test_stash.c')
 linux/drivers/soc/fsl/qbman/qman_test_stash.c | 349 +++++++++++++-------
 1 file changed, 238 insertions(+), 111 deletions(-)
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_stash.c b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
index a3ca6603..9c3a135f 100644
--- a/linux/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,14 +35,15 @@
#include "qman_test.h"
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#ifdef __rtems__
#include <rtems/malloc.h>
#undef msleep
#define msleep(x) usleep((x) * 1000)
-#define L1_CACHE_BYTES 64
#endif /* __rtems__ */
-/* Algorithm:
+/*
+ * Algorithm:
*
* Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
* an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
@@ -86,23 +87,28 @@
* initialisation targets the correct cpu.
*/
-/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
- * the fn from irq context, which is too restrictive). */
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
struct bstrap {
- void (*fn)(void);
+ int (*fn)(void);
atomic_t started;
};
-static int bstrap_fn(void *__bstrap)
+static int bstrap_fn(void *bs)
{
- struct bstrap *bstrap = __bstrap;
+ struct bstrap *bstrap = bs;
+ int err;
atomic_inc(&bstrap->started);
- bstrap->fn();
+ err = bstrap->fn();
+ if (err)
+ return err;
while (!kthread_should_stop())
- msleep(1);
+ msleep(20);
return 0;
}
-static int on_all_cpus(void (*fn)(void))
+static int on_all_cpus(int (*fn)(void))
{
int cpu;
@@ -127,12 +133,14 @@ static int on_all_cpus(void (*fn)(void))
return -ENOMEM;
kthread_bind(k, cpu);
wake_up_process(k);
- /* If we call kthread_stop() before the "wake up" has had an
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
* running the function. So poll until it's started before
- * requesting it to stop. */
+ * requesting it to stop.
+ */
while (!atomic_read(&bstrap.started))
- msleep(10);
+ msleep(20);
ret = kthread_stop(k);
if (ret)
return ret;
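
A condensed sketch of the pattern the patched on_all_cpus() settles on: spawn a
kthread bound to each online CPU, run fn() there, and poll the 'started' flag
before kthread_stop() so the thread cannot exit with -EINTR before ever running
fn(). This reuses struct bstrap and bstrap_fn() from above and assumes
<linux/kthread.h> and <linux/delay.h>; error handling is abbreviated.

static int on_all_cpus_sketch(int (*fn)(void))
{
	int cpu;

	for_each_cpu(cpu, cpu_online_mask) {
		struct bstrap bstrap = {
			.fn = fn,
			.started = ATOMIC_INIT(0)
		};
		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
						       "hotpotato%d", cpu);
		int ret;

		if (IS_ERR(k))
			return -ENOMEM;
		kthread_bind(k, cpu);
		wake_up_process(k);
		/* poll until bstrap_fn() has begun before asking it to stop */
		while (!atomic_read(&bstrap.started))
			msleep(20);
		ret = kthread_stop(k);
		if (ret)
			return ret;
	}
	return 0;
}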
@@ -172,8 +180,10 @@ struct hp_cpu {
struct list_head handlers;
/* list node for linking us into 'hp_cpu_list' */
struct list_head node;
- /* when repeatedly scanning 'hp_list', each time linking the n'th
- * handlers together, this is used as per-cpu iterator state */
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
struct hp_handler *iterator;
};
@@ -182,7 +192,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -202,6 +212,9 @@ static u32 *frame_ptr;
static dma_addr_t frame_dma;
#endif /* __rtems__ */
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -217,22 +230,28 @@ static inline u32 do_lfsr(u32 prev)
return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
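
do_lfsr() above is a 32-bit Galois LFSR: shift right by one and, when the bit
shifted out was set, XOR in the tap mask 0xd0000001. A standalone userspace
illustration of the same step function (the seed below is a stand-in; the test
itself starts the sequence from HP_FIRST_WORD, defined earlier in the file):

#include <stdint.h>
#include <stdio.h>

static uint32_t lfsr_step(uint32_t prev)
{
	/*
	 * -(prev & 1u) is all-ones exactly when the low bit is set, so the
	 * taps 0xd0000001 are XORed in only on those steps.
	 */
	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}

int main(void)
{
	uint32_t v = 0x12345678u;	/* stand-in seed, not HP_FIRST_WORD */
	int i;

	for (i = 0; i < 8; i++) {
		printf("%08x\n", v);
		v = lfsr_step(v);
	}
	return 0;
}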
-static void allocate_frame_data(void)
+static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
+
#ifndef __rtems__
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
+ return -EIO;
+ }
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+#else /* __rtems__ */
+ pcfg = qman_get_qm_portal_config(qman_get_affine_portal(0));
+#endif /* __rtems__ */
- if (!pdev)
- panic("platform_device_alloc() failed");
- if (platform_device_add(pdev))
- panic("platform_device_add() failed");
+#ifndef __rtems__
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
- panic("kmalloc() failed");
- frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
- ~(unsigned long)63);
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
#else /* __rtems__ */
frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0);
if (frame_ptr == NULL)
@@ -242,73 +261,96 @@ static void allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
+
#ifndef __rtems__
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
- DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
#endif /* __rtems__ */
+ return 0;
}
static void deallocate_frame_data(void)
{
#ifndef __rtems__
- kfree(__frame_ptr);
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
#endif /* __rtems__ */
}
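
The hunks above drop the throwaway platform device: the frame buffer is now
mapped through the portal's own struct device (pcfg->dev), the handle is
validated with dma_mapping_error() before use, and the open-coded 64-byte
round-up becomes PTR_ALIGN(). A minimal sketch of the map/unmap pairing,
assuming <linux/dma-mapping.h>, with dev/buf/len as placeholders:

static int map_frame(struct device *dev, void *buf, size_t len,
		     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle))
		return -EIO;	/* never use an unchecked handle */
	return 0;
}

static void unmap_frame(struct device *dev, dma_addr_t handle, size_t len)
{
	/* must mirror the mapping's size and direction */
	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
}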
-static inline void process_frame_data(struct hp_handler *handler,
- const struct qm_fd *fd)
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
{
u32 *p = handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
int loop;
- if (qm_fd_addr_get64(fd) != handler->addr)
- panic("bad frame address");
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ return -EIO;
+ }
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
*p ^= handler->rx_mixer;
- if (*p != lfsr)
- panic("corrupt frame data");
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
*p ^= handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
+ return 0;
}
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
- process_frame_data(handler, &dqrr->fd);
- if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
- panic("qman_enqueue() failed");
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
process_frame_data(handler, &dqrr->fd);
if (++loop_counter < HP_LOOPS) {
- if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
- panic("qman_enqueue() failed");
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
} else {
pr_info("Received final (%dth) frame\n", loop_counter);
wake_up(&queue);
}
+skip:
return qman_cb_dqrr_consume;
}
-static void create_per_cpu_handlers(void)
+static int create_per_cpu_handlers(void)
{
struct hp_handler *handler;
int loop;
- struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+ struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);
hp_cpu->processor_id = smp_processor_id();
spin_lock(&hp_lock);
@@ -318,8 +360,11 @@ static void create_per_cpu_handlers(void)
INIT_LIST_HEAD(&hp_cpu->handlers);
for (loop = 0; loop < HP_PER_CPU; loop++) {
handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
- if (!handler)
- panic("kmem_cache_alloc() failed");
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
handler->processor_id = hp_cpu->processor_id;
#ifndef __rtems__
handler->addr = frame_dma;
@@ -329,31 +374,39 @@ static void create_per_cpu_handlers(void)
handler->frame_ptr = frame_ptr;
list_add_tail(&handler->node, &hp_cpu->handlers);
}
+ return 0;
}
-static void destroy_per_cpu_handlers(void)
+static int destroy_per_cpu_handlers(void)
{
struct list_head *loop, *tmp;
- struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+ struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);
spin_lock(&hp_lock);
list_del(&hp_cpu->node);
spin_unlock(&hp_lock);
list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
- u32 flags;
+ u32 flags = 0;
struct hp_handler *handler = list_entry(loop, struct hp_handler,
node);
- if (qman_retire_fq(&handler->rx, &flags))
- panic("qman_retire_fq(rx) failed");
- BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
- if (qman_oos_fq(&handler->rx))
- panic("qman_oos_fq(rx) failed");
- qman_destroy_fq(&handler->rx, 0);
- qman_destroy_fq(&handler->tx, 0);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
qman_release_fqid(handler->fqid_rx);
list_del(&handler->node);
kmem_cache_free(hp_handler_slab, handler);
}
+ return 0;
}
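
The loop body above follows the usual frame-queue teardown order, and each
step can now report failure instead of panicking. Extracted as a sketch
(helper name illustrative):

static int teardown_fq_pair(struct hp_handler *handler)
{
	u32 flags = 0;

	/* 1. retire; BLOCKOOS means the FQ cannot go out-of-service yet */
	if (qman_retire_fq(&handler->rx, &flags) ||
	    (flags & QMAN_FQ_STATE_BLOCKOOS))
		return -EIO;
	/* 2. take the retired FQ out-of-service */
	if (qman_oos_fq(&handler->rx))
		return -EIO;
	/* 3. destroy the driver objects, then return the FQID */
	qman_destroy_fq(&handler->rx);
	qman_destroy_fq(&handler->tx);
	qman_release_fqid(handler->fqid_rx);
	return 0;
}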
static inline u8 num_cachelines(u32 offset)
@@ -369,36 +422,59 @@ static inline u8 num_cachelines(u32 offset)
#define STASH_CTX_CL \
num_cachelines(offsetof(struct hp_handler, fqid_rx))
-static void init_handler(void *__handler)
+static int init_handler(void *h)
{
struct qm_mcc_initfq opts;
- struct hp_handler *handler = __handler;
+ struct hp_handler *handler = h;
+ int err;
- BUG_ON(handler->processor_id != smp_processor_id());
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
/* Set up rx */
memset(&handler->rx, 0, sizeof(handler->rx));
if (handler == special_handler)
handler->rx.cb.dqrr = special_dqrr;
else
handler->rx.cb.dqrr = normal_dqrr;
- if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
- panic("qman_create_fq(rx) failed");
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
- opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
- opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
- if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
- QMAN_INITFQ_FLAG_LOCAL, &opts))
- panic("qman_init_fq(rx) failed");
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
/* Set up tx */
memset(&handler->tx, 0, sizeof(handler->tx));
- if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
- &handler->tx))
- panic("qman_create_fq(tx) failed");
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
}
-static void init_phase2(void)
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
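
init_handler() above shows the reworked stashing setup: the qm_mcc_initfq
word fields are now stored in the hardware's big-endian layout (hence the
cpu_to_be16() calls), and the data/context stash cacheline counts go through
the qm_fqd_set_stashing() helper rather than direct bitfield writes. Isolated
as a sketch (helper name illustrative):

static void fqd_enable_stashing(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(*opts));
	/* FQD fields are hardware (big-endian) layout: byteswap explicitly */
	opts->we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
				    QM_INITFQ_WE_CONTEXTA);
	opts->fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
	/* 0 annotation cachelines; data/context counts as in the test */
	qm_fqd_set_stashing(&opts->fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
}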
+static int init_phase2(void)
{
int loop;
u32 fqid = 0;
@@ -408,7 +484,7 @@ static void init_phase2(void)
for (loop = 0; loop < HP_PER_CPU; loop++) {
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
- int ret;
+ int err;
if (!loop)
hp_cpu->iterator = list_first_entry(
@@ -421,9 +497,11 @@ static void init_phase2(void)
/* Rx FQID is the previous handler's Tx FQID */
hp_cpu->iterator->fqid_rx = fqid;
/* Allocate new FQID for Tx */
- ret = qman_alloc_fqid(&fqid);
- if (ret)
- panic("qman_alloc_fqid() failed");
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
hp_cpu->iterator->fqid_tx = fqid;
/* Rx mixer is the previous handler's Tx mixer */
hp_cpu->iterator->rx_mixer = lfsr;
@@ -435,16 +513,18 @@ static void init_phase2(void)
/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
- BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
handler->fqid_rx = fqid;
handler->rx_mixer = lfsr;
/* and tag it as our "special" handler */
special_handler = handler;
+ return 0;
}
-static void init_phase3(void)
+static int init_phase3(void)
{
- int loop;
+ int loop, err;
struct hp_cpu *hp_cpu;
for (loop = 0; loop < HP_PER_CPU; loop++) {
@@ -458,45 +538,69 @@ static void init_phase3(void)
hp_cpu->iterator->node.next,
struct hp_handler, node);
preempt_disable();
- if (hp_cpu->processor_id == smp_processor_id())
- init_handler(hp_cpu->iterator);
- else
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
smp_call_function_single(hp_cpu->processor_id,
- init_handler, hp_cpu->iterator, 1);
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
preempt_enable();
}
}
+ return 0;
}
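
init_handler_cb() exists because smp_call_function_single() only accepts a
void (*)(void *), so a remote init_handler() failure can do no better than
WARN here. A hypothetical variant that preserves the error code would pass it
back through the argument block (struct init_req and the helper names are
illustrative, not part of the patch):

struct init_req {
	struct hp_handler *handler;
	int err;
};

static void init_handler_req(void *arg)
{
	struct init_req *req = arg;

	req->err = init_handler(req->handler);
}

static int init_on_cpu(struct hp_cpu *hp_cpu)
{
	struct init_req req = { .handler = hp_cpu->iterator, .err = 0 };

	/* wait=1: blocks until init_handler_req() has run on the target */
	smp_call_function_single(hp_cpu->processor_id, init_handler_req,
				 &req, 1);
	return req.err;
}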
-static void send_first_frame(void *ignore)
+static int send_first_frame(void *ignore)
{
u32 *p = special_handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
- int loop;
+ int loop, err;
struct qm_fd fd;
- BUG_ON(special_handler->processor_id != smp_processor_id());
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
memset(&fd, 0, sizeof(fd));
qm_fd_addr_set64(&fd, special_handler->addr);
- fd.format = qm_fd_contig_big;
- fd.length29 = HP_NUM_WORDS * 4;
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
- if (*p != lfsr)
- panic("corrupt frame data");
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
*p ^= special_handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
pr_info("Sending first frame\n");
- if (qman_enqueue(&special_handler->tx, &fd, 0))
- panic("qman_enqueue() failed");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
}
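
Two API changes show up in send_first_frame(): qm_fd_set_contig_big()
replaces the separate fd.format/fd.length29 writes, and qman_enqueue() has
lost its flags argument. The FD construction, reduced to a sketch (helper
name illustrative):

static void build_test_fd(struct qm_fd *fd, dma_addr_t addr, u32 len)
{
	memset(fd, 0, sizeof(*fd));
	qm_fd_addr_set64(fd, addr);
	/* one contiguous buffer, 29-bit length, in a single helper call */
	qm_fd_set_contig_big(fd, len);
}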
-void qman_test_stash(void)
+static void send_first_frame_cb(void *ignore)
{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
#ifndef __rtems__
if (cpumask_weight(cpu_online_mask) < 2) {
pr_info("%s(): skip - only 1 CPU\n", __func__);
- return;
+ return 0;
}
#endif /* __rtems__ */
@@ -507,34 +611,57 @@ void qman_test_stash(void)
hp_handler_slab = kmem_cache_create("hp_handler_slab",
sizeof(struct hp_handler), L1_CACHE_BYTES,
SLAB_HWCACHE_ALIGN, NULL);
- if (!hp_handler_slab)
- panic("kmem_cache_create() failed");
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
- allocate_frame_data();
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
/* Init phase 1 */
pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
- if (on_all_cpus(create_per_cpu_handlers))
- panic("on_each_cpu() failed");
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
pr_info("Number of cpus: %d, total of %d handlers\n",
hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
- init_phase2();
+ err = init_phase2();
+ if (err)
+ goto failed;
- init_phase3();
+ err = init_phase3();
+ if (err)
+ goto failed;
preempt_disable();
- if (special_handler->processor_id == smp_processor_id())
- send_first_frame(NULL);
- else
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
smp_call_function_single(special_handler->processor_id,
- send_first_frame, NULL, 1);
+ send_first_frame_cb, NULL, 1);
+ }
preempt_enable();
wait_event(queue, loop_counter == HP_LOOPS);
deallocate_frame_data();
- if (on_all_cpus(destroy_per_cpu_handlers))
- panic("on_each_cpu() failed");
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
kmem_cache_destroy(hp_handler_slab);
pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
}
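
With qman_test_stash() converted from void to int, a hypothetical caller can
now act on the result rather than relying on the old panic() paths:

	int err = qman_test_stash();

	if (err)
		pr_err("qman: stash test failed: %d\n", err);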