author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2017-05-05 08:47:39 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2017-10-23 09:23:59 +0200
commit     cd089b9e05aad31244059ba88988afbe1db02e5e
tree       81d344bb292ff39e60f74ac4df5f1621eca77c05 /linux/drivers/soc/fsl/qbman/qman_portal.c
parent     Import DPAA driver snapshot
Linux update to 4.11-rc5
Linux baseline a71c9a1c779f2499fb2afc0553e543f18aff6edf (4.11-rc5).
Diffstat (limited to 'linux/drivers/soc/fsl/qbman/qman_portal.c')

-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_portal.c | 797
1 file changed, 236 insertions, 561 deletions
diff --git a/linux/drivers/soc/fsl/qbman/qman_portal.c b/linux/drivers/soc/fsl/qbman/qman_portal.c
index c74059b1..640343ac 100644
--- a/linux/drivers/soc/fsl/qbman/qman_portal.c
+++ b/linux/drivers/soc/fsl/qbman/qman_portal.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,304 +35,40 @@
#include "qman_priv.h"
#ifdef __rtems__
#include <bsp/qoriq.h>
+#undef dev_crit
+#undef dev_info
+#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
+#define dev_info dev_crit
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
#endif /* __rtems__ */
/* Enable portal interupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
-/* Global variable containing revision id (even on non-control plane systems
- * where CCSR isn't available) */
-u16 qman_ip_rev;
-EXPORT_SYMBOL(qman_ip_rev);
-u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
-EXPORT_SYMBOL(qm_channel_pool1);
-u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
-EXPORT_SYMBOL(qm_channel_caam);
-u16 qm_channel_pme = QMAN_CHANNEL_PME;
-EXPORT_SYMBOL(qm_channel_pme);
-u16 qm_channel_dce = QMAN_CHANNEL_DCE;
-EXPORT_SYMBOL(qm_channel_dce);
-u16 qman_portal_max;
-EXPORT_SYMBOL(qman_portal_max);
-
#ifndef __rtems__
-/* For these variables, and the portal-initialisation logic, the
- * comments in bman_driver.c apply here so won't be repeated. */
-static struct qman_portal *shared_portals[NR_CPUS];
-static int num_shared_portals;
-static int shared_portals_idx;
-static LIST_HEAD(unused_pcfgs);
-#endif /* __rtems__ */
-
-/* A SDQCR mask comprising all the available/visible pool channels */
-static u32 pools_sdqcr;
-
-#define STR_ERR_NOPROP "No '%s' property in node %s\n"
-#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
-#define STR_FQID_RANGE "fsl,fqid-range"
-#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
-#define STR_CGRID_RANGE "fsl,cgrid-range"
-
-/* A "fsl,fqid-range" node; release the given range to the allocator */
-static __init int fsl_fqid_range_init(struct device_node *node)
-{
- int ret;
- const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
-
- if (!range) {
- pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
- return -EINVAL;
- }
- qman_seed_fqid_range(range[0], range[1]);
- pr_info("FQID allocator includes range %d:%d\n",
- range[0], range[1]);
- return 0;
-}
-
-/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
-static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
-{
- int ret;
- const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
- if (!chanid) {
- pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
- return -EINVAL;
- }
- for (ret = 0; ret < chanid[1]; ret++)
- pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
- return 0;
-}
-
-/* A "fsl,pool-channel-range" node; release the given range to the allocator */
-static __init int fsl_pool_channel_range_init(struct device_node *node)
-{
- int ret;
- const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
- if (!chanid) {
- pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
- return -EINVAL;
- }
- qman_seed_pool_range(chanid[0], chanid[1]);
- pr_info("Pool channel allocator includes range %d:%d\n",
- chanid[0], chanid[1]);
- return 0;
-}
-
-/* A "fsl,cgrid-range" node; release the given range to the allocator */
-static __init int fsl_cgrid_range_init(struct device_node *node)
-{
- struct qman_cgr cgr;
- int ret, errors = 0;
- const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
-
- if (!range) {
- pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
- return -EINVAL;
- }
- qman_seed_cgrid_range(range[0], range[1]);
- pr_info("CGRID allocator includes range %d:%d\n",
- range[0], range[1]);
- for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
- ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
- if (ret)
- errors++;
- }
- if (errors)
- pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
- errors, (errors > 1) ? "s" : "", range[0], range[1]);
- return 0;
-}
-
-static void qman_get_ip_revision(struct device_node *dn)
-{
-#ifdef __rtems__
- struct device_node of_dns;
-#endif /* __rtems__ */
- u16 ip_rev = 0;
-
- for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
- if (!of_device_is_available(dn))
- continue;
- if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
- pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
- BUG_ON(1);
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
- ip_rev = QMAN_REV11;
- qman_portal_max = 10;
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
- ip_rev = QMAN_REV12;
- qman_portal_max = 10;
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
- of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
- ip_rev = QMAN_REV20;
- qman_portal_max = 3;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.0.0")) {
- ip_rev = QMAN_REV30;
- qman_portal_max = 50;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.0.1")) {
- ip_rev = QMAN_REV30;
- qman_portal_max = 25;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.0")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 50;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.1")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 25;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.2")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 18;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.3")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 10;
- } else {
- pr_warn("Unknown version in portal node, default to rev1.1\n");
- ip_rev = QMAN_REV11;
- qman_portal_max = 10;
- }
-
- if (!qman_ip_rev) {
- if (ip_rev) {
- qman_ip_rev = ip_rev;
- } else {
- pr_warn("Unknown version, default to rev1.1\n");
- qman_ip_rev = QMAN_REV11;
- }
- } else if (ip_rev && (qman_ip_rev != ip_rev))
- pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
- qman_ip_rev, dn->full_name, ip_rev);
- if (qman_ip_rev == ip_rev)
- break;
- }
-}
-
-#ifndef __rtems__
-/* Parse a portal node, perform generic mapping duties and return the config. It
- * is not known at this stage for what purpose (or even if) the portal will be
- * used. */
-static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
-{
- struct qm_portal_config *pcfg;
- const u32 *channel;
- int irq, ret;
- struct resource res;
-
- pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
- return NULL;
-
- /*
- * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
- * 'struct device' in order to get the PAMU stashing setup and the QMan
- * portal [driver] won't function at all without ring stashing
- *
- * Making the QMan portal driver nice and proper is part of the
- * upstreaming effort
- */
- pcfg->dev.bus = &platform_bus_type;
- pcfg->dev.of_node = node;
-#ifdef CONFIG_IOMMU_API
- pcfg->dev.archdata.iommu_domain = NULL;
-#endif
-
- ret = of_address_to_resource(node, DPA_PORTAL_CE,
- &pcfg->addr_phys[DPA_PORTAL_CE]);
- if (ret) {
- pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
- goto err;
- }
- ret = of_address_to_resource(node, DPA_PORTAL_CI,
- &pcfg->addr_phys[DPA_PORTAL_CI]);
- if (ret) {
- pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
- goto err;
- }
-
- channel = of_get_property(node, "fsl,qman-channel-id", &ret);
- if (!channel || (ret != 4)) {
- pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
- node->full_name);
- goto err;
- }
- pcfg->public_cfg.channel = *channel;
- pcfg->public_cfg.cpu = -1;
- irq = irq_of_parse_and_map(node, 0);
- if (irq == NO_IRQ) {
- pr_err("Can't get %s property 'interrupts'\n", node->full_name);
- goto err;
- }
- pcfg->public_cfg.irq = irq;
-#ifdef CONFIG_FSL_QMAN_CONFIG
- /* We need the same LIODN offset for all portals */
- qman_liodn_fixup(pcfg->public_cfg.channel);
-#endif
-
- pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
- pcfg->addr_phys[DPA_PORTAL_CE].start,
- resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
- 0);
- pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
- pcfg->addr_phys[DPA_PORTAL_CI].start,
- resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
-
- return pcfg;
-err:
- kfree(pcfg);
- return NULL;
-}
-
-static struct qm_portal_config *get_pcfg(struct list_head *list)
-{
- struct qm_portal_config *pcfg;
-
- if (list_empty(list))
- return NULL;
- pcfg = list_entry(list->prev, struct qm_portal_config, list);
- list_del(&pcfg->list);
- return pcfg;
-}
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
#endif /* __rtems__ */
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
- int ret;
+ struct device *dev = pcfg->dev;
int window_count = 1;
struct iommu_domain_geometry geom_attr;
struct pamu_stash_attribute stash_attr;
+ int ret;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
if (!pcfg->iommu_domain) {
- pr_err("%s(): iommu_domain_alloc() failed", __func__);
- goto _no_iommu;
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
}
geom_attr.aperture_start = 0;
geom_attr.aperture_end =
@@ -341,14 +77,16 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
&geom_attr);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
&window_count);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
stash_attr.cpu = cpu;
stash_attr.cache = PAMU_ATTR_CACHE_L1;
@@ -356,45 +94,42 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
DOMAIN_ATTR_FSL_PAMU_STASH,
&stash_attr);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
}
ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
- pr_err("%s(): iommu_domain_window_enable() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
}
- ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
if (ret < 0) {
- pr_err("%s(): iommu_device_attach() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
&window_count);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto _iommu_detach_device;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
}
-_no_iommu:
-#endif
-#ifdef CONFIG_FSL_QMAN_CONFIG
- if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+no_iommu:
#endif
- pr_warn("Failed to set the stash request queue\n");
+ qman_set_sdest(pcfg->channel, cpu);
return;
#ifdef CONFIG_FSL_PAMU
-_iommu_detach_device:
+out_detach_device:
iommu_detach_device(pcfg->iommu_domain, NULL);
-_iommu_domain_free:
+out_domain_free:
iommu_domain_free(pcfg->iommu_domain);
pcfg->iommu_domain = NULL;
#endif
@@ -403,65 +138,54 @@ _iommu_domain_free:
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
#ifndef __rtems__
pcfg->iommu_domain = NULL;
#endif /* __rtems__ */
- portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+ portal_set_cpu(pcfg, pcfg->cpu);
p = qman_create_affine_portal(pcfg, NULL);
- if (p) {
- u32 irq_sources = 0;
- /* Determine what should be interrupt-vs-poll driven */
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
- irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
- QM_PIRQ_CSCI;
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
- irq_sources |= QM_PIRQ_DQRI;
+ irq_sources |= QM_PIRQ_DQRI;
#endif
- qman_p_irqsource_add(p, irq_sources);
- pr_info("Portal %sinitialised, cpu %d\n",
-#ifndef __rtems__
- pcfg->public_cfg.is_shared ? "(shared) " : "",
-#else /* __rtems__ */
- "",
-#endif /* __rtems__ */
- pcfg->public_cfg.cpu);
- } else
- pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
- return p;
-}
+ qman_p_irqsource_add(p, irq_sources);
#ifndef __rtems__
-static void init_slave(int cpu)
-{
- struct qman_portal *p;
- struct cpumask oldmask = *tsk_cpus_allowed(current);
-
- set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
- p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
- if (!p)
- pr_err("Slave portal failure on cpu %d\n", cpu);
- else
- pr_info("Portal (slave) initialised, cpu %d\n", cpu);
- set_cpus_allowed_ptr(current, &oldmask);
- if (shared_portals_idx >= num_shared_portals)
- shared_portals_idx = 0;
-}
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
-static struct cpumask want_unshared __initdata;
-static struct cpumask want_shared __initdata;
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
-static int __init parse_qportals(char *str)
-{
- return parse_portals_bootarg(str, &want_shared, &want_unshared,
- "qportals");
+ spin_unlock(&qman_lock);
+#endif /* __rtems__ */
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
}
-__setup("qportals=", parse_qportals);
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
+#ifdef CONFIG_FSL_PAMU /* TODO */
struct pamu_stash_attribute stash_attr;
int ret;
@@ -471,77 +195,196 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
if (ret < 0) {
- pr_err("Failed to update pamu stash setting\n");
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
return;
}
}
-#ifdef CONFIG_FSL_QMAN_CONFIG
- if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
#endif
- pr_warn("Failed to update portal's stash request queue\n");
+ qman_set_sdest(pcfg->channel, cpu);
}
-static void qman_offline_cpu(unsigned int cpu)
+#ifndef __rtems__
+static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
- p = (struct qman_portal *)affine_portals[cpu];
+ p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
qman_portal_update_sdest(pcfg, 0);
}
}
+ return 0;
}
-#endif /* __rtems__ */
-#ifdef CONFIG_HOTPLUG_CPU
-static void qman_online_cpu(unsigned int cpu)
+static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
- p = (struct qman_portal *)affine_portals[cpu];
+ p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
qman_portal_update_sdest(pcfg, cpu);
}
}
+ return 0;
}
-static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int qman_portal_probe(struct platform_device *pdev)
{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- qman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- qman_offline_cpu(cpu);
- default:
- break;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu, err;
+ u32 val;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %s property 'cell-index'\n",
+ node->full_name);
+ return err;
+ }
+ pcfg->channel = val;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
}
- return NOTIFY_OK;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ return -ENXIO;
}
-static struct notifier_block qman_hotplug_cpu_notifier = {
- .notifier_call = qman_hotplug_cpu_callback,
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
};
-#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef __rtems__
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
+#else /* __rtems__ */
#include <bsp/fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
static struct qm_portal_config qman_configs[NR_CPUS];
-static void
-qman_init_portals(void)
+
+void
+qman_sysinit_portals(void)
{
const char *fdt = bsp_fdt_get();
struct device_node dn;
@@ -575,7 +418,7 @@ qman_init_portals(void)
struct qm_portal_config *pcfg = &qman_configs[cpu];
struct qman_portal *portal;
struct resource res;
- const u32 *channel;
+ u32 val;
if (node < 0)
panic("qman: missing portal in FDT");
@@ -600,197 +443,29 @@ qman_init_portals(void)
BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
(uintptr_t)&qoriq_qman_portal[2][0]);
- pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
- if (pcfg->public_cfg.irq == NO_IRQ)
- panic("qman: no portal interrupt");
+ ret = of_property_read_u32(&dn, "cell-index", &val);
+ if (ret != 0)
+ panic("qman: no cell-index");
+ pcfg->channel = val;
- channel = of_get_property(&dn, "fsl,qman-channel-id", &ret);
- if (channel == NULL || ret != 4)
- panic("qman: no portal channel ID");
- pcfg->public_cfg.channel = *channel;
+ pcfg->irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->irq == NO_IRQ)
+ panic("qman: no portal interrupt");
- pcfg->public_cfg.cpu = cpu;
- pcfg->public_cfg.pools = pools_sdqcr;
+ pcfg->cpu = cpu;
+ pcfg->pools = qm_get_pools_sdqcr();
portal = init_pcfg(pcfg);
if (portal == NULL)
panic("qman: cannot create portal");
+ qman_portal_update_sdest(pcfg, cpu);
+
node = fdt_next_subnode(fdt, node);
dn.offset = node;
}
-}
-#endif /* __rtems__ */
-#ifndef __rtems__
-__init int qman_init(void)
-{
- struct cpumask slave_cpus;
- struct cpumask unshared_cpus = *cpu_none_mask;
- struct cpumask shared_cpus = *cpu_none_mask;
- LIST_HEAD(unshared_pcfgs);
- LIST_HEAD(shared_pcfgs);
- struct device_node *dn;
- struct qm_portal_config *pcfg;
- struct qman_portal *p;
- int cpu, ret;
- struct cpumask offline_cpus;
-
- /* Initialise the QMan (CCSR) device */
- for_each_compatible_node(dn, NULL, "fsl,qman") {
- if (!qman_init_ccsr(dn))
- pr_info("Err interrupt handler present\n");
- else
- pr_err("CCSR setup failed\n");
- }
-#else /* __rtems__ */
-int
-qman_init(struct device_node *dn)
-{
- struct device_node of_dns;
- int ret;
-#endif /* __rtems__ */
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- /* Setup lookup table for FQ demux */
- ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
- if (ret)
- return ret;
-#endif
- /* Get qman ip revision */
- qman_get_ip_revision(dn);
- if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
- qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
- qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
- qm_channel_pme = QMAN_CHANNEL_PME_REV3;
- }
-
- /* Parse pool channels into the SDQCR mask. (Must happen before portals
- * are initialised.) */
- for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
- ret = fsl_pool_channel_range_sdqcr(dn);
- if (ret)
- return ret;
- }
-
-#ifndef __rtems__
- memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
- /* Initialise portals. See bman_driver.c for comments */
- for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
- if (!of_device_is_available(dn))
- continue;
- pcfg = parse_pcfg(dn);
- if (pcfg) {
- pcfg->public_cfg.pools = pools_sdqcr;
- list_add_tail(&pcfg->list, &unused_pcfgs);
- }
- }
- for_each_possible_cpu(cpu) {
- if (cpumask_test_cpu(cpu, &want_shared)) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &shared_pcfgs);
- cpumask_set_cpu(cpu, &shared_cpus);
- }
- if (cpumask_test_cpu(cpu, &want_unshared)) {
- if (cpumask_test_cpu(cpu, &shared_cpus))
- continue;
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
- for_each_possible_cpu(cpu) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
- cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
- if (cpumask_empty(&slave_cpus)) {
- if (!list_empty(&shared_pcfgs)) {
- cpumask_or(&unshared_cpus, &unshared_cpus,
- &shared_cpus);
- cpumask_clear(&shared_cpus);
- list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
- INIT_LIST_HEAD(&shared_pcfgs);
- }
- } else {
- if (list_empty(&shared_pcfgs)) {
- pcfg = get_pcfg(&unshared_pcfgs);
- if (!pcfg) {
- pr_crit("No portals available!\n");
- return 0;
- }
- cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
- cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
- list_add_tail(&pcfg->list, &shared_pcfgs);
- }
- }
- list_for_each_entry(pcfg, &unshared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 0;
- p = init_pcfg(pcfg);
- }
- list_for_each_entry(pcfg, &shared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 1;
- p = init_pcfg(pcfg);
- if (p)
- shared_portals[num_shared_portals++] = p;
- }
- if (!cpumask_empty(&slave_cpus))
- for_each_cpu(cpu, &slave_cpus)
- init_slave(cpu);
-#else /* __rtems__ */
- qman_init_portals();
-#endif /* __rtems__ */
- pr_info("Portals initialised\n");
-#ifndef __rtems__
- cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
- for_each_cpu(cpu, &offline_cpus)
- qman_offline_cpu(cpu);
-#endif /* __rtems__ */
-#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
-#endif
- return 0;
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
}
-
-__init int qman_resource_init(void)
-{
-#ifdef __rtems__
- struct device_node of_dns;
#endif /* __rtems__ */
- struct device_node *dn;
- int ret;
-
- /* Initialise FQID allocation ranges */
- for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
- ret = fsl_fqid_range_init(dn);
- if (ret)
- return ret;
- }
- /* Initialise CGRID allocation ranges */
- for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
- ret = fsl_cgrid_range_init(dn);
- if (ret)
- return ret;
- }
- /* Parse pool channels into the allocator. (Must happen after portals
- * are initialised.) */
- for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
- ret = fsl_pool_channel_range_init(dn);
- if (ret)
- return ret;
- }
-
- return 0;
-}
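
Note on the hotplug change: the new qman_portal_driver_register() above replaces the old qman_hotplug_cpu_callback notifier with the cpuhp state machine via cpuhp_setup_state_nocalls(). A minimal, self-contained sketch of that same registration pattern is shown below; it is not part of the diff, and all identifiers prefixed with example_ are hypothetical.

```c
/*
 * Sketch of the cpuhp registration pattern used by the new portal driver:
 * register the platform driver first, then hook a dynamic CPU-hotplug
 * state; roll back the driver registration if the hotplug setup fails.
 * Names prefixed with "example_" are hypothetical (probe omitted for brevity).
 */
#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_online_cpu(unsigned int cpu)
{
	/* re-target per-CPU resources (e.g. IRQ affinity, stashing) to 'cpu' */
	return 0;
}

static int example_offline_cpu(unsigned int cpu)
{
	/* fall back to CPU 0 while 'cpu' is going down */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-portal",
	},
};

static int __init example_driver_register(struct platform_driver *drv)
{
	int ret;

	ret = platform_driver_register(drv);
	if (ret < 0)
		return ret;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state; < 0 means failure */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"soc/example_portal:online",
					example_online_cpu,
					example_offline_cpu);
	if (ret < 0) {
		platform_driver_unregister(drv);
		return ret;
	}
	return 0;
}

module_driver(example_driver, example_driver_register,
	      platform_driver_unregister);
```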