Diffstat (limited to 'linux')
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2987
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 440
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c | 1491
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h | 113
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c | 710
-rw-r--r-- linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 12
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman.c | 1293
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman.h | 169
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c | 523
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_mac.h | 6
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_memac.c | 402
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_muram.h | 44
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_port.c | 338
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_port.h | 89
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_sp.c | 2
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/fman_tgec.c | 116
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/mac.c | 241
-rw-r--r-- linux/drivers/net/ethernet/freescale/fman/mac.h | 26
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman.c | 1191
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_api.c | 1123
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_ccsr.c | 392
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_portal.c | 399
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_priv.h | 100
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_test.c | 60
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_test.h | 5
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_test_api.c | 91
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_test_thresh.c | 216
-rw-r--r-- linux/drivers/soc/fsl/qbman/bman_utils.c | 76
-rw-r--r-- linux/drivers/soc/fsl/qbman/dpaa_resource.c | 363
-rw-r--r-- linux/drivers/soc/fsl/qbman/dpaa_sys.h | 285
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman.c | 3465
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman.h | 1133
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_api.c | 3026
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_ccsr.c | 883
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_driver.c | 87
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_portal.c | 797
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_priv.h | 360
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_test.c | 61
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_test.h | 18
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_test_api.c | 201
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_test_stash.c | 349
-rw-r--r-- linux/drivers/soc/fsl/qbman/qman_utils.c | 309
-rw-r--r-- linux/include/soc/fsl/bman.h | 469
-rw-r--r-- linux/include/soc/fsl/qman.h | 1785
44 files changed, 9927 insertions, 16319 deletions
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 73173b89..f8ed4516 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,9 +36,9 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/kthread.h>
#include <linux/io.h>
#ifndef __rtems__
#include <linux/if_arp.h>
@@ -49,32 +49,50 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
+#include <linux/sort.h>
#endif /* __rtems__ */
#include <soc/fsl/bman.h>
+#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
-
#include "mac.h"
#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
* using trace events only need to #include <trace/events/sched.h>
*/
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"
-#define DPA_NAPI_WEIGHT 64
+static int debug = -1;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
+
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, 0444);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
+ FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+ FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
+ FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+ FM_FD_ERR_PRS_HDR_ERR)
-/* Valid checksum indication */
-#define DPA_CSUM_VALID 0xFFFF
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_FD_ERR_UNSUPPORTED_FORMAT | \
+ FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
-#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+ NETIF_MSG_IFDOWN)
#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
@@ -83,62 +101,1999 @@
* by FMan.
*/
+/* Size in bytes of the FQ taildrop threshold */
+#define DPAA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (risking
+ * the netdev watchdog to fire - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
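/* Illustrative sketch (editor's addition, not part of this patch): in plain
 * numbers, the egress Congestion State thresholds above amount to 96 MiB
 * (0x06000000) for 1G ports and 256 MiB (0x10000000) for 10G ports of frame
 * data queued before CSCN notifications fire. The helper below is purely
 * hypothetical and only spells out that arithmetic.
 */
static inline unsigned int dpaa_cs_threshold_in_mib(u32 cs_th)
{
	/* 0x06000000 -> 96 MiB, 0x10000000 -> 256 MiB */
	return cs_th / (1024 * 1024);
}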
+
+/* Largest value that the FQD's OAL field can hold */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Default alignment for start of data in an Rx FD */
+#define DPAA_FD_DATA_ALIGNMENT 16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+/* Values for the L4R field of the FM Parse Results */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+#define FSL_DPAA_BPID_INV 0xff
+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
+
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPAA_TIME_STAMP_SIZE 8
+#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
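/* Rough sketch (editor's addition, not part of this patch) of how the
 * annotation sizes above combine. Assuming the FMan parse results structure
 * is 32 bytes (an assumption, not confirmed by this hunk), the per-frame
 * annotation area reserved in front of the frame data on Tx adds up to
 * 16 + 32 + 8 + 8 = 64 bytes; Rx additionally reserves
 * dpaa_rx_extra_headroom on top of the 16 bytes of private data, as
 * DPAA_RX_PRIV_DATA_SIZE above shows. The helper is hypothetical.
 */
static inline u16 dpaa_tx_annotation_bytes_example(void)
{
	/* assumed: sizeof(struct fman_prs_result) == 32 */
	return DPAA_TX_PRIV_DATA_SIZE + 32 + DPAA_TIME_STAMP_SIZE +
	       DPAA_HASH_RESULTS_SIZE; /* 16 + 32 + 8 + 8 = 64 bytes */
}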
+
+#define DPAA_ETH_RX_QUEUES 128
+
+#define DPAA_ENQUEUE_RETRIES 100000
+
+enum port_type {RX, TX};
+
+struct fm_port_fqs {
+ struct dpaa_fq *tx_defq;
+ struct dpaa_fq *tx_errq;
+ struct dpaa_fq *rx_defq;
+ struct dpaa_fq *rx_errq;
+};
+
+/* All the dpa bps in use at any moment */
+static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
+
+/* The raw buffer size must be cacheline aligned */
#ifndef __rtems__
-static u8 debug = -1;
-module_param(debug, byte, S_IRUGO);
-MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+#define DPAA_BP_RAW_SIZE 4096
+#else /* __rtems__ */
+/*
+ * FIXME: Support multiple buffer pools.
+ */
+#define DPAA_BP_RAW_SIZE 2048
-/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
-static u16 tx_timeout = 1000;
-module_param(tx_timeout, ushort, S_IRUGO);
-MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+/*
+ * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
+ * frames overwrite this area if < 64 bytes.
+ */
+#define DPAA_OUT_OF_BAND_SIZE 64
+
+#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE)
+#endif /* __rtems__ */
+/* When using more than one buffer pool, the raw sizes are as follows:
+ * 1 bp: 4KB
+ * 2 bp: 2KB, 4KB
+ * 3 bp: 1KB, 2KB, 4KB
+ * 4 bp: 1KB, 2KB, 4KB, 8KB
+ */
+static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
+{
+ size_t res = DPAA_BP_RAW_SIZE / 4;
+ u8 i;
+
+ for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
+ res *= 2;
+ return res;
+}
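/* Usage sketch (editor's addition, not part of this patch): with the
 * non-RTEMS DPAA_BP_RAW_SIZE of 4096, bpool_buffer_raw_size() reproduces the
 * size table in the comment above. The function below is only illustrative.
 */
static inline void bpool_buffer_raw_size_example(void)
{
	/* four pools: 1 KiB, 2 KiB, 4 KiB, 8 KiB */
	size_t s0 = bpool_buffer_raw_size(0, 4); /* 1024 */
	size_t s1 = bpool_buffer_raw_size(1, 4); /* 2048 */
	size_t s2 = bpool_buffer_raw_size(2, 4); /* 4096 */
	size_t s3 = bpool_buffer_raw_size(3, 4); /* 8192 */
	/* a single pool (index 0, cnt 1) gets the full raw size */
	size_t s_single = bpool_buffer_raw_size(0, 1); /* 4096 */

	(void)s0; (void)s1; (void)s2; (void)s3; (void)s_single;
}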
+
+/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers, so we reserve some more space for start-of-buffer
+ * alignment.
+ */
+#ifndef __rtems__
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#else /* __rtems__ */
+#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET
#endif /* __rtems__ */
-/* BM */
+#ifndef __rtems__
+static int dpaa_max_frm;
+#endif /* __rtems__ */
-#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+#ifndef __rtems__
+static int dpaa_rx_extra_headroom;
+#else /* __rtems__ */
+#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom()
+#endif /* __rtems__ */
-static u8 dpa_priv_common_bpid;
+#define dpaa_get_max_mtu() \
+ (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
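/* Worked example (editor's addition, not part of this patch): with the common
 * maximum frame length of 1522 bytes, dpaa_get_max_mtu() yields the familiar
 * 1500-byte MTU, because VLAN_ETH_HLEN (18) plus ETH_FCS_LEN (4) account for
 * 22 bytes of framing overhead. The helper below is hypothetical and only
 * restates that arithmetic.
 */
static inline int dpaa_example_max_mtu(int max_frm)
{
	/* dpaa_example_max_mtu(1522) == 1500 */
	return max_frm - (18 /* VLAN_ETH_HLEN */ + 4 /* ETH_FCS_LEN */);
}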
-static void _dpa_rx_error(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid)
+#ifndef __rtems__
+static int dpaa_netdev_init(struct net_device *net_dev,
+ const struct net_device_ops *dpaa_ops,
+ u16 tx_timeout)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_percpu_priv *percpu_priv;
+ const u8 *mac_addr;
+ int i, err;
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+ net_dev->netdev_ops = dpaa_ops;
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ net_dev->min_mtu = ETH_MIN_MTU;
+ net_dev->max_mtu = dpaa_get_max_mtu();
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
+ * For conformity, we'll still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ /* we do not want shared skbs on TX */
+ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ net_dev->ethtool_ops = &dpaa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ /* start without the RUNNING flag, phylib controls it later */
+ netif_carrier_off(net_dev);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+#endif /* __rtems__ */
+
+static int dpaa_stop(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int i, err, error;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+ netif_tx_stop_all_queues(net_dev);
+#endif /* __rtems__ */
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ usleep_range(5000, 10000);
+
+ err = mac_dev->stop(mac_dev);
+ if (err < 0)
+ netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
+ err);
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ error = fman_port_disable(mac_dev->port[i]);
+ if (error)
+ err = error;
+ }
+
+#ifndef __rtems__
+ if (net_dev->phydev)
+ phy_disconnect(net_dev->phydev);
+ net_dev->phydev = NULL;
+#endif /* __rtems__ */
+
+ return err;
+}
+
+#ifndef __rtems__
+static void dpaa_tx_timeout(struct net_device *net_dev)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+static void dpaa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *s)
+{
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ u64 *netstats = (u64 *)s;
+ u64 *cpustats;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ /* add stats from all CPUs */
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
+{
+ struct platform_device *of_dev;
+ struct dpaa_eth_data *eth_data;
+ struct device *dpaa_dev, *dev;
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+
+ dpaa_dev = &pdev->dev;
+ eth_data = dpaa_dev->platform_data;
+ if (!eth_data)
+ return ERR_PTR(-ENODEV);
+
+ mac_node = eth_data->mac_node;
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (!of_dev) {
+ dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (!mac_dev) {
+ dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mac_dev;
+}
+
+static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpaa_priv *priv;
+ struct mac_device *mac_dev;
+ struct sockaddr old_addr;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+ return err;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+ err);
+ /* reverting to previous address */
+ eth_mac_addr(net_dev, &old_addr);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa_set_rx_mode(struct net_device *net_dev)
+{
+ const struct dpaa_priv *priv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+ priv->mac_dev->promisc);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ err);
+ }
+
+ err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (err < 0)
+ netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ err);
+}
+#endif /* __rtems__ */
+
+static struct dpaa_bp *dpaa_bpid2pool(int bpid)
+{
+ if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
+ return NULL;
+
+ return dpaa_bp_array[bpid];
+}
+
+/* checks if this bpool is already allocated */
+static bool dpaa_bpid2pool_use(int bpid)
+{
+ if (dpaa_bpid2pool(bpid)) {
+ atomic_inc(&dpaa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+/* called only once per bpid by dpaa_bp_alloc_pool() */
+static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
+{
+ dpaa_bp_array[bpid] = dpaa_bp;
+ atomic_set(&dpaa_bp->refs, 1);
+}
+
+static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
+{
+ int err;
+
+ if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
+ pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* If the pool is already specified, we only create one per bpid */
+ if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
+ dpaa_bpid2pool_use(dpaa_bp->bpid))
+ return 0;
+
+ if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
+ dpaa_bp->pool = bman_new_pool();
+ if (!dpaa_bp->pool) {
+ pr_err("%s: bman_new_pool() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
+ }
+
+ if (dpaa_bp->seed_cb) {
+ err = dpaa_bp->seed_cb(dpaa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
+
+ return 0;
+
+pool_seed_failed:
+ pr_err("%s: pool seeding failed\n", __func__);
+ bman_free_pool(dpaa_bp->pool);
+
+ return err;
+}
+
+/* remove and free all the buffers from the given buffer pool */
+static void dpaa_bp_drain(struct dpaa_bp *bp)
+{
+ u8 num = 8;
+ int ret;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have less than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ if (bp->free_buf_cb)
+ for (i = 0; i < num; i++)
+ bp->free_buf_cb(bp, &bmb[i]);
+ } while (ret > 0);
+}
+
+static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
+{
+ struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
+
+ /* the mapping between bpid and dpaa_bp is done very late in the
+ * allocation procedure; if something failed before the mapping, the bp
+ * was not configured, therefore we don't need the below instructions
+ */
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpaa_bp_drain(bp);
+
+ dpaa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+static void dpaa_bps_free(struct dpaa_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++)
+ dpaa_bp_free(priv->dpaa_bps[i]);
+}
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ3).
+ * - Rx Default and Tx queues go to WQ3 (no differentiation between
+ * Rx and Tx traffic).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ fq->wq = 2;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_TX:
+ fq->wq = 3;
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
+
+static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
+ u32 start, u32 count,
+ struct list_head *list,
+ enum dpaa_fq_type fq_type)
+{
+ struct dpaa_fq *dpaa_fq;
+ int i;
+
+ dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
+ GFP_KERNEL);
+ if (!dpaa_fq)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ dpaa_fq[i].fq_type = fq_type;
+ dpaa_fq[i].fqid = start ? start + i : 0;
+ list_add_tail(&dpaa_fq[i].list, list);
+ }
+
+ for (i = 0; i < count; i++)
+ dpaa_assign_wq(dpaa_fq + i);
+
+ return dpaa_fq;
+}
+
+static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs)
+{
+ struct dpaa_fq *dpaa_fq;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ goto fq_alloc_failed;
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpaa_fq_alloc() failed\n");
+ return -ENOMEM;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+static int dpaa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret;
+
+ ret = qman_alloc_pool(&pool);
+
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
+#ifndef __rtems__
+static void dpaa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+#endif /* __rtems__ */
+
+static void dpaa_eth_add_channel(u16 channel)
+{
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+#ifndef __rtems__
+ const cpumask_t *cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+ struct qman_portal *portal;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ }
+}
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
+ struct dpaa_priv, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+#ifndef __rtems__
+ netif_tx_stop_all_queues(priv->net_dev);
+#endif /* __rtems__ */
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+#ifndef __rtems__
+ netif_tx_wake_all_queues(priv->net_dev);
+#endif /* __rtems__ */
+ }
+}
+
+static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
- /* limit common, possibly innocuous Rx FIFO Overflow errors'
- * interference with zero-loss convergence benchmark results.
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d allocating CGR ID\n",
+ __func__, err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+ /* Enable Congestion State Change Notifications and CS taildrop */
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
*/
- if (likely(fd->status & FM_FD_ERR_PHYSICAL))
- pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPAA_CS_THRESHOLD_10G;
else
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d creating CGR with ID %d\n",
+ __func__, err, priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ struct fman_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (u16)fman_port_get_qman_channel_id(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+static void dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
+{
#ifndef __rtems__
- if (net_ratelimit())
- netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_RX_ERRORS);
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ u16 portals[NR_CPUS];
+#else /* __rtems__ */
+ int egress_cnt = 0, conf_cnt = 0;
+#endif /* __rtems__ */
+ struct dpaa_fq *fq;
+
+#ifndef __rtems__
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+ "No Qman software (affine) channels found");
+#endif /* __rtems__ */
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_TX:
+ dpaa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TXQ_NUM)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ /* fall through */
+ case FQ_TYPE_TX_CONFIRM:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+#ifndef __rtems__
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ break;
+ }
+ }
+
+ /* Make sure all CPUs receive a corresponding Tx queue. */
+ while (egress_cnt < DPAA_ETH_TXQ_NUM) {
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TXQ_NUM)
+ break;
+ }
+ }
+}
+
+static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
+{
+ const struct dpaa_priv *priv;
+ struct qman_fq *confq = NULL;
+ struct qm_mcc_initfq initfq;
+#ifndef __rtems__
+ struct device *dev;
+#endif /* __rtems__ */
+ struct qman_fq *fq;
+ int queue_id;
+ int err;
+
+ priv = netdev_priv(dpaa_fq->net_dev);
+#ifndef __rtems__
+ dev = dpaa_fq->net_dev->dev.parent;
+#endif /* __rtems__ */
+
+ if (dpaa_fq->fqid == 0)
+ dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
+ if (err) {
+#ifndef __rtems__
+ dev_err(dev, "qman_create_fq() failed\n");
#else /* __rtems__ */
BSD_ASSERT(0);
#endif /* __rtems__ */
+ return err;
+ }
+ fq = &dpaa_fq->fq_base;
+
+ if (dpaa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
+ /* Note: we may get to keep an empty FQ in cache */
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+
+ /* FQ placement */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+
+ qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+#ifndef __rtems__
+ min(sizeof(struct sk_buff) +
+#else /* __rtems__ */
+ min(
+#endif /* __rtems__ */
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
+ qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
+ }
+
+ if (dpaa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
+ if (queue_id >= 0)
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
+ }
+ }
+
+ /* Put all the ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+#ifndef __rtems__
+ min(sizeof(struct sk_buff) +
+#else /* __rtems__ */
+ min(
+#endif /* __rtems__ */
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ qm_fqd_set_stashing(&initfq.fqd, 1, 2,
+ DIV_ROUND_UP(sizeof(struct qman_fq),
+ 64));
+ }
+
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (err < 0) {
+#ifndef __rtems__
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ qman_destroy_fq(fq);
+ return err;
+ }
+ }
+
+ dpaa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+#ifndef __rtems__
+static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
+{
+#ifndef __rtems__
+ const struct dpaa_priv *priv;
+#endif /* __rtems__ */
+ struct dpaa_fq *dpaa_fq;
+ int err, error;
+
+ err = 0;
+
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+#ifndef __rtems__
+ priv = netdev_priv(dpaa_fq->net_dev);
+#endif /* __rtems__ */
+
+ if (dpaa_fq->init) {
+ err = qman_retire_fq(fq, NULL);
+ if (err < 0 && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+
+ error = qman_oos_fq(fq);
+ if (error < 0 && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), error);
+ if (err >= 0)
+ err = error;
+ }
+ }
+
+ qman_destroy_fq(fq);
+ list_del(&dpaa_fq->list);
+
+ return err;
+}
+
+static int dpaa_fq_free(struct device *dev, struct list_head *list)
+{
+ struct dpaa_fq *dpaa_fq, *tmp;
+ int err, error;
+
+ err = 0;
+ list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
+ error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
+ if (error < 0 && err >= 0)
+ err = error;
+ }
+
+ return err;
+}
+#endif /* __rtems__ */
+
+static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_params params;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+ params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_rx_params *rx_p;
+ struct fman_port_params params;
+ int i, err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ rx_p = &params.specific_params.rx_params;
+ rx_p->err_fqid = errq->fqid;
+ rx_p->dflt_fqid = defq->fqid;
+
+ count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
+ rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
+ for (i = 0; i < count; i++) {
+ rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
+ }
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
+{
+ struct fman_port *rxport = mac_dev->port[RX];
+ struct fman_port *txport = mac_dev->port[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+}
+
+static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
+ struct bm_buffer *bmb, int cnt)
+{
+ int err;
+
+ err = bman_release(dpaa_bp->pool, bmb, cnt);
+ /* Should never occur, address anyway to avoid leaking the buffers */
+ if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ while (cnt-- > 0)
+ dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
+
+ return cnt;
+}
+
+static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
+{
+ struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
+ struct dpaa_bp *dpaa_bp;
+ int i = 0, j;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ do {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ return;
+
+ j = 0;
+ do {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
+
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]) &&
+ sgt[i - 1].bpid == sgt[i].bpid);
+
+ dpaa_bman_release(dpaa_bp, bmb, j);
+ } while (!qm_sg_entry_is_final(&sgt[i - 1]));
+}
+
+static void dpaa_fd_release(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpaa_bp *dpaa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ bmb.data = 0;
+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return;
+
+ if (qm_fd_get_format(fd) == qm_fd_sg) {
+ vaddr = phys_to_virt(qm_fd_addr(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
+
+#ifndef __rtems__
+ dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
+ DMA_FROM_DEVICE);
+#endif /* __rtems__ */
+
+ dpaa_release_sgt_members(sgt);
+
+#ifndef __rtems__
+ addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->dev, addr)) {
+ dev_err(dpaa_bp->dev, "DMA mapping failed");
+ return;
+ }
+#else /* __rtems__ */
+ addr = (dma_addr_t)vaddr;
+#endif /* __rtems__ */
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ dpaa_bman_release(dpaa_bp, &bmb, 1);
+}
+
+static void count_ern(struct dpaa_percpu_priv *percpu_priv,
+ const union qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+#ifndef __rtems__
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results)
+{
+ struct fman_prs_result *parse_result;
+ u16 ethertype = ntohs(skb->protocol);
+ struct ipv6hdr *ipv6h = NULL;
+ struct iphdr *iph;
+ int retval = 0;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (struct fman_prs_result *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ WARN_ON(!iph);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ WARN_ON(!ipv6h);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+ parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+#endif /* __rtems__ */
+
+static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
+{
+#ifndef __rtems__
+ struct device *dev = dpaa_bp->dev;
+#endif /* __rtems__ */
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+#ifndef __rtems__
+ void *new_buf;
+#endif /* __rtems__ */
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+#ifndef __rtems__
+ new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
+ if (unlikely(!new_buf)) {
+ dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
+ dpaa_bp->raw_size);
+ goto release_previous_buffs;
+ }
+ new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
+
+ addr = dma_map_single(dev, new_buf,
+ dpaa_bp->size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dpaa_bp->dev, "DMA map failed");
+ goto release_previous_buffs;
+ }
+#else /* __rtems__ */
+ struct mbuf *m;
+
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (unlikely(m == NULL)) {
+ goto release_previous_buffs;
+ }
+
+ RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES,
+ DPAA_BP_RAW_SIZE);
+ *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) =
+ m;
+ addr = mtod(m, dma_addr_t);
+#endif /* __rtems__ */
+
+ bmb[i].data = 0;
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ return dpaa_bman_release(dpaa_bp, bmb, i);
+
+release_previous_buffs:
+#ifndef __rtems__
+ WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
+#endif /* __rtems__ */
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpaa_bp->config_count; j += 8)
+ *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
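/* Worked example (editor's addition, not part of this patch): buffers are
 * added back in batches of eight, so if the per-CPU count has dropped to,
 * say, 72 (below FSL_DPAA_ETH_REFILL_THRESHOLD = 80), seven successful calls
 * to dpaa_bp_add_8_bufs() restore it to FSL_DPAA_ETH_MAX_BUF_COUNT = 128
 * (72 + 7 * 8 = 128).
 */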
+
+static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
+{
+ struct dpaa_bp *dpaa_bp;
+ int *countptr;
+ int res, i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ dpaa_bp = priv->dpaa_bps[i];
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+#ifndef __rtems__
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct sk_buff **skbh, *skb;
+ int nr_frags, i;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ qm_sg_entry_get_len(&sgt[0]), dma_dir);
+
+ /* remaining pages were mapped with skb_frag_dma_map() */
+ for (i = 1; i < nr_frags; i++) {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ qm_sg_entry_get_len(&sgt[i]), dma_dir);
+ }
+
+ /* Free the page frag that we allocated on Tx */
+ skb_free_frag(phys_to_virt(addr));
+ } else {
+ dma_unmap_single(dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ }
+
+ return skb;
+}
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_bp *dpaa_bp;
+ struct sk_buff *skb;
+ void *vaddr;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ goto free_buffer;
+
+ skb = build_skb(vaddr, dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ WARN_ONCE(1, "Build skb failure on Rx\n");
+ goto free_buffer;
+ }
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_fd_get_length(fd));
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+
+free_buffer:
+ skb_free_frag(vaddr);
+ return NULL;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct page *page, *head_page;
+ struct dpaa_bp *dpaa_bp;
+ void *vaddr, *sg_vaddr;
+ int frag_off, frag_len;
+ struct sk_buff *skb;
+ dma_addr_t sg_addr;
+ int page_offset;
+ unsigned int sz;
+ int *count_ptr;
+ int i;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+ SMP_CACHE_BYTES));
+
+ /* We may use multiple Rx pools */
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ goto free_buffers;
+
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (i == 0) {
+ sz = dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(sg_vaddr, sz);
+ if (WARN_ON(unlikely(!skb)))
+ goto free_buffers;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
+ */
+ frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
+ frag_len = qm_sg_entry_get_len(&sgt[i]);
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+ * bad page accounting and eventually with segfaults.
+ */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_off,
+ frag_len, dpaa_bp->size);
+ }
+ /* Update the pool count for the current {cpu x bpool} */
+ (*count_ptr)--;
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* free the SG table buffer */
+ skb_free_frag(vaddr);
+
+ return skb;
+
+free_buffers:
+ /* compensate sw bpool counter changes */
+ for (i--; i >= 0; i--) {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)++;
+ }
+ }
+ /* free all the SG entries */
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ skb_free_frag(sg_vaddr);
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ /* free the SGT fragment */
+ skb_free_frag(vaddr);
+
+ return NULL;
+}
+
+static int skb_to_contig_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *offset)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ enum dma_data_direction dma_dir;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ int err;
+
+ /* We are guaranteed to have at least tx_headroom bytes
+ * available, so just use that for offset.
+ */
+ fd->bpid = FSL_DPAA_BPID_INV;
+ buffer_start = skb->data - priv->tx_headroom;
+ dma_dir = DMA_TO_DEVICE;
+
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dev, skbh,
+ skb_tail_pointer(skb) - buffer_start, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+}
+
+static int skb_to_sg_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct qm_sg_entry *sgt;
+ struct sk_buff **skbh;
+ int i, j, err, sz;
+ void *buffer_start;
+ skb_frag_t *frag;
+ dma_addr_t addr;
+ size_t frag_len;
+ void *sgt_buf;
+
+ /* get a page frag to store the SGTable */
+ sz = SKB_DATA_ALIGN(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ sgt_buf = netdev_alloc_frag(sz);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
+ sz);
+ return -ENOMEM;
+ }
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
+ sgt[0].bpid = FSL_DPAA_BPID_INV;
+ sgt[0].offset = 0;
+ addr = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+ qm_sg_entry_set64(&sgt[0], addr);
+
+ /* populate the rest of SGT entries */
+ frag = &skb_shinfo(skb)->frags[0];
+ frag_len = frag->size;
+ for (i = 1; i <= nr_frags; i++, frag++) {
+ WARN_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dev, frag, 0,
+ frag_len, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ qm_sg_entry_set_len(&sgt[i], frag_len);
+ sgt[i].bpid = FSL_DPAA_BPID_INV;
+ sgt[i].offset = 0;
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[i], addr);
+ frag_len = frag->size;
+ }
+ qm_sg_entry_set_f(&sgt[i - 1], frag_len);
+
+ qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - priv->tx_headroom;
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = FSL_DPAA_BPID_INV;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
+sg0_map_failed:
+csum_failed:
+ skb_free_frag(sgt_buf);
+
+ return err;
+}
+
+static inline int dpaa_xmit(struct dpaa_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ int queue,
+ struct qm_fd *fd)
+{
+ struct qman_fq *egress_fq;
+ int err, i;
+
+ egress_fq = priv->egress_fqs[queue];
+ if (fd->bpid == FSL_DPAA_BPID_INV)
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
+
+ /* Trace this Tx fd */
+ trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
+ err = qman_enqueue(egress_fq, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += qm_fd_get_length(fd);
+
+ return 0;
+}
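The enqueue loop in dpaa_xmit() above is a bounded busy-wait: it retries while qman_enqueue() keeps returning -EBUSY and gives up after DPAA_ENQUEUE_RETRIES attempts. A self-contained sketch of the same pattern with a mocked enqueue function (editor's illustration, not part of the patch; mock_enqueue() is invented and only pretends the ring is briefly full):

    #include <errno.h>
    #include <stdio.h>

    #define DPAA_ENQUEUE_RETRIES 100000

    static int mock_enqueue(void)
    {
        static int busy_left = 3;   /* pretend the ring is full three times */

        return busy_left-- > 0 ? -EBUSY : 0;
    }

    int main(void)
    {
        int err, i;

        for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
            err = mock_enqueue();
            if (err != -EBUSY)      /* success or a hard error: stop retrying */
                break;
        }
        printf("enqueue %s after %d attempt(s)\n",
               err ? "failed" : "succeeded", i + 1);
        return 0;
    }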
+
+static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ const int queue_mapping = skb_get_queue_mapping(skb);
+ bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
+ int offset = 0;
+ int err = 0;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+ qm_fd_clear_fd(&fd);
+
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb.
+ *
+ * We've made sure the skb is not shared in dev->priv_flags;
+ * we still need to verify that the skb head is not cloned.
+ */
+ if (skb_cow_head(skb, priv->tx_headroom))
+ goto enomem;
+
+ WARN_ON(skb_is_nonlinear(skb));
+ }
+
+ /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
+ * make sure we don't feed FMan with more fragments than it supports.
+ */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* If the egress skb contains more fragments than we support,
+ * we have no choice but to linearize it ourselves.
+ */
+ if (unlikely(nonlinear) && __skb_linearize(skb))
+ goto enomem;
+
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+ dpaa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+#endif /* __rtems__ */
+static void dpaa_rx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
#ifndef __rtems__
+ if (net_ratelimit())
+ netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
+
percpu_priv->stats.rx_errors++;
#endif /* __rtems__ */
- if (fd->status & FM_FD_ERR_DMA)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
percpu_priv->rx_errors.dme++;
- if (fd->status & FM_FD_ERR_PHYSICAL)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
percpu_priv->rx_errors.fpe++;
- if (fd->status & FM_FD_ERR_SIZE)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
percpu_priv->rx_errors.fse++;
- if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
percpu_priv->rx_errors.phe++;
- dpa_fd_release(net_dev, fd);
+ dpaa_fd_release(net_dev, fd);
}
-static void _dpa_tx_error(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
+static void dpaa_tx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{
@@ -147,7 +2102,7 @@ static void _dpa_tx_error(struct net_device *net_dev,
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
#else /* __rtems__ */
@@ -156,64 +2111,57 @@ static void _dpa_tx_error(struct net_device *net_dev,
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif /* __rtems__ */
- /* If we intended the buffers from this frame to go into the bpools
- * when the FMan transmit was done, we need to put it in manually.
- */
- if (fd->bpid != 0xff) {
- dpa_fd_release(net_dev, fd);
- return;
- }
-
#ifndef __rtems__
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
dev_kfree_skb(skb);
#else /* __rtems__ */
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
#ifndef __rtems__
static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
- struct dpa_napi_portal *np =
- container_of(napi, struct dpa_napi_portal, napi);
+ struct dpaa_napi_portal *np =
+ container_of(napi, struct dpaa_napi_portal, napi);
int cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
- int tmp;
+ napi_complete_done(napi, cleaned);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- napi_complete(napi);
- tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- DPA_ERR_ON(tmp);
+ } else if (np->down) {
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
return cleaned;
}
#endif /* __rtems__ */
-static void _dpa_tx_conf(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
+static void dpaa_tx_conf(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{
#ifndef __rtems__
struct sk_buff *skb;
- if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
}
percpu_priv->tx_confirm++;
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
- dev_kfree_skb(skb);
+ consume_skb(skb);
#else /* __rtems__ */
struct ifnet *ifp = net_dev->ifp;
@@ -221,159 +2169,366 @@ static void _dpa_tx_conf(struct net_device *net_dev,
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
-static enum qman_cb_dqrr_result
-priv_rx_error_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
+ struct qman_portal *portal)
{
+#ifndef __rtems__
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+ percpu_priv->np.p = portal;
+ napi_schedule(&percpu_priv->np.napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+#endif /* __rtems__ */
+ return 0;
+}
+
+static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- int *count_ptr;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = dpaa_fq->net_dev;
priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
+ if (dpaa_eth_refill_bpools(priv))
/* Unable to refill the buffer pool due to insufficient
* system memory. Just release the frame back into the pool,
* otherwise we'll soon end up with an empty buffer pool.
*/
- dpa_fd_release(net_dev, &dq->fd);
+ dpaa_fd_release(net_dev, &dq->fd);
else
- _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_rx_default_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+#ifdef __rtems__
+static struct mbuf *
+dpaa_bp_addr_to_mbuf(dma_addr_t addr)
+{
+ void *vaddr = phys_to_virt(addr);
+
+ return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET));
+}
+
+static struct mbuf *
+contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
+{
+ struct mbuf *m;
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+
+ m = dpaa_bp_addr_to_mbuf(addr);
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd);
+ m->m_data = mtod(m, char *) + fd_off;
+
+ return (m);
+}
+
+static void
+dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr)
+{
+ struct bm_buffer bmb;
+
+ bm_buffer_set64(&bmb, addr);
+
+ while (bman_release(dpaa_bp->pool, &bmb, 1))
+ cpu_relax();
+
+ ++(*count_ptr);
+}
+
+static struct mbuf *
+sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd,
+ struct ifnet *ifp, int *count_ptr)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ int i;
+ int len;
+ struct mbuf *m;
+ struct mbuf *last;
+
+ sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
+ len = 0;
+
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) {
+ dma_addr_t sg_addr;
+ int sg_len;
+ struct mbuf *n;
+
+ BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i]));
+ BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ n = dpaa_bp_addr_to_mbuf(sg_addr);
+
+ sg_len = qm_sg_entry_get_len(&sgt[i]);
+ len += sg_len;
+
+ if (i == 0) {
+ m = n;
+ } else {
+ last->m_next = n;
+ }
+
+ n->m_len = sg_len;
+ n->m_data = mtod(n, char *) + sgt[i].offset;
+ last = n;
+
+ --(*count_ptr);
+
+ if (qm_sg_entry_is_final(&sgt[i])) {
+ break;
+ }
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = len;
+
+ dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr);
+
+ return (m);
+}
+
+static void
+dpaa_rx(struct net_device *net_dev, struct qman_portal *portal,
+ const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd, u32 fqid, int *count_ptr)
{
+ struct dpaa_bp *dpaa_bp;
+ enum qm_fd_format fd_format;
+ struct mbuf *m;
+ struct ifnet *ifp;
+
+ ifp = net_dev->ifp;
+
+ if (unlikely((fd->status & FM_FD_STAT_RX_ERRORS) != 0)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ dpaa_fd_release(net_dev, fd);
+ return;
+ }
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ fd_format = qm_fd_get_format(fd);
+
+ if (likely(fd_format == qm_fd_contig)) {
+ m = contig_fd_to_mbuf(fd, ifp);
+ } else {
+ BSD_ASSERT(fd_format == qm_fd_sg);
+ m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr);
+ }
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ (*count_ptr)--;
+
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+ (*ifp->if_input)(ifp, m);
+}
+#endif /* __rtems__ */
+static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+#ifndef __rtems__
+ struct rtnl_link_stats64 *percpu_stats;
+#endif /* __rtems__ */
+ struct dpaa_percpu_priv *percpu_priv;
+#ifndef __rtems__
+ const struct qm_fd *fd = &dq->fd;
+ dma_addr_t addr = qm_fd_addr(fd);
+ enum qm_fd_format fd_format;
+#endif /* __rtems__ */
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+#ifndef __rtems__
+ u32 fd_status = fd->status;
+#endif /* __rtems__ */
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+#ifndef __rtems__
+ unsigned int skb_len;
+ struct sk_buff *skb;
+#endif /* __rtems__ */
int *count_ptr;
- struct dpa_bp *dpa_bp;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+#ifndef __rtems__
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+#endif /* __rtems__ */
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- dpa_bp = priv->dpa_bp;
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
#ifndef __rtems__
/* Trace the Rx fd */
- trace_dpa_rx_fd(net_dev, fq, &dq->fd);
+ trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */
- /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+#ifndef __rtems__
+ percpu_stats = &percpu_priv->stats;
+#endif /* __rtems__ */
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
return qman_cb_dqrr_stop;
- /* Vale of plenty: make sure we didn't run out of buffers */
-
- if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
+ /* Make sure we didn't run out of buffers */
+ if (unlikely(dpaa_eth_refill_bpools(priv))) {
#ifdef __rtems__
- {
struct ifnet *ifp = net_dev->ifp;
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
#endif /* __rtems__ */
- /* Unable to refill the buffer pool due to insufficient
- * system memory. Just release the frame back into the pool,
- * otherwise we'll soon end up with an empty buffer pool.
- */
- dpa_fd_release(net_dev, &dq->fd);
-#ifdef __rtems__
+ dpaa_fd_release(net_dev, &dq->fd);
+ return qman_cb_dqrr_consume;
}
-#endif /* __rtems__ */
+
+#ifndef __rtems__
+ if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ dpaa_fd_release(net_dev, fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
+
+ fd_format = qm_fd_get_format(fd);
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+
+ if (likely(fd_format == qm_fd_contig))
+ skb = contig_fd_to_skb(priv, fd);
- dpa_fd_release(net_dev, &dq->fd);
else
- _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
- count_ptr);
+ skb = sg_fd_to_skb(priv, fd);
+ if (!skb)
+ return qman_cb_dqrr_consume;
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ skb_len = skb->len;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ return qman_cb_dqrr_consume;
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+#else /* __rtems__ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
+ count_ptr);
+#endif /* __rtems__ */
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_tx_conf_error_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
{
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_tx_conf_default_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
{
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
#ifndef __rtems__
/* Trace the fd */
- trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
+ trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static void priv_ern(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_mr_entry *msg)
+static void egress_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const union qm_mr_entry *msg)
{
+ const struct qm_fd *fd = &msg->ern.fd;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
struct net_device *net_dev;
- const struct dpa_priv_s *priv;
#ifndef __rtems__
struct sk_buff *skb;
#else /* __rtems__ */
struct ifnet *ifp;
#endif /* __rtems__ */
- struct dpa_percpu_priv_s *percpu_priv;
- const struct qm_fd *fd = &msg->ern.fd;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
#ifndef __rtems__
percpu_priv->stats.tx_dropped++;
@@ -384,97 +2539,115 @@ static void priv_ern(struct qman_portal *portal,
#endif /* __rtems__ */
count_ern(percpu_priv, msg);
- /* If we intended this buffer to go into the pool
- * when the FM was done, we need to put it in
- * manually.
- */
- if (msg->ern.fd.bpid != 0xff) {
- dpa_fd_release(net_dev, fd);
- return;
- }
-
#ifndef __rtems__
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
dev_kfree_skb_any(skb);
#else /* __rtems__ */
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
-static const struct dpa_fq_cbs_t private_fq_cbs = {
- .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
- .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
- .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
- .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
- .egress_ern = { .cb = { .ern = priv_ern } }
+static const struct dpaa_fq_cbs dpaa_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
+ .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = egress_ern } }
};
-static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
#ifndef __rtems__
- struct dpa_percpu_priv_s *percpu_priv;
- int i, j;
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
for_each_possible_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < qman_portal_max; j++)
- napi_enable(&percpu_priv->np[j].napi);
+ percpu_priv->np.down = 0;
+ napi_enable(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
-static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
#ifndef __rtems__
- struct dpa_percpu_priv_s *percpu_priv;
- int i, j;
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
for_each_possible_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < qman_portal_max; j++)
- napi_disable(&percpu_priv->np[j].napi);
+ percpu_priv->np.down = 1;
+ napi_disable(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
#ifndef __rtems__
-static int dpa_eth_priv_start(struct net_device *net_dev)
+static int dpaa_open(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_start(struct net_device *net_dev)
#endif /* __rtems__ */
{
- int err;
- struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int err, i;
priv = netdev_priv(net_dev);
-
+ mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- err = dpa_start(net_dev);
- if (err < 0)
- dpaa_eth_napi_disable(priv);
+#ifndef __rtems__
+ net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
+ if (!net_dev->phydev) {
+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
+ err = -ENODEV;
+ goto phy_init_failed;
+ }
+#endif /* __rtems__ */
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ err = fman_port_enable(mac_dev->port[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+#ifndef __rtems__
+ netif_tx_start_all_queues(net_dev);
+#endif /* __rtems__ */
+
+ return 0;
+
+mac_start_failed:
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+ fman_port_disable(mac_dev->port[i]);
+
+#ifndef __rtems__
+phy_init_failed:
+#endif /* __rtems__ */
+ dpaa_eth_napi_disable(priv);
return err;
}
#ifndef __rtems__
-static int dpa_eth_priv_stop(struct net_device *net_dev)
+static int dpaa_eth_stop(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_stop(struct net_device *net_dev)
#endif /* __rtems__ */
{
+ struct dpaa_priv *priv;
int err;
- struct dpa_priv_s *priv;
- err = dpa_stop(net_dev);
- /* Allow NAPI to consume any frame still in the Rx/TxConfirm
- * ingress queues. This is to avoid a race between the current
- * context and ksoftirqd which could leave NAPI disabled while
- * in fact there's still Rx traffic to be processed.
- */
- usleep_range(5000, 10000);
+ err = dpaa_stop(net_dev);
priv = netdev_priv(net_dev);
dpaa_eth_napi_disable(priv);
@@ -483,131 +2656,89 @@ int dpa_eth_priv_stop(struct net_device *net_dev)
}
#ifndef __rtems__
-static const struct net_device_ops dpa_private_ops = {
- .ndo_open = dpa_eth_priv_start,
- .ndo_start_xmit = dpa_tx,
- .ndo_stop = dpa_eth_priv_stop,
- .ndo_tx_timeout = dpa_timeout,
- .ndo_get_stats64 = dpa_get_stats64,
- .ndo_set_mac_address = dpa_set_mac_address,
+static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ if (!net_dev->phydev)
+ return -EINVAL;
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+}
+
+static const struct net_device_ops dpaa_ops = {
+ .ndo_open = dpaa_open,
+ .ndo_start_xmit = dpaa_start_xmit,
+ .ndo_stop = dpaa_eth_stop,
+ .ndo_tx_timeout = dpaa_tx_timeout,
+ .ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- .ndo_select_queue = dpa_select_queue,
-#endif
- .ndo_change_mtu = dpa_change_mtu,
- .ndo_set_rx_mode = dpa_set_rx_mode,
- .ndo_init = dpa_ndo_init,
- .ndo_set_features = dpa_set_features,
- .ndo_fix_features = dpa_fix_features,
+ .ndo_set_rx_mode = dpaa_set_rx_mode,
+ .ndo_do_ioctl = dpaa_ioctl,
};
-#endif /* __rtems__ */
-static int dpa_private_napi_add(struct net_device *net_dev)
+static int dpaa_napi_add(struct net_device *net_dev)
{
-#ifndef __rtems__
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
- int i, cpu;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
- qman_portal_max * sizeof(struct dpa_napi_portal),
- GFP_KERNEL);
-
- if (!percpu_priv->np)
- return -ENOMEM;
-
- for (i = 0; i < qman_portal_max; i++)
- netif_napi_add(net_dev, &percpu_priv->np[i].napi,
- dpaa_eth_poll, DPA_NAPI_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi,
+ dpaa_eth_poll, NAPI_POLL_WEIGHT);
}
-#endif /* __rtems__ */
return 0;
}
+#endif /* __rtems__ */
-void dpa_private_napi_del(struct net_device *net_dev)
+static void dpaa_napi_del(struct net_device *net_dev)
{
#ifndef __rtems__
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
- int i, cpu;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- if (percpu_priv->np) {
- for (i = 0; i < qman_portal_max; i++)
- netif_napi_del(&percpu_priv->np[i].napi);
-
- devm_kfree(net_dev->dev.parent, percpu_priv->np);
- }
+ netif_napi_del(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
-static int dpa_private_netdev_init(struct net_device *net_dev)
+static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
+ struct bm_buffer *bmb)
{
- int i;
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
-#ifndef __rtems__
- const u8 *mac_addr;
-#endif /* __rtems__ */
+ dma_addr_t addr = bm_buf_addr(bmb);
- /* Although we access another CPU's private data here
- * we do it at initialization so it is safe
- */
#ifndef __rtems__
- for_each_possible_cpu(i) {
-#else /* __rtems__ */
- for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
-#endif /* __rtems__ */
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->net_dev = net_dev;
- }
-
-#ifndef __rtems__
- net_dev->netdev_ops = &dpa_private_ops;
- mac_addr = priv->mac_dev->addr;
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
-
- net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX);
-
- /* Advertise S/G and HIGHDMA support for private interfaces */
- net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
- /* Recent kernels enable GSO automatically, if
- * we declare NETIF_F_SG. For conformity, we'll
- * still declare GSO explicitly.
- */
- net_dev->features |= NETIF_F_GSO;
-
- return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
+ skb_free_frag(phys_to_virt(addr));
#else /* __rtems__ */
- return 0;
+ BSD_ASSERT(0);
+ m_freem(dpaa_bp_addr_to_mbuf(addr));
#endif /* __rtems__ */
}
-static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
+/* Alloc the dpaa_bp struct and configure default values */
+static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
- struct dpa_bp *dpa_bp;
+ struct dpaa_bp *dpaa_bp;
- dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
- if (!dpa_bp)
+ dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
+ if (!dpaa_bp)
return ERR_PTR(-ENOMEM);
- dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
- dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+ dpaa_bp->bpid = FSL_DPAA_BPID_INV;
+ dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
+ dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
- dpa_bp->seed_cb = dpa_bp_priv_seed;
- dpa_bp->free_buf_cb = _dpa_bp_free_pf;
+ dpaa_bp->seed_cb = dpaa_bp_seed;
+ dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
- return dpa_bp;
+ return dpaa_bp;
}
/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
@@ -615,7 +2746,7 @@ static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
* this CGR to generate enqueue rejections to FMan in order to drop the frames
* before they reach our ingress queues and eat up memory.
*/
-static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
+static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
struct qm_mcc_initcgr initcgr;
u32 cs_th;
@@ -623,112 +2754,95 @@ static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
if (err < 0) {
- pr_err("Error %d allocating CGR ID\n", err);
+ if (netif_msg_drv(priv))
+ pr_err("Error %d allocating CGR ID\n", err);
goto out_error;
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
- initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
- /* This is actually a hack, because this CGR will be associated with
- * our affine SWP. However, we'll place our ingress FQs in it.
+ /* This CGR will be associated with the SWP affined to the current CPU.
+ * However, we'll place all our ingress FQs in it.
*/
err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
&initcgr);
if (err < 0) {
- pr_err("Error %d creating ingress CGR with ID %d\n", err,
- priv->ingress_cgr.cgrid);
+ if (netif_msg_drv(priv))
+ pr_err("Error %d creating ingress CGR with ID %d\n",
+ err, priv->ingress_cgr.cgrid);
qman_release_cgrid(priv->ingress_cgr.cgrid);
goto out_error;
}
- pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
- priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+ if (netif_msg_drv(priv))
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
- /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
- * range), but we have no common initialization path between the
- * different variants of the DPAA Eth driver, so we do it here rather
- * than modifying every other variant than "private Eth".
- */
priv->use_ingress_cgr = true;
out_error:
return err;
}
-static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- size_t count)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- int i;
-
- netif_dbg(priv, probe, net_dev,
- "Using private BM buffer pools\n");
-
- priv->bp_count = count;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = dpa_bp_alloc(&dpa_bp[i]);
- if (err < 0) {
- dpa_bp_free(priv);
- priv->dpa_bp = NULL;
- return err;
- }
+#ifndef __rtems__
+static const struct of_device_id dpaa_match[];
+#endif /* __rtems__ */
- priv->dpa_bp = &dpa_bp[i];
- }
+static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+{
+ u16 headroom;
+
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+ * If either hash results or time stamp are selected, both will
+ * be copied to/from the frame headroom, as TS is located between PR and
+ * HR in the IC and IC copy size has a granularity of 16 bytes
+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
+ *
+ * Also make sure the headroom is a multiple of data_align bytes
+ */
+ headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
+ DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- dpa_priv_common_bpid = priv->dpa_bp->bpid;
- return 0;
+ return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
+ DPAA_FD_DATA_ALIGNMENT) :
+ headroom;
}
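A worked example of the headroom formula above, assuming 16 bytes of Tx private data, 32-byte parse results, an 8-byte timestamp, an 8-byte hash area and 16-byte FD data alignment (the 32-byte parse-result size is an assumption based on the usual FMan parse-results layout): the Tx headroom comes out to 64 bytes, already a multiple of 16. The standalone sketch below is an editor's illustration, not part of the patch.

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int headroom = 16 + 32 + 8 + 8;  /* priv + parse + TS + hash */

        printf("tx_headroom = %u\n", ALIGN(headroom, 16));  /* prints 64 */
        return 0;
    }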
#ifndef __rtems__
-static const struct of_device_id dpa_match[];
-
-static int
-dpaa_eth_priv_probe(struct platform_device *pdev)
+static int dpaa_eth_probe(struct platform_device *pdev)
#else /* __rtems__ */
int
dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
#endif /* __rtems__ */
{
- int err = 0, i, channel;
- struct device *dev;
- struct dpa_bp *dpa_bp;
- struct dpa_fq *dpa_fq, *tmp;
- size_t count = 1;
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev = NULL;
- struct dpa_priv_s *priv = NULL;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_fq *dpaa_fq, *tmp;
+ struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
- struct dpa_buffer_layout_s *buf_layout = NULL;
#ifndef __rtems__
struct mac_device *mac_dev;
- struct task_struct *kth;
#endif /* __rtems__ */
+ int err = 0, i, channel;
+ struct device *dev;
dev = &pdev->dev;
- /* Get the buffer pool assigned to this interface;
- * run only once the default pool probing code
- */
- dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
- dpa_priv_bp_probe(dev);
- if (IS_ERR(dpa_bp))
- return PTR_ERR(dpa_bp);
-
#ifndef __rtems__
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
goto alloc_etherdev_mq_failed;
@@ -738,12 +2852,6 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
#endif /* __rtems__ */
-#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME
- snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",
- dpa_mac_fman_index_get(pdev),
- dpa_mac_hw_index_get(pdev));
-#endif
-
/* Do this here, so we can be verbose early */
#ifndef __rtems__
SET_NETDEV_DEV(net_dev, dev);
@@ -754,77 +2862,90 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
priv->net_dev = net_dev;
#ifndef __rtems__
- priv->msg_enable = netif_msg_init(debug, -1);
+ priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
- mac_dev = dpa_mac_dev_get(pdev);
- if (IS_ERR(mac_dev) || !mac_dev) {
+ mac_dev = dpaa_mac_dev_get(pdev);
+ if (IS_ERR(mac_dev)) {
+ dev_err(dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
goto mac_probe_failed;
}
-#endif /* __rtems__ */
- /* We have physical ports, so we need to establish
- * the buffer layout.
+ /* If fsl_fm_max_frm is set to a value higher than the common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If, on the other hand, fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
*/
- buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- GFP_KERNEL);
- if (!buf_layout)
- goto alloc_failed;
+ net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
- dpa_set_buffers_layout(mac_dev, buf_layout);
+ netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+ net_dev->mtu);
+#endif /* __rtems__ */
- /* For private ports, need to compute the size of the default
- * buffer pool, based on FMan port buffer layout;also update
- * the maximum buffer size for private ports if necessary
- */
- dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+ priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
+ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
- INIT_LIST_HEAD(&priv->dpa_fq_list);
+#ifndef __rtems__
+ /* device used for DMA mapping */
+ arch_setup_dma_ops(dev, 0, 0, NULL, false);
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+ goto dev_mask_failed;
+ }
+#endif /* __rtems__ */
- memset(&port_fqs, 0, sizeof(port_fqs));
+ /* bp init */
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ int err;
- err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
- if (!err)
- err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
- &port_fqs, true, TX);
+ dpaa_bps[i] = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bps[i]))
+ return PTR_ERR(dpaa_bps[i]);
+ /* the raw size of the buffers used for reception */
+ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
+ dpaa_bps[i]->dev = dev;
- if (err < 0)
- goto fq_probe_failed;
+ err = dpaa_bp_alloc_pool(dpaa_bps[i]);
+ if (err < 0) {
+ dpaa_bps_free(priv);
+ priv->dpaa_bps[i] = NULL;
+ goto bp_create_failed;
+ }
+ priv->dpaa_bps[i] = dpaa_bps[i];
+ }
- /* bp init */
+ INIT_LIST_HEAD(&priv->dpaa_fq_list);
- err = dpa_priv_bp_create(net_dev, dpa_bp, count);
+ memset(&port_fqs, 0, sizeof(port_fqs));
- if (err < 0)
- goto bp_create_failed;
+ err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
+ if (err < 0) {
+ dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
+ goto fq_probe_failed;
+ }
priv->mac_dev = mac_dev;
- channel = dpa_get_channel();
-
+ channel = dpaa_get_channel();
if (channel < 0) {
+ dev_err(dev, "dpaa_get_channel() failed\n");
err = channel;
goto get_channel_failed;
}
priv->channel = (u16)channel;
-#ifndef __rtems__
- /* Start a thread that will walk the cpus with affine portals
+ /* Walk the CPUs with affine portals
* and add this pool channel to each one's dequeue mask.
*/
- kth = kthread_run(dpaa_eth_add_channel,
- (void *)(unsigned long)priv->channel,
- "dpaa_%p:%d", net_dev, priv->channel);
- if (!kth) {
- err = -ENOMEM;
- goto add_channel_failed;
- }
-#else /* __rtems__ */
- dpaa_eth_add_channel((void *)(unsigned long)priv->channel);
-#endif /* __rtems__ */
+ dpaa_eth_add_channel(priv->channel);
- dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]);
+ dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -836,29 +2957,28 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
dev_err(dev, "Error initializing CGR\n");
goto tx_cgr_init_failed;
}
- err = dpaa_eth_priv_ingress_cgr_init(priv);
+
+ err = dpaa_ingress_cgr_init(priv);
if (err < 0) {
dev_err(dev, "Error initializing ingress CGR\n");
goto rx_cgr_init_failed;
}
/* Add the FQs to the interface, and make them active */
- list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
- err = dpa_fq_init(dpa_fq, false);
+ list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
+ err = dpaa_fq_init(dpaa_fq, false);
if (err < 0)
goto fq_alloc_failed;
}
- priv->buf_layout = buf_layout;
- priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
- buf_layout, dev);
+ dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
-
if (!priv->percpu_priv) {
dev_err(dev, "devm_alloc_percpu() failed\n");
err = -ENOMEM;
@@ -873,31 +2993,32 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
+#ifndef __rtems__
/* Initialize NAPI */
- err = dpa_private_napi_add(net_dev);
-
+ err = dpaa_napi_add(net_dev);
if (err < 0)
goto napi_add_failed;
- err = dpa_private_netdev_init(net_dev);
-
+ err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
if (err < 0)
goto netdev_init_failed;
-#ifndef __rtems__
dpaa_eth_sysfs_init(&net_dev->dev);
- pr_info("Probed interface %s\n", net_dev->name);
+ netif_info(priv, probe, net_dev, "Probed interface %s\n",
+ net_dev->name);
#endif /* __rtems__ */
return 0;
+#ifndef __rtems__
netdev_init_failed:
napi_add_failed:
- dpa_private_napi_del(net_dev);
+#endif /* __rtems__ */
+ dpaa_napi_del(net_dev);
alloc_percpu_failed:
#ifndef __rtems__
- dpa_fq_free(dev, &priv->dpa_fq_list);
+ dpaa_fq_free(dev, &priv->dpaa_fq_list);
#endif /* __rtems__ */
fq_alloc_failed:
#ifndef __rtems__
@@ -910,79 +3031,111 @@ rx_cgr_init_failed:
qman_release_cgrid(priv->cgr_data.cgr.cgrid);
#endif /* __rtems__ */
tx_cgr_init_failed:
-#ifndef __rtems__
-add_channel_failed:
-#endif /* __rtems__ */
get_channel_failed:
- dpa_bp_free(priv);
+ dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
-alloc_failed:
#ifndef __rtems__
+dev_mask_failed:
mac_probe_failed:
#endif /* __rtems__ */
dev_set_drvdata(dev, NULL);
#ifndef __rtems__
free_netdev(net_dev);
alloc_etherdev_mq_failed:
- if (atomic_read(&dpa_bp->refs) == 0)
- devm_kfree(dev, dpa_bp);
+ for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
+ if (atomic_read(&dpaa_bps[i]->refs) == 0)
+ devm_kfree(dev, dpaa_bps[i]);
+ }
#else /* __rtems__ */
BSD_ASSERT(0);
#endif /* __rtems__ */
+ return err;
+}
+
+#ifndef __rtems__
+static int dpaa_remove(struct platform_device *pdev)
+{
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &pdev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpaa_napi_del(net_dev);
+
+ dpaa_bps_free(priv);
+
+ free_netdev(net_dev);
return err;
}
+#endif /* __rtems__ */
#ifndef __rtems__
-static struct platform_device_id dpa_devtype[] = {
+static struct platform_device_id dpaa_devtype[] = {
{
.name = "dpaa-ethernet",
.driver_data = 0,
}, {
}
};
-MODULE_DEVICE_TABLE(platform, dpa_devtype);
+MODULE_DEVICE_TABLE(platform, dpaa_devtype);
-static struct platform_driver dpa_driver = {
+static struct platform_driver dpaa_driver = {
.driver = {
.name = KBUILD_MODNAME,
},
- .id_table = dpa_devtype,
- .probe = dpaa_eth_priv_probe,
- .remove = dpa_remove
+ .id_table = dpaa_devtype,
+ .probe = dpaa_eth_probe,
+ .remove = dpaa_remove
};
-static int __init dpa_load(void)
+static int __init dpaa_load(void)
{
int err;
- pr_info(DPA_DESCRIPTION "\n");
+ pr_debug("FSL DPAA Ethernet driver\n");
- /* initialise dpaa_eth mirror values */
- dpa_rx_extra_headroom = fman_get_rx_extra_headroom();
- dpa_max_frm = fman_get_max_frm();
+ /* initialize dpaa_eth mirror values */
+ dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
+ dpaa_max_frm = fman_get_max_frm();
- err = platform_driver_register(&dpa_driver);
+ err = platform_driver_register(&dpaa_driver);
if (err < 0)
pr_err("Error, platform_driver_register() = %d\n", err);
return err;
}
-module_init(dpa_load);
+module_init(dpaa_load);
-static void __exit dpa_unload(void)
+static void __exit dpaa_unload(void)
{
- platform_driver_unregister(&dpa_driver);
+ platform_driver_unregister(&dpaa_driver);
- /* Only one channel is used and needs to be relased after all
+ /* Only one channel is used and needs to be released after all
* interfaces are removed
*/
- dpa_release_channel();
+ dpaa_release_channel();
}
-module_exit(dpa_unload);
+module_exit(dpaa_unload);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
-MODULE_DESCRIPTION(DPA_DESCRIPTION);
+MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
#endif /* __rtems__ */
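Both sg_fd_to_mbuf() and the Tx S/G path above walk a scatter/gather table of at most DPAA_SGT_MAX_ENTRIES entries and stop at the entry carrying the final bit. A simplified standalone sketch of that walk (editor's illustration, not part of the patch; struct sg_entry is a made-up stand-in for the real qm_sg_entry and the fragment lengths are arbitrary):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ENTRIES 16  /* matches DPAA_SGT_MAX_ENTRIES in the header */

    struct sg_entry {
        unsigned int len;
        bool final;
    };

    int main(void)
    {
        struct sg_entry sgt[MAX_ENTRIES] = {
            { .len = 64 }, { .len = 1448 }, { .len = 42, .final = true },
        };
        unsigned int total = 0;
        int i;

        for (i = 0; i < MAX_ENTRIES; i++) {
            total += sgt[i].len;
            if (sgt[i].final)   /* last fragment of the frame */
                break;
        }
        printf("frame length = %u bytes over %d fragments\n", total, i + 1);
        return 0;
    }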
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 11b11e65..8a2b1189 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,131 +28,36 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __DPA_H
-#define __DPA_H
+#ifndef __DPAA_H
+#define __DPAA_H
#include <linux/netdevice.h>
#include <soc/fsl/qman.h>
+#include <soc/fsl/bman.h>
#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"
-#ifndef __rtems__
-extern int dpa_rx_extra_headroom;
-extern int dpa_max_frm;
-
-#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-#define dpa_get_max_frm() dpa_max_frm
-#else /* __rtems__ */
-#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom
-#define dpa_get_max_frm fman_get_max_frm
-#endif /* __rtems__ */
-
-#define dpa_get_max_mtu() \
- (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
-
-/* Simple enum of FQ types - used for array indexing */
-enum port_type {RX, TX};
-
-struct dpa_buffer_layout_s {
- u16 priv_data_size;
- bool parse_results;
- bool time_stamp;
- bool hash_results;
- u16 data_align;
-};
-
-#define DPA_ERR_ON(cond)
-
-#define DPA_TX_PRIV_DATA_SIZE 16
-#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
-#define DPA_TIME_STAMP_SIZE 8
-#define DPA_HASH_RESULTS_SIZE 8
-#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
- dpa_get_rx_extra_headroom())
-
-#define FM_FD_STAT_RX_ERRORS \
- (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
- FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
- FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
- FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
- FM_FD_ERR_PRS_HDR_ERR)
+#define DPAA_ETH_TXQ_NUM NR_CPUS
-#define FM_FD_STAT_TX_ERRORS \
- (FM_FD_ERR_UNSUPPORTED_FORMAT | \
- FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
-
-/* The raw buffer size must be cacheline aligned.
- * Normally we use 2K buffers.
- */
-#define DPA_BP_RAW_SIZE 2048
-
-/* This is what FMan is ever allowed to use.
- * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers (can we?), so we reserve some more space
- * for start-of-buffer alignment.
- */
#ifndef __rtems__
-#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
- SMP_CACHE_BYTES)
+#define DPAA_BPS_NUM 3 /* number of bpools per interface */
#else /* __rtems__ */
-/*
- * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
- * frames overwrite this area if < 64 bytes.
- */
-#define DPA_OUT_OF_BAND_SIZE 64
-#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)
-#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET
+#define DPAA_BPS_NUM 1
#endif /* __rtems__ */
-/* We must ensure that skb_shinfo is always cacheline-aligned. */
-#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
-
-/* Largest value that the FQD's OAL field can hold.
- * This is DPAA-1.x specific.
- */
-#define FSL_QMAN_MAX_OAL 127
-
-/* Default alignment for start of data in an Rx FD */
-#define DPA_FD_DATA_ALIGNMENT 16
-
-/* Values for the L3R field of the FM Parse Results
- */
-/* L3 Type field: First IP Present IPv4 */
-#define FM_L3_PARSE_RESULT_IPV4 0x8000
-/* L3 Type field: First IP Present IPv6 */
-#define FM_L3_PARSE_RESULT_IPV6 0x4000
-
-/* Values for the L4R field of the FM Parse Results
- * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
- */
-/* L4 Type field: UDP */
-#define FM_L4_PARSE_RESULT_UDP 0x40
-/* L4 Type field: TCP */
-#define FM_L4_PARSE_RESULT_TCP 0x20
-
-/* number of Tx queues to FMan */
-#define DPAA_ETH_TX_QUEUES NR_CPUS
-
-#define DPAA_ETH_RX_QUEUES 128
-
-#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
-#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
/* More detailed FQ types - used for fine-grained WQ assignments */
-enum dpa_fq_type {
+enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
FQ_TYPE_RX_ERROR, /* Rx Error FQs */
- FQ_TYPE_RX_PCD, /* User-defined PCDs */
FQ_TYPE_TX, /* "Real" Tx FQs */
FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
};
-struct dpa_fq {
+struct dpaa_fq {
struct qman_fq fq_base;
struct list_head list;
struct net_device *net_dev;
@@ -161,10 +66,10 @@ struct dpa_fq {
u32 flags;
u16 channel;
u8 wq;
- enum dpa_fq_type fq_type;
+ enum dpaa_fq_type fq_type;
};
-struct dpa_fq_cbs_t {
+struct dpaa_fq_cbs {
struct qman_fq rx_defq;
struct qman_fq tx_defq;
struct qman_fq rx_errq;
@@ -172,45 +77,30 @@ struct dpa_fq_cbs_t {
struct qman_fq egress_ern;
};
-struct fqid_cell {
- u32 start;
- u32 count;
-};
-
-struct dpa_bp {
- struct bman_pool *pool;
- u8 bpid;
-#ifndef __rtems__
+struct dpaa_bp {
+ /* device used in the DMA mapping operations */
struct device *dev;
-#endif /* __rtems__ */
- /* the buffer pools used for the private ports are initialized
- * with config_count buffers for each CPU; at runtime the
- * number of buffers per CPU is constantly brought back to this
- * level
- */
- int config_count;
+ /* current number of buffers in the buffer pool allotted to each CPU */
+ int __percpu *percpu_count;
+ /* all buffers allocated for this pool have this raw size */
+ size_t raw_size;
+ /* all buffers in this pool have this same usable size */
size_t size;
- bool seed_pool;
- /* physical address of the contiguous memory used by the pool to store
- * the buffers
- */
- dma_addr_t paddr;
- /* virtual address of the contiguous memory used by the pool to store
- * the buffers
+ /* the buffer pools are initialized with config_count buffers for each
+ * CPU; at runtime the number of buffers per CPU is constantly brought
+ * back to this level
*/
- void __iomem *vaddr;
- /* current number of buffers in the bpool alloted to this CPU */
- int __percpu *percpu_count;
+ u16 config_count;
+ u8 bpid;
+ struct bman_pool *pool;
+ /* bpool can be seeded before use by this cb */
+ int (*seed_cb)(struct dpaa_bp *);
+ /* bpool can be emptied before freeing by this cb */
+ void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
atomic_t refs;
- /* some bpools need to be seeded before use by this cb */
- int (*seed_cb)(struct dpa_bp *);
- /* some bpools need to be emptied before freeing; this cb is used
- * for freeing of individual buffers taken from the pool
- */
- void (*free_buf_cb)(void *addr);
};
-struct dpa_rx_errors {
+struct dpaa_rx_errors {
u64 dme; /* DMA Error */
u64 fpe; /* Frame Physical Error */
u64 fse; /* Frame Size Error */
@@ -218,7 +108,7 @@ struct dpa_rx_errors {
};
/* Counters for QMan ERN frames - one counter per rejection code */
-struct dpa_ern_cnt {
+struct dpaa_ern_cnt {
u64 cg_tdrop; /* Congestion group taildrop */
u64 wred; /* WRED congestion */
u64 err_cond; /* Error condition */
@@ -229,16 +119,17 @@ struct dpa_ern_cnt {
u64 orp_zero; /* ORP disabled */
};
-struct dpa_napi_portal {
+struct dpaa_napi_portal {
#ifndef __rtems__
struct napi_struct napi;
#endif /* __rtems__ */
struct qman_portal *p;
+ bool down;
};
-struct dpa_percpu_priv_s {
+struct dpaa_percpu_priv {
struct net_device *net_dev;
- struct dpa_napi_portal *np;
+ struct dpaa_napi_portal np;
u64 in_interrupt;
u64 tx_confirm;
/* fragmented (non-linear) skbuffs received from the stack */
@@ -246,26 +137,28 @@ struct dpa_percpu_priv_s {
#ifndef __rtems__
struct rtnl_link_stats64 stats;
#endif /* __rtems__ */
- struct dpa_rx_errors rx_errors;
- struct dpa_ern_cnt ern_cnt;
+ struct dpaa_rx_errors rx_errors;
+ struct dpaa_ern_cnt ern_cnt;
};
-struct dpa_priv_s {
- struct dpa_percpu_priv_s __percpu *percpu_priv;
- struct dpa_bp *dpa_bp;
+struct dpaa_buffer_layout {
+ u16 priv_data_size;
+};
+
+struct dpaa_priv {
+ struct dpaa_percpu_priv __percpu *percpu_priv;
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
u16 tx_headroom;
struct net_device *net_dev;
struct mac_device *mac_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
- struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
-
- size_t bp_count;
+ struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
- u16 channel; /* "fsl,qman-channel-id" */
- struct list_head dpa_fq_list;
+ u16 channel;
+ struct list_head dpaa_fq_list;
#ifndef __rtems__
u32 msg_enable; /* net_device message level */
@@ -289,231 +182,28 @@ struct dpa_priv_s {
bool use_ingress_cgr;
struct qman_cgr ingress_cgr;
- struct dpa_buffer_layout_s *buf_layout;
+ struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
};
-struct fm_port_fqs {
- struct dpa_fq *tx_defq;
- struct dpa_fq *tx_errq;
- struct dpa_fq *rx_defq;
- struct dpa_fq *rx_errq;
-};
+/* from dpaa_ethtool.c */
+extern const struct ethtool_ops dpaa_ethtool_ops;
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
-int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
-void _dpa_rx(struct net_device *net_dev,
- struct qman_portal *portal,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid,
- int *count_ptr);
-#ifndef __rtems__
-int dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
-struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- const struct qm_fd *fd);
-
-/* Turn on HW checksum computation for this outgoing frame.
- * If the current protocol is not something we support in this regard
- * (or if the stack has already computed the SW checksum), we do nothing.
- *
- * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- * otherwise.
- *
- * Note that this function may modify the fd->cmd field and the skb data buffer
- * (the Parse Results area).
- */
-int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,
- struct qm_fd *fd, char *parse_results);
-#else /* __rtems__ */
-void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
-#endif /* __rtems__ */
-
-static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
- struct qman_portal *portal)
-{
-#ifndef __rtems__
- /* In case of threaded ISR for RT enable kernel,
- * in_irq() does not return appropriate value, so use
- * in_serving_softirq to distinguish softirq or irq context.
- */
- if (unlikely(in_irq() || !in_serving_softirq())) {
- /* Disable QMan IRQ and invoke NAPI */
- int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
-
- if (likely(!ret)) {
- const struct qman_portal_config *pc =
- qman_p_get_portal_config(portal);
- struct dpa_napi_portal *np =
- &percpu_priv->np[pc->channel];
-
- np->p = portal;
- napi_schedule(&np->napi);
- percpu_priv->in_interrupt++;
- return 1;
- }
- }
-#else /* __rtems__ */
- /* FIXME */
-#endif /* __rtems__ */
- return 0;
-}
-
-static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd)
-{
- return fd->length20;
-}
-
-static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd)
-{
- return fd->offset;
-}
-
-#ifndef __rtems__
-/* Verifies if the skb length is below the interface MTU */
-static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
-{
- if (unlikely(skb->len > mtu))
- if ((skb->protocol != htons(ETH_P_8021Q)) ||
- (skb->len > mtu + 4))
- return -1;
-
- return 0;
-}
-#endif /* __rtems__ */
-
-static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)
-{
- u16 headroom;
- /* The frame headroom must accommodate:
- * - the driver private data area
- * - parse results, hash results, timestamp if selected
- * If either hash results or time stamp are selected, both will
- * be copied to/from the frame headroom, as TS is located between PR and
- * HR in the IC and IC copy size has a granularity of 16bytes
- * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
- *
- * Also make sure the headroom is a multiple of data_align bytes
- */
- headroom = (u16)(bl->priv_data_size +
- (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
- (bl->hash_results || bl->time_stamp ?
- DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));
-
- return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
-}
-
-#ifndef __rtems__
+/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
-
-void dpa_private_napi_del(struct net_device *net_dev);
+#ifdef __rtems__
+#include <sys/mbuf.h>
+
+#define DPAA_ENQUEUE_RETRIES 100000
+#define DPAA_SGT_MAX_ENTRIES 16
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+#define FM_L4_PARSE_RESULT_UDP 0x40
+#define FM_L4_PARSE_RESULT_TCP 0x20
+#define FSL_DPAA_BPID_INV 0xff
+
+void dpaa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
#endif /* __rtems__ */
-
-static inline void clear_fd(struct qm_fd *fd)
-{
- fd->opaque_addr = 0;
- fd->opaque = 0;
- fd->cmd = 0;
-}
-
-static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
- struct qman_fq *tx_fq)
-{
- int i;
-
- for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
- if (priv->egress_fqs[i] == tx_fq)
- return i;
-
- return -EINVAL;
-}
-
-#ifndef __rtems__
-static inline int dpa_xmit(struct dpa_priv_s *priv,
- struct rtnl_link_stats64 *percpu_stats,
- int queue,
- struct qm_fd *fd)
-{
- int err, i;
- struct qman_fq *egress_fq;
-
- egress_fq = priv->egress_fqs[queue];
- if (fd->bpid == 0xff)
- fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
-
- /* Trace this Tx fd */
- trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
-
- for (i = 0; i < 100000; i++) {
- err = qman_enqueue(egress_fq, fd, 0);
- if (err != -EBUSY)
- break;
- }
-
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- percpu_stats->tx_fifo_errors++;
- return err;
- }
-
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpa_fd_length(fd);
-
- return 0;
-}
-#endif /* __rtems__ */
-
-/* Use multiple WQs for FQ assignment:
- * - Tx Confirmation queues go to WQ1.
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- * Rx and Tx traffic).
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- * to be scheduled, in case there are many more FQs in WQ3).
- * This ensures that Tx-confirmed buffers are released in a timely manner. In
- * particular, it avoids congestion on the Tx Confirm FQs, which could pile up
- * PFDRs if they were greatly outnumbered by the other FQs in the system, given
- * that dequeue scheduling is round-robin.
- */
-static inline void _dpa_assign_wq(struct dpa_fq *fq)
-{
- switch (fq->fq_type) {
- case FQ_TYPE_TX_CONFIRM:
- case FQ_TYPE_TX_CONF_MQ:
- fq->wq = 1;
- break;
- case FQ_TYPE_RX_DEFAULT:
- case FQ_TYPE_TX:
- fq->wq = 3;
- break;
- case FQ_TYPE_RX_ERROR:
- case FQ_TYPE_TX_ERROR:
- fq->wq = 2;
- break;
- default:
- WARN(1, "Invalid FQ type %d for FQID %d!\n",
- fq->fq_type, fq->fqid);
- }
-}
-
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-/* Use in lieu of skb_get_queue_mapping() */
-#define dpa_get_queue_mapping(skb) \
- raw_smp_processor_id()
-#else
-/* Use the queue selected by XPS */
-#define dpa_get_queue_mapping(skb) \
- skb_get_queue_mapping(skb)
-#endif
-
-static inline void _dpa_bp_free_pf(void *addr)
-{
-#ifndef __rtems__
- put_page(virt_to_head_page(addr));
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
-}
-
-#endif /* __DPA_H */
+#endif /* __DPAA_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
deleted file mode 100644
index 9a4a2184..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
+++ /dev/null
@@ -1,1491 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_net.h>
-#include <linux/etherdevice.h>
-#include <linux/kthread.h>
-#include <linux/percpu.h>
-#ifndef __rtems__
-#include <linux/highmem.h>
-#include <linux/sort.h>
-#endif /* __rtems__ */
-#include <soc/fsl/qman.h>
-#ifndef __rtems__
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/if_vlan.h>
-#endif /* __rtems__ */
-#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-#include "mac.h"
-
-/* Size in bytes of the FQ taildrop threshold */
-#define DPA_FQ_TD 0x200000
-
-#define DPAA_CS_THRESHOLD_1G 0x06000000
-/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
- * The size in bytes of the egress Congestion State notification threshold on
- * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
- * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
- * and the larger the frame size, the more acute the problem.
- * So we have to find a balance between these factors:
- * - avoiding the device staying congested for a prolonged time (risking
- *   that the netdev watchdog fires - see also the tx_timeout module param);
- * - affecting performance of protocols such as TCP, which otherwise
- * behave well under the congestion notification mechanism;
- * - preventing the Tx cores from tightly-looping (as if the congestion
- * threshold was too low to be effective);
- * - running out of memory if the CS threshold is set too high.
- */
-
-#define DPAA_CS_THRESHOLD_10G 0x10000000
-/* The size in bytes of the egress Congestion State notification threshold on
- * 10G ports, range 0x1000 .. 0x10000000
- */
-
-static struct dpa_bp *dpa_bp_array[64];
-
-#ifndef __rtems__
-int dpa_max_frm;
-
-int dpa_rx_extra_headroom;
-#endif /* __rtems__ */
-
-static const struct fqid_cell tx_confirm_fqids[] = {
- {0, DPAA_ETH_TX_QUEUES}
-};
-
-static const struct fqid_cell default_fqids[][3] = {
- [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
- [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
-};
-
-#ifndef __rtems__
-int dpa_netdev_init(struct net_device *net_dev,
- const u8 *mac_addr,
- u16 tx_timeout)
-{
- int err;
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct device *dev = net_dev->dev.parent;
-
- net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- /* we do not want shared skbs on TX */
- net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-
- net_dev->features |= net_dev->hw_features;
- net_dev->vlan_features = net_dev->features;
-
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-
- net_dev->ethtool_ops = &dpa_ethtool_ops;
-
- net_dev->needed_headroom = priv->tx_headroom;
- net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-
- /* start without the RUNNING flag, phylib controls it later */
- netif_carrier_off(net_dev);
-
- err = register_netdev(net_dev);
- if (err < 0) {
- dev_err(dev, "register_netdev() = %d\n", err);
- return err;
- }
-
- return 0;
-}
-#endif /* __rtems__ */
-
-int dpa_start(struct net_device *net_dev)
-{
- int err, i;
- struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
-#ifndef __rtems__
- err = mac_dev->init_phy(net_dev, priv->mac_dev);
- if (err < 0) {
- netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err);
- return err;
- }
-#endif /* __rtems__ */
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
- err = fman_port_enable(mac_dev->port[i]);
- if (err)
- goto mac_start_failed;
- }
-
- err = priv->mac_dev->start(mac_dev);
- if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
- goto mac_start_failed;
- }
-
-#ifndef __rtems__
- netif_tx_start_all_queues(net_dev);
-#endif /* __rtems__ */
-
- return 0;
-
-mac_start_failed:
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
- fman_port_disable(mac_dev->port[i]);
-
- return err;
-}
-
-int dpa_stop(struct net_device *net_dev)
-{
- int i, err, error;
- struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
-#ifndef __rtems__
- netif_tx_stop_all_queues(net_dev);
-#endif /* __rtems__ */
- /* Allow the Fman (Tx) port to process in-flight frames before we
- * try switching it off.
- */
- usleep_range(5000, 10000);
-
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
- error = fman_port_disable(mac_dev->port[i]);
- if (error)
- err = error;
- }
-
-#ifndef __rtems__
- if (mac_dev->phy_dev)
- phy_disconnect(mac_dev->phy_dev);
- mac_dev->phy_dev = NULL;
-#endif /* __rtems__ */
-
- return err;
-}
-
-#ifndef __rtems__
-void dpa_timeout(struct net_device *net_dev)
-{
- const struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
-
- priv = netdev_priv(net_dev);
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-
- netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
- jiffies_to_msecs(jiffies - net_dev->trans_start));
-
- percpu_priv->stats.tx_errors++;
-}
-
-/* Calculates the statistics for the given device by adding the statistics
- * collected by each CPU.
- */
-struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- u64 *cpustats;
- u64 *netstats = (u64 *)stats;
- int i, j;
- struct dpa_percpu_priv_s *percpu_priv;
- int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-
- for_each_possible_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-
- cpustats = (u64 *)&percpu_priv->stats;
-
- for (j = 0; j < numstats; j++)
- netstats[j] += cpustats[j];
- }
-
- return stats;
-}
-#endif /* __rtems__ */
-
-int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- const int max_mtu = dpa_get_max_mtu();
-
- /* Make sure we don't exceed the Ethernet controller's MAXFRM */
- if (new_mtu < 68 || new_mtu > max_mtu) {
- netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
- new_mtu, 68, max_mtu);
- return -EINVAL;
- }
-#ifndef __rtems__
- net_dev->mtu = new_mtu;
-#endif /* __rtems__ */
-
- return 0;
-}
-
-#ifndef __rtems__
-/* .ndo_init callback */
-int dpa_ndo_init(struct net_device *net_dev)
-{
-	/* If fsl_fm_max_frm is set to a higher value than the usual 1500,
- * we choose conservatively and let the user explicitly set a higher
- * MTU via ifconfig. Otherwise, the user may end up with different MTUs
- * in the same LAN.
- * If on the other hand fsl_fm_max_frm has been chosen below 1500,
- * start with the maximum allowed.
- */
- int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
-
- netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
- init_mtu);
- net_dev->mtu = init_mtu;
-
- return 0;
-}
-
-int dpa_set_features(struct net_device *dev, netdev_features_t features)
-{
- /* Not much to do here for now */
- dev->features = features;
- return 0;
-}
-
-netdev_features_t dpa_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t unsupported_features = 0;
-
- /* In theory we should never be requested to enable features that
- * we didn't set in netdev->features and netdev->hw_features at probe
- * time, but double check just to be on the safe side.
- * We don't support enabling Rx csum through ethtool yet
- */
- unsupported_features |= NETIF_F_RXCSUM;
-
- features &= ~unsupported_features;
-
- return features;
-}
-
-int dpa_remove(struct platform_device *pdev)
-{
- int err;
- struct device *dev;
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
-
- dev = &pdev->dev;
- net_dev = dev_get_drvdata(dev);
-
- priv = netdev_priv(net_dev);
-
- dpaa_eth_sysfs_remove(dev);
-
- dev_set_drvdata(dev, NULL);
- unregister_netdev(net_dev);
-
- err = dpa_fq_free(dev, &priv->dpa_fq_list);
-
- qman_delete_cgr_safe(&priv->ingress_cgr);
- qman_release_cgrid(priv->ingress_cgr.cgrid);
- qman_delete_cgr_safe(&priv->cgr_data.cgr);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-
- dpa_private_napi_del(net_dev);
-
- dpa_bp_free(priv);
-
- if (priv->buf_layout)
- devm_kfree(dev, priv->buf_layout);
-
- free_netdev(net_dev);
-
- return err;
-}
-
-struct mac_device *dpa_mac_dev_get(struct platform_device *pdev)
-{
- struct device *dpa_dev, *dev;
- struct device_node *mac_node;
- struct platform_device *of_dev;
- struct mac_device *mac_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
- if (!eth_data)
- return ERR_PTR(-ENODEV);
-
- mac_node = eth_data->mac_node;
-
- of_dev = of_find_device_by_node(mac_node);
- if (!of_dev) {
- dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
- mac_node->full_name);
- of_node_put(mac_node);
- return ERR_PTR(-EINVAL);
- }
- of_node_put(mac_node);
-
- dev = &of_dev->dev;
-
- mac_dev = dev_get_drvdata(dev);
- if (!mac_dev) {
- dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
- dev_name(dev));
- return ERR_PTR(-EINVAL);
- }
-
- return mac_dev;
-}
-
-int dpa_mac_hw_index_get(struct platform_device *pdev)
-{
- struct device *dpa_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
-
- return eth_data->mac_hw_id;
-}
-
-int dpa_mac_fman_index_get(struct platform_device *pdev)
-{
- struct device *dpa_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
-
- return eth_data->fman_hw_id;
-}
-
-int dpa_set_mac_address(struct net_device *net_dev, void *addr)
-{
- const struct dpa_priv_s *priv;
- int err;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
-
- err = eth_mac_addr(net_dev, addr);
- if (err < 0) {
- netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
- return err;
- }
-
- mac_dev = priv->mac_dev;
-
- err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
- if (err < 0) {
- netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
- err);
- return err;
- }
-
- return 0;
-}
-
-void dpa_set_rx_mode(struct net_device *net_dev)
-{
- int err;
- const struct dpa_priv_s *priv;
-
- priv = netdev_priv(net_dev);
-
- if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
- priv->mac_dev->promisc = !priv->mac_dev->promisc;
- err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
- priv->mac_dev->promisc);
- if (err < 0)
- netif_err(priv, drv, net_dev,
- "mac_dev->set_promisc() = %d\n",
- err);
- }
-
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
- if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
- err);
-}
-#endif /* __rtems__ */
-
-void dpa_set_buffers_layout(struct mac_device *mac_dev,
- struct dpa_buffer_layout_s *layout)
-{
- /* Rx */
- layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;
- layout[RX].parse_results = true;
- layout[RX].hash_results = true;
- layout[RX].data_align = DPA_FD_DATA_ALIGNMENT;
-
- /* Tx */
- layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- layout[TX].parse_results = true;
- layout[TX].hash_results = true;
- layout[TX].data_align = DPA_FD_DATA_ALIGNMENT;
-}
-
-int dpa_bp_alloc(struct dpa_bp *dpa_bp)
-{
- int err;
- struct bman_pool_params bp_params;
-#ifndef __rtems__
- struct platform_device *pdev;
-#endif /* __rtems__ */
-
- if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
-		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
- return -EINVAL;
- }
-
- memset(&bp_params, 0, sizeof(struct bman_pool_params));
-
- /* If the pool is already specified, we only create one per bpid */
- if (dpa_bpid2pool_use(dpa_bp->bpid))
- return 0;
-
- if (dpa_bp->bpid == 0)
- bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
- else
- bp_params.bpid = dpa_bp->bpid;
-
- dpa_bp->pool = bman_new_pool(&bp_params);
- if (!dpa_bp->pool) {
- pr_err("bman_new_pool() failed\n");
- return -ENODEV;
- }
-
- dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid;
-
-#ifndef __rtems__
- pdev = platform_device_register_simple("DPAA_bpool",
- dpa_bp->bpid, NULL, 0);
- if (IS_ERR(pdev)) {
- err = PTR_ERR(pdev);
- goto pdev_register_failed;
- }
-
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
- if (err)
- goto pdev_mask_failed;
-
- dpa_bp->dev = &pdev->dev;
-#endif /* __rtems__ */
-
- if (dpa_bp->seed_cb) {
- err = dpa_bp->seed_cb(dpa_bp);
- if (err)
- goto pool_seed_failed;
- }
-
- dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
-
- return 0;
-
-pool_seed_failed:
-#ifndef __rtems__
-pdev_mask_failed:
- platform_device_unregister(pdev);
-pdev_register_failed:
-#endif /* __rtems__ */
- bman_free_pool(dpa_bp->pool);
-
- return err;
-}
-
-void dpa_bp_drain(struct dpa_bp *bp)
-{
- int ret;
- u8 num = 8;
-
- do {
- struct bm_buffer bmb[8];
- int i;
-
- ret = bman_acquire(bp->pool, bmb, num, 0);
- if (ret < 0) {
- if (num == 8) {
-				/* we have fewer than 8 buffers left;
- * drain them one by one
- */
- num = 1;
- ret = 1;
- continue;
- } else {
- /* Pool is fully drained */
- break;
- }
- }
-
- for (i = 0; i < num; i++) {
- dma_addr_t addr = bm_buf_addr(&bmb[i]);
-
-#ifndef __rtems__
- dma_unmap_single(bp->dev, addr, bp->size,
- DMA_BIDIRECTIONAL);
-#endif /* __rtems__ */
-
- bp->free_buf_cb(phys_to_virt(addr));
- }
- } while (ret > 0);
-}
-
-static void _dpa_bp_free(struct dpa_bp *dpa_bp)
-{
- struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
-
- /* the mapping between bpid and dpa_bp is done very late in the
- * allocation procedure; if something failed before the mapping, the bp
-	 * was not configured, so there is nothing to undo below
- */
- if (!bp)
- return;
-
- if (!atomic_dec_and_test(&bp->refs))
- return;
-
- if (bp->free_buf_cb)
- dpa_bp_drain(bp);
-
- dpa_bp_array[bp->bpid] = NULL;
- bman_free_pool(bp->pool);
-
-#ifndef __rtems__
- if (bp->dev)
- platform_device_unregister(to_platform_device(bp->dev));
-#endif /* __rtems__ */
-}
-
-void dpa_bp_free(struct dpa_priv_s *priv)
-{
- int i;
-
- for (i = 0; i < priv->bp_count; i++)
- _dpa_bp_free(&priv->dpa_bp[i]);
-}
-
-struct dpa_bp *dpa_bpid2pool(int bpid)
-{
- return dpa_bp_array[bpid];
-}
-
-void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
-{
- dpa_bp_array[bpid] = dpa_bp;
- atomic_set(&dpa_bp->refs, 1);
-}
-
-bool dpa_bpid2pool_use(int bpid)
-{
- if (dpa_bpid2pool(bpid)) {
- atomic_inc(&dpa_bp_array[bpid]->refs);
- return true;
- }
-
- return false;
-}
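
A minimal sketch of the refcounting contract above, assuming two interfaces are configured with the same bpid and both dpa_bp structs already have size/config_count filled in; example_shared_bpool() is hypothetical and not part of the original driver:

static int example_shared_bpool(struct dpa_bp *first, struct dpa_bp *second)
{
	int err;

	err = dpa_bp_alloc(first);	/* creates the bman pool, refs = 1 */
	if (err)
		return err;

	second->bpid = first->bpid;
	err = dpa_bp_alloc(second);	/* same bpid: refs = 2, no new pool */
	if (err)
		_dpa_bp_free(first);	/* refs back to 0, pool destroyed */

	return err;
}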
-
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- return dpa_get_queue_mapping(skb);
-}
-#endif
-
-struct dpa_fq *dpa_fq_alloc(struct device *dev,
- const struct fqid_cell *fqids,
- struct list_head *list,
- enum dpa_fq_type fq_type)
-{
- int i;
- struct dpa_fq *dpa_fq;
-
- dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
- if (!dpa_fq)
- return NULL;
-
- for (i = 0; i < fqids->count; i++) {
- dpa_fq[i].fq_type = fq_type;
- dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
- list_add_tail(&dpa_fq[i].list, list);
- }
-
- for (i = 0; i < fqids->count; i++)
- _dpa_assign_wq(dpa_fq + i);
-
- return dpa_fq;
-}
-
-int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- struct fm_port_fqs *port_fqs,
- bool alloc_tx_conf_fqs,
- enum port_type ptype)
-{
- const struct fqid_cell *fqids;
- struct dpa_fq *dpa_fq;
- int num_ranges;
- int i;
-
- if (ptype == TX && alloc_tx_conf_fqs) {
- if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
- FQ_TYPE_TX_CONF_MQ))
- goto fq_alloc_failed;
- }
-
- fqids = default_fqids[ptype];
- num_ranges = 3;
-
- for (i = 0; i < num_ranges; i++) {
- switch (i) {
- case 0:
- /* The first queue is the error queue */
- if (fqids[i].count != 1)
- goto invalid_error_queue;
-
- dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
- ptype == RX ?
- FQ_TYPE_RX_ERROR :
- FQ_TYPE_TX_ERROR);
- if (!dpa_fq)
- goto fq_alloc_failed;
-
- if (ptype == RX)
- port_fqs->rx_errq = &dpa_fq[0];
- else
- port_fqs->tx_errq = &dpa_fq[0];
- break;
- case 1:
- /* the second queue is the default queue */
- if (fqids[i].count != 1)
- goto invalid_default_queue;
-
- dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
- ptype == RX ?
- FQ_TYPE_RX_DEFAULT :
- FQ_TYPE_TX_CONFIRM);
- if (!dpa_fq)
- goto fq_alloc_failed;
-
- if (ptype == RX)
- port_fqs->rx_defq = &dpa_fq[0];
- else
- port_fqs->tx_defq = &dpa_fq[0];
- break;
- default:
- /* all subsequent queues are Tx */
- if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX))
- goto fq_alloc_failed;
- break;
- }
- }
-
- return 0;
-
-fq_alloc_failed:
- dev_err(dev, "dpa_fq_alloc() failed\n");
- return -ENOMEM;
-
-invalid_default_queue:
-invalid_error_queue:
- dev_err(dev, "Too many default or error queues\n");
- return -EINVAL;
-}
-
-static u32 rx_pool_channel;
-static DEFINE_SPINLOCK(rx_pool_channel_init);
-
-int dpa_get_channel(void)
-{
- spin_lock(&rx_pool_channel_init);
- if (!rx_pool_channel) {
- u32 pool;
- int ret = qman_alloc_pool(&pool);
-
- if (!ret)
- rx_pool_channel = pool;
- }
- spin_unlock(&rx_pool_channel_init);
- if (!rx_pool_channel)
- return -ENOMEM;
- return rx_pool_channel;
-}
-
-void dpa_release_channel(void)
-{
- qman_release_pool(rx_pool_channel);
-}
-
-int dpaa_eth_add_channel(void *__arg)
-{
-#ifndef __rtems__
- const cpumask_t *cpus = qman_affine_cpus();
-#endif /* __rtems__ */
- u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
- int cpu;
- struct qman_portal *portal;
-
-#ifndef __rtems__
- for_each_cpu(cpu, cpus) {
-#else /* __rtems__ */
- for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
-#endif /* __rtems__ */
-
- portal = (struct qman_portal *)qman_get_affine_portal(cpu);
- qman_p_static_dequeue_add(portal, pool);
- }
- return 0;
-}
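
A minimal sketch of the assumed wiring between the two helpers above: the probe path fetches the shared pool channel once, then asks every affine portal to dequeue from it. The caller example_wire_rx_channel() below is hypothetical:

static int example_wire_rx_channel(void)
{
	int channel = dpa_get_channel();

	if (channel < 0)
		return channel;

	/* dpaa_eth_add_channel() unpacks the channel from its argument */
	return dpaa_eth_add_channel((void *)(unsigned long)channel);
}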
-
-/* Congestion group state change notification callback.
- * Stops the device's egress queues while they are congested and
- * wakes them upon exiting congested state.
- * Also updates some CGR-related stats.
- */
-static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
- int congested)
-{
- struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
- struct dpa_priv_s, cgr_data.cgr);
-
- if (congested) {
- priv->cgr_data.congestion_start_jiffies = jiffies;
-#ifndef __rtems__
- netif_tx_stop_all_queues(priv->net_dev);
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- priv->cgr_data.cgr_congested_count++;
- } else {
- priv->cgr_data.congested_jiffies +=
- (jiffies - priv->cgr_data.congestion_start_jiffies);
-#ifndef __rtems__
- netif_tx_wake_all_queues(priv->net_dev);
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- }
-}
-
-int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
-{
- struct qm_mcc_initcgr initcgr;
- u32 cs_th;
- int err;
-
- err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
- if (err < 0) {
- pr_err("Error %d allocating CGR ID\n", err);
- goto out_error;
- }
- priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
-
- /* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
- initcgr.cgr.cscn_en = QM_CGR_EN;
-
- /* Set different thresholds based on the MAC speed.
-	 * This may become suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
- */
-#ifndef __rtems__
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
- cs_th = DPAA_CS_THRESHOLD_10G;
- else
- cs_th = DPAA_CS_THRESHOLD_1G;
-#else /* __rtems__ */
- /* FIXME */
- cs_th = DPAA_CS_THRESHOLD_1G;
-#endif /* __rtems__ */
- qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
- initcgr.cgr.cstd_en = QM_CGR_EN;
-
- err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
- &initcgr);
- if (err < 0) {
- pr_err("Error %d creating CGR with ID %d\n", err,
- priv->cgr_data.cgr.cgrid);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- goto out_error;
- }
- pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
- priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
- priv->cgr_data.cgr.chan);
-
-out_error:
- return err;
-}
-
-static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
- struct dpa_fq *fq,
- const struct qman_fq *template)
-{
- fq->fq_base = *template;
- fq->net_dev = priv->net_dev;
-
- fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- fq->channel = priv->channel;
-}
-
-static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
- struct dpa_fq *fq,
- struct fman_port *port,
- const struct qman_fq *template)
-{
- fq->fq_base = *template;
- fq->net_dev = priv->net_dev;
-
- if (port) {
- fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- fq->channel = (u16)fman_port_get_qman_channel_id(port);
- } else {
- fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
- }
-}
-
-void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- struct fman_port *tx_port)
-{
- struct dpa_fq *fq;
-#ifndef __rtems__
- u16 portals[NR_CPUS];
- int cpu, num_portals = 0;
- const cpumask_t *affine_cpus = qman_affine_cpus();
-#endif /* __rtems__ */
- int egress_cnt = 0, conf_cnt = 0;
-
-#ifndef __rtems__
- for_each_cpu(cpu, affine_cpus)
- portals[num_portals++] = qman_affine_channel(cpu);
- if (num_portals == 0)
- dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
-#else /* __rtems__ */
- /* FIXME */
-#endif /* __rtems__ */
-
- /* Initialize each FQ in the list */
- list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- switch (fq->fq_type) {
- case FQ_TYPE_RX_DEFAULT:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- break;
- case FQ_TYPE_RX_ERROR:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
- break;
- case FQ_TYPE_TX:
- dpa_setup_egress(priv, fq, tx_port,
- &fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TX_QUEUES)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- break;
- case FQ_TYPE_TX_CONFIRM:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- break;
- case FQ_TYPE_TX_CONF_MQ:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- break;
- case FQ_TYPE_TX_ERROR:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
- break;
- default:
-#ifndef __rtems__
- dev_warn(priv->net_dev->dev.parent,
- "Unknown FQ type detected!\n");
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- break;
- }
- }
-
- /* The number of Tx queues may be smaller than the number of cores, if
- * the Tx queue range is specified in the device tree instead of being
- * dynamically allocated.
- * Make sure all CPUs receive a corresponding Tx queue.
- */
- while (egress_cnt < DPAA_ETH_TX_QUEUES) {
- list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TX_QUEUES)
- break;
- }
- }
-}
-
-int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
-{
- int err;
- const struct dpa_priv_s *priv;
-#ifndef __rtems__
- struct device *dev;
-#endif /* __rtems__ */
- struct qman_fq *fq;
- struct qm_mcc_initfq initfq;
- struct qman_fq *confq = NULL;
- int queue_id;
-
- priv = netdev_priv(dpa_fq->net_dev);
-#ifndef __rtems__
- dev = dpa_fq->net_dev->dev.parent;
-#endif /* __rtems__ */
-
- if (dpa_fq->fqid == 0)
- dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-
- dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
-
- err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- if (err) {
-#ifndef __rtems__
- dev_err(dev, "qman_create_fq() failed\n");
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- return err;
- }
- fq = &dpa_fq->fq_base;
-
- if (dpa_fq->init) {
- memset(&initfq, 0, sizeof(initfq));
-
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- /* Note: we may get to keep an empty FQ in cache */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
-
- /* Try to reduce the number of portal interrupts for
- * Tx Confirmation FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
-
- /* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-
- initfq.fqd.dest.channel = dpa_fq->channel;
- initfq.fqd.dest.wq = dpa_fq->wq;
-
- /* Put all egress queues in a congestion group of their own.
- * Sensu stricto, the Tx confirmation queues are Rx FQs,
- * rather than Tx - but they nonetheless account for the
- * memory footprint on behalf of egress traffic. We therefore
- * place them in the netdev's CGR, along with the Tx FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX ||
- dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
- dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
- /* Set a fixed overhead accounting, in an attempt to
- * reduce the impact of fixed-size skb shells and the
- * driver's needed headroom on system memory. This is
- * especially the case when the egress traffic is
- * composed of small datagrams.
- * Unfortunately, QMan's OAL value is capped to an
- * insufficient value, but even that is better than
- * no overhead accounting at all.
- */
- initfq.we_mask |= QM_INITFQ_WE_OAC;
- initfq.fqd.oac_init.oac = QM_OAC_CG;
-#ifndef __rtems__
- initfq.fqd.oac_init.oal =
- (signed char)(min(sizeof(struct sk_buff) +
- priv->tx_headroom,
- (size_t)FSL_QMAN_MAX_OAL));
-#else /* __rtems__ */
- /* FIXME */
- initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
-#endif /* __rtems__ */
- }
-
- if (td_enable) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
- qm_fqd_taildrop_set(&initfq.fqd.td,
- DPA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
- }
-
- /* Configure the Tx confirmation queue, now that we know
- * which Tx queue it pairs with.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX) {
- queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
- if (queue_id >= 0)
- confq = priv->conf_fqs[queue_id];
- if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
- * A2V=1 (contextA A2 field is valid)
- * A0V=1 (contextA A0 field is valid)
- * B0V=1 (contextB field is valid)
- * ContextA A2: EBD=1 (deallocate buffers inside FMan)
- * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
- */
- initfq.fqd.context_a.hi = 0x1e000000;
- initfq.fqd.context_a.lo = 0x80000000;
- }
- }
-
- /* Put all *private* ingress queues in our "ingress CGR". */
- if (priv->use_ingress_cgr &&
- (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
- dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
- /* Set a fixed overhead accounting, just like for the
- * egress CGR.
- */
- initfq.we_mask |= QM_INITFQ_WE_OAC;
- initfq.fqd.oac_init.oac = QM_OAC_CG;
-#ifndef __rtems__
- initfq.fqd.oac_init.oal =
- (signed char)(min(sizeof(struct sk_buff) +
- priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-#else /* __rtems__ */
- /* FIXME */
- initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
-#endif /* __rtems__ */
- }
-
- /* Initialization common to all ingress queues */
- if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
- initfq.fqd.context_a.stashing.exclusive =
- QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
- QM_STASHING_EXCL_ANNOTATION;
- initfq.fqd.context_a.stashing.data_cl = 2;
- initfq.fqd.context_a.stashing.annotation_cl = 1;
- initfq.fqd.context_a.stashing.context_cl =
- DIV_ROUND_UP(sizeof(struct qman_fq), 64);
- }
-
- err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- if (err < 0) {
-#ifndef __rtems__
- dev_err(dev, "qman_init_fq(%u) = %d\n",
- qman_fq_fqid(fq), err);
-#endif /* __rtems__ */
- qman_destroy_fq(fq, 0);
- return err;
- }
- }
-
- dpa_fq->fqid = qman_fq_fqid(fq);
-
- return 0;
-}
-
-#ifndef __rtems__
-static int _dpa_fq_free(struct device *dev, struct qman_fq *fq)
-{
- int err, error;
- struct dpa_fq *dpa_fq;
- const struct dpa_priv_s *priv;
-
- err = 0;
-
- dpa_fq = container_of(fq, struct dpa_fq, fq_base);
- priv = netdev_priv(dpa_fq->net_dev);
-
- if (dpa_fq->init) {
- err = qman_retire_fq(fq, NULL);
- if (err < 0 && netif_msg_drv(priv))
- dev_err(dev, "qman_retire_fq(%u) = %d\n",
- qman_fq_fqid(fq), err);
-
- error = qman_oos_fq(fq);
- if (error < 0 && netif_msg_drv(priv)) {
- dev_err(dev, "qman_oos_fq(%u) = %d\n",
- qman_fq_fqid(fq), error);
- if (err >= 0)
- err = error;
- }
- }
-
- qman_destroy_fq(fq, 0);
- list_del(&dpa_fq->list);
-
- return err;
-}
-
-int dpa_fq_free(struct device *dev, struct list_head *list)
-{
- int err, error;
- struct dpa_fq *dpa_fq, *tmp;
-
- err = 0;
- list_for_each_entry_safe(dpa_fq, tmp, list, list) {
- error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
- if (error < 0 && err >= 0)
- err = error;
- }
-
- return err;
-}
-#endif /* __rtems__ */
-
-static void
-dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq,
- struct dpa_fq *defq,
- struct dpa_buffer_layout_s *buf_layout)
-{
- struct fman_port_params params;
- struct fman_buffer_prefix_content buf_prefix_content;
- int err;
-
- memset(&params, 0, sizeof(params));
- memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
-
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = buf_layout->parse_results;
- buf_prefix_content.pass_hash_result = buf_layout->hash_results;
- buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
- buf_prefix_content.data_align = buf_layout->data_align;
-
- params.specific_params.non_rx_params.err_fqid = errq->fqid;
- params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
-
- err = fman_port_config(port, &params);
- if (err)
- pr_info("fman_port_config failed\n");
-
- err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
- pr_info("fman_port_cfg_buf_prefix_content failed\n");
-
- err = fman_port_init(port);
- if (err)
-		pr_err("fman_port_init failed\n");
-}
-
-static void
-dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp,
- size_t count, struct dpa_fq *errq, struct dpa_fq *defq,
- struct dpa_buffer_layout_s *buf_layout)
-{
- struct fman_port_params params;
- struct fman_buffer_prefix_content buf_prefix_content;
- struct fman_port_rx_params *rx_p;
- int i, err;
-
- memset(&params, 0, sizeof(params));
- memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
-
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = buf_layout->parse_results;
- buf_prefix_content.pass_hash_result = buf_layout->hash_results;
- buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
- buf_prefix_content.data_align = buf_layout->data_align;
-
- rx_p = &params.specific_params.rx_params;
- rx_p->err_fqid = errq->fqid;
- rx_p->dflt_fqid = defq->fqid;
-
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size;
- }
-
- err = fman_port_config(port, &params);
- if (err)
- pr_info("fman_port_config failed\n");
-
- err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
- pr_info("fman_port_cfg_buf_prefix_content failed\n");
-
- err = fman_port_init(port);
- if (err)
-		pr_err("fman_port_init failed\n");
-}
-
-void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpa_bp *bp, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpa_buffer_layout_s *buf_layout,
- struct device *dev)
-{
- struct fman_port *rxport = mac_dev->port[RX];
- struct fman_port *txport = mac_dev->port[TX];
-
- dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- port_fqs->tx_defq, &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
-}
-
-void dpa_release_sgt(struct qm_sg_entry *sgt)
-{
- struct dpa_bp *dpa_bp;
- struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
- u8 i = 0, j;
-
- memset(bmb, 0, sizeof(bmb));
-
- do {
- dpa_bp = dpa_bpid2pool(sgt[i].bpid);
- DPA_ERR_ON(!dpa_bp);
-
- j = 0;
- do {
- DPA_ERR_ON(sgt[i].extension);
-
- bmb[j].hi = sgt[i].addr_hi;
- bmb[j].lo = be32_to_cpu(sgt[i].addr_lo);
-
- j++; i++;
- } while (j < ARRAY_SIZE(bmb) &&
- !sgt[i - 1].final &&
- sgt[i - 1].bpid == sgt[i].bpid);
-
- while (bman_release(dpa_bp->pool, bmb, j, 0))
- cpu_relax();
- } while (!sgt[i - 1].final);
-}
-
-void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
-{
- struct qm_sg_entry *sgt;
- struct dpa_bp *dpa_bp;
- struct bm_buffer bmb;
- dma_addr_t addr;
- void *vaddr;
-
- memset(&bmb, 0, sizeof(bmb));
- bm_buffer_set64(&bmb, fd->addr);
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
- DPA_ERR_ON(!dpa_bp);
-
- if (fd->format == qm_fd_sg) {
- vaddr = phys_to_virt(fd->addr);
- sgt = vaddr + dpa_fd_offset(fd);
-
-#ifndef __rtems__
- dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
- DMA_BIDIRECTIONAL);
-#endif /* __rtems__ */
-
- dpa_release_sgt(sgt);
-
-#ifndef __rtems__
- addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dpa_bp->dev, addr)) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- return;
- }
-#else /* __rtems__ */
- addr = (dma_addr_t)vaddr;
-#endif /* __rtems__ */
- bm_buffer_set64(&bmb, addr);
- }
-
- while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- cpu_relax();
-}
-
-void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_mr_entry *msg)
-{
- switch (msg->ern.rc & QM_MR_RC_MASK) {
- case QM_MR_RC_CGR_TAILDROP:
- percpu_priv->ern_cnt.cg_tdrop++;
- break;
- case QM_MR_RC_WRED:
- percpu_priv->ern_cnt.wred++;
- break;
- case QM_MR_RC_ERROR:
- percpu_priv->ern_cnt.err_cond++;
- break;
- case QM_MR_RC_ORPWINDOW_EARLY:
- percpu_priv->ern_cnt.early_window++;
- break;
- case QM_MR_RC_ORPWINDOW_LATE:
- percpu_priv->ern_cnt.late_window++;
- break;
- case QM_MR_RC_FQ_TAILDROP:
- percpu_priv->ern_cnt.fq_tdrop++;
- break;
- case QM_MR_RC_ORPWINDOW_RETIRED:
- percpu_priv->ern_cnt.fq_retired++;
- break;
- case QM_MR_RC_ORP_ZERO:
- percpu_priv->ern_cnt.orp_zero++;
- break;
- }
-}
-
-#ifndef __rtems__
-/* Turn on HW checksum computation for this outgoing frame.
- * If the current protocol is not something we support in this regard
- * (or if the stack has already computed the SW checksum), we do nothing.
- *
- * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- * otherwise.
- *
- * Note that this function may modify the fd->cmd field and the skb data buffer
- * (the Parse Results area).
- */
-int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- struct sk_buff *skb,
- struct qm_fd *fd,
- char *parse_results)
-{
- struct fman_prs_result *parse_result;
- struct iphdr *iph;
- struct ipv6hdr *ipv6h = NULL;
- u8 l4_proto;
- u16 ethertype = ntohs(skb->protocol);
- int retval = 0;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- /* Note: L3 csum seems to be already computed in sw, but we can't choose
- * L4 alone from the FM configuration anyway.
- */
-
- /* Fill in some fields of the Parse Results array, so the FMan
- * can find them as if they came from the FMan Parser.
- */
- parse_result = (struct fman_prs_result *)parse_results;
-
- /* If we're dealing with VLAN, get the real Ethernet type */
- if (ethertype == ETH_P_8021Q) {
- /* We can't always assume the MAC header is set correctly
- * by the stack, so reset to beginning of skb->data
- */
- skb_reset_mac_header(skb);
- ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- }
-
- /* Fill in the relevant L3 parse result fields
- * and read the L4 protocol type
- */
- switch (ethertype) {
- case ETH_P_IP:
- parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
- iph = ip_hdr(skb);
- DPA_ERR_ON(!iph);
- l4_proto = iph->protocol;
- break;
- case ETH_P_IPV6:
- parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
- ipv6h = ipv6_hdr(skb);
- DPA_ERR_ON(!ipv6h);
- l4_proto = ipv6h->nexthdr;
- break;
- default:
- /* We shouldn't even be here */
- if (net_ratelimit())
- netif_alert(priv, tx_err, priv->net_dev,
- "Can't compute HW csum for L3 proto 0x%x\n",
- ntohs(skb->protocol));
- retval = -EIO;
- goto return_error;
- }
-
- /* Fill in the relevant L4 parse result fields */
- switch (l4_proto) {
- case IPPROTO_UDP:
- parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
- break;
- case IPPROTO_TCP:
- parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
- break;
- default:
- /* This can as well be a BUG() */
- if (net_ratelimit())
- netif_alert(priv, tx_err, priv->net_dev,
- "Can't compute HW csum for L4 proto 0x%x\n",
- l4_proto);
- retval = -EIO;
- goto return_error;
- }
-
- /* At index 0 is IPOffset_1 as defined in the Parse Results */
- parse_result->ip_off[0] = (u8)skb_network_offset(skb);
- parse_result->l4_off = (u8)skb_transport_offset(skb);
-
- /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
-
- /* On P1023 and similar platforms fd->cmd interpretation could
- * be disabled by setting CONTEXT_A bit ICMD; currently this bit
-	 * is not set, so we do not need to check it; in the future, if/when
-	 * context_a is used, this bit will need to be checked
- */
-
-return_error:
- return retval;
-}
-#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
deleted file mode 100644
index 954de393..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DPAA_ETH_COMMON_H
-#define __DPAA_ETH_COMMON_H
-
-#include <linux/etherdevice.h>
-#include <soc/fsl/bman.h>
-#include <linux/of_platform.h>
-
-#include "dpaa_eth.h"
-
-#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
-
-/* used in napi related functions */
-extern u16 qman_portal_max;
-
-/* from dpa_ethtool.c */
-extern const struct ethtool_ops dpa_ethtool_ops;
-
-int dpa_netdev_init(struct net_device *net_dev,
- const u8 *mac_addr,
- u16 tx_timeout);
-int dpa_start(struct net_device *net_dev);
-int dpa_stop(struct net_device *net_dev);
-void dpa_timeout(struct net_device *net_dev);
-struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats);
-int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
-int dpa_ndo_init(struct net_device *net_dev);
-#ifndef __rtems__
-int dpa_set_features(struct net_device *dev, netdev_features_t features);
-netdev_features_t dpa_fix_features(struct net_device *dev,
- netdev_features_t features);
-#endif /* __rtems__ */
-int dpa_remove(struct platform_device *pdev);
-struct mac_device *dpa_mac_dev_get(struct platform_device *pdev);
-int dpa_mac_hw_index_get(struct platform_device *pdev);
-int dpa_mac_fman_index_get(struct platform_device *pdev);
-int dpa_set_mac_address(struct net_device *net_dev, void *addr);
-void dpa_set_rx_mode(struct net_device *net_dev);
-void dpa_set_buffers_layout(struct mac_device *mac_dev,
- struct dpa_buffer_layout_s *layout);
-int dpa_bp_alloc(struct dpa_bp *dpa_bp);
-void dpa_bp_free(struct dpa_priv_s *priv);
-struct dpa_bp *dpa_bpid2pool(int bpid);
-void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
-bool dpa_bpid2pool_use(int bpid);
-void dpa_bp_drain(struct dpa_bp *bp);
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
-#endif
-struct dpa_fq *dpa_fq_alloc(struct device *dev,
- const struct fqid_cell *fqids,
- struct list_head *list,
- enum dpa_fq_type fq_type);
-int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- struct fm_port_fqs *port_fqs,
- bool tx_conf_fqs_per_core,
- enum port_type ptype);
-int dpa_get_channel(void);
-void dpa_release_channel(void);
-int dpaa_eth_add_channel(void *__arg);
-int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
-void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- struct fman_port *tx_port);
-int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
-int dpa_fq_free(struct device *dev, struct list_head *list);
-void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpa_bp *bp, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpa_buffer_layout_s *buf_layout,
- struct device *dev);
-void dpa_release_sgt(struct qm_sg_entry *sgt);
-void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
-void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_mr_entry *msg);
-#ifndef __rtems__
-int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- struct sk_buff *skb,
- struct qm_fd *fd,
- char *parse_results);
-#endif /* __rtems__ */
-#endif /* __DPAA_ETH_COMMON_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
deleted file mode 100644
index 2d0903e3..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
+++ /dev/null
@@ -1,710 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/highmem.h>
-#include <soc/fsl/bman.h>
-
-#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-
-/* Convenience macros for storing/retrieving the skb back-pointers.
- *
- * NB: @off is an offset from a (struct sk_buff **) pointer!
- */
-#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
- { \
- skbh = (struct sk_buff **)addr; \
- *(skbh + (off)) = skb; \
- }
-#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
- { \
- skbh = (struct sk_buff **)addr; \
- skb = *(skbh + (off)); \
- }
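
A short usage sketch of the macros above (illustrative only, assuming a pointer-sized slot was reserved immediately before buf_start, as the Rx seeding code below arranges); example_skb_backpointer() is hypothetical:

static void example_skb_backpointer(struct sk_buff *skb, void *buf_start)
{
	struct sk_buff **skbh;
	struct sk_buff *found;

	DPA_WRITE_SKB_PTR(skb, skbh, buf_start, -1);	/* store at alloc time */
	DPA_READ_SKB_PTR(found, skbh, buf_start, -1);	/* fetch on Rx/cleanup */
	WARN_ON(found != skb);
}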
-
-/* DMA map and add a page frag back into the bpool.
- * @vaddr fragment must have been allocated with netdev_alloc_frag(),
- * specifically for fitting into @dpa_bp.
- */
-static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
- int *count_ptr)
-{
- struct bm_buffer bmb;
- dma_addr_t addr;
-
- addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- return;
- }
-
- bm_buffer_set64(&bmb, addr);
-
- while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- cpu_relax();
-
- (*count_ptr)++;
-}
-
-static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
-{
- struct bm_buffer bmb[8];
- void *new_buf;
- dma_addr_t addr;
- u8 i;
- struct device *dev = dpa_bp->dev;
- struct sk_buff *skb, **skbh;
-
- memset(bmb, 0, sizeof(bmb));
-
- for (i = 0; i < 8; i++) {
- /* We'll prepend the skb back-pointer; can't use the DPA
- * priv space, because FMan will overwrite it (from offset 0)
- * if it ends up being the second, third, etc. fragment
-		 * in an S/G frame.
- *
- * We only need enough space to store a pointer, but allocate
- * an entire cacheline for performance reasons.
- */
- new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
- if (unlikely(!new_buf))
- goto netdev_alloc_failed;
- new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
-
- skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_buf));
- goto build_skb_failed;
- }
- DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
-
- addr = dma_map_single(dev, new_buf,
- dpa_bp->size, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto dma_map_failed;
-
- bm_buffer_set64(&bmb[i], addr);
- }
-
-release_bufs:
- /* Release the buffers. In case bman is busy, keep trying
- * until successful. bman_release() is guaranteed to succeed
- * in a reasonable amount of time
- */
- while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
- cpu_relax();
- return i;
-
-dma_map_failed:
- kfree_skb(skb);
-
-build_skb_failed:
-netdev_alloc_failed:
- net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
- WARN_ONCE(1, "Memory allocation failure on Rx\n");
-
- bm_buffer_set64(&bmb[i], 0);
- /* Avoid releasing a completely null buffer; bman_release() requires
- * at least one buffer.
- */
- if (likely(i))
- goto release_bufs;
-
- return 0;
-}
-
-/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
-static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
-{
- int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
- *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
-}
-
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
-{
- int i;
-
- /* Give each CPU an allotment of "config_count" buffers */
- for_each_possible_cpu(i) {
- int j;
-
- /* Although we access another CPU's counters here
-		 * we do it at boot time, so it is safe
- */
- for (j = 0; j < dpa_bp->config_count; j += 8)
- dpa_bp_add_8_bufs(dpa_bp, i);
- }
- return 0;
-}
-
-/* Add buffers/(pages) for Rx processing whenever bpool count falls below
- * REFILL_THRESHOLD.
- */
-int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
-{
- int count = *countptr;
- int new_bufs;
-
- if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
- do {
- new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
- if (unlikely(!new_bufs)) {
- /* Avoid looping forever if we've temporarily
- * run out of memory. We'll try again at the
- * next NAPI cycle.
- */
- break;
- }
- count += new_bufs;
- } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
-
- *countptr = count;
- if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
- return -ENOMEM;
- }
-
- return 0;
-}
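
A minimal sketch of an assumed call site for the refill helper above: the Rx poll path keeps a per-CPU buffer counter and tops the pool back up once it falls below FSL_DPAA_ETH_REFILL_THRESHOLD. example_refill() is hypothetical:

static void example_refill(struct dpa_bp *dpa_bp)
{
	int *countptr = this_cpu_ptr(dpa_bp->percpu_count);

	if (dpaa_eth_refill_bpools(dpa_bp, countptr) == -ENOMEM)
		pr_warn_ratelimited("bpool %u: refill deferred, out of memory\n",
				    dpa_bp->bpid);
}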
-
-/* Cleanup function for outgoing frame descriptors that were built on Tx path,
- * either contiguous frames or scatter/gather ones.
- * Skb freeing is not handled here.
- *
- * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
- *
- * Return the skb backpointer, since for S/G frames the buffer containing it
- * gets freed here.
- */
-struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- const struct qm_fd *fd)
-{
- const struct qm_sg_entry *sgt;
- int i;
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- dma_addr_t addr = qm_fd_addr(fd);
- struct sk_buff **skbh;
- struct sk_buff *skb = NULL;
- const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- int nr_frags;
-
-
- /* retrieve skb back pointer */
- DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
-
- if (unlikely(fd->format == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
-
- /* The sgt buffer has been allocated with netdev_alloc_frag(),
- * it's from lowmem.
- */
- sgt = phys_to_virt(addr + dpa_fd_offset(fd));
-
- /* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
- sgt[0].length, dma_dir);
-
- /* remaining pages were mapped with dma_map_page() */
- for (i = 1; i < nr_frags; i++) {
- DPA_ERR_ON(sgt[i].extension);
-
- dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
- sgt[i].length, dma_dir);
- }
-
- /* Free the page frag that we allocated on Tx */
- put_page(virt_to_head_page(sgt));
- } else {
- dma_unmap_single(dpa_bp->dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
- }
-
- return skb;
-}
-
-/* Build a linear skb around the received buffer.
- * We are guaranteed there is enough room at the end of the data buffer to
- * accommodate the shared info area of the skb.
- */
-static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
- const struct qm_fd *fd)
-{
- struct sk_buff *skb = NULL, **skbh;
- ssize_t fd_off = dpa_fd_offset(fd);
- dma_addr_t addr = qm_fd_addr(fd);
- void *vaddr;
-
- vaddr = phys_to_virt(addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
- /* Retrieve the skb and adjust data and tail pointers, to make sure
- * forwarded skbs will have enough space on Tx if extra headers
- * are added.
- */
- DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
-
- DPA_ERR_ON(fd_off != priv->rx_headroom);
- skb_reserve(skb, fd_off);
- skb_put(skb, dpa_fd_length(fd));
-
- skb->ip_summed = CHECKSUM_NONE;
-
- return skb;
-}
-
-/* Build an skb with the data of the first S/G entry in the linear portion and
- * the rest of the frame as skb fragments.
- *
- * The page fragment holding the S/G Table is recycled here.
- */
-static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
- const struct qm_fd *fd,
- int *count_ptr)
-{
- const struct qm_sg_entry *sgt;
- dma_addr_t addr = qm_fd_addr(fd);
- ssize_t fd_off = dpa_fd_offset(fd);
- dma_addr_t sg_addr;
- void *vaddr, *sg_vaddr;
- struct dpa_bp *dpa_bp;
- struct page *page, *head_page;
- int frag_offset, frag_len;
- int page_offset;
- int i;
- struct sk_buff *skb = NULL, *skb_tmp, **skbh;
-
- vaddr = phys_to_virt(addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
- dpa_bp = priv->dpa_bp;
- /* Iterate through the SGT entries and add data buffers to the skb */
- sgt = vaddr + fd_off;
- for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- /* Extension bit is not supported */
- DPA_ERR_ON(sgt[i].extension);
-
- /* We use a single global Rx pool */
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));
-
- sg_addr = qm_sg_addr(&sgt[i]);
- sg_vaddr = phys_to_virt(sg_addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
-
- dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (i == 0) {
- DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
- DPA_ERR_ON(skb->head != sg_vaddr);
-
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Make sure forwarded skbs will have enough space
- * on Tx, if extra headers are added.
- */
- DPA_ERR_ON(fd_off != priv->rx_headroom);
- skb_reserve(skb, fd_off);
- skb_put(skb, sgt[i].length);
- } else {
- /* Not the first S/G entry; all data from buffer will
- * be added in an skb fragment; fragment index is offset
- * by one since first S/G entry was incorporated in the
- * linear part of the skb.
- *
- * Caution: 'page' may be a tail page.
- */
- DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
- page = virt_to_page(sg_vaddr);
- head_page = virt_to_head_page(sg_vaddr);
-
- /* Free (only) the skbuff shell because its data buffer
- * is already a frag in the main skb.
- */
- get_page(head_page);
- dev_kfree_skb(skb_tmp);
-
- /* Compute offset in (possibly tail) page */
- page_offset = ((unsigned long)sg_vaddr &
- (PAGE_SIZE - 1)) +
- (page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
- */
- frag_offset = sgt[i].offset + page_offset;
- frag_len = sgt[i].length;
- /* skb_add_rx_frag() does no checking on the page; if
- * we pass it a tail page, we'll end up with
-			 * bad page accounting and eventually with segfaults.
- */
- skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
- frag_len, dpa_bp->size);
- }
- /* Update the pool count for the current {cpu x bpool} */
- (*count_ptr)--;
-
- if (sgt[i].final)
- break;
- }
- WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
-
- /* recycle the SGT fragment */
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
- dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
- return skb;
-}
-
-void _dpa_rx(struct net_device *net_dev,
- struct qman_portal *portal,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid,
- int *count_ptr)
-{
- struct dpa_bp *dpa_bp;
- struct sk_buff *skb;
- dma_addr_t addr = qm_fd_addr(fd);
- u32 fd_status = fd->status;
- unsigned int skb_len;
- struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
-
- if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
- if (net_ratelimit())
- netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd_status & FM_FD_STAT_RX_ERRORS);
-
- percpu_stats->rx_errors++;
- goto _release_frame;
- }
-
- dpa_bp = priv->dpa_bp;
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-
- /* prefetch the first 64 bytes of the frame or the SGT start */
- dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
- prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
-
- /* The only FD types that we may receive are contig and S/G */
- DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
-
- if (likely(fd->format == qm_fd_contig))
- skb = contig_fd_to_skb(priv, fd);
- else
- skb = sg_fd_to_skb(priv, fd, count_ptr);
-
- /* Account for either the contig buffer or the SGT buffer (depending on
- * which case we were in) having been removed from the pool.
- */
- (*count_ptr)--;
- skb->protocol = eth_type_trans(skb, net_dev);
-
- /* IP Reassembled frames are allowed to be larger than MTU */
- if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
- !(fd_status & FM_FD_IPR))) {
- percpu_stats->rx_dropped++;
- goto drop_bad_frame;
- }
-
- skb_len = skb->len;
-
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- goto packet_dropped;
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += skb_len;
-
-packet_dropped:
- return;
-
-drop_bad_frame:
- dev_kfree_skb(skb);
- return;
-
-_release_frame:
- dpa_fd_release(net_dev, fd);
-}
-
-static int skb_to_contig_fd(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd,
- int *count_ptr, int *offset)
-{
- struct sk_buff **skbh;
- dma_addr_t addr;
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- struct net_device *net_dev = priv->net_dev;
- int err;
- enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
-
- {
- /* We are guaranteed to have at least tx_headroom bytes
- * available, so just use that for offset.
- */
- fd->bpid = 0xff;
- buffer_start = skb->data - priv->tx_headroom;
- fd->offset = priv->tx_headroom;
- dma_dir = DMA_TO_DEVICE;
-
- DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
- }
-
- /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- * need to write into the skb.
- */
- err = dpa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
- err);
- return err;
- }
-
- /* Fill in the rest of the FD fields */
- fd->format = qm_fd_contig;
- fd->length20 = skb->len;
- fd->cmd |= FM_FD_CMD_FCO;
-
- /* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dpa_bp->dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
- return -EINVAL;
- }
- fd->addr_hi = (u8)upper_32_bits(addr);
- fd->addr_lo = lower_32_bits(addr);
-
- return 0;
-}
-
-static int skb_to_sg_fd(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd)
-{
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- dma_addr_t addr;
- struct sk_buff **skbh;
- struct net_device *net_dev = priv->net_dev;
- int err;
-
- struct qm_sg_entry *sgt;
- void *sgt_buf;
- void *buffer_start;
- skb_frag_t *frag;
- int i, j;
- const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- const int nr_frags = skb_shinfo(skb)->nr_frags;
-
- fd->format = qm_fd_sg;
-
- /* get a page frag to store the SGTable */
- sgt_buf = netdev_alloc_frag(priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags));
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed\n");
- return -ENOMEM;
- }
-
- /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- * need to write into the skb.
- */
- err = dpa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
- err);
- goto csum_failed;
- }
-
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
- sgt[0].bpid = 0xff;
- sgt[0].offset = 0;
- sgt[0].length = cpu_to_be32(skb_headlen(skb));
- sgt[0].extension = 0;
- sgt[0].final = 0;
- addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sg0_map_failed;
- }
- sgt[0].addr_hi = (u8)upper_32_bits(addr);
- sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
-
- /* populate the rest of SGT entries */
- for (i = 1; i <= nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i - 1];
- sgt[i].bpid = 0xff;
- sgt[i].offset = 0;
- sgt[i].length = cpu_to_be32(frag->size);
- sgt[i].extension = 0;
- sgt[i].final = 0;
-
- DPA_ERR_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
- dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sg_map_failed;
- }
-
- /* keep the offset in the address */
- sgt[i].addr_hi = (u8)upper_32_bits(addr);
- sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
- }
- sgt[i - 1].final = 1;
-
- fd->length20 = skb->len;
- fd->offset = priv->tx_headroom;
-
- /* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
-
- addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sgt_map_failed;
- }
-
- fd->bpid = 0xff;
- fd->cmd |= FM_FD_CMD_FCO;
- fd->addr_hi = (u8)upper_32_bits(addr);
- fd->addr_lo = lower_32_bits(addr);
-
- return 0;
-
-sgt_map_failed:
-sg_map_failed:
- for (j = 0; j < i; j++)
- dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
- cpu_to_be32(sgt[j].length), dma_dir);
-sg0_map_failed:
-csum_failed:
- put_page(virt_to_head_page(sgt_buf));
-
- return err;
-}
-
-int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
-{
- struct dpa_priv_s *priv;
- struct qm_fd fd;
- struct dpa_percpu_priv_s *percpu_priv;
- struct rtnl_link_stats64 *percpu_stats;
- int err = 0;
- const int queue_mapping = dpa_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
- int *countptr, offset = 0;
-
- priv = netdev_priv(net_dev);
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- percpu_stats = &percpu_priv->stats;
- countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-
- clear_fd(&fd);
-
- if (!nonlinear) {
- /* We're going to store the skb backpointer at the beginning
- * of the data buffer, so we need a privately owned skb
- *
- * We've made sure skb is not shared in dev->priv_flags,
- * we need to verify the skb head is not cloned
- */
- if (skb_cow_head(skb, priv->tx_headroom))
- goto enomem;
-
- BUG_ON(skb_is_nonlinear(skb));
- }
-
- /* MAX_SKB_FRAGS is equal or larger than our DPA_SGT_MAX_ENTRIES;
- * make sure we don't feed FMan with more fragments than it supports.
- * Btw, we're using the first sgt entry to store the linear part of
- * the skb, so we're one extra frag short.
- */
- if (nonlinear &&
- likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
- /* Just create a S/G fd based on the skb */
- err = skb_to_sg_fd(priv, skb, &fd);
- percpu_priv->tx_frag_skbuffs++;
- } else {
- /* If the egress skb contains more fragments than we support
- * we have no choice but to linearize it ourselves.
- */
- if (unlikely(nonlinear) && __skb_linearize(skb))
- goto enomem;
-
- /* Finally, create a contig FD from this skb */
- err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
- }
- if (unlikely(err < 0))
- goto skb_to_fd_failed;
-
- if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
- return NETDEV_TX_OK;
-
- /* dpa_xmit failed */
- if (fd.bpid != 0xff) {
- (*countptr)--;
- dpa_fd_release(net_dev, &fd);
- percpu_stats->tx_errors++;
- return NETDEV_TX_OK;
- }
- _dpa_cleanup_tx_fd(priv, &fd);
-skb_to_fd_failed:
-enomem:
- percpu_stats->tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
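
The removed Tx path above turns an skb into a QMan frame descriptor: a linear skb becomes a contiguous FD, while a fragmented skb gets a scatter/gather table whose first entry carries the linear part and whose last entry sets the final bit; skbs that would need more SGT entries than the table holds are linearized first. Below is a stand-alone sketch of that table-building step; the types and the build_sgt() helper are invented for illustration and are not part of this patch or of the driver.

#include <stdint.h>
#include <stdio.h>

#define SGT_MAX_ENTRIES 16	/* stand-in for DPA_SGT_MAX_ENTRIES */

/* Toy stand-ins for an skb page fragment and a struct qm_sg_entry. */
struct frag { const void *addr; uint32_t len; };
struct sg_entry { const void *addr; uint32_t len; int final; };

/* Build an SGT the way the removed skb_to_sg_fd() does: entry 0 carries
 * the linear part of the frame, entries 1..nr_frags carry the page
 * fragments, and only the last entry gets the final bit. Returns the
 * number of entries used, or -1 if the frame needs more entries than
 * the table holds (the driver linearizes the skb in that case).
 */
static int build_sgt(struct sg_entry *sgt,
		     const void *linear, uint32_t linear_len,
		     const struct frag *frags, int nr_frags)
{
	int i;

	if (1 + nr_frags > SGT_MAX_ENTRIES)
		return -1;

	sgt[0].addr = linear;
	sgt[0].len = linear_len;
	sgt[0].final = 0;

	for (i = 1; i <= nr_frags; i++) {
		sgt[i].addr = frags[i - 1].addr;
		sgt[i].len = frags[i - 1].len;
		sgt[i].final = 0;
	}
	sgt[i - 1].final = 1;	/* last entry closes the frame */

	return i;
}

int main(void)
{
	char head[64], page_a[256], page_b[512];
	struct frag frags[2] = { { page_a, sizeof(page_a) },
				 { page_b, sizeof(page_b) } };
	struct sg_entry sgt[SGT_MAX_ENTRIES];
	int n = build_sgt(sgt, head, sizeof(head), frags, 2);

	printf("entries used: %d, final bit on entry %d\n", n, n - 1);
	return 0;
}

The same reservation of entry 0 for the linear part explains the check in the removed dpa_tx() above: an skb may carry at most DPA_SGT_MAX_ENTRIES - 1 page fragments before __skb_linearize() is forced.
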
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 46eca272..ee6dfc9a 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -80,9 +80,9 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
TP_fast_assign(
__entry->fqid = fq->fqid;
__entry->fd_addr = qm_fd_addr_get64(fd);
- __entry->fd_format = fd->format;
- __entry->fd_offset = dpa_fd_offset(fd);
- __entry->fd_length = dpa_fd_length(fd);
+ __entry->fd_format = qm_fd_get_format(fd);
+ __entry->fd_offset = qm_fd_get_offset(fd);
+ __entry->fd_length = qm_fd_get_length(fd);
__entry->fd_status = fd->status;
__assign_str(name, netdev->name);
),
@@ -99,7 +99,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
*/
/* Tx (egress) fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
@@ -109,7 +109,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
);
/* Rx fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
@@ -119,7 +119,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
);
/* Tx confirmation fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
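
The dpaa_eth_trace.h hunks above rename the Tx, Rx, and Tx-confirmation events of the dpaa_eth_fd class from dpa_* to dpaa_* and record the FD fields through the qm_fd_get_*() accessors. Because DEFINE_EVENT(class, name, ...) generates a trace_<name>() helper, call sites in the driver follow the rename; a hypothetical wrapper illustrating the change is sketched below (kernel context assumed, not compilable on its own, and the function name is invented).

/* Illustrative only: how a Tx call site follows the event rename.
 * The real call sites live in the dpaa_eth Tx/Rx fast paths and are
 * not part of this excerpt.
 */
static void dpaa_trace_tx_example(struct net_device *net_dev,
				  struct qman_fq *fq,
				  const struct qm_fd *fd)
{
	/* before this patch: trace_dpa_tx_fd(net_dev, fq, fd); */
	trace_dpaa_tx_fd(net_dev, fq, fd);
}
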
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.c b/linux/drivers/net/ethernet/freescale/fman/fman.c
index 5119b400..c0f26b31 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.c
@@ -16,7 +16,7 @@
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
-// *
+ *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -38,8 +38,8 @@
#include "fman.h"
#include "fman_muram.h"
-#include <asm/mpc85xx.h>
+#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -48,12 +48,12 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <linux/libfdt_env.h>
#ifdef __rtems__
#include <bsp/fdt.h>
#include <bsp/qoriq.h>
#endif /* __rtems__ */
-
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
#define MAX_NUM_OF_MACS 10
@@ -88,31 +88,11 @@
#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
#define EX_DMA_SINGLE_PORT_ECC 0x00008000
-#define DFLT_EXCEPTIONS \
- ((EX_DMA_BUS_ERROR) | \
- (EX_DMA_READ_ECC) | \
- (EX_DMA_SYSTEM_WRITE_ECC) | \
- (EX_DMA_FM_WRITE_ECC) | \
- (EX_FPM_STALL_ON_TASKS) | \
- (EX_FPM_SINGLE_ECC) | \
- (EX_FPM_DOUBLE_ECC) | \
- (EX_QMI_DEQ_FROM_UNKNOWN_PORTID) | \
- (EX_BMI_LIST_RAM_ECC) | \
- (EX_BMI_STORAGE_PROFILE_ECC) | \
- (EX_BMI_STATISTICS_RAM_ECC) | \
- (EX_MURAM_ECC) | \
- (EX_BMI_DISPATCH_RAM_ECC) | \
- (EX_QMI_DOUBLE_ECC) | \
- (EX_QMI_SINGLE_ECC))
-
/* DMA defines */
/* masks */
-#define DMA_MODE_AID_OR 0x20000000
-#define DMA_MODE_SBER 0x10000000
#define DMA_MODE_BER 0x00200000
#define DMA_MODE_ECC 0x00000020
#define DMA_MODE_SECURE_PROT 0x00000800
-#define DMA_MODE_EMER_READ 0x00080000
#define DMA_MODE_AXI_DBG_MASK 0x0F000000
#define DMA_TRANSFER_PORTID_MASK 0xFF000000
@@ -130,7 +110,6 @@
#define DMA_MODE_CEN_SHIFT 13
#define DMA_MODE_CEN_MASK 0x00000007
#define DMA_MODE_DBG_SHIFT 7
-#define DMA_MODE_EMER_LVL_SHIFT 6
#define DMA_MODE_AID_MODE_SHIFT 4
#define DMA_THRESH_COMMQ_SHIFT 24
@@ -160,8 +139,6 @@
#define FPM_RAM_MURAM_ECC 0x00008000
#define FPM_RAM_IRAM_ECC 0x00004000
-#define FPM_RAM_MURAM_TEST_ECC 0x20000000
-#define FPM_RAM_IRAM_TEST_ECC 0x10000000
#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
#define FPM_RAM_IRAM_ECC_EN 0x40000000
@@ -247,8 +224,6 @@
#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
#define QMI_INTR_EN_SINGLE_ECC 0x80000000
-#define QMI_TAPC_TAP 22
-
#define QMI_GS_HALT_NOT_BUSY 0x00000002
/* IRAM defines */
@@ -266,7 +241,6 @@
#define DEFAULT_DMA_DBG_CNT_MODE 0
#define DEFAULT_DMA_SOS_EMERGENCY 0
#define DEFAULT_DMA_WATCHDOG 0
-#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
#define DEFAULT_DISP_LIMIT 0
#define DEFAULT_PRS_DISP_TH 16
#define DEFAULT_PLCR_DISP_TH 16
@@ -509,13 +483,9 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
-struct fman_rg {
- struct fman_fpm_regs __iomem *fpm_rg;
- struct fman_dma_regs __iomem *dma_rg;
- struct fman_bmi_regs __iomem *bmi_rg;
- struct fman_qmi_regs __iomem *qmi_rg;
-};
-
+/* Structure that holds current FMan state.
+ * Used for saving run time information.
+ */
struct fman_state_struct {
u8 fm_id;
u16 fm_clk_freq;
@@ -527,7 +497,6 @@ struct fman_state_struct {
u32 accumulated_fifo_size;
u8 accumulated_num_of_open_dmas;
u8 accumulated_num_of_deq_tnums;
- bool low_end_restriction;
u32 exceptions;
u32 extra_fifo_pool_size;
u8 extra_tasks_pool_size;
@@ -558,6 +527,7 @@ struct fman_state_struct {
struct resource *res;
};
+/* Structure that holds FMan initial configuration */
struct fman_cfg {
u8 disp_limit_tsh;
u8 prs_disp_tsh;
@@ -570,7 +540,6 @@ struct fman_cfg {
u8 fm_ctl2_disp_tsh;
int dma_cache_override;
enum fman_dma_aid_mode dma_aid_mode;
- bool dma_aid_override;
u32 dma_axi_dbg_num_of_beats;
u32 dma_cam_num_of_entries;
u32 dma_watchdog;
@@ -582,31 +551,18 @@ struct fman_cfg {
u32 dma_read_buf_tsh_clr_emer;
u32 dma_sos_emergency;
int dma_dbg_cnt_mode;
- bool dma_stop_on_bus_error;
- bool dma_en_emergency;
- u32 dma_emergency_bus_select;
- int dma_emergency_level;
- bool dma_en_emergency_smoother;
- u32 dma_emergency_switch_counter;
- bool halt_on_external_activ;
- bool halt_on_unrecov_ecc_err;
int catastrophic_err;
int dma_err;
- bool en_muram_test_mode;
- bool en_iram_test_mode;
- bool external_ecc_rams_enable;
- u16 tnum_aging_period;
u32 exceptions;
u16 clk_freq;
- bool pedantic_dma;
u32 cam_base_addr;
u32 fifo_base_addr;
u32 total_fifo_size;
u32 total_num_of_tasks;
- bool qmi_deq_option_support;
u32 qmi_def_tnums_thresh;
};
+/* Structure that holds information received from device tree */
struct fman_dts_params {
void __iomem *base_addr; /* FMan virtual address */
#ifndef __rtems__
@@ -621,10 +577,36 @@ struct fman_dts_params {
u32 qman_channel_base; /* QMan channels base */
u32 num_of_qman_channels; /* Number of QMan channels */
- phys_addr_t muram_phy_base_addr; /* MURAM physical address */
- resource_size_t muram_size; /* MURAM size */
+ struct resource muram_res; /* MURAM resource */
};
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine; it will be called upon an exception,
+ * passing the exception identification.
+ *
+ * Return: irq status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine; it will be called upon a bus error,
+ * passing parameters describing the error and the owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
struct fman {
struct device *dev;
void __iomem *base_addr;
@@ -643,12 +625,11 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
- int fifo_offset;
+ unsigned long fifo_offset;
size_t fifo_size;
- bool reset_on_init;
u32 liodn_base[64];
u32 liodn_offset[64];
@@ -656,38 +637,47 @@ struct fman {
struct fman_dts_params dts_params;
};
-static void fman_exceptions(struct fman *fman, enum fman_exceptions exception)
+static irqreturn_t fman_exceptions(struct fman *fman,
+ enum fman_exceptions exception)
{
- pr_debug("FMan[%d] exception %d\n",
- fman->state->fm_id, exception);
+ dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+ __func__, fman->state->fm_id, exception);
+
+ return IRQ_HANDLED;
}
-static void fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
- u64 __maybe_unused addr, u8 __maybe_unused tnum,
- u16 __maybe_unused liodn)
+static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr,
+ u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
{
- pr_debug("FMan[%d] bus error: port_id[%d]\n",
- fman->state->fm_id, port_id);
+ dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+ __func__, fman->state->fm_id, port_id);
+
+ return IRQ_HANDLED;
}
-static inline void call_mac_isr(struct fman *fman, u8 id)
+static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
{
- if (fman->intr_mng[id].isr_cb)
+ if (fman->intr_mng[id].isr_cb) {
fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
}
static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
{
u8 sw_port_id = 0;
- if (hw_port_id >= BASE_TX_PORTID) {
+ if (hw_port_id >= BASE_TX_PORTID)
sw_port_id = hw_port_id - BASE_TX_PORTID;
- } else if (hw_port_id >= BASE_RX_PORTID) {
+ else if (hw_port_id >= BASE_RX_PORTID)
sw_port_id = hw_port_id - BASE_RX_PORTID;
- } else {
+ else
sw_port_id = 0;
- WARN_ON(false);
- }
return sw_port_id;
}
@@ -697,26 +687,26 @@ static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
{
u32 tmp = 0;
- tmp = (u32)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
+ tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
- tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
+ tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
/* order restoration */
if (port_id % 2)
- tmp |= (FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
else
- tmp |= (FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
iowrite32be(tmp, &fpm_rg->fmfp_prc);
}
-static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
+static void set_port_liodn(struct fman *fman, u8 port_id,
u32 liodn_base, u32 liodn_ofst)
{
u32 tmp;
/* set LIODN base for this port */
- tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
+ tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
tmp &= ~DMA_LIODN_BASE_MASK;
tmp |= liodn_base;
@@ -724,8 +714,8 @@ static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -758,24 +748,14 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
cfg->dma_err = DEFAULT_DMA_ERR;
- cfg->halt_on_external_activ = false;
- cfg->halt_on_unrecov_ecc_err = false;
- cfg->en_iram_test_mode = false;
- cfg->en_muram_test_mode = false;
- cfg->external_ecc_rams_enable = false;
- cfg->dma_aid_override = false;
cfg->dma_aid_mode = DEFAULT_AID_MODE;
cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
- cfg->dma_en_emergency = false;
cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
- cfg->dma_en_emergency_smoother = false;
- cfg->dma_emergency_switch_counter =
- DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
@@ -785,11 +765,6 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
-
- cfg->pedantic_dma = false;
- cfg->tnum_aging_period = 0;
- cfg->dma_stop_on_bus_error = false;
- cfg->qmi_deq_option_support = false;
}
static int dma_init(struct fman *fman)
@@ -808,37 +783,23 @@ static int dma_init(struct fman *fman)
/* configure mode register */
tmp_reg = 0;
tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
- if (cfg->dma_aid_override)
- tmp_reg |= DMA_MODE_AID_OR;
if (cfg->exceptions & EX_DMA_BUS_ERROR)
tmp_reg |= DMA_MODE_BER;
if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
(cfg->exceptions & EX_DMA_READ_ECC) |
(cfg->exceptions & EX_DMA_FM_WRITE_ECC))
tmp_reg |= DMA_MODE_ECC;
- if (cfg->dma_stop_on_bus_error)
- tmp_reg |= DMA_MODE_SBER;
if (cfg->dma_axi_dbg_num_of_beats)
tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
((cfg->dma_axi_dbg_num_of_beats - 1)
<< DMA_MODE_AXI_DBG_SHIFT));
- if (cfg->dma_en_emergency) {
- tmp_reg |= cfg->dma_emergency_bus_select;
- tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
- if (cfg->dma_en_emergency_smoother)
- iowrite32be(cfg->dma_emergency_switch_counter,
- &dma_rg->fmdmemsr);
- }
tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
tmp_reg |= DMA_MODE_SECURE_PROT;
tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
- if (cfg->pedantic_dma)
- tmp_reg |= DMA_MODE_EMER_READ;
-
iowrite32be(tmp_reg, &dma_rg->fmdmmr);
/* configure thresholds register */
@@ -874,7 +835,8 @@ static int dma_init(struct fman *fman)
(u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
@@ -888,22 +850,24 @@ static int dma_init(struct fman *fman)
fman->cam_offset = fman_muram_alloc(fman->muram,
fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
if (fman->cfg->dma_cam_num_of_entries % 8 ||
fman->cfg->dma_cam_num_of_entries > 32) {
- pr_err("wrong dma_cam_num_of_entries\n");
+ dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+ __func__);
return -EINVAL;
}
cam_base_addr = (u32 __iomem *)
fman_muram_offset_to_vbase(fman->muram,
fman->cam_offset);
- out_be32(cam_base_addr,
- ~((1 << (32 - fman->cfg->dma_cam_num_of_entries)) -
- 1));
+ iowrite32be(~((1 <<
+ (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+ cam_base_addr);
}
fman->cfg->cam_base_addr = fman->cam_offset;
@@ -948,10 +912,10 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
- if (!cfg->halt_on_external_activ)
- tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
- if (!cfg->halt_on_unrecov_ecc_err)
- tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ /* FMan is not halted upon external halt activation */
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+	/* FMan is not halted upon unrecoverable ECC error behavior */
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
/* clear all fmCtls event registers */
@@ -964,17 +928,7 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
*/
/* event bits */
tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
- /* Rams enable not effected by RCR bit,
- * but by a COP configuration
- */
- if (cfg->external_ecc_rams_enable)
- tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
-
- /* enable test mode */
- if (cfg->en_muram_test_mode)
- tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
- if (cfg->en_iram_test_mode)
- tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
+
iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
tmp_reg = 0;
@@ -1031,8 +985,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
struct fman_cfg *cfg)
{
u32 tmp_reg;
- u16 period_in_fm_clocks;
- u8 remainder;
/* Init QMI Registers */
@@ -1048,22 +1000,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
/* enable events */
iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
- if (cfg->tnum_aging_period) {
- /* tnum_aging_period is in units of usec, clk_freq in Mhz */
- period_in_fm_clocks = (u16)
- (cfg->tnum_aging_period * cfg->clk_freq);
- /* period_in_fm_clocks must be a 64 multiple */
- remainder = (u8)(period_in_fm_clocks % 64);
- if (remainder) {
- tmp_reg = (u32)((period_in_fm_clocks / 64) + 1);
- } else {
- tmp_reg = (u32)(period_in_fm_clocks / 64);
- if (!tmp_reg)
- tmp_reg = 1;
- }
- tmp_reg <<= QMI_TAPC_TAP;
- iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
- }
tmp_reg = 0;
/* Clear interrupt events */
iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
@@ -1073,163 +1009,163 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
-static int enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
+static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
/* Enable all modules */
- /* clear&enable global counters - calculate reg and save for later,
+ /* clear&enable global counters - calculate reg and save for later,
* because it's the same reg for QMI enable
*/
cfg_reg = QMI_CFG_EN_COUNTERS;
- if (cfg->qmi_deq_option_support)
- cfg_reg |= (u32)(((cfg->qmi_def_tnums_thresh) << 8) |
- cfg->qmi_def_tnums_thresh);
- iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
+ /* Set enqueue and dequeue thresholds */
+ cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+
+ iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
- &fman_rg->qmi_rg->fmqm_gc);
+ &fman->qmi_regs->fmqm_gc);
return 0;
}
-static int set_exception(struct fman_rg *fman_rg,
+static int set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 tmp;
switch (exception) {
case FMAN_EX_DMA_BUS_ERROR:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_BER;
else
tmp &= ~DMA_MODE_BER;
/* disable bus error */
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_DMA_READ_ECC:
case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
case FMAN_EX_DMA_FM_WRITE_ECC:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_ECC;
else
tmp &= ~DMA_MODE_ECC;
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_FPM_STALL_ON_TASKS:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_STALL_EN;
else
tmp &= ~FPM_EV_MASK_STALL_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_QMI_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
if (enable)
tmp |= QMI_INTR_EN_SINGLE_ECC;
else
tmp &= ~QMI_INTR_EN_SINGLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
break;
case FMAN_EX_QMI_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
else
tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
else
tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_BMI_LIST_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STATISTICS_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_DISPATCH_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_IRAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_IRAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
case FMAN_EX_MURAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_MURAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
default:
return -EINVAL;
@@ -1346,10 +1282,11 @@ static void free_init_resources(struct fman *fman)
fman->fifo_size);
}
-static void bmi_err_event(struct fman *fman)
+static irqreturn_t bmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&bmi_rg->fmbm_ievr);
mask = ioread32be(&bmi_rg->fmbm_ier);
@@ -1362,19 +1299,22 @@ static void bmi_err_event(struct fman *fman)
iowrite32be(event, &bmi_rg->fmbm_ievr);
if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+
+ return ret;
}
-static void qmi_err_event(struct fman *fman)
+static irqreturn_t qmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_eie);
mask = ioread32be(&qmi_rg->fmqm_eien);
@@ -1388,17 +1328,21 @@ static void qmi_err_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_eie);
if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
- fman->exception_cb(fman, FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+ ret = fman->exception_cb(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+
+ return ret;
}
-static void dma_err_event(struct fman *fman)
+static irqreturn_t dma_err_event(struct fman *fman)
{
u32 status, mask, com_id;
u8 tnum, port_id, relative_port_id;
u16 liodn;
struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ irqreturn_t ret = IRQ_NONE;
status = ioread32be(&dma_rg->fmdmsr);
mask = ioread32be(&dma_rg->fmdmmr);
@@ -1431,22 +1375,26 @@ static void dma_err_event(struct fman *fman)
tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
DMA_TRANSFER_TNUM_SHIFT);
liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
- fman->bus_error_cb(fman, relative_port_id, addr, tnum, liodn);
+ ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+ liodn);
}
if (status & DMA_STATUS_FM_SPDAT_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
if (status & DMA_STATUS_READ_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
if (status & DMA_STATUS_FM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+
+ return ret;
}
-static void fpm_err_event(struct fman *fman)
+static irqreturn_t fpm_err_event(struct fman *fman)
{
u32 event;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fmfp_ee);
/* clear the all occurred events */
@@ -1454,18 +1402,21 @@ static void fpm_err_event(struct fman *fman)
if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
(event & FPM_EV_MASK_DOUBLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
if ((event & FPM_EV_MASK_SINGLE_ECC) &&
(event & FPM_EV_MASK_SINGLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+
+ return ret;
}
-static void muram_err_intr(struct fman *fman)
+static irqreturn_t muram_err_intr(struct fman *fman)
{
u32 event, mask;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fm_rcr);
mask = ioread32be(&fpm_rg->fm_rie);
@@ -1474,13 +1425,16 @@ static void muram_err_intr(struct fman *fman)
iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
- fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+
+ return ret;
}
-static void qmi_event(struct fman *fman)
+static irqreturn_t qmi_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_ie);
mask = ioread32be(&qmi_rg->fmqm_ien);
@@ -1493,7 +1447,9 @@ static void qmi_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_ie);
if (event & QMI_INTR_EN_SINGLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+
+ return ret;
}
static void enable_time_stamp(struct fman *fman)
@@ -1534,23 +1490,29 @@ static void enable_time_stamp(struct fman *fman)
static int clear_iram(struct fman *fman)
{
struct fman_iram_regs __iomem *iram;
- int i;
+ int i, count;
- iram = (struct fman_iram_regs __iomem *)(fman->base_addr + IMEM_OFFSET);
+ iram = fman->base_addr + IMEM_OFFSET;
/* Enable the auto-increment */
- out_be32(&iram->iadd, IRAM_IADD_AIE);
- while (in_be32(&iram->iadd) != IRAM_IADD_AIE)
- ;
+ iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+ if (count == 0)
+ return -EBUSY;
for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
- out_be32(&iram->idata, 0xffffffff);
+ iowrite32be(0xffffffff, &iram->idata);
- out_be32(&iram->iadd, fman->state->fm_iram_size - 4);
- /* Memory barrier */
- mb();
- while (in_be32(&iram->idata) != 0xffffffff)
- ;
+ iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+ if (count == 0)
+ return -EBUSY;
return 0;
}
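
The clear_iram() rewrite above, like the fman_reset() function added further down, replaces open-ended busy-wait loops on hardware status with a bounded poll: write the register, poll it up to a fixed count with a 1 us delay per iteration, and give up with -EBUSY instead of spinning forever. A stand-alone sketch of the idiom is shown below; fake_reg, read_reg() and delay_1us() are invented stand-ins for the MMIO register, ioread32be() and udelay().

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg;	/* models an MMIO status register */

static uint32_t read_reg(void) { return fake_reg; }
static void delay_1us(void) { /* the driver uses udelay(1) here */ }

/* Poll until (register & mask) == want, for at most 'count' iterations;
 * return 0 on success or -EBUSY on timeout, mirroring the rewritten
 * clear_iram()/fman_reset() loops.
 */
static int poll_reg(uint32_t mask, uint32_t want, int count)
{
	do {
		delay_1us();
		if ((read_reg() & mask) == want)
			return 0;
	} while (--count);

	return -EBUSY;
}

int main(void)
{
	fake_reg = 0x1;		/* reset still asserted: poll times out */
	printf("timeout case: %d\n", poll_reg(0x1, 0x0, 100));

	fake_reg = 0x0;		/* reset completed: poll succeeds */
	printf("success case: %d\n", poll_reg(0x1, 0x0, 100));
	return 0;
}

Bounding the loop turns a stuck status register into an error return during FMan initialization instead of an indefinite hang.
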
@@ -1623,9 +1585,10 @@ static int get_module_event(enum fman_event_modules module, u8 mod_id,
switch (module) {
case FMAN_MOD_MAC:
- event = (intr_type == FMAN_INTR_TYPE_ERR) ?
- (FMAN_EV_ERR_MAC0 + mod_id) :
- (FMAN_EV_MAC0 + mod_id);
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_ERR_MAC0 + mod_id;
+ else
+ event = FMAN_EV_MAC0 + mod_id;
break;
case FMAN_MOD_FMAN_CTRL:
if (intr_type == FMAN_INTR_TYPE_ERR)
@@ -1667,14 +1630,15 @@ static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
if ((fman->state->accumulated_fifo_size + fifo) >
(fman->state->total_fifo_size -
fman->state->extra_fifo_pool_size)) {
- pr_err("Requested fifo size and extra size exceed total FIFO size.\n");
+ dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+ __func__);
return -EAGAIN;
}
/* Read, modify and write to HW */
- tmp = (u32)((fifo / FMAN_BMI_FIFO_UNITS - 1) |
- ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
- BMI_EXTRA_FIFO_SIZE_SHIFT));
+ tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT);
iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
/* update accumulated */
@@ -1693,14 +1657,14 @@ static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
if (extra_tasks)
fman->state->extra_tasks_pool_size =
- (u8)max(fman->state->extra_tasks_pool_size, extra_tasks);
+ max(fman->state->extra_tasks_pool_size, extra_tasks);
/* check that there are enough uncommitted tasks */
if ((fman->state->accumulated_num_of_tasks + tasks) >
(fman->state->total_num_of_tasks -
fman->state->extra_tasks_pool_size)) {
- pr_err("Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
}
/* update accumulated */
@@ -1759,8 +1723,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
if ((fman->state->rev_info.major < 6) &&
(fman->state->accumulated_num_of_open_dmas - current_val +
open_dmas > fman->state->max_num_of_open_dmas)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
} else if ((fman->state->rev_info.major >= 6) &&
!((fman->state->rev_info.major == 6) &&
@@ -1768,8 +1732,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
(fman->state->accumulated_num_of_open_dmas -
current_val + open_dmas >
fman->state->dma_thresh_max_commq + 1)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
- fman->state->fm_id,
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ __func__, fman->state->fm_id,
fman->state->dma_thresh_max_commq + 1);
return -EAGAIN;
}
@@ -1820,8 +1784,9 @@ static int fman_config(struct fman *fman)
goto err_fm_drv;
/* Initialize MURAM block */
- fman->muram = fman_muram_init(fman->dts_params.muram_phy_base_addr,
- fman->dts_params.muram_size);
+ fman->muram =
+ fman_muram_init(fman->dts_params.muram_res.start,
+ resource_size(&fman->dts_params.muram_res));
if (!fman->muram)
goto err_fm_soc_specific;
@@ -1836,24 +1801,31 @@ static int fman_config(struct fman *fman)
#endif /* __rtems__ */
fman->exception_cb = fman_exceptions;
fman->bus_error_cb = fman_bus_error;
- fman->fpm_regs =
- (struct fman_fpm_regs __iomem *)(base_addr + FPM_OFFSET);
- fman->bmi_regs =
- (struct fman_bmi_regs __iomem *)(base_addr + BMI_OFFSET);
- fman->qmi_regs =
- (struct fman_qmi_regs __iomem *)(base_addr + QMI_OFFSET);
- fman->dma_regs =
- (struct fman_dma_regs __iomem *)(base_addr + DMA_OFFSET);
+ fman->fpm_regs = base_addr + FPM_OFFSET;
+ fman->bmi_regs = base_addr + BMI_OFFSET;
+ fman->qmi_regs = base_addr + QMI_OFFSET;
+ fman->dma_regs = base_addr + DMA_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
fman_defconfig(fman->cfg);
- fman->cfg->qmi_deq_option_support = true;
-
fman->state->extra_fifo_pool_size = 0;
- fman->state->exceptions = DFLT_EXCEPTIONS;
- fman->reset_on_init = true;
+ fman->state->exceptions = (EX_DMA_BUS_ERROR |
+ EX_DMA_READ_ECC |
+ EX_DMA_SYSTEM_WRITE_ECC |
+ EX_DMA_FM_WRITE_ECC |
+ EX_FPM_STALL_ON_TASKS |
+ EX_FPM_SINGLE_ECC |
+ EX_FPM_DOUBLE_ECC |
+ EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+ EX_BMI_LIST_RAM_ECC |
+ EX_BMI_STORAGE_PROFILE_ECC |
+ EX_BMI_STATISTICS_RAM_ECC |
+ EX_MURAM_ECC |
+ EX_BMI_DISPATCH_RAM_ECC |
+ EX_QMI_DOUBLE_ECC |
+ EX_QMI_SINGLE_ECC);
/* Read FMan revision for future use*/
fman_get_revision(fman, &fman->state->rev_info);
@@ -1912,20 +1884,108 @@ err_fm_state:
return -EINVAL;
}
+#ifndef __rtems__
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+#ifdef CONFIG_PPC
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+ __func__, guts_node->full_name);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+#endif
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+#ifdef CONFIG_PPC
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+ err = -EBUSY;
+ goto _return;
+ }
+#ifdef CONFIG_PPC
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+
+ goto _return;
+
+#ifdef CONFIG_PPC
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+#endif
+ }
+_return:
+ return err;
+}
+#endif /* __rtems__ */
+
static int fman_init(struct fman *fman)
{
struct fman_cfg *cfg = NULL;
- struct fman_rg fman_rg;
- int err = 0, i;
+ int err = 0, i, count;
if (is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
cfg = fman->cfg;
@@ -1948,8 +2008,8 @@ static int fman_init(struct fman *fman)
u32 liodn_base;
fman->liodn_offset[i] =
- ioread32be(&fman_rg.bmi_rg->fmbm_spliodn[i - 1]);
- liodn_base = ioread32be(&fman_rg.dma_rg->fmdmplr[i / 2]);
+ ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
liodn_base &= DMA_LIODN_BASE_MASK;
@@ -1961,23 +2021,21 @@ static int fman_init(struct fman *fman)
fman->liodn_base[i] = liodn_base;
}
- /* Reset the FM if required. */
- if (fman->reset_on_init) {
- if (fman->state->rev_info.major >= 6) {
- /* Errata A007273 */
- pr_debug("FManV3 reset is not supported!\n");
- } else {
- out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
- /* Memory barrier */
- mb();
- usleep_range(100, 300);
- }
+ err = fman_reset(fman);
+ if (err)
+ return err;
- if (!!(ioread32be(&fman_rg.qmi_rg->fmqm_gs) &
- QMI_GS_HALT_NOT_BUSY)) {
- resume(fman->fpm_regs);
- usleep_range(100, 300);
- }
+ if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+ resume(fman->fpm_regs);
+ /* Wait until QMI is not in halt not busy state */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+ QMI_GS_HALT_NOT_BUSY) && --count);
+ if (count == 0)
+ dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+ __func__);
}
if (clear_iram(fman) != 0)
@@ -2000,9 +2058,10 @@ static int fman_init(struct fman *fman)
/* allocate MURAM for FIFO according to total size */
fman->fifo_offset = fman_muram_alloc(fman->muram,
fman->state->total_fifo_size);
- if (IS_ERR_VALUE(fman->cam_offset)) {
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
free_init_resources(fman);
- pr_err("MURAM alloc for BMI FIFO failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+ __func__);
return -ENOMEM;
}
@@ -2017,7 +2076,7 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
- err = enable(&fman_rg, cfg);
+ err = enable(fman, cfg);
if (err != 0)
return err;
@@ -2033,16 +2092,10 @@ static int fman_set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 bit_mask = 0;
- struct fman_rg fman_rg;
if (!is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -2050,13 +2103,27 @@ static int fman_set_exception(struct fman *fman,
else
fman->state->exceptions &= ~bit_mask;
} else {
- pr_err("Undefined exception\n");
+ dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+ __func__, exception);
return -EINVAL;
}
- return set_exception(&fman_rg, exception, enable);
+ return set_exception(fman, exception, enable);
}
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @f_isr: The interrupt service routine.
+ * @h_src_arg: Argument to be passed to f_isr.
+ *
+ * Used to register an event handler to be processed by FMan
+ *
+ * Return: none.
+ */
void fman_register_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type,
void (*isr_cb)(void *src_arg), void *src_arg)
@@ -2064,47 +2131,61 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
/* register in local FM structure */
fman->intr_mng[event].isr_cb = isr_cb;
fman->intr_mng[event].src_handle = src_arg;
}
-
+EXPORT_SYMBOL(fman_register_intr);
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister an event handler to be processed by FMan
+ *
+ * Return: none.
+ */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type)
{
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
fman->intr_mng[event].isr_cb = NULL;
fman->intr_mng[event].src_handle = NULL;
}
+EXPORT_SYMBOL(fman_unregister_intr);
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_port_params(struct fman *fman,
struct fman_port_init_params *port_params)
{
int err;
- unsigned long int_flags;
+ unsigned long flags;
u8 port_id = port_params->port_id, mac_id;
- struct fman_rg fman_rg;
-
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
- spin_lock_irqsave(&fman->spinlock, int_flags);
+ spin_lock_irqsave(&fman->spinlock, flags);
err = set_num_of_tasks(fman, port_params->port_id,
&port_params->num_of_tasks,
&port_params->num_of_extra_tasks);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
/* TX Ports */
if (port_params->port_type != FMAN_PORT_TYPE_RX) {
@@ -2113,7 +2194,7 @@ int fman_set_port_params(struct fman *fman,
/* update qmi ENQ/DEQ threshold */
fman->state->accumulated_num_of_deq_tnums +=
port_params->deq_pipeline_depth;
- enq_th = (ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
/* if enq_th is too big, we reduce it to the max value
* that is still 0
@@ -2124,13 +2205,13 @@ int fman_set_port_params(struct fman *fman,
fman->state->qmi_max_num_of_tnums -
fman->state->accumulated_num_of_deq_tnums - 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_ENQ_MASK;
reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
- deq_th = ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_DEQ_MASK;
/* if deq_th is too small, we enlarge it to the min
* value that is still 0.
@@ -2139,59 +2220,70 @@ int fman_set_port_params(struct fman *fman,
*/
if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
(deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
- deq_th =
- fman->state->accumulated_num_of_deq_tnums + 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_DEQ_MASK;
reg |= deq_th;
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
}
err = set_size_of_fifo(fman, port_params->port_id,
&port_params->size_of_fifo,
&port_params->extra_size_of_fifo);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
err = set_num_of_open_dmas(fman, port_params->port_id,
&port_params->num_of_open_dmas,
&port_params->num_of_extra_open_dmas);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
- set_port_liodn(&fman_rg, port_id, fman->liodn_base[port_id],
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
fman->liodn_offset[port_id]);
if (fman->state->rev_info.major < 6)
- set_port_order_restoration(fman_rg.fpm_rg, port_id);
+ set_port_order_restoration(fman->fpm_regs, port_id);
mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
fman->state->port_mfl[mac_id] = port_params->max_frame_length;
} else {
- pr_warn("Port max_frame_length is smaller than MAC current MTU\n");
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return -EINVAL;
+ dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+ __func__, port_id, mac_id);
+ err = -EINVAL;
+ goto return_err;
}
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ spin_unlock_irqrestore(&fman->spinlock, flags);
return 0;
+
+return_err:
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+ return err;
}
+EXPORT_SYMBOL(fman_set_port_params);
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_reset_mac(struct fman *fman, u8 mac_id)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u32 msk, timeout = 100;
if (fman->state->rev_info.major >= 6) {
- pr_warn("FMan MAC reset no available for FMan V3!\n");
+		dev_err(fman->dev, "%s: FMan MAC reset not available for FMan V3!\n",
+ __func__);
return -EINVAL;
}
@@ -2228,7 +2320,8 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
msk = FPM_RSTC_MAC9_RESET;
break;
default:
- pr_warn("Illegal MAC Id\n");
+ dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+ __func__, mac_id);
return -EINVAL;
}
@@ -2242,125 +2335,73 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
return 0;
}
+EXPORT_SYMBOL(fman_reset_mac);
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
{
/* if port is already initialized, check that MaxFrameLength is smaller
* or equal to the port's max
*/
if ((!fman->state->port_mfl[mac_id]) ||
- (fman->state->port_mfl[mac_id] &&
- (mfl <= fman->state->port_mfl[mac_id]))) {
+ (mfl <= fman->state->port_mfl[mac_id])) {
fman->state->mac_mfl[mac_id] = mfl;
} else {
- pr_warn("MAC max_frame_length is larger than Port max_frame_length\n");
+ dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+ __func__);
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL(fman_set_mac_max_frame);
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
u16 fman_get_clock_freq(struct fman *fman)
{
return fman->state->fm_clk_freq;
}
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
u32 fman_get_bmi_max_fifo_size(struct fman *fman)
{
return fman->state->bmi_max_fifo_size;
}
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
-static void fman_event_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return;
-
- fpm_rg = fman->fpm_regs;
-
- /* normal interrupts */
- pending = ioread32be(&fpm_rg->fm_npi);
- if (!pending)
- return;
-
- if (pending & INTR_EN_QMI)
- qmi_event(fman);
-
- /* MAC interrupts */
- if (pending & INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_MAC0 + 0);
- if (pending & INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_MAC0 + 1);
- if (pending & INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_MAC0 + 2);
- if (pending & INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_MAC0 + 3);
- if (pending & INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_MAC0 + 4);
- if (pending & INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_MAC0 + 5);
- if (pending & INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_MAC0 + 6);
- if (pending & INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_MAC0 + 7);
- if (pending & INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_MAC0 + 8);
- if (pending & INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_MAC0 + 9);
-}
-
-static int fman_error_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return -EINVAL;
-
- fpm_rg = fman->fpm_regs;
-
- /* error interrupts */
- pending = ioread32be(&fpm_rg->fm_epi);
- if (!pending)
- return -EINVAL;
-
- if (pending & ERR_INTR_EN_BMI)
- bmi_err_event(fman);
- if (pending & ERR_INTR_EN_QMI)
- qmi_err_event(fman);
- if (pending & ERR_INTR_EN_FPM)
- fpm_err_event(fman);
- if (pending & ERR_INTR_EN_DMA)
- dma_err_event(fman);
- if (pending & ERR_INTR_EN_MURAM)
- muram_err_intr(fman);
-
- /* MAC error interrupts */
- if (pending & ERR_INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
- if (pending & ERR_INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
- if (pending & ERR_INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
- if (pending & ERR_INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
- if (pending & ERR_INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
- if (pending & ERR_INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
- if (pending & ERR_INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
- if (pending & ERR_INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
- if (pending & ERR_INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
- if (pending & ERR_INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
-
- return 0;
-}
-
+/**
+ * fman_get_revision
+ * @fman - Pointer to the FMan module
+ * @rev_info - A structure of revision information parameters.
+ *
+ * Returns the FM revision
+ *
+ * Allowed only following fman_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
{
u32 tmp;
@@ -2370,7 +2411,17 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
FPM_REV1_MAJOR_SHIFT);
rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
}
+EXPORT_SYMBOL(fman_get_revision);
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
{
int i;
@@ -2396,11 +2447,21 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
return fman->state->qman_channel_base + i;
}
+EXPORT_SYMBOL(fman_get_qman_channel_id);
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
struct resource *fman_get_mem_region(struct fman *fman)
{
return fman->state->res;
}
+EXPORT_SYMBOL(fman_get_mem_region);
/* Bootargs defines */
/* Extra headroom for RX buffers - Default, min and max */
@@ -2422,7 +2483,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
* particular forwarding scenarios that add extra headers to the
* forwarded frame.
*/
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
module_param(fsl_fm_rx_extra_headroom, int, 0);
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
@@ -2436,13 +2497,18 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* fm_set_max_frm() callback.
*/
#ifndef __rtems__
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
#else /* __rtems__ */
-int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
#endif /* __rtems__ */
module_param(fsl_fm_max_frm, int, 0);
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
u16 fman_get_max_frm(void)
{
static bool fm_check_mfl;
@@ -2464,6 +2530,11 @@ u16 fman_get_max_frm(void)
}
EXPORT_SYMBOL(fman_get_max_frm);
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
int fman_get_rx_extra_headroom(void)
{
static bool fm_check_rx_extra_headroom;
@@ -2479,7 +2550,7 @@ int fman_get_rx_extra_headroom(void)
fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
}
- fsl_fm_rx_extra_headroom = true;
+ fm_check_rx_extra_headroom = true;
fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
}
@@ -2487,32 +2558,202 @@ int fman_get_rx_extra_headroom(void)
}
EXPORT_SYMBOL(fman_get_rx_extra_headroom);
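Both bootarg accessors validate their module parameter only on the first call; the old code assigned true to fsl_fm_rx_extra_headroom itself, clobbering the configured headroom and never latching the check flag, which the hunk above corrects. The value is then rounded up to a 16-byte multiple. A small worked example of that alignment step, assuming the kernel's ALIGN() macro:

    /* ALIGN(x, 16) rounds x up to the next multiple of 16:
     *   ALIGN(64, 16) == 64   (already a multiple of 16, unchanged)
     *   ALIGN(84, 16) == 96   (84 is rounded up to the next multiple)
     * so a bootarg of fsl_fm_rx_extra_headroom=84 is reported as 96.
     */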
+/**
+ * fman_bind
+ * @dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
struct fman *fman_bind(struct device *fm_dev)
{
return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
}
+EXPORT_SYMBOL(fman_bind);
-void fman_unbind(struct fman *fman)
+static irqreturn_t fman_err_irq(int irq, void *handle)
{
- put_device(fman->dev);
-}
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
-struct device *fman_get_device(struct fman *fman)
-{
- return fman->dev;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & ERR_INTR_EN_BMI) {
+ single_ret = bmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_QMI) {
+ single_ret = qmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_FPM) {
+ single_ret = fpm_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_DMA) {
+ single_ret = dma_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MURAM) {
+ single_ret = muram_err_intr(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
-static irqreturn_t fman_irq(int irq, void *fman)
+static irqreturn_t fman_irq(int irq, void *handle)
{
- fman_event_isr(fman);
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
- return IRQ_HANDLED;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & INTR_EN_QMI) {
+ single_ret = qmi_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
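Both reworked handlers follow the same aggregation rule: start from IRQ_NONE and latch IRQ_HANDLED once any sub-handler reports it, so the interrupt core is told the line was serviced only if at least one pending source actually was. A condensed sketch of that rule (SOME_SOURCE and handle_source() are placeholders, not driver symbols):

    irqreturn_t single_ret, ret = IRQ_NONE;

    if (pending & SOME_SOURCE) {
            single_ret = handle_source(fman);
            if (single_ret == IRQ_HANDLED)
                    ret = IRQ_HANDLED;
    }
    /* ... repeated per interrupt source ... */
    return ret;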
#ifndef __rtems__
static const struct of_device_id fman_muram_match[] = {
{
- .compatible = "fsl,fman-muram"},
+ .compatible = "fsl,fman-muram"},
{}
};
MODULE_DEVICE_TABLE(of, fman_muram_match);
@@ -2528,8 +2769,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
const char *fdt = bsp_fdt_get();
struct device_node *fm_node;
#endif /* __rtems__ */
- const u32 *u32_prop;
- int lenp, err, irq;
+ u32 val, range[2];
+ int err, irq;
#ifndef __rtems__
struct clk *clk;
u32 clk_rate;
@@ -2545,22 +2786,20 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fm_node = of_node_get(of_dev->dev.of_node);
- u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, cell-index) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32(fm_node, "cell-index", &val);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32)))
- goto fman_node_put;
-
- fman->dts_params.id = (u8)*u32_prop;
+ fman->dts_params.id = (u8)val;
#ifndef __rtems__
/* Get the FM interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_err("Can't get FMan IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
+ __func__);
goto fman_node_put;
}
irq = res->start;
@@ -2568,7 +2807,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM error interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
if (!res) {
- pr_err("Can't get FMan Error IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
+ __func__);
goto fman_node_put;
}
fman->dts_params.err_irq = res->start;
@@ -2576,12 +2816,14 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- pr_err("Can't get FMan memory resouce\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
+ __func__);
goto fman_node_put;
}
phys_base_addr = res->start;
- mem_size = res->end + 1 - res->start;
+ mem_size = resource_size(res);
+
#else /* __rtems__ */
irq = of_irq_to_resource(fm_node, 0, NULL);
fman->dts_params.err_irq = of_irq_to_resource(fm_node, 1, NULL);
@@ -2590,57 +2832,54 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
#endif /* __rtems__ */
#ifndef __rtems__
- clk = of_clk_get_by_name(fm_node, NULL);
+ clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
- pr_err("Failed to get FM%d clock structure\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
- pr_err("Failed to determine FM%d clock rate\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
/* Rounding to MHz */
- fman->dts_params.clk_freq = (u16)((clk_rate + 500000) / 1000000);
+ fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
#else /* __rtems__ */
/* FIXME */
fman->dts_params.clk_freq = 733;
#endif /* __rtems__ */
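The Linux-side clock conversion above moves from round-to-nearest to round-up, so the stored MHz value never understates the real clock. A worked example, assuming a 733,333,333 Hz input:

    /* old: (733333333 + 500000) / 1000000           = 733 (nearest MHz)
     * new: DIV_ROUND_UP(733333333, 1000000)
     *      = (733333333 + 1000000 - 1) / 1000000    = 734 (rounded up)
     */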
- u32_prop = (const u32 *)of_get_property(fm_node,
- "fsl,qman-channel-range",
- &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, fsl,qman-channel-range) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+ &range[0], 2);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32) * 2))
- goto fman_node_put;
- fman->dts_params.qman_channel_base = u32_prop[0];
- fman->dts_params.num_of_qman_channels = u32_prop[1];
+ fman->dts_params.qman_channel_base = range[0];
+ fman->dts_params.num_of_qman_channels = range[1];
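The device-tree reads in read_dts_node() now go through of_property_read_u32()/of_property_read_u32_array(), which check the property length and handle the be32-to-CPU conversion instead of the old raw of_get_property() pointer plus manual lenp checks. A condensed sketch of the new pattern (error handling reduced to a bare return for brevity):

    u32 val, range[2];

    if (of_property_read_u32(fm_node, "cell-index", &val))
            return -EINVAL;         /* missing or wrongly sized property */

    if (of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
                                   &range[0], 2))
            return -EINVAL;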
/* Get the MURAM base address and size */
#ifndef __rtems__
/* FIXME */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
- pr_err("could not find MURAM node\n");
+ dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ __func__);
goto fman_node_put;
}
- err = of_address_to_resource(muram_node, 0, res);
+ err = of_address_to_resource(muram_node, 0,
+ &fman->dts_params.muram_res);
if (err) {
of_node_put(muram_node);
- pr_err("of_address_to_resource() = %d\n", err);
+ dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ __func__, err);
goto fman_node_put;
}
-
- fman->dts_params.muram_phy_base_addr = res->start;
- fman->dts_params.muram_size = res->end + 1 - res->start;
#else /* __rtems__ */
{
int node = fdt_node_offset_by_compatible(fdt,
@@ -2655,48 +2894,55 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
pr_err("could not find MURAM node\n");
goto fman_node_put;
}
- fman->dts_params.muram_phy_base_addr = phys_base_addr +
- res.start;
- fman->dts_params.muram_size = res.end - res.start;
+ fman->dts_params.muram_res.start = phys_base_addr + res.start;
+ fman->dts_params.muram_res.end = phys_base_addr + res.end - 1;
}
#endif /* __rtems__ */
- {
- /* In B4 rev 2.0 (and above) the MURAM size is 512KB.
- * Check the SVR and update MURAM size if required.
- */
- u32 svr;
-
- svr = mfspr(SPRN_SVR);
-
- if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) >= 2))
- fman->dts_params.muram_size = 0x80000;
- }
-
#ifndef __rtems__
of_node_put(muram_node);
#endif /* __rtems__ */
- of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq,
- IRQF_NO_SUSPEND, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n", irq, err);
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
goto fman_free;
}
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+ fman_err_irq, IRQF_SHARED,
+ "fman-err", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, fman->dts_params.err_irq, err);
+ goto fman_free;
+ }
+ }
+
#ifndef __rtems__
fman->dts_params.res =
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
- pr_err("request_mem_region() failed\n");
+ dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+ __func__);
goto fman_free;
}
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (fman->dts_params.base_addr == 0) {
- pr_err("devm_ioremap() failed\n");
+ if (!fman->dts_params.base_addr) {
+ dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+ goto fman_free;
+ }
+
+ fman->dev = &of_dev->dev;
+
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
goto fman_free;
}
#endif /* __rtems__ */
@@ -2710,14 +2956,6 @@ fman_free:
return NULL;
}
-static irqreturn_t fman_err_irq(int irq, void *fman)
-{
- if (fman_error_isr(fman) == 0)
- return IRQ_HANDLED;
-
- return IRQ_NONE;
-}
-
static int fman_probe(struct platform_device *of_dev)
{
struct fman *fman;
@@ -2730,26 +2968,14 @@ static int fman_probe(struct platform_device *of_dev)
if (!fman)
return -EIO;
- if (fman->dts_params.err_irq != 0) {
- err = devm_request_irq(dev, fman->dts_params.err_irq,
- fman_err_irq,
- IRQF_SHARED | IRQF_NO_SUSPEND,
- "fman-err", fman);
- if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n",
- fman->dts_params.err_irq, err);
- return -EINVAL;
- }
- }
-
err = fman_config(fman);
if (err) {
- pr_err("FMan config failed\n");
+ dev_err(dev, "%s: FMan config failed\n", __func__);
return -EINVAL;
}
if (fman_init(fman) != 0) {
- pr_err("FMan init failed\n");
+ dev_err(dev, "%s: FMan init failed\n", __func__);
return -EINVAL;
}
@@ -2775,9 +3001,7 @@ static int fman_probe(struct platform_device *of_dev)
dev_set_drvdata(dev, fman);
- fman->dev = dev;
-
- pr_debug("FM%d probed\n", fman->dts_params.id);
+ dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
return 0;
}
@@ -2785,21 +3009,42 @@ static int fman_probe(struct platform_device *of_dev)
#ifndef __rtems__
static const struct of_device_id fman_match[] = {
{
- .compatible = "fsl,fman"},
+ .compatible = "fsl,fman"},
{}
};
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
static struct platform_driver fman_driver = {
.driver = {
- .name = "fsl-fman",
- .of_match_table = fman_match,
- },
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
.probe = fman_probe,
};
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+ platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
#else /* __rtems__ */
#include <sys/cdefs.h>
#include <sys/param.h>
@@ -2807,7 +3052,7 @@ builtin_platform_driver(fman_driver);
#include <sys/bus.h>
#include <sys/kernel.h>
-void
+int
fman_reset(struct fman *fman)
{
@@ -2825,6 +3070,8 @@ fman_reset(struct fman *fman)
QMI_GS_HALT_NOT_BUSY)) {
usleep_range(100, 300);
}
+
+ return (0);
}
struct fman_softc {
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.h b/linux/drivers/net/ethernet/freescale/fman/fman.h
index 291d263a..4af36c66 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.h
@@ -58,7 +58,6 @@
/* TX Port: Length Error */
#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
-
/* Rx FIFO overflow, FCS error, code error, running disparity error
* (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
* PHY error control character detected.
@@ -167,8 +166,8 @@ struct fman_prs_result {
u8 ip_off[2]; /* IP offset */
u8 gre_off; /* GRE offset */
u8 l4_off; /* Layer 4 offset */
- u8 nxthdr_off; /** Parser end point */
-} __attribute__((__packed__));
+ u8 nxthdr_off; /* Parser end point */
+};
/* A structure for defining buffer prefix area content. */
struct fman_buffer_prefix_content {
@@ -237,29 +236,6 @@ struct fman_buf_pool_depletion {
bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
};
-/** fman_exceptions_cb
- * fman - Pointer to FMan
- * exception - The exception.
- *
- * Exceptions user callback routine, will be called upon an exception
- * passing the exception identification.
- */
-typedef void (fman_exceptions_cb)(struct fman *fman,
- enum fman_exceptions exception);
-
-/** fman_bus_error_cb
- * fman - Pointer to FMan
- * port_id - Port id
- * addr - Address that caused the error
- * tnum - Owner of error
- * liodn - Logical IO device number
- *
- * Bus error user callback routine, will be called upon bus error,
- * passing parameters describing the errors and the owner.
- */
-typedef void (fman_bus_error_cb)(struct fman *fman, u8 port_id, u64 addr,
- u8 tnum, u16 liodn);
-
/* Enum for inter-module interrupts registration */
enum fman_event_modules {
FMAN_MOD_MAC = 0, /* MAC event */
@@ -325,176 +301,37 @@ struct fman_port_init_params {
/* LIODN base for this port, to be used together with LIODN offset. */
};
-struct fman;
-
-/**
- * fman_get_revision
- * @fman - Pointer to the FMan module
- * @rev_info - A structure of revision information parameters.
- *
- * Returns the FM revision
- *
- * Allowed only following fman_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
-/**
- * fman_register_intr
- * @fman: A Pointer to FMan device
- * @mod: Calling module
- * @mod_id: Module id (if more than 1 exists, '0' if not)
- * @intr_type: Interrupt type (error/normal) selection.
- * @f_isr: The interrupt service routine.
- * @h_src_arg: Argument to be passed to f_isr.
- *
- * Used to register an event handler to be processed by FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
u8 mod_id, enum fman_intr_type intr_type,
void (*f_isr)(void *h_src_arg), void *h_src_arg);
-/**
- * fman_unregister_intr
- * @fman: A Pointer to FMan device
- * @mod: Calling module
- * @mod_id: Module id (if more than 1 exists, '0' if not)
- * @intr_type: Interrupt type (error/normal) selection.
- *
- * Used to unregister an event handler to be processed by FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
u8 mod_id, enum fman_intr_type intr_type);
-/**
- * fman_set_port_params
- * @fman: A Pointer to FMan device
- * @port_params: Port parameters
- *
- * Used by FMan Port to pass parameters to the FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_port_params(struct fman *fman,
struct fman_port_init_params *port_params);
-/**
- * fman_reset_mac
- * @fman: A Pointer to FMan device
- * @mac_id: MAC id to be reset
- *
- * Reset a specific MAC
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_reset_mac(struct fman *fman, u8 mac_id);
-/**
- * fman_get_clock_freq
- * @fman: A Pointer to FMan device
- *
- * Get FMan clock frequency
- *
- * Return: FMan clock frequency
- */
-
u16 fman_get_clock_freq(struct fman *fman);
-/**
- * fman_get_bmi_max_fifo_size
- * @fman: A Pointer to FMan device
- *
- * Get FMan maximum FIFO size
- *
- * Return: FMan Maximum FIFO size
- */
u32 fman_get_bmi_max_fifo_size(struct fman *fman);
-/**
- * fman_set_mac_max_frame
- * @fman: A Pointer to FMan device
- * @mac_id: MAC id
- * @mfl: Maximum frame length
- *
- * Set maximum frame length of specific MAC in FMan driver
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
-/**
- * fman_get_qman_channel_id
- * @fman: A Pointer to FMan device
- * @port_id: Port id
- *
- * Get QMan channel ID associated to the Port id
- *
- * Return: QMan channel ID
- */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
-/**
- * fman_get_mem_region
- * @fman: A Pointer to FMan device
- *
- * Get FMan memory region
- *
- * Return: A structure with FMan memory region information
- */
struct resource *fman_get_mem_region(struct fman *fman);
-/**
- * fman_get_max_frm
- *
- * Return: Max frame length configured in the FM driver
- */
u16 fman_get_max_frm(void);
-/**
- * fman_get_rx_extra_headroom
- *
- * Return: Extra headroom size configured in the FM driver
- */
int fman_get_rx_extra_headroom(void);
-/**
- * fman_bind
- * @dev: FMan OF device pointer
- *
- * Bind to a specific FMan device.
- *
- * Allowed only after the port was created.
- *
- * Return: A pointer to the FMan device
- */
struct fman *fman_bind(struct device *dev);
-
-/**
- * fman_unbind
- * @fman: Pointer to the FMan device
- *
- * Un-bind from a specific FMan device.
- *
- * Allowed only after the port was created.
- */
-void fman_unbind(struct fman *fman);
-
-/**
- * fman_get_device
- * @fman: A pointer to the FMan device.
- *
- * Get the FMan device pointer
- *
- * Return: Pointer to FMan device.
- */
-struct device *fman_get_device(struct fman *fman);
#ifdef __rtems__
-void fman_reset(struct fman *fman);
+int fman_reset(struct fman *fman);
#endif /* __rtems__ */
#endif /* __FM_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 5be951b8..3bc70a43 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -36,8 +36,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "crc_mac_addr_ext.h"
-
#include "fman_dtsec.h"
#include "fman.h"
@@ -46,30 +44,23 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/of_mdio.h>
+#include <linux/mii.h>
-/* MII Management Command Register */
-#define MIIMCOM_READ_CYCLE 0x00000001
-
-/* MII Management Address Register */
-#define MIIMADD_PHY_ADDR_SHIFT 8
-
-/* MII Management Indicator Register */
-#define MIIMIND_BUSY 0x00000001
+/* TBI register addresses */
+#define MII_TBICON 0x11
-/* PHY Control Register */
-#define PHY_CR_PHY_RESET 0x8000
-#define PHY_CR_SPEED0 0x2000
-#define PHY_CR_ANE 0x1000
-#define PHY_CR_RESET_AN 0x0200
-#define PHY_CR_FULLDUPLEX 0x0100
-#define PHY_CR_SPEED1 0x0040
+/* TBICON register bit fields */
+#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
+#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
+#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
+#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
+#define TBICON_CLK_SELECT 0x0020 /* Clock select */
+#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
-#define PHY_TBICON_SRESET 0x8000
-#define PHY_TBICON_CLK_SEL 0x0020
-#define PHY_TBIANA_SGMII 0x4001
-#define PHY_TBIANA_1000X 0x01a0
-
-#define DTSEC_TO_MII_OFFSET 0x1000
+#define TBIANA_SGMII 0x4001
+#define TBIANA_1000X 0x01a0
/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
@@ -116,9 +107,7 @@
/* Defaults */
#define DEFAULT_HALFDUP_RETRANSMIT 0xf
#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
-#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
#define DEFAULT_TX_PAUSE_TIME 0xf000
-#define DEFAULT_TBIPA 5
#define DEFAULT_RX_PREPEND 0
#define DEFAULT_PREAMBLE_LEN 7
#define DEFAULT_TX_PAUSE_TIME_EXTD 0
@@ -127,22 +116,6 @@
#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
#define DEFAULT_BACK_TO_BACK_IPG 0x60
#define DEFAULT_MAXIMUM_FRAME 0x600
-#define DEFAULT_TBI_PHY_ADDR 5
-
-#define DTSEC_DEFAULT_EXCEPTIONS \
- ((u32)((DTSEC_IMASK_BREN) |\
- (DTSEC_IMASK_RXCEN) |\
- (DTSEC_IMASK_BTEN) |\
- (DTSEC_IMASK_TXCEN) |\
- (DTSEC_IMASK_TXEEN) |\
- (DTSEC_IMASK_ABRTEN) |\
- (DTSEC_IMASK_LCEN) |\
- (DTSEC_IMASK_CRLEN) |\
- (DTSEC_IMASK_XFUNEN) |\
- (DTSEC_IMASK_IFERREN) |\
- (DTSEC_IMASK_MAGEN) |\
- (DTSEC_IMASK_TDPEEN) |\
- (DTSEC_IMASK_RDPEEN)))
/* register related defines (bits, field offsets..) */
#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
@@ -154,24 +127,17 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
-#define DTSEC_TCTRL_THDF 0x00000800
-#define DTSEC_TCTRL_TTSE 0x00000040
#define DTSEC_TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
-#define RCTRL_CFA 0x00008000
#define RCTRL_GHTX 0x00000400
-#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
-#define RCTRL_BC_REJ 0x00000010
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
#define RCTRL_UPROM 0x00000001
-#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)
#define MACCFG1_SOFT_RESET 0x80000000
-#define MACCFG1_LOOPBACK 0x00000100
#define MACCFG1_RX_FLOW 0x00000020
#define MACCFG1_TX_FLOW 0x00000010
#define MACCFG1_TX_EN 0x00000001
@@ -179,11 +145,7 @@
#define MACCFG2_NIBBLE_MODE 0x00000100
#define MACCFG2_BYTE_MODE 0x00000200
-#define MACCFG2_PRE_AM_RX_EN 0x00000080
-#define MACCFG2_PRE_AM_TX_EN 0x00000040
-#define MACCFG2_LENGTH_CHECK 0x00000010
#define MACCFG2_PAD_CRC_EN 0x00000004
-#define MACCFG2_CRC_EN 0x00000002
#define MACCFG2_FULL_DUPLEX 0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
@@ -197,13 +159,8 @@
#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
-#define HAFDUP_ALT_BEB 0x00080000
-#define HAFDUP_BP_NO_BACKOFF 0x00040000
-#define HAFDUP_NO_BACKOFF 0x00020000
#define HAFDUP_EXCESS_DEFER 0x00010000
#define HAFDUP_COLLISION_WINDOW 0x000003ff
-#define HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK 0x00f00000
-#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
@@ -215,7 +172,6 @@
#define MAX_PACKET_ALIGNMENT 31
#define MAX_INTER_PACKET_GAP 0x7f
-#define MAX_INTER_PALTERNATE_BEB 0x0f
#define MAX_RETRANSMISSION 0x0f
#define MAX_COLLISION_WINDOW 0x03ff
@@ -224,20 +180,6 @@
/* Extended Hash table size (32 bits*16 regs) */
#define EXTENDED_HASH_TABLE_SIZE 512
-/* maximum number of phys */
-#define MAX_PHYS 32
-
-/* MII Configuration Control Memory Map Registers */
-struct dtsec_mii_regs {
- u32 reserved1[72];
- u32 miimcfg; /* MII Mgmt:configuration */
- u32 miimcom; /* MII Mgmt:command */
- u32 miimadd; /* MII Mgmt:address */
- u32 miimcon; /* MII Mgmt:control 3 */
- u32 miimstat; /* MII Mgmt:status */
- u32 miimind; /* MII Mgmt:indicators */
-};
-
/* dTSEC Memory Map registers */
struct dtsec_regs {
/* dTSEC General Control and Status Registers */
@@ -340,43 +282,13 @@ struct dtsec_regs {
* standard 512-bit slot time window. If collisions are detected after this
* byte, the late collision event is asserted and transmission of current
* frame is aborted.
- * rx_drop_bcast:
- * Discard broadcast frames. If set, all broadcast frames will be discarded
- * by dTSEC.
- * rx_short_frm:
- * Accept short frames. If set, dTSEC will accept frames of length 14-63 bytes.
- * rx_len_check:
- * Length check for received frames. If set, the MAC checks the frame's length
- * field on receive to ensure it matches the actual data field length.
- * This only works for received frames with length field less than 1500.
- * No check is performed for larger frames.
* tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
* appends a CRC to every frame regardless of padding requirement.
- * tx_crc:
- * Transmission CRC enable. If set, the MAC appends a CRC to all frames.
- * If frames presented to the MAC have a valid length and contain a valid CRC,
- * tx_crc should be reset. This field is ignored if tx_pad_crc is set.
- * rx_ctrl_acc:
- * Control frame accept. If set, this overrides 802.3 standard control frame
- * behavior, and all Ethernet frames that have an ethertype of 0x8808 are
- * treated as normal Ethernet frames and passed up to the packet interface on
- * a DA match. Received pause control frames are passed to the packet
- * interface only if Rx flow control is also disabled.
- * See dtsec_accept_rx_pause_frames() function.
* tx_pause_time:
* Transmit pause time value. This pause value is used as part of the pause
* frame to be sent when a transmit pause frame is initiated.
* If set to 0 this disables transmission of pause frames.
- * rx_preamble:
- * Receive preamble enable. If set, the MAC recovers the received Ethernet
- * 7-byte preamble and passes it to the packet interface at the start of each
- * received frame.
- * This field should be reset for internal MAC loop-back mode.
- * tx_preamble:
- * User defined preamble enable for transmitted frames.
- * If set, a user-defined preamble must passed to the MAC and it is
- * transmitted instead of the standard preamble.
* preamble_len:
* Length, in bytes, of the preamble field preceding each Ethernet
* start-of-frame delimiter byte. The default value of 0x7 should be used in
@@ -393,36 +305,14 @@ struct dtsec_regs {
* obtained by calling set_dflts().
*/
struct dtsec_cfg {
- bool halfdup_on;
- bool halfdup_alt_backoff_en;
- bool halfdup_excess_defer;
- bool halfdup_no_backoff;
- bool halfdup_bp_no_backoff;
- u32 halfdup_alt_backoff_val;
u16 halfdup_retransmit;
u16 halfdup_coll_window;
- bool rx_drop_bcast;
- bool rx_short_frm;
- bool rx_len_check;
bool tx_pad_crc;
- bool tx_crc;
- bool rx_ctrl_acc;
u16 tx_pause_time;
- u16 tbipa;
bool ptp_tsu_en;
bool ptp_exception_en;
- bool rx_preamble;
- bool tx_preamble;
u32 preamble_len;
u32 rx_prepend;
- bool loopback;
- bool rx_time_stamp_en;
- bool tx_time_stamp_en;
- bool rx_flow;
- bool tx_flow;
- bool rx_group_hash_exd;
- bool rx_promisc;
- u8 tbi_phy_addr;
u16 tx_pause_time_extd;
u16 maximum_frame;
u32 non_back_to_back_ipg1;
@@ -434,10 +324,6 @@ struct dtsec_cfg {
struct fman_mac {
/* pointer to dTSEC memory mapped registers */
struct dtsec_regs __iomem *regs;
- /* pointer to dTSEC MII memory mapped registers */
- struct dtsec_mii_regs __iomem *mii_regs;
- /* MII management clock */
- u16 mii_mgmt_clk;
/* MAC address of device */
u64 addr;
/* Ethernet physical interface */
@@ -453,169 +339,38 @@ struct fman_mac {
/* pointer to driver's individual address hash table */
struct eth_hash_t *unicast_addr_hash;
u8 mac_id;
- u8 tbi_phy_addr;
u32 exceptions;
bool ptp_tsu_enabled;
- bool en_tsu_err_exeption;
+ bool en_tsu_err_exception;
struct dtsec_cfg *dtsec_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
+ struct phy_device *tbiphy;
};
-static u32 calc_mii_mgmt_clk(struct fman_mac *dtsec)
-{
- u16 fm_clk_freq, dtsec_freq;
- u32 mgmt_clk;
-
- fm_clk_freq = fman_get_clock_freq(dtsec->fm);
- if (fm_clk_freq == 0) {
- pr_err("Can't get clock for MAC!\n");
- return 0;
- }
-
- dtsec_freq = (u16)(fm_clk_freq >> 1);
-
- if (dtsec_freq < 80)
- mgmt_clk = 1;
- else if (dtsec_freq < 120)
- mgmt_clk = 2;
- else if (dtsec_freq < 160)
- mgmt_clk = 3;
- else if (dtsec_freq < 200)
- mgmt_clk = 4;
- else if (dtsec_freq < 280)
- mgmt_clk = 5;
- else if (dtsec_freq < 400)
- mgmt_clk = 6;
- else
- mgmt_clk = 7;
-
- return mgmt_clk;
-}
-
-static int mii_write_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 data)
-{
- struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
- u32 tmp;
- int count;
-
- /* Setup the MII Mgmt clock speed */
- iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
-
- /* Stop the MII management read cycle */
- iowrite32be(0, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- /* Setting up MII Management Address Register */
- tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
- iowrite32be(tmp, &regs->miimadd);
-
- /* Setting up MII Management Control Register with data */
- iowrite32be((u32)data, &regs->miimcon);
- /* Dummy read to make sure MIIMCON is written */
- tmp = ioread32be(&regs->miimcon);
-
- /* Wait until MII management write is complete */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int mii_read_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 *data)
-{
- struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
- u32 tmp;
- int count;
-
- /* Setup the MII Mgmt clock speed */
- iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
-
- /* Setting up the MII Management Address Register */
- tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
- iowrite32be(tmp, &regs->miimadd);
-
- /* Perform an MII management read cycle */
- iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- /* Wait until MII management write is complete */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
-
- if (count == 0)
- return -EBUSY;
-
- /* Read MII management status */
- *data = (u16)ioread32be(&regs->miimstat);
-
- iowrite32be(0, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- if (*data == 0xffff) {
- pr_warn("Read wrong data(0xffff):phy_addr 0x%x,reg 0x%x",
- addr, reg);
- return -ENXIO;
- }
-
- return 0;
-}
-
static void set_dflts(struct dtsec_cfg *cfg)
{
- cfg->halfdup_on = false;
cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
- cfg->halfdup_excess_defer = true;
- cfg->halfdup_no_backoff = false;
- cfg->halfdup_bp_no_backoff = false;
- cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
- cfg->halfdup_alt_backoff_en = false;
- cfg->rx_drop_bcast = false;
- cfg->rx_short_frm = true;
- cfg->rx_len_check = false;
cfg->tx_pad_crc = true;
- cfg->tx_crc = false;
- cfg->rx_ctrl_acc = false;
cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
/* PHY address 0 is reserved (DPAA RM) */
- cfg->tbipa = DEFAULT_TBIPA;
cfg->rx_prepend = DEFAULT_RX_PREPEND;
cfg->ptp_tsu_en = true;
cfg->ptp_exception_en = true;
cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
- cfg->rx_preamble = false;
- cfg->tx_preamble = false;
- cfg->loopback = false;
- cfg->rx_time_stamp_en = false;
- cfg->tx_time_stamp_en = false;
- cfg->rx_flow = true;
- cfg->tx_flow = true;
- cfg->rx_group_hash_exd = false;
cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
- cfg->rx_promisc = false;
cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
- cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
}
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
phy_interface_t iface, u16 iface_speed, u8 *macaddr,
- u32 exception_mask)
+ u32 exception_mask, u8 tbi_addr)
{
bool is_rgmii, is_sgmii, is_qsgmii;
int i;
@@ -659,14 +414,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(tmp, &regs->ecntrl);
tmp = 0;
- if (cfg->halfdup_on)
- tmp |= DTSEC_TCTRL_THDF;
- if (cfg->tx_time_stamp_en)
- tmp |= DTSEC_TCTRL_TTSE;
-
- iowrite32be(tmp, &regs->tctrl);
-
- tmp = 0;
if (cfg->tx_pause_time)
tmp |= cfg->tx_pause_time;
@@ -676,18 +423,8 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp = 0;
tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
- if (cfg->rx_ctrl_acc)
- tmp |= RCTRL_CFA;
- if (cfg->rx_group_hash_exd)
- tmp |= RCTRL_GHTX;
- if (cfg->rx_time_stamp_en)
- tmp |= RCTRL_RTSE;
- if (cfg->rx_drop_bcast)
- tmp |= RCTRL_BC_REJ;
- if (cfg->rx_short_frm)
- tmp |= RCTRL_RSF;
- if (cfg->rx_promisc)
- tmp |= RCTRL_PROM;
+ /* Accept short frames */
+ tmp |= RCTRL_RSF;
iowrite32be(tmp, &regs->rctrl);
@@ -695,7 +432,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
* Done also in cases where TBI is not selected to avoid conflict with
* the external PHY's Physical address
*/
- iowrite32be(cfg->tbipa, &regs->tbipa);
+ iowrite32be(tbi_addr, &regs->tbipa);
iowrite32be(0, &regs->tmr_ctrl);
@@ -712,12 +449,8 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
}
tmp = 0;
- if (cfg->loopback)
- tmp |= MACCFG1_LOOPBACK;
- if (cfg->rx_flow)
- tmp |= MACCFG1_RX_FLOW;
- if (cfg->tx_flow)
- tmp |= MACCFG1_TX_FLOW;
+ tmp |= MACCFG1_RX_FLOW;
+ tmp |= MACCFG1_TX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
tmp = 0;
@@ -729,18 +462,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
MACCFG2_PREAMBLE_LENGTH_MASK;
- if (cfg->rx_preamble)
- tmp |= MACCFG2_PRE_AM_RX_EN;
- if (cfg->tx_preamble)
- tmp |= MACCFG2_PRE_AM_TX_EN;
- if (cfg->rx_len_check)
- tmp |= MACCFG2_LENGTH_CHECK;
if (cfg->tx_pad_crc)
tmp |= MACCFG2_PAD_CRC_EN;
- if (cfg->tx_crc)
- tmp |= MACCFG2_CRC_EN;
- if (!cfg->halfdup_on)
- tmp |= MACCFG2_FULL_DUPLEX;
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
iowrite32be(tmp, &regs->maccfg2);
tmp = (((cfg->non_back_to_back_ipg1 <<
@@ -755,19 +480,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(tmp, &regs->ipgifg);
tmp = 0;
-
- if (cfg->halfdup_alt_backoff_en) {
- tmp = HAFDUP_ALT_BEB;
- tmp |= (cfg->halfdup_alt_backoff_val <<
- HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT) &
- HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK;
- }
- if (cfg->halfdup_bp_no_backoff)
- tmp |= HAFDUP_BP_NO_BACKOFF;
- if (cfg->halfdup_no_backoff)
- tmp |= HAFDUP_NO_BACKOFF;
- if (cfg->halfdup_excess_defer)
- tmp |= HAFDUP_EXCESS_DEFER;
+ tmp |= HAFDUP_EXCESS_DEFER;
tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
& HAFDUP_RETRANSMISSION_MAX);
tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
@@ -843,35 +556,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
pr_err("Ethernet MAC Must have a valid MAC Address\n");
return -EINVAL;
}
- if (dtsec->max_speed >= SPEED_1000 &&
- dtsec->dtsec_drv_param->halfdup_on) {
- pr_err("Ethernet MAC 1G can't work in half duplex\n");
- return -EINVAL;
- }
-
- /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 Errata workaround */
- if (dtsec->dtsec_drv_param->rx_preamble) {
- pr_err("preamble_rx_en\n");
- return -EINVAL;
- }
-
- if (((dtsec->dtsec_drv_param)->tx_preamble ||
- (dtsec->dtsec_drv_param)->rx_preamble) &&
- ((dtsec->dtsec_drv_param)->preamble_len != 0x7)) {
- pr_err("Preamble length should be 0x7 bytes\n");
- return -EINVAL;
- }
- if ((dtsec->dtsec_drv_param)->halfdup_on &&
- (dtsec->dtsec_drv_param->tx_time_stamp_en ||
- dtsec->dtsec_drv_param->rx_time_stamp_en)) {
- pr_err("1588 timeStamp disabled in half duplex mode\n");
- return -EINVAL;
- }
- if ((dtsec->dtsec_drv_param)->rx_flow &&
- (dtsec->dtsec_drv_param)->rx_ctrl_acc) {
- pr_err("Receive control frame can not be accepted\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -888,12 +572,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
MAX_INTER_PACKET_GAP);
return -EINVAL;
}
- if ((dtsec->dtsec_drv_param)->halfdup_alt_backoff_val >
- MAX_INTER_PALTERNATE_BEB) {
- pr_err("alternateBackoffVal can't be greater than %d\n",
- MAX_INTER_PALTERNATE_BEB);
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
MAX_RETRANSMISSION) {
pr_err("maxRetransmission can't be greater than %d\n",
@@ -909,10 +587,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
* using the MII Management Interface
*/
}
- if (dtsec->dtsec_drv_param->tbipa > MAX_PHYS) {
- pr_err("PHY address (should be 0-%d)\n", MAX_PHYS);
- return -ERANGE;
- }
if (!dtsec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -922,12 +596,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (dtsec->dtsec_drv_param->rx_len_check) {
- pr_warn("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -998,18 +666,6 @@ static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
return false;
}
-static u32 get_mac_addr_hash_code(u64 eth_addr)
-{
- u32 crc;
-
- /* CRC calculation */
- GET_MAC_ADDR_CRC(eth_addr, crc);
-
- crc = bitrev32(crc);
-
- return crc;
-}
-
static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1059,10 +715,10 @@ static void dtsec_isr(void *handle)
* This is a read only register
* b. Read and save the value of TPKT
*/
- tpkt1 = in_be32(&regs->tpkt);
+ tpkt1 = ioread32be(&regs->tpkt);
/* c. Read the register at dTSEC address offset 0x32C */
- tmp_reg1 = in_be32(&regs->reserved02c0[27]);
+ tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
/* d. Compare bits [9:15] to bits [25:31] of the
* register at address offset 0x32C.
@@ -1083,8 +739,8 @@ static void dtsec_isr(void *handle)
/* e. Read and save TPKT again and read the register
* at dTSEC address offset 0x32C again
*/
- tpkt2 = in_be32(&regs->tpkt);
- tmp_reg2 = in_be32(&regs->reserved02c0[27]);
+ tpkt2 = ioread32be(&regs->tpkt);
+ tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
/* f. Compare the value of TPKT saved in step b to
* value read in step e. Also compare bits [9:15] of
@@ -1100,21 +756,22 @@ static void dtsec_isr(void *handle)
/* a.Write a 1 to RCTRL[GRS] */
- out_be32(&regs->rctrl,
- in_be32(&regs->rctrl) | RCTRL_GRS);
+ iowrite32be(ioread32be(&regs->rctrl) |
+ RCTRL_GRS, &regs->rctrl);
/* b.Wait until IEVENT[GRSC]=1, or at least
* 100 us has elapsed.
*/
for (i = 0; i < 100; i++) {
- if (in_be32(&regs->ievent) &
+ if (ioread32be(&regs->ievent) &
DTSEC_IMASK_GRSCEN)
break;
udelay(1);
}
- if (in_be32(&regs->ievent) & DTSEC_IMASK_GRSCEN)
- out_be32(&regs->ievent,
- DTSEC_IMASK_GRSCEN);
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ iowrite32be(DTSEC_IMASK_GRSCEN,
+ &regs->ievent);
else
pr_debug("Rx lockup due to Tx lockup\n");
@@ -1279,15 +936,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
- if (dtsec->fm_rev_info.major == 2)
- if (0 < pause_time && pause_time <= 320) {
+ if (pause_time) {
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
pr_warn("pause-time: %d illegal.Should be > 320\n",
pause_time);
return -EINVAL;
}
- if (pause_time) {
ptv = ioread32be(&regs->ptv);
ptv &= PTV_PTE_MASK;
ptv |= pause_time & PTV_PT_MASK;
@@ -1341,7 +997,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
struct eth_hash_entry *hash_entry;
u64 addr;
s32 bucket;
- u32 crc;
+ u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
if (!is_init_done(dtsec->dtsec_drv_param))
@@ -1357,7 +1013,8 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
pr_err("Could not compute hash bucket\n");
return -EINVAL;
}
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* considering the 9 highest order bits in crc H[8:0]:
*if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
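With the private GET_MAC_ADDR_CRC macro gone, both the add and delete paths derive the bucket the same way: seed crc32_le() with 0xFFFFFFFF over the six address bytes, bit-reverse the result, and use the highest-order bits of the reversed CRC to pick the hash register and bit. A condensed restatement of those steps for the GHTX (512-bucket) case, assuming <linux/crc32.h> and <linux/bitrev.h>:

    u32 crc = 0xFFFFFFFF;
    s32 bucket;

    crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
    crc = bitrev32(crc);

    /* the 9 highest-order bits of the reversed CRC select one of the
     * 512 extended hash buckets
     */
    bucket = (s32)((crc >> 23) & 0x1ff);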
@@ -1407,7 +1064,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
struct eth_hash_entry *hash_entry = NULL;
u64 addr;
s32 bucket;
- u32 crc;
+ u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
if (!is_init_done(dtsec->dtsec_drv_param))
@@ -1423,7 +1080,8 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
pr_err("Could not compute hash bucket\n");
return -EINVAL;
}
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
if (ghtx) {
bucket = (s32)((crc >> 23) & 0x1ff);
@@ -1532,22 +1190,17 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
- int err;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- err = mii_read_reg(dtsec, dtsec->tbi_phy_addr, 0, &tmp_reg16);
- if (err) {
- pr_err("Autonegotiation restart failed\n");
- return err;
- }
+ tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
- tmp_reg16 &= ~(PHY_CR_SPEED0 | PHY_CR_SPEED1);
- tmp_reg16 |=
- (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
+ tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
- mii_write_reg(dtsec, dtsec->tbi_phy_addr, 0, tmp_reg16);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
return 0;
}
@@ -1598,12 +1251,12 @@ int dtsec_set_exception(struct fman_mac *dtsec,
switch (exception) {
case FM_MAC_EX_1G_1588_TS_RX_ERR:
if (enable) {
- dtsec->en_tsu_err_exeption = true;
+ dtsec->en_tsu_err_exception = true;
iowrite32be(ioread32be(&regs->tmr_pemask) |
TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
} else {
- dtsec->en_tsu_err_exeption = false;
+ dtsec->en_tsu_err_exception = false;
iowrite32be(ioread32be(&regs->tmr_pemask) &
~TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
@@ -1644,7 +1297,8 @@ int dtsec_init(struct fman_mac *dtsec)
MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions);
+ dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->tbiphy->mdio.addr);
if (err) {
free_init_resources(dtsec);
pr_err("DTSEC version doesn't support this i/f mode\n");
@@ -1655,30 +1309,26 @@ int dtsec_init(struct fman_mac *dtsec)
u16 tmp_reg16;
/* Configure the TBI PHY Control Register */
- tmp_reg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
- tmp_reg16);
+ tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
- tmp_reg16 = PHY_TBICON_CLK_SEL;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
- tmp_reg16);
+ tmp_reg16 = TBICON_CLK_SELECT;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
- tmp_reg16 =
- (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX |
- PHY_CR_SPEED1);
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+ tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
if (dtsec->basex_if)
- tmp_reg16 = PHY_TBIANA_1000X;
+ tmp_reg16 = TBIANA_1000X;
else
- tmp_reg16 = PHY_TBIANA_SGMII;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 4, tmp_reg16);
+ tmp_reg16 = TBIANA_SGMII;
+ phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
- tmp_reg16 =
- (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX |
- PHY_CR_SPEED1);
+ tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
}
/* Max Frame Length */
@@ -1752,34 +1402,53 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = (struct dtsec_regs __iomem *)(base_addr);
- dtsec->mii_regs = (struct dtsec_mii_regs __iomem *)
- (base_addr + DTSEC_TO_MII_OFFSET);
+ dtsec->regs = base_addr;
dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
dtsec->max_speed = params->max_speed;
dtsec->phy_if = params->phy_if;
dtsec->mac_id = params->mac_id;
- dtsec->exceptions = DTSEC_DEFAULT_EXCEPTIONS;
+ dtsec->exceptions = (DTSEC_IMASK_BREN |
+ DTSEC_IMASK_RXCEN |
+ DTSEC_IMASK_BTEN |
+ DTSEC_IMASK_TXCEN |
+ DTSEC_IMASK_TXEEN |
+ DTSEC_IMASK_ABRTEN |
+ DTSEC_IMASK_LCEN |
+ DTSEC_IMASK_CRLEN |
+ DTSEC_IMASK_XFUNEN |
+ DTSEC_IMASK_IFERREN |
+ DTSEC_IMASK_MAGEN |
+ DTSEC_IMASK_TDPEEN |
+ DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
dtsec->dev_id = params->dev_id;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
- dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
- dtsec->tbi_phy_addr = dtsec->dtsec_drv_param->tbi_phy_addr;
+ dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- dtsec->mii_mgmt_clk = calc_mii_mgmt_clk(dtsec);
- if (dtsec->mii_mgmt_clk == 0) {
- pr_err("Can't calculate MII management clock\n");
- goto err_dtsec;
+
+ if (!params->internal_phy_node) {
+ pr_err("TBI PHY node is not available\n");
+ goto err_dtsec_drv_param;
+ }
+
+ dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ if (!dtsec->tbiphy) {
+ pr_err("of_phy_find_device (TBI PHY) failed\n");
+ goto err_dtsec_drv_param;
}
+ put_device(&dtsec->tbiphy->mdio.dev);
+
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
return dtsec;
+err_dtsec_drv_param:
+ kfree(dtsec_drv_param);
err_dtsec:
kfree(dtsec);
return NULL;
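dtsec_config() now reaches the internal TBI/SGMII PCS through phylib rather than the removed private MII register block: the PHY is looked up once from the internal_phy_node passed in fman_mac_params, and later accesses use phy_read()/phy_write() on the standard MII_BMCR/MII_ADVERTISE registers plus the TBI-specific MII_TBICON. A condensed sketch of the lookup and one of the writes, mirroring the hunks above:

    dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
    if (!dtsec->tbiphy)
            goto err_dtsec_drv_param;       /* TBI PHY not registered yet */

    /* drop the reference taken by the lookup; only the pointer is kept */
    put_device(&dtsec->tbiphy->mdio.dev);

    /* restart autonegotiation on the TBI side */
    phy_write(dtsec->tbiphy, MII_BMCR,
              BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);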
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
index 7a5e752e..dd6d0526 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@ struct fman_mac_params {
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- /* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
- * mdio-irq, or for polling
- */
void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
@@ -204,6 +200,8 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
+ /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
+ struct device_node *internal_phy_node;
};
struct eth_hash_t {
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
index 5730194a..bc4dfb18 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -42,59 +42,55 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+
+/* PCS registers */
+#define MDIO_SGMII_CR 0x00
+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
+#define MDIO_SGMII_LINK_TMR_L 0x12
+#define MDIO_SGMII_LINK_TMR_H 0x13
+#define MDIO_SGMII_IF_MODE 0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_AN_EN 0x1000
+#define SGMII_CR_RESTART_AN 0x0200
+#define SGMII_CR_FD 0x0100
+#define SGMII_CR_SPEED_SEL1_1G 0x0040
+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
+ SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII Device Ability for SGMII defines */
+#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
+#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
+
+/* Link timer define */
+#define LINK_TMR_L 0xa120
+#define LINK_TMR_H 0x0007
+#define LINK_TMR_L_BASEX 0xaf08
+#define LINK_TMR_H_BASEX 0x002f
+
+/* SGMII IF Mode defines */
+#define IF_MODE_USE_SGMII_AN 0x0002
+#define IF_MODE_SGMII_EN 0x0001
+#define IF_MODE_SGMII_SPEED_100M 0x0004
+#define IF_MODE_SGMII_SPEED_1G 0x0008
+#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
-/* MII Management Registers */
-#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
-#define MDIO_CFG_HOLD_MASK 0x0000001c
-#define MDIO_CFG_ENC45 0x00000040
-#define MDIO_CFG_BSY 0x00000001
-
-#define MDIO_CTL_PHY_ADDR_SHIFT 5
-
-#define MDIO_DATA_BSY 0x80000000
-
-/* Internal PHY access */
-#define PHY_MDIO_ADDR 0
-
-/* Internal PHY Registers - SGMII */
-#define PHY_SGMII_CR_RESET_AN 0x0200
-#define PHY_SGMII_CR_AN_ENABLE 0x1000
-#define PHY_SGMII_CR_DEF_VAL 0x1140
-#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
-#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
-#define PHY_SGMII_IF_MODE_DUPLEX_FULL 0x0000
-#define PHY_SGMII_IF_MODE_DUPLEX_HALF 0x0010
-#define PHY_SGMII_IF_MODE_SPEED_GB 0x0008
-#define PHY_SGMII_IF_MODE_SPEED_100M 0x0004
-#define PHY_SGMII_IF_MODE_SPEED_10M 0x0000
-#define PHY_SGMII_IF_MODE_AN 0x0002
-#define PHY_SGMII_IF_MODE_SGMII 0x0001
-#define PHY_SGMII_IF_MODE_1000X 0x0000
-
-/* Offset from the MEM map to the MDIO mem map */
-#define MEMAC_TO_MII_OFFSET 0x030
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7
/* Control and Configuration Register (COMMAND_CONFIG) */
-#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
-#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
-#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
-#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
-#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
-#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
-#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
-#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
@@ -171,10 +167,6 @@ do { \
#define DEFAULT_FRAME_LENGTH 0x600
#define DEFAULT_TX_IPG_LENGTH 12
-#define MEMAC_DEFAULT_EXCEPTIONS \
- ((u32)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | \
- MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
-
#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
@@ -187,14 +179,6 @@ struct mac_addr {
u32 mac_addr_u;
};
-/* MII Configuration Control Memory Map Registers */
-struct memac_mii_regs {
- u32 mdio_cfg; /* 0x030 */
- u32 mdio_ctrl; /* 0x034 */
- u32 mdio_data; /* 0x038 */
- u32 mdio_addr; /* 0x03c */
-};
-
/* memory map */
struct memac_regs {
u32 res0000[2]; /* General Control and Status */
@@ -340,25 +324,8 @@ struct memac_regs {
struct memac_cfg {
bool reset_on_init;
- bool rx_error_discard;
bool pause_ignore;
- bool pause_forward_enable;
- bool no_length_check_enable;
- bool cmd_frame_enable;
- bool send_idle_enable;
- bool wan_mode_enable;
bool promiscuous_mode_enable;
- bool tx_addr_ins_enable;
- bool loopback_enable;
- bool lgth_check_nostdr;
- bool time_stamp_enable;
- bool pad_enable;
- bool phy_tx_ena_on;
- bool rx_sfd_any;
- bool rx_pbl_fwd;
- bool tx_pbl_fwd;
- bool debug_mode;
- bool wake_on_lan;
struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
@@ -368,8 +335,6 @@ struct memac_cfg {
struct fman_mac {
/* Pointer to MAC memory mapped registers */
struct memac_regs __iomem *regs;
- /* Pointer to MII memory mapped registers */
- struct memac_mii_regs __iomem *mii_regs;
/* MAC address of device */
u64 addr;
/* Ethernet physical interface */
@@ -382,133 +347,15 @@ struct fman_mac {
struct eth_hash_t *multicast_addr_hash;
/* Pointer to driver's individual address hash table */
struct eth_hash_t *unicast_addr_hash;
- bool debug_mode;
u8 mac_id;
u32 exceptions;
struct memac_cfg *memac_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
+ struct phy_device *pcsphy;
};
-static int write_phy_reg_10g(struct memac_mii_regs __iomem *mii_regs,
- u8 phy_addr, u8 reg, u16 data)
-{
- u32 tmp_reg;
- int count;
-
- tmp_reg = ioread32be(&mii_regs->mdio_cfg);
- /* Leave only MDIO_CLK_DIV bits set on */
- tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
- /* Set maximum MDIO_HOLD value to allow phy to see
- * change of data signal
- */
- tmp_reg |= MDIO_CFG_HOLD_MASK;
- /* Add 10G interface mode */
- tmp_reg |= MDIO_CFG_ENC45;
- iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Specify phy and register to be accessed */
- iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
- iowrite32be(reg, &mii_regs->mdio_addr);
-
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Write data */
- iowrite32be(data, &mii_regs->mdio_data);
-
- /* Wait for write transaction end */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
- --count);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int write_phy_reg_1g(struct memac_mii_regs __iomem *mii_regs,
- u8 phy_addr, u8 reg, u16 data)
-{
- u32 tmp_reg;
- int count;
-
- /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
- tmp_reg = ioread32be(&mii_regs->mdio_cfg);
- tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
- iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Write transaction */
- tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
- tmp_reg |= reg;
- iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- iowrite32be(data, &mii_regs->mdio_data);
-
- /* Wait for write transaction to end */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
- --count);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int mii_write_phy_reg(struct fman_mac *memac, u8 phy_addr, u8 reg,
- u16 data)
-{
- int err = 0;
- /* Figure out interface type - 10G vs 1G.
- * In 10G interface both phy_addr and devAddr present.
- */
- if (memac->max_speed == SPEED_10000)
- err = write_phy_reg_10g(memac->mii_regs, phy_addr, reg, data);
- else
- err = write_phy_reg_1g(memac->mii_regs, phy_addr, reg, data);
-
- return err;
-}
-
static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
u8 paddr_num)
{
@@ -571,30 +418,15 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
/* Config */
tmp = 0;
- if (cfg->wan_mode_enable)
- tmp |= CMD_CFG_WAN_MODE;
if (cfg->promiscuous_mode_enable)
tmp |= CMD_CFG_PROMIS_EN;
- if (cfg->pause_forward_enable)
- tmp |= CMD_CFG_PAUSE_FWD;
if (cfg->pause_ignore)
tmp |= CMD_CFG_PAUSE_IGNORE;
- if (cfg->tx_addr_ins_enable)
- tmp |= CMD_CFG_TX_ADDR_INS;
- if (cfg->loopback_enable)
- tmp |= CMD_CFG_LOOPBACK_EN;
- if (cfg->cmd_frame_enable)
- tmp |= CMD_CFG_CNT_FRM_EN;
- if (cfg->send_idle_enable)
- tmp |= CMD_CFG_SEND_IDLE;
- if (cfg->no_length_check_enable)
- tmp |= CMD_CFG_NO_LEN_CHK;
- if (cfg->rx_sfd_any)
- tmp |= CMD_CFG_SFD_ANY;
- if (cfg->pad_enable)
- tmp |= CMD_CFG_TX_PAD_EN;
- if (cfg->wake_on_lan)
- tmp |= CMD_CFG_MG;
+
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ /* Enable padding of frames in transmit direction */
+ tmp |= CMD_CFG_TX_PAD_EN;
tmp |= CMD_CFG_CRC_FWD;
@@ -615,7 +447,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
break;
default:
tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII && !cfg->loopback_enable)
+ if (phy_if == PHY_INTERFACE_MODE_RGMII)
tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
}
iowrite32be(tmp, &regs->if_mode);
@@ -646,28 +478,11 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
static void set_dflts(struct memac_cfg *cfg)
{
cfg->reset_on_init = false;
- cfg->wan_mode_enable = false;
cfg->promiscuous_mode_enable = false;
- cfg->pause_forward_enable = false;
cfg->pause_ignore = false;
- cfg->tx_addr_ins_enable = false;
- cfg->loopback_enable = false;
- cfg->cmd_frame_enable = false;
- cfg->rx_error_discard = false;
- cfg->send_idle_enable = false;
- cfg->no_length_check_enable = true;
- cfg->lgth_check_nostdr = false;
- cfg->time_stamp_enable = false;
cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
- cfg->pad_enable = true;
- cfg->phy_tx_ena_on = false;
- cfg->rx_sfd_any = false;
- cfg->rx_pbl_fwd = false;
- cfg->tx_pbl_fwd = false;
- cfg->debug_mode = false;
- cfg->wake_on_lan = false;
}
static u32 get_mac_addr_hash_code(u64 eth_addr)
@@ -692,49 +507,42 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
return xor_val;
}
-static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
+static void setup_sgmii_internal_phy(struct fman_mac *memac,
struct fixed_phy_status *fixed_link)
{
- u16 tmp_reg16, speed;
+ u16 tmp_reg16;
- /* In case the higher MACs are used (i.e. the MACs that should
- * support 10G), speed=10000 is provided for SGMII ports.
- * Temporary modify enet mode to 1G one, so MII functions can
- * work correctly.
- */
- speed = memac->max_speed;
- memac->max_speed = SPEED_1000;
+ if (WARN_ON(!memac->pcsphy))
+ return;
/* SGMII mode */
- tmp_reg16 = PHY_SGMII_IF_MODE_SGMII;
+ tmp_reg16 = IF_MODE_SGMII_EN;
if (!fixed_link)
/* AN enable */
- tmp_reg16 |= PHY_SGMII_IF_MODE_AN;
+ tmp_reg16 |= IF_MODE_USE_SGMII_AN;
else {
#ifndef __rtems__
switch (fixed_link->speed) {
case 10:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_10M;
+ /* For 10M: IF_MODE[SPEED_10M] = 0 */
break;
case 100:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_100M;
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
break;
case 1000: /* fallthrough */
default:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_GB;
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
break;
}
- if (fixed_link->duplex)
- tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_FULL;
- else
- tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_HALF;
+ if (!fixed_link->duplex)
+ tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
#endif /* __rtems__ */
}
- mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+ phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
/* Device ability according to SGMII specification */
- tmp_reg16 = PHY_SGMII_DEV_ABILITY_SGMII;
- mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
/* Adjust link timer for SGMII -
* According to Cisco SGMII specification the timer should be 1.6 ms.
@@ -748,40 +556,25 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
* Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
* we always set up here a value of 2.5 SGMII.
*/
- mii_write_phy_reg(memac, phy_addr, 0x13, 0x0007);
- mii_write_phy_reg(memac, phy_addr, 0x12, 0xa120);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
if (!fixed_link)
/* Restart AN */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
else
/* AN disabled */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL & ~PHY_SGMII_CR_AN_ENABLE;
- mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
-
- /* Restore original speed */
- memac->max_speed = speed;
+ tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
}
-static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
{
- u16 tmp_reg16, speed;
-
- /* In case the higher MACs are used (i.e. the MACs that
- * should support 10G), speed=10000 is provided for SGMII ports.
- * Temporary modify enet mode to 1G one, so MII functions can
- * work correctly.
- */
- speed = memac->max_speed;
- memac->max_speed = SPEED_1000;
-
- /* 1000BaseX mode */
- tmp_reg16 = PHY_SGMII_IF_MODE_1000X;
- mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+ u16 tmp_reg16;
/* AN Device capability */
- tmp_reg16 = PHY_SGMII_DEV_ABILITY_1000X;
- mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
/* Adjust link timer for SGMII -
* For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
@@ -795,15 +588,12 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
* Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
* we always set up here a value of 2.5 SGMII.
*/
- mii_write_phy_reg(memac, phy_addr, 0x13, 0x002f);
- mii_write_phy_reg(memac, phy_addr, 0x12, 0xaf08);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
/* Restart AN */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
- mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
-
- /* Restore original speed */
- memac->max_speed = speed;
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
}
static int check_init_parameters(struct fman_mac *memac)
@@ -821,12 +611,6 @@ static int check_init_parameters(struct fman_mac *memac)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (!memac->memac_drv_param->no_length_check_enable) {
- pr_err("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -1216,7 +1000,7 @@ int memac_set_exception(struct fman_mac *memac,
int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
- u8 i, phy_addr;
+ u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
struct fixed_phy_status *fixed_link;
@@ -1262,33 +1046,35 @@ int memac_init(struct fman_mac *memac)
/* MAC strips CRC from received frames - this workaround
* should decrease the likelihood of bug appearance
*/
- reg32 = in_be32(&memac->regs->command_config);
+ reg32 = ioread32be(&memac->regs->command_config);
reg32 &= ~CMD_CFG_CRC_FWD;
- out_be32(&memac->regs->command_config, reg32);
+ iowrite32be(reg32, &memac->regs->command_config);
}
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
/* Configure internal SGMII PHY */
if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac, PHY_MDIO_ADDR);
+ setup_sgmii_internal_phy_base_x(memac);
else
- setup_sgmii_internal_phy(memac, PHY_MDIO_ADDR,
- fixed_link);
+ setup_sgmii_internal_phy(memac, fixed_link);
} else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
/* Configure 4 internal SGMII PHYs */
for (i = 0; i < 4; i++) {
+			u8 qsgmii_phy_addr, phy_addr;
/* QSGMII PHY address occupies 3 upper bits of 5-bit
* phy_address; the lower 2 bits are used to extend
* register address space and access each one of 4
* ports inside QSGMII.
*/
- phy_addr = (u8)((PHY_MDIO_ADDR << 2) | i);
+ phy_addr = memac->pcsphy->mdio.addr;
+			qsgmii_phy_addr = (u8)((phy_addr << 2) | i);
+			memac->pcsphy->mdio.addr = qsgmii_phy_addr;
if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac,
- phy_addr);
+ setup_sgmii_internal_phy_base_x(memac);
else
- setup_sgmii_internal_phy(memac, phy_addr,
- fixed_link);
+ setup_sgmii_internal_phy(memac, fixed_link);
+
+ memac->pcsphy->mdio.addr = phy_addr;
}
}
@@ -1330,6 +1116,9 @@ int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
+ if (memac->pcsphy)
+ put_device(&memac->pcsphy->mdio.dev);
+
kfree(memac->memac_drv_param);
kfree(memac);
@@ -1362,13 +1151,12 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
memac->addr = ENET_ADDR_TO_UINT64(params->addr);
- memac->regs = (struct memac_regs __iomem *)(base_addr);
- memac->mii_regs = (struct memac_mii_regs __iomem *)
- (base_addr + MEMAC_TO_MII_OFFSET);
+ memac->regs = base_addr;
memac->max_speed = params->max_speed;
memac->phy_if = params->phy_if;
memac->mac_id = params->mac_id;
- memac->exceptions = MEMAC_DEFAULT_EXCEPTIONS;
+ memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
+ MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
memac->dev_id = params->dev_id;
@@ -1378,5 +1166,21 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ if (!params->internal_phy_node) {
+ pr_err("PCS PHY node is not available\n");
+ memac_free(memac);
+ return NULL;
+ }
+
+ memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ if (!memac->pcsphy) {
+ pr_err("of_phy_find_device (PCS PHY) failed\n");
+ memac_free(memac);
+ return NULL;
+ }
+ }
+
return memac;
}
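Note: a condensed restatement of the QSGMII addressing introduced in memac_init() above. The PCS PHY address supplies the 3 upper bits of the 5-bit MDIO address and the port index the 2 lower bits, so the driver temporarily rewrites pcsphy->mdio.addr around each per-port setup call; the numeric values below are illustrative only.

	/* Sketch: with a base PCS address of 0x3, port 2 is addressed as
	 * (0x3 << 2) | 2 = 0xE; the base address is restored after each call.
	 */
	u8 base = memac->pcsphy->mdio.addr;
	int i;

	for (i = 0; i < 4; i++) {
		memac->pcsphy->mdio.addr = (u8)((base << 2) | i);
		if (memac->basex_if)
			setup_sgmii_internal_phy_base_x(memac);
		else
			setup_sgmii_internal_phy(memac, fixed_link);
		memac->pcsphy->mdio.addr = base;
	}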
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
index c715795b..453bf849 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -39,52 +39,14 @@
/* Structure for FM MURAM information */
struct muram_info;
-/**
- * fman_muram_init
- * @base: Pointer to base of memory mapped FM-MURAM.
- * @size: Size of the FM-MURAM partition.
- *
- * Creates partition in the MURAM.
- * The routine returns a pointer to the MURAM partition.
- * This pointer must be passed as to all other FM-MURAM function calls.
- * No actual initialization or configuration of FM_MURAM hardware is done by
- * this routine.
- *
- * Return: pointer to FM-MURAM object, or NULL for Failure.
- */
struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
-/**
- * fman_muram_offset_to_vbase
- * @muram: FM-MURAM module pointer.
- * @offset: the offset of the memory block
- *
- * Gives the address of the memory region from specific offset
- *
- * Return: The address of the memory block
- */
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-/**
- * fman_muram_alloc
- * @muram: FM-MURAM module pointer.
- * @size: Size of the memory to be allocated.
- *
- * Allocate some memory from FM-MURAM partition.
- *
- * Return: address of the allocated memory; NULL otherwise.
- */
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-/**
- * fman_muram_free_mem
- * muram: FM-MURAM module pointer.
- * offset: offset of the memory region to be freed.
- * size: size of the memory to be freed.
- *
- * Free an allocated memory from FM-MURAM partition.
- */
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size);
#endif /* __FM_MURAM_EXT */
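Note: with fman_muram_alloc() now returning an unsigned long offset, a typical usage pairs it with fman_muram_offset_to_vbase() and fman_muram_free_mem(). This is a hypothetical sketch; the IS_ERR_VALUE() error convention is an assumption, not stated in this header.

	/* Sketch only: allocate a MURAM block, map its offset to a usable
	 * address, use it, then free it. The IS_ERR_VALUE() check is assumed.
	 */
	unsigned long offset = fman_muram_alloc(muram, size);
	void *vaddr;

	if (IS_ERR_VALUE(offset))
		return -ENOMEM;

	vaddr = (void *)fman_muram_offset_to_vbase(muram, offset);
	/* ... use vaddr ... */
	fman_muram_free_mem(muram, offset, size);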
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.c b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
index e42ac1c1..7adba4f3 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -40,13 +40,14 @@
#include "fman.h"
#include "fman_sp.h"
-#include <asm/mpc85xx.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/libfdt_env.h>
/* Queue ID */
#define DFLT_FQ_ID 0x00FFFFFF
@@ -107,14 +108,10 @@
#define BMI_EBD_EN 0x80000000
#define BMI_PORT_CFG_EN 0x80000000
-#define BMI_PORT_CFG_FDOVR 0x02000000
#define BMI_PORT_STATUS_BSY 0x80000000
#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
-#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
-#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
-#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
@@ -165,16 +162,12 @@
#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
-#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE
-
#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
-#define BMI_PORT_RFNE_FRWD_RPD 0x40000000
-
#define RX_ERRS_TO_ENQ \
(FM_PORT_FRM_ERR_DMA | \
FM_PORT_FRM_ERR_PHYSICAL | \
@@ -190,12 +183,10 @@
/* NIA defines */
#define NIA_ORDER_RESTOR 0x00800000
-#define NIA_ENG_FM_CTL 0x00000000
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
-#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -401,8 +392,6 @@ struct fman_port_cfg {
u8 cheksum_last_bytes_ignore;
u8 rx_cut_end_bytes;
struct fman_buf_pool_depletion buf_pool_depletion;
- bool discard_override;
- bool en_buf_pool_depletion;
struct fman_ext_pools ext_buf_pools;
u32 tx_fifo_min_level;
u32 tx_fifo_low_comf_level;
@@ -413,32 +402,17 @@ struct fman_port_cfg {
struct fman_sp_int_context_data_copy int_context;
u32 discard_mask;
u32 err_mask;
- bool forward_reuse_int_context;
struct fman_buffer_prefix_content buffer_prefix_content;
bool dont_release_buf;
- bool set_num_of_tasks;
- bool set_num_of_open_dmas;
- bool set_size_of_fifo;
- bool bcb_workaround;
u8 rx_fd_bits;
u32 tx_fifo_deq_pipeline_depth;
- bool errata_A006675;
bool errata_A006320;
bool excessive_threshold_register;
- bool fmbm_rebm_has_sgd;
bool fmbm_tfne_has_features;
- bool qmi_deq_options_support;
enum fman_port_dma_swap dma_swap_data;
- bool dma_ic_stash_on;
- bool dma_header_stash_on;
- bool dma_sg_stash_on;
- bool dma_write_optimize;
enum fman_port_color color;
- bool sync_req;
-
- bool no_scatter_gather;
};
struct fman_port_rx_pools_params {
@@ -458,6 +432,7 @@ struct fman_port_dts_params {
struct fman_port {
void *fm;
+ struct device *dev;
struct fman_rev_info rev_info;
u8 port_id;
enum fman_port_type port_type;
@@ -493,22 +468,10 @@ static int init_bmi_rx(struct fman_port *port)
struct fman_port_cfg *cfg = port->cfg;
u32 tmp;
- /* Rx Configuration register */
- tmp = 0;
- if (cfg->discard_override)
- tmp |= BMI_PORT_CFG_FDOVR;
- iowrite32be(tmp, &regs->fmbm_rcfg);
-
/* DMA attributes */
tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
- if (cfg->dma_ic_stash_on)
- tmp |= BMI_DMA_ATTR_IC_STASH_ON;
- if (cfg->dma_header_stash_on)
- tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
- if (cfg->dma_sg_stash_on)
- tmp |= BMI_DMA_ATTR_SG_STASH_ON;
- if (cfg->dma_write_optimize)
- tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+ /* Enable write optimization */
+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
iowrite32be(tmp, &regs->fmbm_rda);
/* Rx FIFO parameters */
@@ -548,27 +511,21 @@ static int init_bmi_rx(struct fman_port *port)
tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
BMI_EXT_BUF_MARG_START_SHIFT;
tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
- if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
- tmp |= BMI_SG_DISABLE;
iowrite32be(tmp, &regs->fmbm_rebm);
/* Frame attributes */
tmp = BMI_CMD_RX_MR_DEF;
tmp |= BMI_CMD_ATTR_ORDER;
tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
- if (cfg->sync_req)
- tmp |= BMI_CMD_ATTR_SYNC;
+ /* Synchronization request */
+ tmp |= BMI_CMD_ATTR_SYNC;
iowrite32be(tmp, &regs->fmbm_rfca);
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
- if (cfg->errata_A006675)
- tmp |= NIA_ENG_FM_CTL |
- NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
- else
- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
iowrite32be(tmp, &regs->fmbm_rfne);
/* Enqueue NIA */
@@ -597,12 +554,6 @@ static int init_bmi_tx(struct fman_port *port)
/* DMA attributes */
tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
- if (cfg->dma_ic_stash_on)
- tmp |= BMI_DMA_ATTR_IC_STASH_ON;
- if (cfg->dma_header_stash_on)
- tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
- if (cfg->dma_sg_stash_on)
- tmp |= BMI_DMA_ATTR_SG_STASH_ON;
iowrite32be(tmp, &regs->fmbm_tda);
/* Tx FIFO parameters */
@@ -698,20 +649,19 @@ static int init_qmi(struct fman_port *port)
return -EINVAL;
}
- if (cfg->qmi_deq_options_support) {
- switch (cfg->deq_prefetch_option) {
- case FMAN_PORT_DEQ_NO_PREFETCH:
- break;
- case FMAN_PORT_DEQ_PART_PREFETCH:
- tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
- break;
- case FMAN_PORT_DEQ_FULL_PREFETCH:
- tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
- break;
- default:
- return -EINVAL;
- }
+ switch (cfg->deq_prefetch_option) {
+ case FMAN_PORT_DEQ_NO_PREFETCH:
+ break;
+ case FMAN_PORT_DEQ_PART_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+ break;
+ case FMAN_PORT_DEQ_FULL_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+ break;
+ default:
+ return -EINVAL;
}
+
tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
tmp |= cfg->deq_byte_cnt;
iowrite32be(tmp, &regs->fmqm_pndc);
@@ -883,11 +833,11 @@ static int verify_size_of_fifo(struct fman_port *port)
/* Verify the size */
if (port->fifo_bufs.num < min_fifo_size_required)
- pr_debug("FIFO size should be enlarged to %d bytes\n",
- min_fifo_size_required);
+ dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
+ __func__, min_fifo_size_required);
else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
- pr_debug("For b2b processing,FIFO may be enlarged to %d bytes\n",
- opt_fifo_size_for_b2b);
+		dev_dbg(port->dev, "%s: For b2b processing, FIFO may be enlarged to %d bytes\n",
+ __func__, opt_fifo_size_for_b2b);
return 0;
}
@@ -961,7 +911,7 @@ static int set_ext_buffer_pools(struct fman_port *port)
err = set_bpools(port, &bpools);
if (err != 0) {
- pr_err("FMan port: set_bpools\n");
+ dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
return -EINVAL;
}
@@ -976,8 +926,6 @@ static int init_low_level_driver(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
- if (cfg->forward_reuse_int_context)
- cfg->rx_fd_bits = (u8)(BMI_PORT_RFNE_FRWD_RPD >> 24);
break;
default:
break;
@@ -990,7 +938,8 @@ static int init_low_level_driver(struct fman_port *port)
port->cfg->int_buf_start_margin = port->internal_buf_offset;
if (init(port) != 0) {
- pr_err("fman_port_init\n");
+ dev_err(port->dev, "%s: fman port initialization failed\n",
+ __func__);
return -ENODEV;
}
@@ -1004,9 +953,9 @@ static int init_low_level_driver(struct fman_port *port)
* Otherwise, if fmbm_tcfqid is 0 the FM will release
* buffers to BM regardless of fmbm_tfene
*/
- out_be32(&port->bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
- out_be32(&port->bmi_regs->tx.fmbm_tfene,
- NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
+ iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &port->bmi_regs->tx.fmbm_tfene);
}
}
@@ -1038,7 +987,7 @@ static int fill_soc_specific_params(struct fman_port *port)
break;
default:
- pr_err("Unsupported FMan version\n");
+ dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
return -EINVAL;
}
@@ -1218,14 +1167,12 @@ static void set_dflt_cfg(struct fman_port *port,
struct fman_port_cfg *cfg = port->cfg;
cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
- cfg->dma_write_optimize = true;
cfg->color = FMAN_PORT_COLOR_GREEN;
cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
cfg->tx_fifo_low_comf_level = (5 * 1024);
cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
- cfg->sync_req = true;
cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
cfg->tx_fifo_deq_pipeline_depth =
BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
@@ -1242,14 +1189,10 @@ static void set_dflt_cfg(struct fman_port *port,
cfg->errata_A006320 = true;
/* Excessive Threshold register - exists for pre-FMv3 chips only */
- if (port->rev_info.major < 6) {
+ if (port->rev_info.major < 6)
cfg->excessive_threshold_register = true;
- } else {
- cfg->fmbm_rebm_has_sgd = true;
+ else
cfg->fmbm_tfne_has_features = true;
- }
-
- cfg->qmi_deq_options_support = true;
cfg->buffer_prefix_content.data_align =
DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
@@ -1267,15 +1210,6 @@ static void set_rx_dflt_cfg(struct fman_port *port,
port_params->specific_params.rx_params.err_fqid;
port->cfg->dflt_fqid =
port_params->specific_params.rx_params.dflt_fqid;
-
- /* Set BCB workaround on Rx ports, only for B4860 rev1 */
- if (port->rev_info.major >= 6) {
- unsigned int svr;
-
- svr = mfspr(SPRN_SVR);
- if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) == 1))
- port->cfg->bcb_workaround = true;
- }
}
static void set_tx_dflt_cfg(struct fman_port *port,
@@ -1295,6 +1229,20 @@ static void set_tx_dflt_cfg(struct fman_port *port,
port->cfg->deq_high_priority = true;
}
+/**
+ * fman_port_config
+ * @port: Pointer to the port structure
+ * @params: Pointer to data structure of parameters
+ *
+ * Creates a descriptor for the FM PORT module.
+ * The routine returns a pointer to the FM PORT object.
+ * This descriptor must be passed as first parameter to all other FM PORT
+ * function calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_config(struct fman_port *port, struct fman_port_params *params)
{
void __iomem *base_addr = port->dts_params.base_addr;
@@ -1330,10 +1278,8 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Continue with other parameters */
/* set memory map pointers */
- port->bmi_regs = (union fman_port_bmi_regs __iomem *)
- (base_addr + BMI_PORT_REGS_OFFSET);
- port->qmi_regs = (struct fman_port_qmi_regs __iomem *)
- (base_addr + QMI_PORT_REGS_OFFSET);
+ port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
+ port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1377,7 +1323,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
u32 reg;
reg = 0x00001013;
- out_be32(&port->bmi_regs->tx.fmbm_tfp, reg);
+ iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
}
}
@@ -1391,6 +1337,14 @@ err_params:
}
EXPORT_SYMBOL(fman_port_config);
+/**
+ * fman_port_init
+ * port: A pointer to a FM Port module.
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_init(struct fman_port *port)
{
struct fman_port_cfg *cfg;
@@ -1408,14 +1362,6 @@ int fman_port_init(struct fman_port *port)
if (err)
return err;
- /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 Errata workaround */
- if (port->rev_info.major >= 6 && (port->cfg->bcb_workaround) &&
- ((port->port_type == FMAN_PORT_TYPE_RX) &&
- (port->port_speed == 1000))) {
- port->cfg->discard_mask |= FM_PORT_FRM_ERR_PHYSICAL;
- port->fifo_bufs.num += 4 * 1024;
- }
-
cfg = port->cfg;
if (port->port_type == FMAN_PORT_TYPE_RX) {
@@ -1430,10 +1376,10 @@ int fman_port_init(struct fman_port *port)
if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
cfg->buf_margins.end_margins >
port->rx_pools_params.largest_buf_size) {
- pr_err("buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
- cfg->buf_margins.start_margins,
- cfg->buf_margins.end_margins,
- port->rx_pools_params.largest_buf_size);
+ dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+ __func__, cfg->buf_margins.start_margins,
+ cfg->buf_margins.end_margins,
+ port->rx_pools_params.largest_buf_size);
return -EINVAL;
}
}
@@ -1473,6 +1419,31 @@ int fman_port_init(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_init);
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port A pointer to a FM Port module.
+ * @buffer_prefix_content A structure of parameters describing
+ * the structure of the buffer.
+ * Out parameter:
+ * Start margin - offset of data from
+ * start of external buffer.
+ * Defines the structure, size and content of the application buffer.
+ * The FM will reserve the first 'priv_data_size' bytes of the prefix, then,
+ * depending on 'pass_prs_result' and 'pass_time_stamp', copy the parse
+ * result and the timestamp (in this order) ahead of the packet data in the
+ * application buffer. On Tx ports, if 'pass_prs_result' is set, the
+ * application should fill in the parse result at its offset in the prefix.
+ * Calling this routine changes the buffer margins definitions in the internal
+ * driver data base from its default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
+ * May be used for all ports
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
struct fman_buffer_prefix_content *
buffer_prefix_content)
@@ -1494,9 +1465,24 @@ int fman_port_cfg_buf_prefix_content(struct fman_port *port,
}
EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
+/**
+ * fman_port_disable
+ * port: A pointer to a FM Port module.
+ *
+ * Gracefully disable an FM port. The port will not start new tasks after all
+ * tasks associated with the port are terminated.
+ *
+ * This is a blocking routine, it returns after port is gracefully stopped,
+ * i.e. the port will not accept new frames, but it will finish all frames
+ * or tasks which have already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_disable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+ u32 tmp;
bool rx_port, failure = false;
int count;
@@ -1553,16 +1539,27 @@ int fman_port_disable(struct fman_port *port)
}
if (failure)
- pr_debug("FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
- port->port_id);
+ dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+ __func__, port->port_id);
return 0;
}
EXPORT_SYMBOL(fman_port_disable);
+/**
+ * fman_port_enable
+ * port: A pointer to a FM Port module.
+ *
+ * A runtime routine provided to allow disable/enable of port.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_enable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, tmp;
+ u32 __iomem *bmi_cfg_reg;
+ u32 tmp;
bool rx_port;
if (!is_init_done(port->cfg))
@@ -1595,12 +1592,30 @@ int fman_port_enable(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_enable);
+/**
+ * fman_port_bind
+ * dev: FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
struct fman_port *fman_port_bind(struct device *dev)
{
return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
}
EXPORT_SYMBOL(fman_port_bind);
+/**
+ * fman_port_get_qman_channel_id
+ * port: Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
u32 fman_port_get_qman_channel_id(struct fman_port *port)
{
return port->dts_params.qman_channel_id;
@@ -1624,7 +1639,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
#ifndef __rtems__
struct resource *dev_res;
#endif /* __rtems__ */
- const u32 *u32_prop;
+ u32 val;
int err = 0, lenp;
enum fman_port_type port_type;
u16 port_speed;
@@ -1634,13 +1649,15 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
if (!port)
return -ENOMEM;
+ port->dev = &of_dev->dev;
+
port_node = of_node_get(of_dev->dev.of_node);
/* Get the FM node */
#ifndef __rtems__
fm_node = of_get_parent(port_node);
if (!fm_node) {
- pr_err("of_get_parent() failed\n");
+ dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
err = -ENODEV;
goto return_err;
}
@@ -1653,28 +1670,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
}
#endif /* __rtems__ */
- u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, cell-index) failed\n",
- port_node->full_name);
- err = -EINVAL;
- goto return_err;
- }
- if (WARN_ON(lenp != sizeof(u32))) {
+ err = of_property_read_u32(port_node, "cell-index", &val);
+ if (err) {
+ dev_err(port->dev, "%s: reading cell-index for %s failed\n",
+ __func__, port_node->full_name);
err = -EINVAL;
goto return_err;
}
- port_id = (u8)*u32_prop;
-
+ port_id = (u8)val;
port->dts_params.id = port_id;
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port",
- &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1687,9 +1696,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port", &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1700,7 +1707,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
port_type = FMAN_PORT_TYPE_RX;
} else {
- pr_err("Illegal port type\n");
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
goto return_err;
}
@@ -1713,7 +1720,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
qman_channel_id = fman_get_qman_channel_id(fman, port_id);
if (qman_channel_id == 0) {
- pr_err("incorrect qman-channel-id\n");
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
err = -EINVAL;
goto return_err;
}
@@ -1722,7 +1730,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
err = of_address_to_resource(port_node, 0, &res);
if (err < 0) {
- pr_err("of_address_to_resource() failed\n");
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
err = -ENOMEM;
goto return_err;
}
@@ -1732,21 +1741,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
of_node_put(port_node);
#ifndef __rtems__
- dev_res = __devm_request_region(fman_get_device(fman), &res,
- res.start, (res.end + 1 - res.start),
- "fman-port");
+ dev_res = __devm_request_region(port->dev, &res, res.start,
+ resource_size(&res), "fman-port");
if (!dev_res) {
- pr_err("__devm_request_region() failed\n");
+ dev_err(port->dev, "%s: __devm_request_region() failed\n",
+ __func__);
err = -EINVAL;
goto free_port;
}
#endif /* __rtems__ */
- port->dts_params.base_addr = devm_ioremap(fman_get_device(fman),
- res.start,
- (res.end + 1 - res.start));
- if (port->dts_params.base_addr == 0)
- pr_err("devm_ioremap() failed\n");
+ port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
+ resource_size(&res));
+ if (!port->dts_params.base_addr)
+ dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
dev_set_drvdata(&of_dev->dev, port);
@@ -1774,14 +1782,34 @@ MODULE_DEVICE_TABLE(of, fman_port_match);
static struct platform_driver fman_port_driver = {
.driver = {
- .name = "fsl-fman-port",
- .of_match_table = fman_port_match,
- },
+ .name = "fsl-fman-port",
+ .of_match_table = fman_port_match,
+ },
.probe = fman_port_probe,
};
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_port_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+ platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
#else /* __rtems__ */
#include <sys/cdefs.h>
#include <sys/param.h>
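Note: the kernel-doc comments added above imply a fixed call order: fman_port_cfg_buf_prefix_content() is only valid between fman_port_config() and fman_port_init(), and fman_port_enable() only after fman_port_init(). A hypothetical bring-up sequence, with 'params' and 'prefix' assumed to be filled in by the caller:

	struct fman_port *port = fman_port_bind(dev);
	int err;

	err = fman_port_config(port, &params);
	if (err)
		return err;

	err = fman_port_cfg_buf_prefix_content(port, &prefix);
	if (err)
		return err;

	err = fman_port_init(port);
	if (err)
		return err;

	return fman_port_enable(port);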
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.h b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
index 56c1d023..8ba90173 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -132,109 +132,20 @@ struct fman_port_params {
/* Additional parameters depending on port type. */
};
-/**
- * fman_port_config
- * @port: Pointer to the port structure
- * @params: Pointer to data structure of parameters
- *
- * Creates a descriptor for the FM PORT module.
- * The routine returns a pointer to the FM PORT object.
- * This descriptor must be passed as first parameter to all other FM PORT
- * function calls.
- * No actual initialization or configuration of FM hardware is done by this
- * routine.
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_config(struct fman_port *port, struct fman_port_params *params);
-/**
- * fman_port_init
- * port: A pointer to a FM Port module.
- * Initializes the FM PORT module by defining the software structure and
- * configuring the hardware registers.
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_init(struct fman_port *port);
-/**
- * fman_port_cfg_buf_prefix_content
- * @port A pointer to a FM Port module.
- * @buffer_prefix_content A structure of parameters describing
- * the structure of the buffer.
- * Out parameter:
- * Start margin - offset of data from
- * start of external buffer.
- * Defines the structure, size and content of the application buffer.
- * The prefix, in Tx ports, if 'pass_prs_result', the application should set
- * a value to their offsets in the prefix of the FM will save the first
- * 'priv_data_size', than, depending on 'pass_prs_result' and
- * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
- * (in this order), to the application buffer, and to offset.
- * Calling this routine changes the buffer margins definitions in the internal
- * driver data base from its default configuration:
- * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
- * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
- * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
- * May be used for all ports
- *
- * Allowed only following fman_port_config() and before fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
struct fman_buffer_prefix_content
*buffer_prefix_content);
-/**
- * fman_port_disable
- * port: A pointer to a FM Port module.
- *
- * Gracefully disable an FM port. The port will not start new tasks after all
- * tasks associated with the port are terminated.
- *
- * This is a blocking routine, it returns after port is gracefully stopped,
- * i.e. the port will not except new frames, but it will finish all frames
- * or tasks which were already began.
- * Allowed only following fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_disable(struct fman_port *port);
-/**
- * fman_port_enable
- * port: A pointer to a FM Port module.
- *
- * A runtime routine provided to allow disable/enable of port.
- *
- * Allowed only following fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_enable(struct fman_port *port);
-/**
- * fman_port_get_qman_channel_id
- * port: Pointer to the FMan port devuce
- *
- * Get the QMan channel ID for the specific port
- *
- * Return: QMan channel ID
- */
u32 fman_port_get_qman_channel_id(struct fman_port *port);
-/**
- * fman_port_bind
- * dev: FMan Port OF device pointer
- *
- * Bind to a specific FMan Port.
- *
- * Allowed only after the port was created.
- *
- * Return: A pointer to the FMan port device.
- */
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
index 2fcfa6c0..a059cb21 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -84,6 +84,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
}
}
}
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
int_context_data_copy,
@@ -168,4 +169,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
return 0;
}
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 5b22a044..0b5e971e 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -36,31 +36,22 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "crc_mac_addr_ext.h"
-
#include "fman_tgec.h"
#include "fman.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
+#include <linux/crc32.h>
/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
-#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
-#define CMD_CFG_SEND_IDLE 0x00010000
-#define CMD_CFG_RX_ER_DISC 0x00004000
-#define CMD_CFG_CMD_FRM_EN 0x00002000
-#define CMD_CFG_LOOPBACK_EN 0x00000400
-#define CMD_CFG_TX_ADDR_INS 0x00000200
#define CMD_CFG_PAUSE_IGNORE 0x00000100
-#define CMD_CFG_PAUSE_FWD 0x00000080
#define CMF_CFG_CRC_FWD 0x00000040
#define CMD_CFG_PROMIS_EN 0x00000010
-#define CMD_CFG_WAN_MODE 0x00000008
#define CMD_CFG_RX_EN 0x00000002
#define CMD_CFG_TX_EN 0x00000001
@@ -92,23 +83,6 @@
#define DEFAULT_MAX_FRAME_LENGTH 0x600
#define DEFAULT_PAUSE_QUANT 0xf000
-#define TGEC_DEFAULT_EXCEPTIONS \
- ((u32)((TGEC_IMASK_MDIO_SCAN_EVENT) |\
- (TGEC_IMASK_REM_FAULT) |\
- (TGEC_IMASK_LOC_FAULT) |\
- (TGEC_IMASK_TX_ECC_ER) |\
- (TGEC_IMASK_TX_FIFO_UNFL) |\
- (TGEC_IMASK_TX_FIFO_OVFL) |\
- (TGEC_IMASK_TX_ER) |\
- (TGEC_IMASK_RX_FIFO_OVFL) |\
- (TGEC_IMASK_RX_ECC_ER) |\
- (TGEC_IMASK_RX_JAB_FRM) |\
- (TGEC_IMASK_RX_OVRSZ_FRM) |\
- (TGEC_IMASK_RX_RUNT_FRM) |\
- (TGEC_IMASK_RX_FRAG_FRM) |\
- (TGEC_IMASK_RX_CRC_ER) |\
- (TGEC_IMASK_RX_ALIGN_ER)))
-
/* number of pattern match registers (entries) */
#define TGEC_NUM_OF_PADDRS 1
@@ -222,17 +196,8 @@ struct tgec_regs {
};
struct tgec_cfg {
- bool rx_error_discard;
bool pause_ignore;
- bool pause_forward_enable;
- bool no_length_check_enable;
- bool cmd_frame_enable;
- bool send_idle_enable;
- bool wan_mode_enable;
bool promiscuous_mode_enable;
- bool tx_addr_ins_enable;
- bool loopback_enable;
- bool time_stamp_enable;
u16 max_frame_length;
u16 pause_quant;
u32 tx_ipg_length;
@@ -270,17 +235,8 @@ static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
static void set_dflts(struct tgec_cfg *cfg)
{
- cfg->wan_mode_enable = false;
cfg->promiscuous_mode_enable = false;
- cfg->pause_forward_enable = false;
cfg->pause_ignore = false;
- cfg->tx_addr_ins_enable = false;
- cfg->loopback_enable = false;
- cfg->cmd_frame_enable = false;
- cfg->rx_error_discard = false;
- cfg->send_idle_enable = false;
- cfg->no_length_check_enable = true;
- cfg->time_stamp_enable = false;
cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
cfg->pause_quant = DEFAULT_PAUSE_QUANT;
@@ -293,28 +249,12 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
/* Config */
tmp = CMF_CFG_CRC_FWD;
- if (cfg->wan_mode_enable)
- tmp |= CMD_CFG_WAN_MODE;
if (cfg->promiscuous_mode_enable)
tmp |= CMD_CFG_PROMIS_EN;
- if (cfg->pause_forward_enable)
- tmp |= CMD_CFG_PAUSE_FWD;
if (cfg->pause_ignore)
tmp |= CMD_CFG_PAUSE_IGNORE;
- if (cfg->tx_addr_ins_enable)
- tmp |= CMD_CFG_TX_ADDR_INS;
- if (cfg->loopback_enable)
- tmp |= CMD_CFG_LOOPBACK_EN;
- if (cfg->cmd_frame_enable)
- tmp |= CMD_CFG_CMD_FRM_EN;
- if (cfg->rx_error_discard)
- tmp |= CMD_CFG_RX_ER_DISC;
- if (cfg->send_idle_enable)
- tmp |= CMD_CFG_SEND_IDLE;
- if (cfg->no_length_check_enable)
- tmp |= CMD_CFG_NO_LEN_CHK;
- if (cfg->time_stamp_enable)
- tmp |= CMD_CFG_EN_TIMESTAMP;
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
iowrite32be(tmp, &regs->command_config);
/* Max Frame Length */
@@ -348,12 +288,6 @@ static int check_init_parameters(struct fman_mac *tgec)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (!tgec->cfg->no_length_check_enable) {
- pr_warn("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -421,18 +355,6 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
return bit_mask;
}
-static u32 get_mac_addr_hash_code(u64 eth_addr)
-{
- u32 crc;
-
- /* CRC calculation */
- GET_MAC_ADDR_CRC(eth_addr, crc);
-
- crc = bitrev32(crc);
-
- return crc;
-}
-
static void tgec_err_exception(void *handle)
{
struct fman_mac *tgec = (struct fman_mac *)handle;
@@ -613,7 +535,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
- u32 crc, hash;
+ u32 crc = 0xFFFFFFFF, hash;
u64 addr;
if (!is_init_done(tgec->cfg))
@@ -627,8 +549,8 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return -EINVAL;
}
/* CRC calculation */
- crc = get_mac_addr_hash_code(addr);
-
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* Take 9 MSB bits */
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
@@ -651,7 +573,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
struct list_head *pos;
- u32 crc, hash;
+ u32 crc = 0xFFFFFFFF, hash;
u64 addr;
if (!is_init_done(tgec->cfg))
@@ -660,7 +582,8 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
addr = ((*(u64 *)eth_addr) >> 16);
/* CRC calculation */
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* Take 9 MSB bits */
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
@@ -803,9 +726,6 @@ int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
- if (tgec->cfg)
- tgec->cfg = NULL;
-
kfree(tgec->cfg);
kfree(tgec);
@@ -836,11 +756,25 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = (struct tgec_regs __iomem *)(base_addr);
+ tgec->regs = base_addr;
tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
- tgec->exceptions = TGEC_DEFAULT_EXCEPTIONS;
+ tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT |
+ TGEC_IMASK_TX_ECC_ER |
+ TGEC_IMASK_TX_FIFO_UNFL |
+ TGEC_IMASK_TX_FIFO_OVFL |
+ TGEC_IMASK_TX_ER |
+ TGEC_IMASK_RX_FIFO_OVFL |
+ TGEC_IMASK_RX_ECC_ER |
+ TGEC_IMASK_RX_JAB_FRM |
+ TGEC_IMASK_RX_OVRSZ_FRM |
+ TGEC_IMASK_RX_RUNT_FRM |
+ TGEC_IMASK_RX_FRAG_FRM |
+ TGEC_IMASK_RX_CRC_ER |
+ TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
tgec->dev_id = params->dev_id;
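Note: for reference, the 9-bit multicast hash index computed by the reworked tgec_add_hash_mac_address()/tgec_del_hash_mac_address() above, condensed into one place; eth_addr is assumed to point at a 6-byte MAC address.

	#include <linux/crc32.h>
	#include <linux/bitrev.h>

	u32 crc = 0xFFFFFFFF;			/* seed with all ones */
	u32 hash;

	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);			/* bit-reverse the result */
	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* 9 MSBs */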
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.c b/linux/drivers/net/ethernet/freescale/fman/mac.c
index 4b26211e..f04ae704 100644
--- a/linux/drivers/net/ethernet/freescale/fman/mac.c
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.c
@@ -37,6 +37,7 @@
#ifdef __rtems__
#include <sys/types.h>
+#include <sys/socket.h>
#include <net/if_dl.h>
#include <bsp/fdt.h>
#include "../../../../../../rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h"
@@ -52,6 +53,7 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
+#include <linux/libfdt_env.h>
#include "mac.h"
#include "fman_mac.h"
@@ -59,13 +61,8 @@
#include "fman_tgec.h"
#include "fman_memac.h"
-#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
-
MODULE_LICENSE("Dual BSD/GPL");
-
-MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
-
-MODULE_DESCRIPTION(MAC_DESCRIPTION);
+MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
struct device *dev;
@@ -74,6 +71,11 @@ struct mac_priv_s {
phy_interface_t phy_if;
struct fman *fman;
struct device_node *phy_node;
+ struct device_node *internal_phy_node;
+#ifdef __rtems__
+ struct device_node phy_node_storage;
+ struct device_node internal_phy_node_storage;
+#endif /* __rtems__ */
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
@@ -90,15 +92,15 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *_mac_dev, enum fman_mac_exceptions ex)
+static void mac_exception(void *handle, enum fman_mac_exceptions ex)
{
struct mac_device *mac_dev;
struct mac_priv_s *priv;
- mac_dev = (struct mac_device *)_mac_dev;
+ mac_dev = handle;
priv = mac_dev->priv;
- if (FM_MAC_EX_10G_RX_FIFO_OVFL == ex) {
+ if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
@@ -118,7 +120,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
#ifndef __rtems__
params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start, 0x2000);
+ devm_ioremap(priv->dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
#else /* __rtems__ */
params->base_addr = priv->vaddr;
#endif /* __rtems__ */
@@ -131,6 +134,7 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
params->exception_cb = mac_exception;
params->event_cb = mac_exception;
params->dev_id = mac_dev;
+ params->internal_phy_node = priv->internal_phy_node;
}
static int tgec_initialization(struct mac_device *mac_dev)
@@ -362,9 +366,19 @@ static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
return 0;
}
-/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Pause frame setting for RX
+ * @tx: Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Avoid redundant calls to FMD, if the MAC driver already contains the desired
* active PAUSE settings. Otherwise, the new active settings should be reflected
* in FMan.
+ *
+ * Return: 0 on success; Error code otherwise.
*/
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
{
@@ -392,8 +406,16 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
EXPORT_SYMBOL(fman_set_mac_active_pause);
#ifndef __rtems__
-/* Determine the MAC RX/TX PAUSE frames settings based on PHY
+/**
+ * fman_get_pause_cfg
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Return value for RX setting
+ * @tx: Return value for TX setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings based on PHY
 * autonegotiation or values set by ethtool.
+ *
+ * Return: none.
*/
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause)
@@ -495,9 +517,9 @@ static void adjust_link_memac(struct net_device *net_dev)
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
-static int init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev,
- void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev,
+ void (*adj_lnk)(struct net_device *))
{
struct phy_device *phy_dev;
struct mac_priv_s *priv = mac_dev->priv;
@@ -506,7 +528,7 @@ static int init_phy(struct net_device *net_dev,
priv->phy_if);
if (!phy_dev) {
netdev_err(net_dev, "Could not connect to PHY\n");
- return -ENODEV;
+ return NULL;
}
/* Remove any features not supported by the controller */
@@ -519,23 +541,23 @@ static int init_phy(struct net_device *net_dev,
mac_dev->phy_dev = phy_dev;
- return 0;
+ return phy_dev;
}
-static int dtsec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
}
-static int tgec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, adjust_link_void);
}
-static int memac_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_memac);
}
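
With init_phy() and the three per-MAC wrappers now returning the struct phy_device pointer (or NULL) instead of an int, a caller checks the pointer rather than an error code; a minimal sketch, with the hypothetical name example_connect_phy() and the errno choice left to the caller:

static int example_connect_phy(struct net_device *net_dev,
			       struct mac_device *mac_dev)
{
	struct phy_device *phy_dev;

	phy_dev = mac_dev->init_phy(net_dev, mac_dev);
	if (!phy_dev) {
		netdev_err(net_dev, "init_phy() failed\n");
		return -ENODEV;	/* errno choice is the caller's */
	}
	return 0;
}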
@@ -639,31 +661,6 @@ static void setup_memac(struct mac_device *mac_dev)
static DEFINE_MUTEX(eth_lock);
#endif /* __rtems__ */
-static const char phy_str[][11] = {
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(phy_str); i++)
- if (strcmp(str, phy_str[i]) == 0)
- return (phy_interface_t)i;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_MII] = SPEED_100,
[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
@@ -675,6 +672,7 @@ static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
};
@@ -754,13 +752,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
#ifdef __rtems__
struct fman_mac_softc *sc = device_get_softc(_dev);
#endif /* __rtems__ */
- int err, i, lenp;
+ int err, i, nph;
struct device *dev;
-#ifndef __rtems__
- struct device_node *mac_node, *dev_node, *tbi_node;
-#else /* __rtems__ */
- struct device_node *mac_node;
-#endif /* __rtems__ */
+ struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
#ifndef __rtems__
struct platform_device *of_dev;
@@ -768,10 +762,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
struct resource res;
struct mac_priv_s *priv;
const u8 *mac_addr;
- const char *char_prop;
- const u32 *u32_prop;
+ u32 val;
u8 fman_id;
- const phandle *phandle_prop;
+ int phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -798,10 +791,26 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
setup_dtsec(mac_dev);
+#ifndef __rtems__
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "tbi-handle", 0);
+#else /* __rtems__ */
+ priv->internal_phy_node = of_parse_phandle(
+ &priv->internal_phy_node_storage, mac_node, "tbi-handle",
+ 0);
+#endif /* __rtems__ */
} else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
setup_tgec(mac_dev);
} else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
setup_memac(mac_dev);
+#ifndef __rtems__
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "pcsphy-handle", 0);
+#else /* __rtems__ */
+ priv->internal_phy_node = of_parse_phandle(
+ &priv->internal_phy_node_storage, mac_node, "pcsphy-handle",
+ 0);
+#endif /* __rtems__ */
} else {
#ifndef __rtems__
dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
@@ -835,15 +844,15 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the FMan cell-index */
- u32_prop = of_get_property(dev_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(dev_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
dev_node->full_name);
err = -EINVAL;
goto _return_of_node_put;
}
- WARN_ON(lenp != sizeof(u32));
- fman_id = (u8)*u32_prop + 1; /* cell-index 0 => FMan id 1 */
+ /* cell-index 0 => FMan id 1 */
+ fman_id = (u8)(val + 1);
priv->fman = fman_bind(&of_dev->dev);
if (!priv->fman) {
@@ -888,26 +897,11 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
priv->vaddr = devm_ioremap(dev, res.start, res.end + 1 - res.start);
#endif /* __rtems__ */
-#ifndef __rtems__
-#define TBIPA_OFFSET 0x1c
-#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
- tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
- if (tbi_node) {
- u32 tbiaddr = TBIPA_DEFAULT_ADDR;
-
- u32_prop = of_get_property(tbi_node, "reg", NULL);
- if (u32_prop)
- tbiaddr = *u32_prop;
- out_be32(priv->vaddr + TBIPA_OFFSET, tbiaddr);
- }
-#endif /* __rtems__ */
-
if (!of_device_is_available(mac_node)) {
#ifndef __rtems__
devm_iounmap(dev, priv->vaddr);
__devm_release_region(dev, fman_get_mem_region(priv->fman),
res.start, res.end + 1 - res.start);
- fman_unbind(priv->fman);
devm_kfree(dev, mac_dev);
#endif /* __rtems__ */
dev_set_drvdata(dev, NULL);
@@ -915,15 +909,14 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the cell-index */
- u32_prop = of_get_property(mac_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(mac_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
mac_node->full_name);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
- WARN_ON(lenp != sizeof(u32));
- priv->cell_index = (u8)*u32_prop;
+ priv->cell_index = (u8)val;
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
@@ -936,25 +929,43 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
/* Get the port handles */
- phandle_prop = of_get_property(mac_node, "fsl,fman-ports", &lenp);
- if (!phandle_prop) {
- dev_err(dev, "of_get_property(%s, fsl,fman-ports) failed\n",
+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
+ err = nph;
+ goto _return_dev_set_drvdata;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port)) {
+		dev_err(dev, "Unsupported number of fman-ports handles for mac node %s in device tree\n",
mac_node->full_name);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
- BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port));
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
-#ifndef __rtems__
+#ifdef __rtems__
+ struct fman_ivars *ivars;
+ device_t child;
+
+ ivars = &mac_dev->ivars[i];
+#endif /* __rtems__ */
/* Find the port node */
- dev_node = of_find_node_by_phandle(phandle_prop[i]);
+#ifndef __rtems__
+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
+#else /* __rtems__ */
+ dev_node = of_parse_phandle(&ivars->dn, mac_node,
+ "fsl,fman-ports", i);
+#endif /* __rtems__ */
if (!dev_node) {
- dev_err(dev, "of_find_node_by_phandle() failed\n");
+ dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
err = -EINVAL;
goto _return_of_node_put;
}
+#ifndef __rtems__
of_dev = of_find_device_by_node(dev_node);
if (!of_dev) {
dev_err(dev, "of_find_device_by_node(%s) failed\n",
@@ -972,22 +983,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
of_node_put(dev_node);
#else /* __rtems__ */
- int node;
- struct fman_ivars *ivars;
- device_t child;
-
- node = fdt_node_offset_by_phandle(bsp_fdt_get(), phandle_prop[i]);
- if (node < 0) {
- goto _return_of_node_put;
- }
-
- ivars = kzalloc(sizeof(*ivars), GFP_KERNEL);
- if (ivars == NULL) {
- goto _return_of_node_put;
- }
-
- ivars->dn.offset = node;
- ivars->of_dev.dev.of_node = &ivars->dn;
+ ivars->of_dev.dev.of_node = dev_node;
ivars->of_dev.dev.base = _of_dev->dev.base;
ivars->fman = fman;
@@ -1010,23 +1006,20 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the PHY connection type */
- char_prop = (const char *)of_get_property(mac_node,
- "phy-connection-type", NULL);
- if (!char_prop) {
+ phy_if = of_get_phy_mode(mac_node);
+ if (phy_if < 0) {
dev_warn(dev,
- "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+ "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
mac_node->full_name);
- priv->phy_if = PHY_INTERFACE_MODE_MII;
- } else {
- priv->phy_if = str2phy(char_prop);
+ phy_if = PHY_INTERFACE_MODE_SGMII;
}
+ priv->phy_if = phy_if;
priv->speed = phy2speed[priv->phy_if];
priv->max_speed = priv->speed;
-#ifndef __rtems__
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
- if (strstr(char_prop, "sgmii"))
+ if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Half);
@@ -1035,9 +1028,8 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
- if (strstr(char_prop, "xgmii"))
+ if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
mac_dev->if_support = SUPPORTED_10000baseT_Full;
-#endif /* __rtems__ */
/* Get the rest of the PHY information */
#ifndef __rtems__
@@ -1051,20 +1043,30 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
GFP_KERNEL);
- if (!priv->fixed_link)
+ if (!priv->fixed_link) {
+ err = -ENOMEM;
goto _return_dev_set_drvdata;
+ }
priv->phy_node = of_node_get(mac_node);
phy = of_phy_find_device(priv->phy_node);
- if (!phy)
+ if (!phy) {
+ err = -EINVAL;
goto _return_dev_set_drvdata;
+ }
priv->fixed_link->link = phy->link;
priv->fixed_link->speed = phy->speed;
priv->fixed_link->duplex = phy->duplex;
priv->fixed_link->pause = phy->pause;
priv->fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
}
+#else /* __rtems__ */
+ priv->phy_node = of_parse_phandle(&priv->phy_node_storage, mac_node,
+ "phy-handle", 0);
+ mac_dev->phy_dev = of_phy_find_device(priv->phy_node);
#endif /* __rtems__ */
err = mac_dev->init(mac_dev);
@@ -1077,7 +1079,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
/* pause frame autonegotiation enabled */
mac_dev->autoneg_pause = true;
- /* by intializing the values to false, force FMD to enable PAUSE frames
+	/* By initializing the values to false, force FMD to enable PAUSE frames
* on RX and TX
*/
mac_dev->rx_pause_req = true;
@@ -1107,7 +1109,6 @@ _return_of_node_put:
#endif /* __rtems__ */
_return_dev_set_drvdata:
kfree(priv->fixed_link);
- kfree(priv);
dev_set_drvdata(dev, NULL);
_return:
return err;
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.h b/linux/drivers/net/ethernet/freescale/fman/mac.h
index 727320e8..066072ab 100644
--- a/linux/drivers/net/ethernet/freescale/fman/mac.h
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.h
@@ -65,11 +65,10 @@ struct mac_device {
#endif /* __rtems__ */
u8 addr[ETH_ALEN];
struct fman_port *port[2];
-#ifndef __rtems__
u32 if_support;
struct phy_device *phy_dev;
-#endif /* __rtems__ */
#ifdef __rtems__
+ struct fman_ivars ivars[2];
struct platform_device pdev;
struct dpaa_eth_data data;
struct net_device net_dev;
@@ -83,7 +82,8 @@ struct mac_device {
bool promisc;
#ifndef __rtems__
- int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+ struct phy_device *(*init_phy)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
#else /* __rtems__ */
void (*adjust_link)(struct mac_device *mac_dev, u16 speed);
#endif /* __rtems__ */
@@ -119,28 +119,8 @@ struct dpaa_eth_data {
extern const char *mac_driver_description;
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx: Return value for RX setting
- * @tx: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings
- *
- * Return: Pointer to FMan device.
- */
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
diff --git a/linux/drivers/soc/fsl/qbman/bman.c b/linux/drivers/soc/fsl/qbman/bman.c
index 35459bc7..2cc7f5c5 100644
--- a/linux/drivers/soc/fsl/qbman/bman.c
+++ b/linux/drivers/soc/fsl/qbman/bman.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,659 +34,776 @@
#include "bman_priv.h"
-/* Last updated for v00.79 of the BG */
-
-struct bman;
-
-/* Register offsets */
-#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
-#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
-#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
-#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
-#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
-#define REG_FBPR_FPC 0x0800
-#define REG_ECSR 0x0a00
-#define REG_ECIR 0x0a04
-#define REG_EADR 0x0a08
-#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
-#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
-#define REG_IP_REV_1 0x0bf8
-#define REG_IP_REV_2 0x0bfc
-#define REG_FBPR_BARE 0x0c00
-#define REG_FBPR_BAR 0x0c04
-#define REG_FBPR_AR 0x0c10
-#define REG_SRCIDR 0x0d04
-#define REG_LIODNR 0x0d08
-#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
-
-/* Used by all error interrupt registers except 'inhibit' */
-#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
-#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
-#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
-#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
-#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
-
-/* BMAN_ECIR valid error bit */
-#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
-
-union bman_ecir {
- u32 ecir_raw;
- struct {
- u32 __reserved1:4;
- u32 portal_num:4;
- u32 __reserved2:12;
- u32 numb:4;
- u32 __reserved3:2;
- u32 pid:6;
- } __packed info;
-};
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
-union bman_eadr {
- u32 eadr_raw;
- struct {
- u32 __reserved1:5;
- u32 memid:3;
- u32 __reserved2:14;
- u32 eadr:10;
- } __packed info;
+/*
+ * Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
};
-
-struct bman_hwerr_txt {
- u32 mask;
- const char *txt;
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
};
-#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
-static const struct bman_hwerr_txt bman_hwerr_txts[] = {
- BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
- BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
- BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
- BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
- BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
};
-#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
-struct bman_error_info_mdata {
- u16 addr_mask;
- u16 bits;
- const char *txt;
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
};
-#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
-static const struct bman_error_info_mdata error_mdata[] = {
- BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
- BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
- BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
};
-#define BMAN_ERR_MDATA_COUNT \
- (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
-/* Add this in Kconfig */
-#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
-/**
- * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
- * @v: for accessors that write values, this is the 32-bit value
- *
- * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
- * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
- * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
- * "write the enable register" rather than "enable the write register"!
- */
-#define bm_err_isr_status_read(bm) \
- __bm_err_isr_read(bm, bm_isr_status)
-#define bm_err_isr_status_clear(bm, m) \
- __bm_err_isr_write(bm, bm_isr_status, m)
-#define bm_err_isr_enable_read(bm) \
- __bm_err_isr_read(bm, bm_isr_enable)
-#define bm_err_isr_enable_write(bm, v) \
- __bm_err_isr_write(bm, bm_isr_enable, v)
-#define bm_err_isr_disable_read(bm) \
- __bm_err_isr_read(bm, bm_isr_disable)
-#define bm_err_isr_disable_write(bm, v) \
- __bm_err_isr_write(bm, bm_isr_disable, v)
-#define bm_err_isr_inhibit(bm) \
- __bm_err_isr_write(bm, bm_isr_inhibit, 1)
-#define bm_err_isr_uninhibit(bm) \
- __bm_err_isr_write(bm, bm_isr_inhibit, 0)
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
#ifndef __rtems__
-static u16 bman_pool_max;
-#else /* __rtems__ */
-/* FIXME */
-extern u16 bman_ip_rev;
-extern u16 bman_pool_max;
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
/*
- * TODO: unimplemented registers
- *
- * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
- * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
*/
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
-/* Encapsulate "struct bman *" as a cast of the register space address. */
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
-static struct bman *bm_create(void *regs)
+static irqreturn_t portal_isr(int irq, void *ptr)
{
- return (struct bman *)regs;
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
}
-static inline u32 __bm_in(struct bman *bm, u32 offset)
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
- return ioread32be((void *)bm + offset);
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
}
-static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
- iowrite32be(val, (void*) bm + offset);
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
-#define bm_in(reg) __bm_in(bm, REG_##reg)
-#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
+#endif
-static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
{
- return __bm_in(bm, REG_ERR_ISR + (n << 2));
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
}
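
A worked example of the wrap logic, assuming 64-byte RCR entries (so RCR_SHIFT is 6 and RCR_CARRY is 8 << 6 = 0x200) and a ring base whose 0x200 bit is clear:

	/*
	 * Entry 7 sits at ring + 0x1c0, so cursor + 1 points at
	 * ring + 0x200. rcr_carryclear() masks off the 0x200 carry bit,
	 * wrapping the cursor back to ring + 0x0, and because the wrapped
	 * pointer differs from the partial one, rcr_inc() toggles
	 * rcr->vbit so hardware can tell new entries from stale ones on
	 * the next lap around the ring.
	 */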
-static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+static int bm_rcr_get_avail(struct bm_portal *portal)
{
- __bm_out(bm, REG_ERR_ISR + (n << 2), val);
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
}
-static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+static int bm_rcr_get_fill(struct bm_portal *portal)
{
- u32 v = bm_in(IP_REV_1);
- *id = (v >> 16);
- *major = (v >> 8) & 0xff;
- *minor = v & 0xff;
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
}
-static u32 __generate_thresh(u32 val, int roundup)
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
- u32 e = 0; /* co-efficient, exponent */
- int oddbit = 0;
+ struct bm_rcr *rcr = &portal->rcr;
- while (val > 0xff) {
- oddbit = val & 1;
- val >>= 1;
- e++;
- if (roundup && oddbit)
- val++;
- }
- DPA_ASSERT(e < 0x10);
- return val | (e << 8);
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
}
-static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
- u32 hwdet, u32 hwdxt)
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
- DPA_ASSERT(pool < bman_pool_max);
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
- bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
- bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
- bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
- bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}
-static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
- u32 exp = ilog2(size);
- /* choke if size isn't within range */
- DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
- is_power_of_2(size));
- /* choke if '[e]ba' has lower-alignment than 'size' */
- DPA_ASSERT(!(ba & (size - 1)));
- bm_out(FBPR_BARE, upper_32_bits(ba));
- bm_out(FBPR_BAR, lower_32_bits(ba));
- bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
}
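
Assuming dpaa_cyc_diff() in dpaa_sys.h returns the forward distance between two ring indices modulo the ring size, a small worked example of the update above:

	/*
	 * With BM_RCR_SIZE = 8, old_ci = 6 and a freshly read ci of 2,
	 * dpaa_cyc_diff(8, 6, 2) = (8 + 2 - 6) = 4: hardware has consumed
	 * four more release entries, so rcr->available grows by four.
	 */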
-/*****************/
-/* Config driver */
-/*****************/
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
-/* We support only one of these. */
-static struct bman *bm;
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
-/* And this state belongs to 'bm' */
-#ifndef __rtems__
-static dma_addr_t fbpr_a;
-static size_t fbpr_sz;
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
-static int bman_fbpr(struct reserved_mem *rmem)
+static void bm_rcr_finish(struct bm_portal *portal)
{
- fbpr_a = rmem->base;
- fbpr_sz = rmem->size;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
- WARN_ON(!(fbpr_a && fbpr_sz));
+ DPAA_ASSERT(!rcr->busy);
- return 0;
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
}
-RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
-#else /* __rtems__ */
-static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
-#define fbpr_a ((uintptr_t)&fbpr[0])
-#define fbpr_sz sizeof(fbpr)
-#endif /* __rtems__ */
-int bm_pool_set(u32 bpid, const u32 *thresholds)
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
{
- if (!bm)
- return -ENODEV;
- bm_set_pool(bm, bpid, thresholds[0], thresholds[1],
- thresholds[2], thresholds[3]);
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
return 0;
}
-EXPORT_SYMBOL(bm_pool_set);
-static void log_edata_bits(u32 bit_count)
+static void bm_mc_finish(struct bm_portal *portal)
{
- u32 i, j, mask = 0xffffffff;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
- pr_warn("ErrInt, EDATA:\n");
- i = bit_count/32;
- if (bit_count%32) {
- i++;
- mask = ~(mask << bit_count%32);
- }
- j = 16-i;
- pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
- j++;
- for (; j < 16; j++)
- pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
}
-static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
- union bman_ecir ecir_val;
- union bman_eadr eadr_val;
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
- ecir_val.ecir_raw = bm_in(ECIR);
- /* Is portal info valid */
- if (ecsr_val & PORTAL_ECSR_ERR) {
- pr_warn("ErrInt: SWP id %d, numb %d, pid %d\n",
- ecir_val.info.portal_num, ecir_val.info.numb,
- ecir_val.info.pid);
- }
- if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
- eadr_val.eadr_raw = bm_in(EADR);
- pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
- error_mdata[eadr_val.info.memid].txt,
- error_mdata[eadr_val.info.memid].addr_mask
- & eadr_val.info.eadr);
- log_edata_bits(error_mdata[eadr_val.info.memid].bits);
- }
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
}
-/* BMan interrupt handler */
-static irqreturn_t bman_isr(int irq, void *ptr)
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
- u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
- ier_val = bm_err_isr_enable_read(bm);
- isr_val = bm_err_isr_status_read(bm);
- ecsr_val = bm_in(ECSR);
- isr_mask = isr_val & ier_val;
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
- if (!isr_mask)
- return IRQ_NONE;
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
- for (i = 0; i < BMAN_HWE_COUNT; i++) {
- if (bman_hwerr_txts[i].mask & isr_mask) {
- pr_warn("ErrInt: %s\n", bman_hwerr_txts[i].txt);
- if (bman_hwerr_txts[i].mask & ecsr_val) {
- log_additional_error_info(isr_mask, ecsr_val);
- /* Re-arm error capture registers */
- bm_out(ECSR, ecsr_val);
- }
- if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
- pr_devel("Un-enabling error 0x%x\n",
- bman_hwerr_txts[i].mask);
- ier_val &= ~bman_hwerr_txts[i].mask;
- bm_err_isr_enable_write(bm, ier_val);
- }
- }
- }
- bm_err_isr_status_clear(bm, isr_val);
-
- return IRQ_HANDLED;
+ return timeout;
}
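
A minimal sketch of how the management-command primitives compose; bm_shutdown_pool() and bman_acquire() below follow the same pattern, and the function name example_acquire_one() is hypothetical:

static int example_acquire_one(struct bm_portal *portal, u8 bpid)
{
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;

	mcc = bm_mc_start(portal);	/* claim the command register */
	mcc->bpid = bpid;		/* fill in the command body */
	bm_mc_commit(portal, BM_MCC_VERB_CMD_ACQUIRE | 1);
	if (!bm_mc_result_timeout(portal, &mcr))
		return -ETIMEDOUT;	/* hardware never answered */
	/* mcr->verb and mcr->bufs[0] now hold the response */
	return mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
}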
-u32 bm_pool_free_buffers(u32 bpid)
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
{
- return bm_in(POOL_CONTENT(bpid));
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
}
-EXPORT_SYMBOL(bm_pool_free_buffers);
-#ifndef __rtems__
-static ssize_t show_fbpr_fpc(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
-};
+ struct bm_portal *p;
+ int ret;
-static ssize_t show_pool_count(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
-{
- u32 data;
- int i;
+ p = &portal->p;
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference...
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+#ifndef __rtems__
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+#endif /* __rtems__ */
- if (kstrtoint(dev_attr->attr.name, 10, &i))
- return -EINVAL;
- data = bm_in(POOL_CONTENT(i));
- return snprintf(buf, PAGE_SIZE, "%d\n", data);
-};
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
-static ssize_t show_err_isr(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
-};
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
-static ssize_t show_sbec(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+fail_rcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
- int i;
+ struct bman_portal *portal;
+ int err;
- if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
- return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
-};
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
-static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
-static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+#ifndef __rtems__
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
-/* Didn't use DEVICE_ATTR as 64 of this would be required.
- * Initialize them when needed. */
-static char *name_attrs_pool_count; /* "xx" + null-terminator */
-static struct device_attribute *dev_attr_buffer_pool_count;
+ return portal;
+}
-static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
-static struct attribute *bman_dev_attributes[] = {
- &dev_attr_fbpr_fpc.attr,
- &dev_attr_err_isr.attr,
- NULL
-};
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
-static struct attribute *bman_dev_ecr_attributes[] = {
- &dev_attr_sbec_0.attr,
- &dev_attr_sbec_1.attr,
- NULL
-};
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
-static struct attribute **bman_dev_pool_count_attributes;
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
-/* root level */
-static const struct attribute_group bman_dev_attr_grp = {
- .name = NULL,
- .attrs = bman_dev_attributes
-};
-static const struct attribute_group bman_dev_ecr_grp = {
- .name = "error_capture",
- .attrs = bman_dev_ecr_attributes
-};
-static struct attribute_group bman_dev_pool_countent_grp = {
- .name = "pool_count",
-};
+ local_irq_save(irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
-static int of_fsl_bman_remove(struct platform_device *ofdev)
+static int bm_shutdown_pool(u32 bpid)
{
- sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ while (1) {
+ struct bman_portal *p = get_affine_portal();
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ put_affine_portal();
+			pr_crit("BMan Acquire Command timed out\n");
+ return -ETIMEDOUT;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ put_affine_portal();
+ /* Pool is empty */
+ return 0;
+ }
+ put_affine_portal();
+ }
+
return 0;
-};
-#endif /* __rtems__ */
+}
+
+struct gen_pool *bm_bpalloc;
-static int of_fsl_bman_probe(struct platform_device *ofdev)
+static int bm_alloc_bpid_range(u32 *result, u32 count)
{
- int ret, err_irq, i;
- struct device *dev = &ofdev->dev;
- struct device_node *node = dev->of_node;
- struct resource res;
- u32 __iomem *regs;
- u16 id;
- u8 major, minor;
+ unsigned long addr;
- if (!of_device_is_available(node))
- return -ENODEV;
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
- ret = of_address_to_resource(node, 0, &res);
- if (ret) {
- dev_err(dev, "Can't get %s property 'reg'\n", node->full_name);
- return ret;
- }
- regs = devm_ioremap(dev, res.start, res.end - res.start + 1);
- if (!regs)
- return -ENXIO;
-
- bm = bm_create(regs);
-
- bm_get_version(bm, &id, &major, &minor);
- dev_info(dev, "Bman ver:%04x,%02x,%02x\n", id, major, minor);
- if ((major == 1) && (minor == 0))
- bman_pool_max = 64;
- else if ((major == 2) && (minor == 0))
- bman_pool_max = 8;
- else if ((major == 2) && (minor == 1))
- bman_pool_max = 64;
- else
- dev_warn(dev, "unknown Bman version, default to rev1.0\n");
-#ifdef __rtems__
- bman_ip_rev = (u16)((major << 8) | minor);
-#endif /* __rtems__ */
+ *result = addr & ~DPAA_GENALLOC_OFF;
+ return 0;
+}
- bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
- err_irq = of_irq_to_resource(node, 0, NULL);
- if (err_irq == NO_IRQ) {
- dev_info(dev, "Can't get %s property 'interrupts'\n",
- node->full_name);
- return -ENODEV;
- }
- ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
- node);
- if (ret) {
- dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
- ret, node->full_name);
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
return ret;
}
- /* Disable Buffer Pool State Change */
- bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
- /* Write-to-clear any stale bits, (eg. starvation being asserted prior
- * to resource allocation during driver init). */
- bm_err_isr_status_clear(bm, 0xffffffff);
- /* Enable Error Interrupts */
- bm_err_isr_enable_write(bm, 0xffffffff);
-#ifndef __rtems__
- ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp);
- if (ret)
- goto done;
- ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp);
- if (ret)
- goto del_group_0;
-
- name_attrs_pool_count = devm_kmalloc(dev,
- sizeof(char) * bman_pool_max * 3, GFP_KERNEL);
- if (!name_attrs_pool_count)
- goto del_group_1;
-
- dev_attr_buffer_pool_count = devm_kmalloc(dev,
- sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL);
- if (!dev_attr_buffer_pool_count)
- goto del_group_1;
-
- bman_dev_pool_count_attributes = devm_kmalloc(dev,
- sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL);
- if (!bman_dev_pool_count_attributes)
- goto del_group_1;
-
- for (i = 0; i < bman_pool_max; i++) {
- ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
- if (!ret)
- goto del_group_1;
- dev_attr_buffer_pool_count[i].attr.name =
- (name_attrs_pool_count + i * 3);
- dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
- dev_attr_buffer_pool_count[i].show = show_pool_count;
- bman_dev_pool_count_attributes[i] =
- &dev_attr_buffer_pool_count[i].attr;
- }
- bman_dev_pool_count_attributes[bman_pool_max] = NULL;
-
- bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
-
- ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_countent_grp);
- if (ret)
- goto del_group_1;
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
- goto done;
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
-del_group_1:
- sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp);
-del_group_0:
- sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp);
-done:
- if (ret)
- dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
-#else /* __rtems__ */
- (void)i;
-#endif /* __rtems__ */
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
- return ret;
-};
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
-#ifndef __rtems__
-static const struct of_device_id of_fsl_bman_ids[] = {
- {
- .compatible = "fsl,bman",
- },
- {}
-};
+ pool->bpid = bpid;
-static struct platform_driver of_fsl_bman_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = of_fsl_bman_ids,
- },
- .probe = of_fsl_bman_probe,
- .remove = of_fsl_bman_remove,
-};
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ kfree(pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
-builtin_platform_driver(of_fsl_bman_driver);
-#else /* __rtems__ */
-#include <sys/types.h>
-#include <sys/kernel.h>
-#include <rtems.h>
-#include <bsp/fdt.h>
-#include <bsp/qoriq.h>
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
-static struct bm_portal_config bman_configs[NR_CPUS];
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
-u16 bman_ip_rev;
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
-u16 bman_pool_max;
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
-SYSINIT_REFERENCE(irqs);
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
-static void
-bman_sysinit(void)
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
- const char *fdt = bsp_fdt_get();
- struct device_node dn;
- struct platform_device ofdev = {
- .dev = {
- .of_node = &dn,
- .base = (uintptr_t)&qoriq
- }
- };
- const char *name;
- int cpu_count = (int)rtems_get_processor_count();
- int cpu;
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
int ret;
- int node;
- int parent;
-
- qoriq_reset_qman_and_bman();
- qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
- sizeof(qoriq_bman_portal[0]));
- qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
- sizeof(qoriq_bman_portal[1]));
-
- memset(&dn, 0, sizeof(dn));
-
- name = "fsl,bman";
- node = fdt_node_offset_by_compatible(fdt, 0, name);
- if (node < 0)
- panic("bman: no bman in FDT");
-
- dn.full_name = name;
- dn.offset = node;
- ret = of_fsl_bman_probe(&ofdev);
- if (ret != 0)
- panic("bman: probe failed");
-
- name = "fsl,bman-portal";
- node = fdt_node_offset_by_compatible(fdt, 0, name);
- if (node < 0)
- panic("bman: no portals in FDT");
- parent = fdt_parent_offset(fdt, node);
- if (parent < 0)
- panic("bman: no parent of portals in FDT");
- node = fdt_first_subnode(fdt, parent);
-
- dn.full_name = name;
- dn.offset = node;
-
- for (cpu = 0; cpu < cpu_count; ++cpu) {
- struct bm_portal_config *pcfg = &bman_configs[cpu];
- struct bman_portal *portal;
- struct resource res;
-
- if (node < 0)
- panic("bman: missing portal in FDT");
-
- ret = of_address_to_resource(&dn, 0, &res);
- if (ret != 0)
- panic("bman: no portal CE address");
- pcfg->addr_virt[0] = (__iomem void *)
- ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
- BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
- (uintptr_t)&qoriq_bman_portal[0][0]);
- BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
- (uintptr_t)&qoriq_bman_portal[1][0]);
-
- ret = of_address_to_resource(&dn, 1, &res);
- if (ret != 0)
- panic("bman: no portal CI address");
- pcfg->addr_virt[1] = (__iomem void *)
- ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
- BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
- (uintptr_t)&qoriq_bman_portal[1][0]);
- BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
- (uintptr_t)&qoriq_bman_portal[2][0]);
-
- pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
- if (pcfg->public_cfg.irq == NO_IRQ)
- panic("bman: no portal interrupt");
-
- pcfg->public_cfg.cpu = cpu;
- bman_depletion_fill(&pcfg->public_cfg.mask);
-
- portal = bman_create_affine_portal(pcfg);
- if (portal == NULL)
- panic("bman: cannot create portal");
-
- bman_p_irqsource_add(portal, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
-
- node = fdt_next_subnode(fdt, node);
- dn.offset = node;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
}
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
- bman_seed_bpid_range(0, bman_pool_max);
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
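
An end-to-end usage sketch of the reduced pool API exported above; the function name example_seed_pool() and the buffer address are hypothetical, and a real user would release DMA-able buffer memory rather than a literal:

static int example_seed_pool(void)
{
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool();		/* allocates a free BPID */
	if (!pool)
		return -ENODEV;

	/* hand one (hypothetical) buffer address to the pool... */
	bm_buffer_set64(&buf, 0x10000000ULL);
	ret = bman_release(pool, &buf, 1);
	if (ret)
		goto out;

	/* ...and take it back again; bman_acquire() returns the count */
	ret = bman_acquire(pool, &buf, 1);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	bman_free_pool(pool);		/* drains and releases the BPID */
	return ret;
}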
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
}
-SYSINIT(bman_sysinit, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
-#endif /* __rtems__ */
diff --git a/linux/drivers/soc/fsl/qbman/bman_api.c b/linux/drivers/soc/fsl/qbman/bman_api.c
deleted file mode 100644
index cdfcebbd..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_api.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "bman.h"
-
-/* Compilation constants */
-#define RCR_THRESH 2 /* reread h/w CI when running out of space */
-#define IRQNAME "BMan portal %d"
-#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
-#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
-
-struct bman_portal {
- struct bm_portal p;
- /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
- struct bman_depletion *pools;
- int thresh_set;
- unsigned long irq_sources;
- u32 slowpoll; /* only used when interrupts are off */
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
-#endif
-#ifdef FSL_DPA_PORTAL_SHARE
- raw_spinlock_t sharing_lock; /* only used if is_shared */
-#ifndef __rtems__
- int is_shared;
- struct bman_portal *sharing_redirect;
-#endif /* __rtems__ */
-#endif
- /* When the cpu-affine portal is activated, this is non-NULL */
- const struct bm_portal_config *config;
- /* 64-entry hash-table of pool objects that are tracking depletion
- * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
- * we're not fussy about cache-misses and so forth - whereas the above
- * members should all fit in one cacheline.
- * BTW, with 64 entries in the hash table and 64 buffer pools to track,
- * you'll never guess the hash-function ... */
- struct bman_pool *cb[64];
- char irqname[MAX_IRQNAME];
- /* Track if the portal was alloced by the driver */
- u8 alloced;
-};
-
-
-#ifdef FSL_DPA_PORTAL_SHARE
-/* For an explanation of the locking, redirection, or affine-portal logic,
- * please consult the QMan driver for details. This is the same, only simpler
- * (no fiddly QMan-specific bits.) */
-#ifndef __rtems__
-#define PORTAL_IRQ_LOCK(p, irqflags) \
- do { \
- if ((p)->is_shared) \
- raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
- else \
- local_irq_save(irqflags); \
- } while (0)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) \
- do { \
- if ((p)->is_shared) \
- raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
- irqflags); \
- else \
- local_irq_restore(irqflags); \
- } while (0)
-#else /* __rtems__ */
-#define PORTAL_IRQ_LOCK(p, irqflags) \
- raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) \
- raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
-#endif /* __rtems__ */
-#else
-#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
-#endif
-
-#ifndef __rtems__
-static cpumask_t affine_mask;
-static DEFINE_SPINLOCK(affine_mask_lock);
-#endif /* __rtems__ */
-static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
-static inline struct bman_portal *get_raw_affine_portal(void)
-{
- return &get_cpu_var(bman_affine_portal);
-}
-#ifdef FSL_DPA_PORTAL_SHARE
-static inline struct bman_portal *get_affine_portal(void)
-{
- struct bman_portal *p = get_raw_affine_portal();
-
-#ifndef __rtems__
- if (p->sharing_redirect)
- return p->sharing_redirect;
-#endif /* __rtems__ */
- return p;
-}
-#else
-#define get_affine_portal() get_raw_affine_portal()
-#endif
-static inline void put_affine_portal(void)
-{
- put_cpu_var(bman_affine_portal);
-}
-static inline struct bman_portal *get_poll_portal(void)
-{
- return this_cpu_ptr(&bman_affine_portal);
-}
-#define put_poll_portal()
-
-/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
- * more than one such object per BMan buffer pool, eg. if different users of the
- * pool are operating via different portals. */
-struct bman_pool {
- struct bman_pool_params params;
- /* Used for hash-table admin when using depletion notifications. */
- struct bman_portal *portal;
- struct bman_pool *next;
- /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
- struct bm_buffer *sp;
- unsigned int sp_fill;
-#ifdef CONFIG_FSL_DPA_CHECKING
- atomic_t in_use;
-#endif
-};
-
-/* (De)Registration of depletion notification callbacks */
-static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
-{
- __maybe_unused unsigned long irqflags;
-
- pool->portal = portal;
- PORTAL_IRQ_LOCK(portal, irqflags);
- pool->next = portal->cb[pool->params.bpid];
- portal->cb[pool->params.bpid] = pool;
- if (!pool->next)
- /* First object for that bpid on this portal, enable the BSCN
- * mask bit. */
- bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
- PORTAL_IRQ_UNLOCK(portal, irqflags);
-}
-static void depletion_unlink(struct bman_pool *pool)
-{
- struct bman_pool *it, *last = NULL;
- struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
- __maybe_unused unsigned long irqflags;
-
- PORTAL_IRQ_LOCK(pool->portal, irqflags);
- it = *base; /* <-- gotcha, don't do this prior to the irq_save */
- while (it != pool) {
- last = it;
- it = it->next;
- }
- if (!last)
- *base = pool->next;
- else
- last->next = pool->next;
- if (!last && !pool->next) {
- /* Last object for that bpid on this portal, disable the BSCN
- * mask bit. */
- bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
- /* And "forget" that we last saw this pool as depleted */
- bman_depletion_unset(&pool->portal->pools[1],
- pool->params.bpid);
- }
- PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
-}
-
-/* In the case that the application's core loop calls qman_poll() and
- * bman_poll(), we ought to balance how often we incur the overheads of the
- * slow-path poll. We'll use two decrementer sources. The idle decrementer
- * constant is used when the last slow-poll detected no work to do, and the busy
- * decrementer constant when the last slow-poll had work to do. */
-#define SLOW_POLL_IDLE 1000
-#define SLOW_POLL_BUSY 10
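For orientation, here is a minimal sketch of the kind of application core loop the comment above has in mind. The loop and process_rx_work() are hypothetical placeholders; only qman_poll() and bman_poll() are real entry points of this driver pair:

/* Hypothetical application core loop; process_rx_work() is a placeholder. */
static void app_core_loop(void)
{
	for (;;) {
		qman_poll();		/* QMan fast/slow-path poll */
		bman_poll();		/* cheap most of the time: the slow-path
					 * poll runs only when the SLOW_POLL_*
					 * decrementer expires */
		process_rx_work();	/* application work, not part of QBMan */
	}
}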
-static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
-
-/* Portal interrupt handler */
-static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
-{
- struct bman_portal *p = ptr;
- u32 clear = p->irq_sources;
- u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
-
- clear |= __poll_portal_slow(p, is);
- bm_isr_status_clear(&p->p, clear);
- return IRQ_HANDLED;
-}
-
-
-struct bman_portal *bman_create_portal(
- struct bman_portal *portal,
- const struct bm_portal_config *config)
-{
- struct bm_portal *__p;
- const struct bman_depletion *pools = &config->public_cfg.mask;
- int ret;
- u8 bpid = 0;
-
- if (!portal) {
- portal = kmalloc(sizeof(*portal), GFP_KERNEL);
- if (!portal)
- return portal;
- portal->alloced = 1;
- } else
- portal->alloced = 0;
-
- __p = &portal->p;
-
-	/* prep the low-level portal struct with the mapped addresses from the
-	 * config; everything that follows depends on it, and "config" is kept
-	 * mainly for (de)reference. */
- __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
- __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
- if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
- pr_err("RCR initialisation failed\n");
- goto fail_rcr;
- }
- if (bm_mc_init(__p)) {
- pr_err("MC initialisation failed\n");
- goto fail_mc;
- }
- if (bm_isr_init(__p)) {
- pr_err("ISR initialisation failed\n");
- goto fail_isr;
- }
- portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
- if (!portal->pools)
- goto fail_pools;
- portal->pools[0] = *pools;
- bman_depletion_init(portal->pools + 1);
- while (bpid < bman_pool_max) {
- /* Default to all BPIDs disabled, we enable as required at
- * run-time. */
- bm_isr_bscn_mask(__p, bpid, 0);
- bpid++;
- }
- portal->slowpoll = 0;
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- portal->rcri_owned = NULL;
-#endif
-#ifdef FSL_DPA_PORTAL_SHARE
- raw_spin_lock_init(&portal->sharing_lock);
-#ifndef __rtems__
- portal->is_shared = config->public_cfg.is_shared;
- portal->sharing_redirect = NULL;
-#endif /* __rtems__ */
-#endif
- memset(&portal->cb, 0, sizeof(portal->cb));
- /* Write-to-clear any stale interrupt status bits */
- bm_isr_disable_write(__p, 0xffffffff);
- portal->irq_sources = 0;
- bm_isr_enable_write(__p, portal->irq_sources);
- bm_isr_status_clear(__p, 0xffffffff);
- snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
- if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
- portal)) {
- pr_err("request_irq() failed\n");
- goto fail_irq;
- }
-#ifndef __rtems__
- if ((config->public_cfg.cpu != -1) &&
- irq_can_set_affinity(config->public_cfg.irq) &&
- irq_set_affinity(config->public_cfg.irq,
- cpumask_of(config->public_cfg.cpu))) {
- pr_err("irq_set_affinity() failed\n");
- goto fail_affinity;
- }
-#endif /* __rtems__ */
-
- /* Need RCR to be empty before continuing */
- ret = bm_rcr_get_fill(__p);
- if (ret) {
- pr_err("RCR unclean\n");
- goto fail_rcr_empty;
- }
- /* Success */
- portal->config = config;
-
- bm_isr_disable_write(__p, 0);
- bm_isr_uninhibit(__p);
- return portal;
-fail_rcr_empty:
-#ifndef __rtems__
-fail_affinity:
-#endif /* __rtems__ */
- free_irq(config->public_cfg.irq, portal);
-fail_irq:
- kfree(portal->pools);
-fail_pools:
- bm_isr_finish(__p);
-fail_isr:
- bm_mc_finish(__p);
-fail_mc:
- bm_rcr_finish(__p);
-fail_rcr:
- if (portal->alloced)
- kfree(portal);
- return NULL;
-}
-
-struct bman_portal *bman_create_affine_portal(
- const struct bm_portal_config *config)
-{
- struct bman_portal *portal;
-
- portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
- portal = bman_create_portal(portal, config);
-#ifndef __rtems__
- if (portal) {
- spin_lock(&affine_mask_lock);
- cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
- spin_unlock(&affine_mask_lock);
- }
-#endif /* __rtems__ */
- return portal;
-}
-
-
-#ifndef __rtems__
-struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
- int cpu)
-{
-#ifdef FSL_DPA_PORTAL_SHARE
- struct bman_portal *p = &per_cpu(bman_affine_portal, cpu);
-
- BUG_ON(p->config);
- BUG_ON(p->is_shared);
- BUG_ON(!redirect->config->public_cfg.is_shared);
- p->irq_sources = 0;
- p->sharing_redirect = redirect;
- put_affine_portal();
- return p;
-#else
- BUG();
- return NULL;
-#endif
-}
-#endif /* __rtems__ */
-
-void bman_destroy_portal(struct bman_portal *bm)
-{
- const struct bm_portal_config *pcfg = bm->config;
-
- bm_rcr_cce_update(&bm->p);
- bm_rcr_cce_update(&bm->p);
-
- free_irq(pcfg->public_cfg.irq, bm);
-
- kfree(bm->pools);
- bm_isr_finish(&bm->p);
- bm_mc_finish(&bm->p);
- bm_rcr_finish(&bm->p);
- bm->config = NULL;
- if (bm->alloced)
- kfree(bm);
-}
-
-const struct bm_portal_config *bman_destroy_affine_portal(void)
-{
- struct bman_portal *bm = get_raw_affine_portal();
- const struct bm_portal_config *pcfg;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (bm->sharing_redirect) {
- bm->sharing_redirect = NULL;
- put_affine_portal();
- return NULL;
- }
- bm->is_shared = 0;
-#endif /* __rtems__ */
-#endif
- pcfg = bm->config;
- bman_destroy_portal(bm);
-#ifndef __rtems__
- spin_lock(&affine_mask_lock);
- cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
- spin_unlock(&affine_mask_lock);
-#endif /* __rtems__ */
- put_affine_portal();
- return pcfg;
-}
-
-/* When release logic waits on available RCR space, we need a global waitqueue
- * in the case of "affine" use (as the waits wake on different cpus which means
- * different portals - so we can't wait on any per-portal waitqueue). */
-static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
-
-static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
-{
- struct bman_depletion tmp;
- u32 ret = is;
-
- /* There is a gotcha to be aware of. If we do the query before clearing
- * the status register, we may miss state changes that occur between the
- * two. If we write to clear the status register before the query, the
- * cache-enabled query command may overtake the status register write
- * unless we use a heavyweight sync (which we don't want). Instead, we
- * write-to-clear the status register then *read it back* before doing
- * the query, hence the odd while loop with the 'is' accumulation. */
- if (is & BM_PIRQ_BSCN) {
- struct bm_mc_result *mcr;
- __maybe_unused unsigned long irqflags;
- unsigned int i, j;
- u32 __is;
-
- bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
- while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
- is |= __is;
- bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
- }
- is &= ~BM_PIRQ_BSCN;
- PORTAL_IRQ_LOCK(p, irqflags);
- bm_mc_start(&p->p);
- bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
- while (!(mcr = bm_mc_result(&p->p)))
- cpu_relax();
- tmp = mcr->query.ds.state;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- for (i = 0; i < 2; i++) {
- int idx = i * 32;
- /* tmp is a mask of currently-depleted pools.
- * pools[0] is mask of those we care about.
- * pools[1] is our previous view (we only want to
- * be told about changes). */
- tmp.__state[i] &= p->pools[0].__state[i];
- if (tmp.__state[i] == p->pools[1].__state[i])
- /* fast-path, nothing to see, move along */
- continue;
- for (j = 0; j <= 31; j++, idx++) {
- struct bman_pool *pool = p->cb[idx];
- int b4 = bman_depletion_get(&p->pools[1], idx);
- int af = bman_depletion_get(&tmp, idx);
-
- if (b4 == af)
- continue;
- while (pool) {
- pool->params.cb(p, pool,
- pool->params.cb_ctx, af);
- pool = pool->next;
- }
- }
- }
- p->pools[1] = tmp;
- }
-
- if (is & BM_PIRQ_RCRI) {
- __maybe_unused unsigned long irqflags;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- bm_rcr_cce_update(&p->p);
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- /* If waiting for sync, we only cancel the interrupt threshold
- * when the ring utilisation hits zero. */
- if (p->rcri_owned) {
- if (!bm_rcr_get_fill(&p->p)) {
- p->rcri_owned = NULL;
- bm_rcr_set_ithresh(&p->p, 0);
- }
- } else
-#endif
- bm_rcr_set_ithresh(&p->p, 0);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- wake_up(&affine_queue);
- bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
- is &= ~BM_PIRQ_RCRI;
- }
-
- /* There should be no status register bits left undefined */
- DPA_ASSERT(!is);
- return ret;
-}
-
-const struct bman_portal_config *bman_get_portal_config(void)
-{
- struct bman_portal *p = get_affine_portal();
- const struct bman_portal_config *ret = &p->config->public_cfg;
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(bman_get_portal_config);
-
-u32 bman_irqsource_get(void)
-{
- struct bman_portal *p = get_raw_affine_portal();
- u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(bman_irqsource_get);
-
-int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
-{
- __maybe_unused unsigned long irqflags;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (p->sharing_redirect)
- return -EINVAL;
-#endif /* __rtems__ */
-#endif
- PORTAL_IRQ_LOCK(p, irqflags);
- set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
- bm_isr_enable_write(&p->p, p->irq_sources);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- return 0;
-}
-EXPORT_SYMBOL(bman_p_irqsource_add);
-
-int bman_irqsource_add(__maybe_unused u32 bits)
-{
- struct bman_portal *p = get_raw_affine_portal();
- int ret = bman_p_irqsource_add(p, bits);
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(bman_irqsource_add);
-
-int bman_irqsource_remove(u32 bits)
-{
- struct bman_portal *p = get_raw_affine_portal();
- __maybe_unused unsigned long irqflags;
- u32 ier;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (p->sharing_redirect) {
- put_affine_portal();
- return -EINVAL;
- }
-#endif /* __rtems__ */
-#endif
- /* Our interrupt handler only processes+clears status register bits that
- * are in p->irq_sources. As we're trimming that mask, if one of them
- * were to assert in the status register just before we remove it from
- * the enable register, there would be an interrupt-storm when we
- * release the IRQ lock. So we wait for the enable register update to
- * take effect in h/w (by reading it back) and then clear all other bits
- * in the status register. Ie. we clear them from ISR once it's certain
- * IER won't allow them to reassert. */
- PORTAL_IRQ_LOCK(p, irqflags);
- bits &= BM_PIRQ_VISIBLE;
- clear_bits(bits, &p->irq_sources);
- bm_isr_enable_write(&p->p, p->irq_sources);
- ier = bm_isr_enable_read(&p->p);
- /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
- * data-dependency, ie. to protect against re-ordering. */
- bm_isr_status_clear(&p->p, ~ier);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return 0;
-}
-EXPORT_SYMBOL(bman_irqsource_remove);
-
-#ifndef __rtems__
-const cpumask_t *bman_affine_cpus(void)
-{
- return &affine_mask;
-}
-EXPORT_SYMBOL(bman_affine_cpus);
-#endif /* __rtems__ */
-
-u32 bman_poll_slow(void)
-{
- struct bman_portal *p = get_poll_portal();
- u32 ret;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (unlikely(p->sharing_redirect))
- ret = (u32)-1;
- else
-#endif /* __rtems__ */
-#endif
- {
- u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
-
- ret = __poll_portal_slow(p, is);
- bm_isr_status_clear(&p->p, ret);
- }
- put_poll_portal();
- return ret;
-}
-EXPORT_SYMBOL(bman_poll_slow);
-
-/* Legacy wrapper */
-void bman_poll(void)
-{
- struct bman_portal *p = get_poll_portal();
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (unlikely(p->sharing_redirect))
- goto done;
-#endif /* __rtems__ */
-#endif
- if (!(p->slowpoll--)) {
- u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
- u32 active = __poll_portal_slow(p, is);
-
- if (active)
- p->slowpoll = SLOW_POLL_BUSY;
- else
- p->slowpoll = SLOW_POLL_IDLE;
- }
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
-done:
-#endif /* __rtems__ */
-#endif
- put_poll_portal();
-}
-EXPORT_SYMBOL(bman_poll);
-
-static const u32 zero_thresholds[4] = {0, 0, 0, 0};
-
-struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
-{
- struct bman_pool *pool = NULL;
- u32 bpid;
-
- if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
- if (bman_alloc_bpid(&bpid))
- return NULL;
- } else {
- if (params->bpid >= bman_pool_max)
- return NULL;
- bpid = params->bpid;
- }
-#ifdef CONFIG_FSL_BMAN
- if (params->flags & BMAN_POOL_FLAG_THRESH) {
- if (bm_pool_set(bpid, params->thresholds))
- goto err;
- }
-#else
- if (params->flags & BMAN_POOL_FLAG_THRESH)
- goto err;
-#endif
- pool = kmalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- goto err;
- pool->sp = NULL;
- pool->sp_fill = 0;
- pool->params = *params;
-#ifdef CONFIG_FSL_DPA_CHECKING
- atomic_set(&pool->in_use, 1);
-#endif
- if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
- pool->params.bpid = bpid;
- if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
- pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
- GFP_KERNEL);
- if (!pool->sp)
- goto err;
- }
- if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
- struct bman_portal *p = get_affine_portal();
-
- if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
- pr_err("Depletion events disabled for bpid %d\n", bpid);
- goto err;
- }
- depletion_link(p, pool);
- put_affine_portal();
- }
- return pool;
-err:
-#ifdef CONFIG_FSL_BMAN
- if (params->flags & BMAN_POOL_FLAG_THRESH)
- bm_pool_set(bpid, zero_thresholds);
-#endif
- if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
- bman_release_bpid(bpid);
- if (pool) {
- kfree(pool->sp);
- kfree(pool);
- }
- return NULL;
-}
-EXPORT_SYMBOL(bman_new_pool);
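As a usage illustration, a minimal sketch of a caller creating a stockpiled pool with a dynamically allocated BPID. The caller-side names (my_depletion_cb, my_create_pool) are hypothetical; the parameter fields and callback arguments mirror what bman_new_pool() and __poll_portal_slow() above actually use:

/* Hypothetical caller of bman_new_pool(). */
static void my_depletion_cb(struct bman_portal *p, struct bman_pool *pool,
			    void *cb_ctx, int depleted)
{
	/* invoked from __poll_portal_slow() when the pool enters or
	 * leaves the depleted state */
}

static struct bman_pool *my_create_pool(void)
{
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID |
			 BMAN_POOL_FLAG_STOCKPILE |
			 BMAN_POOL_FLAG_DEPLETION,
		.cb = my_depletion_cb,
		.cb_ctx = NULL,
	};

	/* NULL on failure; BMAN_POOL_FLAG_DEPLETION additionally requires
	 * that the affine portal's BSCN mask covers the allocated BPID. */
	return bman_new_pool(&params);
}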
-
-void bman_free_pool(struct bman_pool *pool)
-{
-#ifdef CONFIG_FSL_BMAN
- if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
- bm_pool_set(pool->params.bpid, zero_thresholds);
-#endif
- if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
- depletion_unlink(pool);
- if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
- if (pool->sp_fill)
- pr_err("Stockpile not flushed, has %u in bpid %u.\n",
- pool->sp_fill, pool->params.bpid);
- kfree(pool->sp);
- pool->sp = NULL;
- pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
- }
- if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
- bman_release_bpid(pool->params.bpid);
- kfree(pool);
-}
-EXPORT_SYMBOL(bman_free_pool);
-
-const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
-{
- return &pool->params;
-}
-EXPORT_SYMBOL(bman_get_params);
-
-static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
-{
- if (avail)
- bm_rcr_cce_prefetch(&p->p);
- else
- bm_rcr_cce_update(&p->p);
-}
-
-int bman_rcr_is_empty(void)
-{
- __maybe_unused unsigned long irqflags;
- struct bman_portal *p = get_affine_portal();
- u8 avail;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- update_rcr_ci(p, 0);
- avail = bm_rcr_get_fill(&p->p);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return avail == 0;
-}
-EXPORT_SYMBOL(bman_rcr_is_empty);
-
-static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
-#ifdef FSL_DPA_CAN_WAIT
- __maybe_unused struct bman_pool *pool,
-#endif
- __maybe_unused unsigned long *irqflags,
- __maybe_unused u32 flags)
-{
- struct bm_rcr_entry *r;
- u8 avail;
-
- *p = get_affine_portal();
- PORTAL_IRQ_LOCK(*p, (*irqflags));
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
- (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
- if ((*p)->rcri_owned) {
- PORTAL_IRQ_UNLOCK(*p, (*irqflags));
- put_affine_portal();
- return NULL;
- }
- (*p)->rcri_owned = pool;
- }
-#endif
- avail = bm_rcr_get_avail(&(*p)->p);
- if (avail < 2)
- update_rcr_ci(*p, avail);
- r = bm_rcr_start(&(*p)->p);
- if (unlikely(!r)) {
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
- (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
- (*p)->rcri_owned = NULL;
-#endif
- PORTAL_IRQ_UNLOCK(*p, (*irqflags));
- put_affine_portal();
- }
- return r;
-}
-
-#ifdef FSL_DPA_CAN_WAIT
-static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
- struct bman_pool *pool,
- __maybe_unused unsigned long *irqflags,
- u32 flags)
-{
- struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
-
- if (!rcr)
- bm_rcr_set_ithresh(&(*p)->p, 1);
- return rcr;
-}
-
-static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
- struct bman_pool *pool,
- __maybe_unused unsigned long *irqflags,
- u32 flags)
-{
- struct bm_rcr_entry *rcr;
-#ifndef FSL_DPA_CAN_WAIT_SYNC
- pool = NULL;
-#endif
-#ifndef __rtems__
- if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (rcr = __wait_rel_start(p, pool, irqflags, flags)));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue,
- (rcr = __wait_rel_start(p, pool, irqflags, flags)));
- return rcr;
-}
-#endif
-
-/* To facilitate better copying of bufs into the ring without either (a) copying
- * noise into the first byte (prematurely triggering the command) or (b) being
- * very inefficient by copying small fields using read-modify-write */
-struct overlay_bm_buffer {
- u32 first;
- u32 second;
-};
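To make the masking in __bman_release() below concrete, a worked illustration with hypothetical values (bit positions inferred from the 0x0000ffff and 0x00ff0000 masks used there):

/* Hypothetical: bpid = 42 (0x2a), o_src->first = 0x12345678
 *   o_src->first & 0x0000ffff      = 0x00005678  (low half kept as-is)
 *   ((u32)42 << 16) & 0x00ff0000   = 0x002a0000  (bpid placed in bits 23..16)
 *   o_dest->first                  = 0x002a5678  (verb byte, bits 31..24, left
 *                                                 zero so the command cannot
 *                                                 trigger prematurely)
 */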
-
-static inline int __bman_release(struct bman_pool *pool,
- const struct bm_buffer *bufs, u8 num, u32 flags)
-{
- struct bman_portal *p;
- struct bm_rcr_entry *r;
- struct overlay_bm_buffer *o_dest;
- struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
- __maybe_unused unsigned long irqflags;
- u32 i = num - 1;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & BMAN_RELEASE_FLAG_WAIT)
- r = wait_rel_start(&p, pool, &irqflags, flags);
- else
- r = try_rel_start(&p, pool, &irqflags, flags);
-#else
- r = try_rel_start(&p, &irqflags, flags);
-#endif
- if (!r)
- return -EBUSY;
- /* We can copy all but the first entry, as this can trigger badness
- * with the valid-bit. Use the overlay to mask the verb byte. */
- o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
- o_dest->first = (o_src->first & 0x0000ffff) |
- (((u32)pool->params.bpid << 16) & 0x00ff0000);
- o_dest->second = o_src->second;
- if (i)
- copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
- bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
- (num & BM_RCR_VERB_BUFCOUNT_MASK));
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- /* if we wish to sync we need to set the threshold after h/w sees the
- * new ring entry. As we're mixing cache-enabled and cache-inhibited
- * accesses, this requires a heavy-weight sync. */
- if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
- (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
- hwsync();
- bm_rcr_set_ithresh(&p->p, 1);
- }
-#endif
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
- (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->rcri_owned != pool));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->rcri_owned != pool));
- }
-#endif
- return 0;
-}
-
-int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
- u32 flags)
-{
- int ret = 0;
-
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (!num || (num > 8))
- return -EINVAL;
- if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
- return -EINVAL;
-#endif
- /* Without stockpile, this API is a pass-through to the h/w operation */
- if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
- return __bman_release(pool, bufs, num, flags);
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (!atomic_dec_and_test(&pool->in_use)) {
-		pr_crit("Parallel attempts to enter bman_release() detected.");
- panic("only one instance of bman_released/acquired allowed");
- }
-#endif
- /* This needs some explanation. Adding the given buffers may take the
- * stockpile over the threshold, but in fact the stockpile may already
- * *be* over the threshold if a previous release-to-hw attempt had
- * failed. So we have 3 cases to cover;
- * 1. we add to the stockpile and don't hit the threshold,
- * 2. we add to the stockpile, hit the threshold and release-to-hw,
- * 3. we have to release-to-hw before adding to the stockpile
- * (not enough room in the stockpile for case 2).
- * Our constraints on thresholds guarantee that in case 3, there must be
- * at least 8 bufs already in the stockpile, so all release-to-hw ops
- * are for 8 bufs. Despite all this, the API must indicate whether the
- * given buffers were taken off the caller's hands, irrespective of
- * whether a release-to-hw was attempted. */
- while (num) {
- /* Add buffers to stockpile if they fit */
- if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
- copy_words(pool->sp + pool->sp_fill, bufs,
- sizeof(struct bm_buffer) * num);
- pool->sp_fill += num;
- num = 0; /* --> will return success no matter what */
- }
- /* Do hw op if hitting the high-water threshold */
- if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
- ret = __bman_release(pool,
- pool->sp + (pool->sp_fill - 8), 8, flags);
- if (ret) {
- ret = (num ? ret : 0);
- goto release_done;
- }
- pool->sp_fill -= 8;
- }
- }
-release_done:
-#ifdef CONFIG_FSL_DPA_CHECKING
- atomic_inc(&pool->in_use);
-#endif
- return ret;
-}
-EXPORT_SYMBOL(bman_release);
-
-static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
- u8 num)
-{
- struct bman_portal *p = get_affine_portal();
- struct bm_mc_command *mcc;
- struct bm_mc_result *mcr;
- __maybe_unused unsigned long irqflags;
- int ret;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = bm_mc_start(&p->p);
- mcc->acquire.bpid = pool->params.bpid;
- bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
- (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
- while (!(mcr = bm_mc_result(&p->p)))
- cpu_relax();
- ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
- if (bufs)
- copy_words(&bufs[0], &mcr->acquire.bufs[0],
- num * sizeof(bufs[0]));
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (ret != num)
- ret = -ENOMEM;
- return ret;
-}
-
-int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
- u32 flags)
-{
- int ret = 0;
-
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (!num || (num > 8))
- return -EINVAL;
- if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
- return -EINVAL;
-#endif
- /* Without stockpile, this API is a pass-through to the h/w operation */
- if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
- return __bman_acquire(pool, bufs, num);
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (!atomic_dec_and_test(&pool->in_use)) {
- pr_crit("Parallel attempts to enter bman_acquire() detected.");
- panic("only one instance of bman_released/acquired allowed");
- }
-#endif
- /* Only need a h/w op if we'll hit the low-water thresh */
- if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
- (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
- /* refill stockpile with max amount, but if max amount
-		 * isn't available, try the amount the user wants */
- int bufcount = 8;
-
- ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
- if (ret < 0 && bufcount != num) {
- bufcount = num;
- /* Maybe buffer pool has less than 8 */
- ret = __bman_acquire(pool, pool->sp + pool->sp_fill,
- bufcount);
- }
- if (ret < 0)
- goto hw_starved;
- DPA_ASSERT(ret == bufcount);
- pool->sp_fill += bufcount;
- } else {
-hw_starved:
- if (pool->sp_fill < num) {
- ret = -ENOMEM;
- goto acquire_done;
- }
- }
- copy_words(bufs, pool->sp + (pool->sp_fill - num),
- sizeof(struct bm_buffer) * num);
- pool->sp_fill -= num;
- ret = num;
-acquire_done:
-#ifdef CONFIG_FSL_DPA_CHECKING
- atomic_inc(&pool->in_use);
-#endif
- return ret;
-}
-EXPORT_SYMBOL(bman_acquire);
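A minimal release/acquire round trip from a hypothetical caller. bm_buffer_set64() is assumed to be the usual BMan buffer-address setter; if this tree names the helper differently, substitute accordingly:

/* Hypothetical round trip on a (possibly stockpiled) pool. */
static int my_roundtrip(struct bman_pool *pool, dma_addr_t addr)
{
	struct bm_buffer buf;
	int ret;

	bm_buffer_set64(&buf, addr);		/* assumed helper */
	ret = bman_release(pool, &buf, 1, 0);	/* may only reach the stockpile */
	if (ret)
		return ret;
	ret = bman_acquire(pool, &buf, 1, 0);	/* 1..8 buffers per call */
	return ret < 0 ? ret : 0;
}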
-
-int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
-{
- u8 num;
- int ret;
-
- while (pool->sp_fill) {
- num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
- ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
- num, flags);
- if (ret)
- return ret;
- pool->sp_fill -= num;
- }
- return 0;
-}
-EXPORT_SYMBOL(bman_flush_stockpile);
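Teardown order matters: bman_free_pool() complains if the stockpile still holds buffers, so a caller would normally drain it first. A hypothetical sketch:

/* Hypothetical teardown of a stockpiled pool. */
static void my_destroy_pool(struct bman_pool *pool)
{
	if (bman_flush_stockpile(pool, 0))
		pr_err("stockpile not fully drained, buffers may leak\n");
	bman_free_pool(pool);
}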
-
-int bman_query_pools(struct bm_pool_state *state)
-{
- struct bman_portal *p = get_affine_portal();
- struct bm_mc_result *mcr;
- __maybe_unused unsigned long irqflags;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- bm_mc_start(&p->p);
- bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
- while (!(mcr = bm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
- *state = mcr->query;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return 0;
-}
-EXPORT_SYMBOL(bman_query_pools);
-
-#ifdef CONFIG_FSL_BMAN
-u32 bman_query_free_buffers(struct bman_pool *pool)
-{
- return bm_pool_free_buffers(pool->params.bpid);
-}
-EXPORT_SYMBOL(bman_query_free_buffers);
-
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
-{
- u32 bpid;
-
- bpid = bman_get_params(pool)->bpid;
-
- return bm_pool_set(bpid, thresholds);
-}
-EXPORT_SYMBOL(bman_update_pool_thresholds);
-#endif
-
-int bman_shutdown_pool(u32 bpid)
-{
- struct bman_portal *p = get_affine_portal();
- __maybe_unused unsigned long irqflags;
- int ret;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- ret = bm_shutdown_pool(&p->p, bpid);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(bman_shutdown_pool);
-
-const struct bm_portal_config *
-bman_get_bm_portal_config(const struct bman_portal *portal)
-{
-#ifndef __rtems__
- return portal->sharing_redirect ? NULL : portal->config;
-#else /* __rtems__ */
- return portal->config;
-#endif /* __rtems__ */
-}
diff --git a/linux/drivers/soc/fsl/qbman/bman_ccsr.c b/linux/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 00000000..1df1d9c8
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,392 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low water mark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
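As a worked example with a made-up register value (the ID field shown is hypothetical):

/* Hypothetical: REG_IP_REV_1 reads 0x0a190201
 *   id    = 0x0a19   (v >> 16)
 *   major = 0x02     ((v >> 8) & 0xff)
 *   minor = 0x01     (v & 0xff)
 * fsl_bman_probe() below would map major.minor 2.1 to BMAN_REV21.
 */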
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
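For the 16 MiB FBPR region used by the RTEMS build further down, the size encoding works out as follows (a sketch; the FBPR_AR_RPRIO_HI priority bit is left clear, as in the code above):

/* size = 16777216 (16 MiB)  ->  exp = ilog2(size) = 24
 * REG_FBPR_AR is written with exp - 1 = 23, i.e. the region size is encoded
 * as a power-of-two exponent; ba must be aligned to that same size.
 */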
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+#ifndef __rtems__
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
+#define fbpr_a ((uintptr_t)&fbpr[0])
+#define fbpr_sz sizeof(fbpr)
+#endif /* __rtems__ */
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+#ifndef __rtems__
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+#endif /* __rtems__ */
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+#ifdef __rtems__
+ struct resource res_storage;
+#endif /* __rtems__ */
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+#ifndef __rtems__
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+#else /* __rtems__ */
+ res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
+#endif /* __rtems__ */
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ return 0;
+};
+
+#ifndef __rtems__
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
+#else /* __rtems__ */
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <rtems.h>
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+static struct bm_portal_config bman_configs[NR_CPUS];
+
+u16 bman_pool_max;
+
+SYSINIT_REFERENCE(irqs);
+
+static void
+bman_sysinit(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct device_node dn;
+ struct platform_device ofdev = {
+ .dev = {
+ .of_node = &dn,
+ .base = (uintptr_t)&qoriq
+ }
+ };
+ const char *name;
+ int cpu_count = (int)rtems_get_processor_count();
+ int cpu;
+ int ret;
+ int node;
+ int parent;
+
+ qoriq_reset_qman_and_bman();
+ qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
+ sizeof(qoriq_bman_portal[0]));
+ qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
+ sizeof(qoriq_bman_portal[1]));
+
+ memset(&dn, 0, sizeof(dn));
+
+ name = "fsl,bman";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no bman in FDT");
+
+ dn.full_name = name;
+ dn.offset = node;
+ ret = fsl_bman_probe(&ofdev);
+ if (ret != 0)
+ panic("bman: probe failed");
+
+ name = "fsl,bman-portal";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no portals in FDT");
+ parent = fdt_parent_offset(fdt, node);
+ if (parent < 0)
+ panic("bman: no parent of portals in FDT");
+ node = fdt_first_subnode(fdt, parent);
+
+ dn.full_name = name;
+ dn.offset = node;
+
+ for (cpu = 0; cpu < cpu_count; ++cpu) {
+ struct bm_portal_config *pcfg = &bman_configs[cpu];
+ struct bman_portal *portal;
+ struct resource res;
+
+ if (node < 0)
+ panic("bman: missing portal in FDT");
+
+ ret = of_address_to_resource(&dn, 0, &res);
+ if (ret != 0)
+ panic("bman: no portal CE address");
+ pcfg->addr_virt[0] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+ (uintptr_t)&qoriq_bman_portal[0][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+
+ ret = of_address_to_resource(&dn, 1, &res);
+ if (ret != 0)
+ panic("bman: no portal CI address");
+ pcfg->addr_virt[1] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+ (uintptr_t)&qoriq_bman_portal[2][0]);
+
+ pcfg->irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->irq == NO_IRQ)
+ panic("bman: no portal interrupt");
+
+ pcfg->cpu = cpu;
+
+ portal = bman_create_affine_portal(pcfg);
+ if (portal == NULL)
+ panic("bman: cannot create portal");
+
+ bman_p_irqsource_add(portal, BM_PIRQ_RCRI);
+
+ node = fdt_next_subnode(fdt, node);
+ dn.offset = node;
+ }
+}
+SYSINIT(bman, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
+#endif /* __rtems__ */
diff --git a/linux/drivers/soc/fsl/qbman/bman_portal.c b/linux/drivers/soc/fsl/qbman/bman_portal.c
deleted file mode 100644
index f9fd022c..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_portal.c
+++ /dev/null
@@ -1,399 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "bman_priv.h"
-
-/*
- * Global variables of the max portal/pool number this BMan version supported
- */
-u16 bman_ip_rev;
-EXPORT_SYMBOL(bman_ip_rev);
-
-u16 bman_pool_max;
-EXPORT_SYMBOL(bman_pool_max);
-
-/* After initialising cpus that own shared portal configs, we cache the
- * resulting portals (ie. not just the configs) in this array. Then we
- * initialise slave cpus that don't have their own portals, redirecting them to
- * portals from this cache in a round-robin assignment. */
-static struct bman_portal *shared_portals[NR_CPUS] __initdata;
-static int num_shared_portals __initdata;
-static int shared_portals_idx __initdata;
-
-static LIST_HEAD(unused_pcfgs);
-static void *affine_bportals[NR_CPUS];
-
-#ifndef __rtems__
-static const int flags[] = {0, _PAGE_GUARDED | _PAGE_NO_CACHE};
-#else /* __rtems__ */
-static const int flags[] = {0, 0};
-#endif /* __rtems__ */
-
-static struct bm_portal_config * __init get_pcfg(struct list_head *list)
-{
- struct bm_portal_config *pcfg;
-
- if (list_empty(list))
- return NULL;
- pcfg = list_entry(list->prev, struct bm_portal_config, list);
- list_del(&pcfg->list);
-
- return pcfg;
-}
-
-static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg)
-{
- struct bman_portal *p = bman_create_affine_portal(pcfg);
-
- if (p) {
-#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
- bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
-#endif
- pr_info("Portal %sinitialised, cpu %d\n",
- pcfg->public_cfg.is_shared ? "(shared) " : "",
- pcfg->public_cfg.cpu);
- affine_bportals[pcfg->public_cfg.cpu] = p;
- } else
- pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
-
- return p;
-}
-
-static void __init init_slave(int cpu)
-{
- struct bman_portal *p;
-
- p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
- if (!p)
- pr_err("Slave portal failure on cpu %d\n", cpu);
- else
- pr_info("Portal %s initialised, cpu %d\n", "(slave) ", cpu);
- if (shared_portals_idx >= num_shared_portals)
- shared_portals_idx = 0;
- affine_bportals[cpu] = p;
-}
-
-/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
- * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes
- * and/or ranges of indexes, with each being optionally prefixed by "s" to
- * explicitly mark it or them for sharing.
- * Eg;
- * bportals=s0,1-3,s4
- * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
- * portals, and any remaining cpus share the portals that are assigned to cpus 0
- * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
- * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
- * 0's portal.) */
-static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
-static struct cpumask want_shared __initdata; /* cpus requested with "s" */
-
-static int __init parse_bportals(char *str)
-{
- return parse_portals_bootarg(str, &want_shared, &want_unshared,
- "bportals");
-}
-__setup("bportals=", parse_bportals);
-
-static void __cold bman_offline_cpu(unsigned int cpu)
-{
- struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
- const struct bm_portal_config *pcfg;
-
- if (p) {
- pcfg = bman_get_bm_portal_config(p);
- if (pcfg)
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
- }
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void __cold bman_online_cpu(unsigned int cpu)
-{
- struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
- const struct bm_portal_config *pcfg;
-
- if (p) {
- pcfg = bman_get_bm_portal_config(p);
- if (pcfg)
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
- }
-}
-
-static int __cold bman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- bman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- bman_offline_cpu(cpu);
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bman_hotplug_cpu_notifier = {
- .notifier_call = bman_hotplug_cpu_callback,
-};
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int __cold bman_portal_probe(struct platform_device *of_dev)
-{
- struct device *dev = &of_dev->dev;
- struct device_node *node = dev->of_node;
- struct bm_portal_config *pcfg;
- int i, irq, ret;
-
- if (!of_device_is_available(node))
- return -ENODEV;
-
- if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
- of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
- bman_ip_rev = BMAN_REV10;
- bman_pool_max = 64;
- } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
- of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
- bman_ip_rev = BMAN_REV20;
- bman_pool_max = 8;
- } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") ||
- of_device_is_compatible(node, "fsl,bman-portal-2.1.1") ||
- of_device_is_compatible(node, "fsl,bman-portal-2.1.2") ||
- of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
- bman_ip_rev = BMAN_REV21;
- bman_pool_max = 64;
- }
-
- pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg) {
- dev_err(dev, "Can't allocate portal config\n");
- return -ENOMEM;
- }
-
- for (i = DPA_PORTAL_CE; i <= DPA_PORTAL_CI; i++) {
- ret = of_address_to_resource(node, i, pcfg->addr_phys + i);
- if (ret < 0) {
- dev_err(dev, "Can't get %s property 'reg::%d'\n",
- node->full_name, i);
- return ret;
- }
- ret = devm_request_resource(dev, &iomem_resource,
- pcfg->addr_phys + i);
- if (ret < 0)
- return ret;
- pcfg->addr_virt[i] = devm_ioremap_prot(dev,
- pcfg->addr_phys[i].start,
- resource_size(pcfg->addr_phys + i),
- flags[i]);
- if (!pcfg->addr_virt[i])
- return -ENXIO;
- }
-
- pcfg->public_cfg.cpu = -1;
-
- irq = irq_of_parse_and_map(node, 0);
- if (irq == NO_IRQ) {
- dev_err(dev, "Can't get %s property 'interrupts'\n",
- node->full_name);
- return -ENXIO;
- }
- pcfg->public_cfg.irq = irq;
-
- bman_depletion_fill(&pcfg->public_cfg.mask);
-
- list_add_tail(&pcfg->list, &unused_pcfgs);
-
- return 0;
-};
-
-static int __cold bman_portal_remove(struct platform_device *of_dev)
-{
- return 0;
-};
-
-static const struct of_device_id bman_portal_ids[] = {
- {
- .compatible = "fsl,bman-portal",
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, bman_portal_ids);
-
-static struct platform_driver bman_portal_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = bman_portal_ids,
- },
- .probe = bman_portal_probe,
- .remove = bman_portal_remove,
-};
-
-static int __init bman_portal_driver_register(struct platform_driver *drv)
-{
- int _errno;
- struct cpumask slave_cpus;
- struct cpumask unshared_cpus = *cpu_none_mask;
- struct cpumask shared_cpus = *cpu_none_mask;
- LIST_HEAD(unshared_pcfgs);
- LIST_HEAD(shared_pcfgs);
- struct bm_portal_config *pcfg;
- struct bman_portal *p;
- int cpu;
- struct cpumask offline_cpus;
-
- _errno = platform_driver_register(drv);
- if (_errno < 0)
- return _errno;
-
-/* Initialise the BMan driver. The meat of this function deals with portals. The
- * following describes the flow of portal handling; the code "steps" refer to
- * this description;
- * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
- * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
- * bound).
- * 2. The "want_shared" and "want_unshared" lists (as filled by the
- * "bportals=[...]" bootarg) are processed, allocating portals and assigning
- * them to cpus, placing them in the relevant list and setting ::cpu as
- * appropriate. If no "bportals" bootarg was present, the default is to try to
- * assign portals to all online cpus at the time of driver initialisation.
- * Any failure to allocate portals (when parsing the "want" lists or when
- * using default behaviour) will be silently tolerated (the "fixup" logic in
- * step 3 will determine what happens in this case).
- * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
- * sharing and sharing is required (because not all cpus have been assigned
- *    portals), then one portal will be marked for sharing. Conversely if no
- * sharing is required, any portals marked for sharing will not be shared. It
- * may be that sharing occurs when it wasn't expected, if portal allocation
- * failed to honour all the requested assignments (including the default
- * assignments if no bootarg is present).
- * 4. Unshared portals are initialised on their respective cpus.
- * 5. Shared portals are initialised on their respective cpus.
- * 6. Each remaining cpu is initialised to slave to one of the shared portals,
- * which are selected in a round-robin fashion.
- */
- /* Step 2. */
- for_each_possible_cpu(cpu) {
- if (cpumask_test_cpu(cpu, &want_shared)) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &shared_pcfgs);
- cpumask_set_cpu(cpu, &shared_cpus);
- }
- if (cpumask_test_cpu(cpu, &want_unshared)) {
- if (cpumask_test_cpu(cpu, &shared_cpus))
- continue;
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
- /* Default, give an unshared portal to each online cpu */
- for_each_possible_cpu(cpu) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- /* Step 3. */
- cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
- cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
- if (cpumask_empty(&slave_cpus)) {
- /* No sharing required */
- if (!list_empty(&shared_pcfgs)) {
- /* Migrate "shared" to "unshared" */
- cpumask_or(&unshared_cpus, &unshared_cpus,
- &shared_cpus);
- cpumask_clear(&shared_cpus);
- list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
- INIT_LIST_HEAD(&shared_pcfgs);
- }
- } else {
- /* Sharing required */
- if (list_empty(&shared_pcfgs)) {
- /* Migrate one "unshared" to "shared" */
- pcfg = get_pcfg(&unshared_pcfgs);
- if (!pcfg) {
- pr_crit("No portals available!\n");
- return 0;
- }
- cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
- cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
- list_add_tail(&pcfg->list, &shared_pcfgs);
- }
- }
- /* Step 4. */
- list_for_each_entry(pcfg, &unshared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 0;
- p = init_pcfg(pcfg);
- }
- /* Step 5. */
- list_for_each_entry(pcfg, &shared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 1;
- p = init_pcfg(pcfg);
- if (p)
- shared_portals[num_shared_portals++] = p;
- }
- /* Step 6. */
- if (!cpumask_empty(&slave_cpus))
- for_each_cpu(cpu, &slave_cpus)
- init_slave(cpu);
- pr_info("Portals initialised\n");
- cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
- for_each_cpu(cpu, &offline_cpus)
- bman_offline_cpu(cpu);
-
-#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
-#endif
-
- bman_seed_bpid_range(0, bman_pool_max);
-
- return 0;
-}
-
-module_driver(bman_portal_driver,
- bman_portal_driver_register, platform_driver_unregister);
diff --git a/linux/drivers/soc/fsl/qbman/bman_priv.h b/linux/drivers/soc/fsl/qbman/bman_priv.h
index e87f17a3..e8ba0be5 100644
--- a/linux/drivers/soc/fsl/qbman/bman_priv.h
+++ b/linux/drivers/soc/fsl/qbman/bman_priv.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,103 +34,49 @@
#include <soc/fsl/bman.h>
-/* used by CCSR and portal interrupt code */
-enum bm_isr_reg {
- bm_isr_status = 0,
- bm_isr_enable = 1,
- bm_isr_disable = 2,
- bm_isr_inhibit = 3
-};
-
-#ifdef CONFIG_FSL_BMAN
-/* Set depletion thresholds associated with a buffer pool. Requires that the
- * operating system have access to BMan CCSR (ie. compiled in support and
- * run-time access courtesy of the device-tree). */
-int bm_pool_set(u32 bpid, const u32 *thresholds);
-#define BM_POOL_THRESH_SW_ENTER 0
-#define BM_POOL_THRESH_SW_EXIT 1
-#define BM_POOL_THRESH_HW_ENTER 2
-#define BM_POOL_THRESH_HW_EXIT 3
-
-/* Read the free buffer count for a given buffer */
-u32 bm_pool_free_buffers(u32 bpid);
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
-#endif /* CONFIG_FSL_BMAN */
-
-#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE)
/* Revision info (for errata and feature handling) */
#define BMAN_REV10 0x0100
#define BMAN_REV20 0x0200
#define BMAN_REV21 0x0201
extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+extern struct gen_pool *bm_bpalloc;
+
struct bm_portal_config {
- /* Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited. */
- __iomem void *addr_virt[2];
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
#ifndef __rtems__
- struct resource addr_phys[2];
/* Allow these to be joined in lists */
struct list_head list;
#endif /* __rtems__ */
+ struct device *dev;
/* User-visible portal configuration settings */
- struct bman_portal_config public_cfg;
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
};
-/* Hooks from bman_driver.c in to bman_high.c */
-struct bman_portal *bman_create_portal(
- struct bman_portal *portal,
- const struct bm_portal_config *config);
struct bman_portal *bman_create_affine_portal(
const struct bm_portal_config *config);
-struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
- int cpu);
-void bman_destroy_portal(struct bman_portal *bm);
-
-const struct bm_portal_config *bman_destroy_affine_portal(void);
-
-/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
- * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
- * might fail (if the buffer pool is depleted). So this value provides some
- * "stagger" in that the bman_acquire() function will only fail if lots of bufs
- * are requested at once or if h/w has been tested a couple of times without
- * luck. The _HIGH value: when bman_release() is called and the stockpile
- * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
- * the release ring is full). So this value provides some "stagger" so that
- * ring-access is retried a couple of times prior to the API returning a
- * failure. The following *must* be true;
- * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
- * (to avoid thrashing)
- * BMAN_STOCKPILE_SZ >= 16
- * (as the release logic expects to either send 8 buffers to hw prior to
- * adding the given buffers to the stockpile or add the buffers to the
- * stockpile before sending 8 to hw, as the API must be an all-or-nothing
- * success/fail.)
+/*
+ * The bman_p_***() variant below may be called in a situation where the cpu
+ * to which the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
*/
-#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
-#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
-#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
-/*************************************************/
-/* BMan s/w corenet portal, low-level i/face */
-/*************************************************/
-
-/* Used by all portal interrupt registers except 'inhibit'
+/*
+ * Used by all portal interrupt registers except 'inhibit'
* This mask contains all the "irqsource" bits visible to API users
*/
-#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
-
-/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
- * the disable register" rather than "disable the ability to write". */
-#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
-#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
-#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
-#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
-#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
-#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
-#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
-#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal);
-#endif /* CONFIG_FSL_BMAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.c b/linux/drivers/soc/fsl/qbman/bman_test.c
deleted file mode 100644
index 154b7374..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_test.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "bman_test.h"
-
-MODULE_AUTHOR("Geoff Thorpe");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("BMan testing");
-
-static int test_init(void)
-{
-#ifdef CONFIG_FSL_BMAN_TEST_API
- int loop = 1;
-
- while (loop--)
- bman_test_api();
-#endif
-#ifdef CONFIG_FSL_BMAN_TEST_THRESH
- bman_test_thresh();
-#endif
- return 0;
-}
-
-static void test_exit(void)
-{
-}
-
-module_init(test_init);
-module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.h b/linux/drivers/soc/fsl/qbman/bman_test.h
index 9c51c38b..037ed342 100644
--- a/linux/drivers/soc/fsl/qbman/bman_test.h
+++ b/linux/drivers/soc/fsl/qbman/bman_test.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,5 +30,6 @@
#include "bman_priv.h"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
void bman_test_api(void);
-void bman_test_thresh(void);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_api.c b/linux/drivers/soc/fsl/qbman/bman_test_api.c
index 5585bdf6..f3e5ec03 100644
--- a/linux/drivers/soc/fsl/qbman/bman_test_api.c
+++ b/linux/drivers/soc/fsl/qbman/bman_test_api.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,33 +34,15 @@
#include "bman_test.h"
-/*************/
-/* constants */
-/*************/
-
-#define PORTAL_OPAQUE ((void *)0xf00dbeef)
-#define POOL_OPAQUE ((void *)0xdeadabba)
#define NUM_BUFS 93
#define LOOPS 3
#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
-/***************/
-/* global vars */
-/***************/
-
static struct bman_pool *pool;
-static int depleted;
static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
static int bufs_received;
-/* Predeclare the callback so we can instantiate pool parameters */
-static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
-
-/**********************/
-/* internal functions */
-/**********************/
-
static void bufs_init(void)
{
int i;
@@ -72,9 +54,10 @@ static void bufs_init(void)
static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
{
- if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
- /* On SoCs with BMan revison 2.0, BMan only respects the 40
+ /*
+	 * On SoCs with BMan revision 2.0, BMan only respects the 40
* LS-bits of buffer addresses, masking off the upper 8-bits on
* release commands. The API provides for 48-bit addresses
* because some SoCs support all 48-bits. When generating
@@ -84,11 +67,11 @@ static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
* don't match), or we need to mask the upper 8-bits off when
* comparing. We do the latter.
*/
- if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
- < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
return -1;
- if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
- > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
return 1;
} else {
if (bm_buffer_get64(a) < bm_buffer_get64(b))
@@ -110,79 +93,63 @@ static void bufs_confirm(void)
for (j = 0; j < NUM_BUFS; j++)
if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
matches++;
- BUG_ON(matches != 1);
+ WARN_ON(matches != 1);
}
}
-/********/
/* test */
-/********/
-
-static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
- void *pool_ctx, int __depleted)
-{
- BUG_ON(__pool != pool);
- BUG_ON(pool_ctx != POOL_OPAQUE);
- depleted = __depleted;
-}
-
void bman_test_api(void)
{
- struct bman_pool_params pparams = {
- .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
- .cb = depletion_cb,
- .cb_ctx = POOL_OPAQUE,
- };
int i, loops = LOOPS;
bufs_init();
- pr_info(" --- Starting high-level test ---\n");
+ pr_info("%s(): Starting\n", __func__);
- pool = bman_new_pool(&pparams);
- BUG_ON(!pool);
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
- /*******************/
/* Release buffers */
- /*******************/
do_loop:
i = 0;
while (i < NUM_BUFS) {
- u32 flags = BMAN_RELEASE_FLAG_WAIT;
int num = 8;
- if ((i + num) > NUM_BUFS)
+ if (i + num > NUM_BUFS)
num = NUM_BUFS - i;
- if ((i + num) == NUM_BUFS)
- flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
- if (bman_release(pool, bufs_in + i, num, flags))
- panic("bman_release() failed\n");
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
i += num;
}
- /*******************/
/* Acquire buffers */
- /*******************/
while (i > 0) {
int tmp, num = 8;
if (num > i)
num = i;
- tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
- BUG_ON(tmp != num);
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
i -= num;
}
- i = bman_acquire(pool, NULL, 1, 0);
- BUG_ON(i > 0);
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
bufs_confirm();
if (--loops)
goto do_loop;
- /************/
/* Clean up */
- /************/
bman_free_pool(pool);
- pr_info(" --- Finished high-level test ---\n");
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
}
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
deleted file mode 100644
index c0f045be..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
+++ /dev/null
@@ -1,216 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "bman_test.h"
-
-/* Test constants */
-#define TEST_NUMBUFS 129728
-#define TEST_EXIT 129536
-#define TEST_ENTRY 129024
-
-struct affine_test_data {
- struct task_struct *t;
- int cpu;
-#ifndef __rtems__
- int expect_affinity;
-#endif /* __rtems__ */
- int drain;
- int num_enter;
- int num_exit;
- struct list_head node;
- struct completion wakethread;
- struct completion wakeparent;
-};
-
-static void cb_depletion(struct bman_portal *portal,
- struct bman_pool *pool,
- void *opaque,
- int depleted)
-{
- struct affine_test_data *data = opaque;
- int c = smp_processor_id();
-
- pr_info("%s(): bpid=%d, depleted=%d, cpu=%d, original=%d\n", __func__,
- bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
- /* We should be executing on the CPU of the thread that owns the pool if
- * and that CPU has an affine portal (ie. it isn't slaved). */
-#ifndef __rtems__
- BUG_ON((c != data->cpu) && data->expect_affinity);
- BUG_ON((c == data->cpu) && !data->expect_affinity);
-#endif /* __rtems__ */
- if (depleted)
- data->num_enter++;
- else
- data->num_exit++;
-}
-
-/* Params used to set up a pool, this also dynamically allocates a BPID */
-static const struct bman_pool_params params_nocb = {
- .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
- .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
-};
-
-/* Params used to set up each cpu's pool with callbacks enabled */
-static struct bman_pool_params params_cb = {
- .bpid = 0, /* will be replaced to match pool_nocb */
- .flags = BMAN_POOL_FLAG_DEPLETION,
- .cb = cb_depletion
-};
-
-static struct bman_pool *pool_nocb;
-static LIST_HEAD(threads);
-
-static int affine_test(void *__data)
-{
- struct bman_pool *pool;
- struct affine_test_data *data = __data;
- struct bman_pool_params my_params = params_cb;
-
- pr_info("Thread %d: starting\n", data->cpu);
- /* create the pool */
- my_params.cb_ctx = data;
- pool = bman_new_pool(&my_params);
- BUG_ON(!pool);
- complete(&data->wakeparent);
- wait_for_completion(&data->wakethread);
- init_completion(&data->wakethread);
-
- /* if we're the drainer, we get signalled for that */
- if (data->drain) {
- struct bm_buffer buf;
- int ret;
-
- pr_info("Thread %d: draining...\n", data->cpu);
- do {
- ret = bman_acquire(pool, &buf, 1, 0);
- } while (ret > 0);
- pr_info("Thread %d: draining done.\n", data->cpu);
- complete(&data->wakeparent);
- wait_for_completion(&data->wakethread);
- init_completion(&data->wakethread);
- }
-
- /* cleanup */
- bman_free_pool(pool);
- while (!kthread_should_stop())
- cpu_relax();
- pr_info("Thread %d: exiting\n", data->cpu);
- return 0;
-}
-
-static struct affine_test_data *start_affine_test(int cpu, int drain)
-{
- struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
- if (!data)
- return NULL;
- data->cpu = cpu;
-#ifndef __rtems__
- data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
-#endif /* __rtems__ */
- data->drain = drain;
- data->num_enter = 0;
- data->num_exit = 0;
- init_completion(&data->wakethread);
- init_completion(&data->wakeparent);
- list_add_tail(&data->node, &threads);
- data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
-#ifndef __rtems__
- BUG_ON(IS_ERR(data->t));
-#else /* __rtems__ */
- BUG_ON(data->t == NULL);
-#endif /* __rtems__ */
- kthread_bind(data->t, cpu);
- wake_up_process(data->t);
- return data;
-}
-
-void bman_test_thresh(void)
-{
- int loop = TEST_NUMBUFS;
- int ret, num_cpus = 0;
- struct affine_test_data *data, *drainer = NULL;
-
- pr_info("%s(): Start\n", __func__);
-
- /* allocate a BPID and seed it */
- pool_nocb = bman_new_pool(&params_nocb);
- BUG_ON(!pool_nocb);
- while (loop--) {
- struct bm_buffer buf;
-
- bm_buffer_set64(&buf, 0x0badbeef + loop);
- ret = bman_release(pool_nocb, &buf, 1,
- BMAN_RELEASE_FLAG_WAIT);
- BUG_ON(ret);
- }
- while (!bman_rcr_is_empty())
- cpu_relax();
- pr_info("%s(): Buffers are in\n", __func__);
-
- /* create threads and wait for them to create pools */
- params_cb.bpid = bman_get_params(pool_nocb)->bpid;
-#ifndef __rtems__
- for_each_cpu(loop, cpu_online_mask) {
-#else /* __rtems__ */
- for (loop = 0; loop < rtems_get_processor_count(); ++loop) {
-#endif /* __rtems__ */
- data = start_affine_test(loop, drainer ? 0 : 1);
- BUG_ON(!data);
- if (!drainer)
- drainer = data;
- num_cpus++;
- wait_for_completion(&data->wakeparent);
- }
-
- /* signal the drainer to start draining */
- complete(&drainer->wakethread);
- wait_for_completion(&drainer->wakeparent);
- init_completion(&drainer->wakeparent);
-
- /* tear down */
- list_for_each_entry_safe(data, drainer, &threads, node) {
- complete(&data->wakethread);
- ret = kthread_stop(data->t);
- BUG_ON(ret);
- list_del(&data->node);
- /* check that we get the expected callbacks (and no others) */
- BUG_ON(data->num_enter != 1);
- BUG_ON(data->num_exit != 0);
- kfree(data);
- }
- bman_free_pool(pool_nocb);
-
- pr_info("%s(): Done\n", __func__);
-}
diff --git a/linux/drivers/soc/fsl/qbman/bman_utils.c b/linux/drivers/soc/fsl/qbman/bman_utils.c
deleted file mode 100644
index c6fa0b33..00000000
--- a/linux/drivers/soc/fsl/qbman/bman_utils.c
+++ /dev/null
@@ -1,76 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "dpaa_sys.h"
-
-#include <soc/fsl/bman.h>
-
-/* BMan APIs are front-ends to the common code */
-
-static DECLARE_DPAA_RESOURCE(bpalloc); /* BPID allocator */
-
-/* BPID allocator front-end */
-
-int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
-{
- return dpaa_resource_new(&bpalloc, result, count, align, partial);
-}
-EXPORT_SYMBOL(bman_alloc_bpid_range);
-
-static int bp_cleanup(u32 bpid)
-{
- return bman_shutdown_pool(bpid) == 0;
-}
-void bman_release_bpid_range(u32 bpid, u32 count)
-{
- u32 total_invalid = dpaa_resource_release(&bpalloc,
- bpid, count, bp_cleanup);
-
- if (total_invalid)
- pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
- bpid, bpid + count - 1, count, total_invalid);
-}
-EXPORT_SYMBOL(bman_release_bpid_range);
-
-void bman_seed_bpid_range(u32 bpid, u32 count)
-{
- dpaa_resource_seed(&bpalloc, bpid, count);
-}
-EXPORT_SYMBOL(bman_seed_bpid_range);
-
-int bman_reserve_bpid_range(u32 bpid, u32 count)
-{
- return dpaa_resource_reserve(&bpalloc, bpid, count);
-}
-EXPORT_SYMBOL(bman_reserve_bpid_range);
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_resource.c b/linux/drivers/soc/fsl/qbman/dpaa_resource.c
deleted file mode 100644
index 3f7b74bf..00000000
--- a/linux/drivers/soc/fsl/qbman/dpaa_resource.c
+++ /dev/null
@@ -1,363 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#if defined(CONFIG_FSL_BMAN_PORTAL) || \
- defined(CONFIG_FSL_BMAN_PORTAL_MODULE) || \
- defined(CONFIG_FSL_QMAN_PORTAL) || \
- defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
-#include "dpaa_sys.h"
-
-/* The allocator is a (possibly-empty) list of these */
-struct dpaa_resource_node {
- struct list_head list;
- u32 base;
- u32 num;
- /* refcount and is_alloced are only set
- when the node is in the used list */
- unsigned int refcount;
- int is_alloced;
-};
-
-#ifdef DPAA_RESOURCE_DEBUG
-#define DPRINT pr_info
-static void DUMP(struct dpaa_resource *alloc)
-{
- int off = 0;
- char buf[256];
- struct dpaa_resource_node *p;
-
- pr_info("Free Nodes\n");
- list_for_each_entry(p, &alloc->free, list) {
- if (off < 255)
- off += snprintf(buf + off, 255-off, "{%d,%d}",
- p->base, p->base + p->num - 1);
- }
- pr_info("%s\n", buf);
-
- off = 0;
- pr_info("Used Nodes\n");
- list_for_each_entry(p, &alloc->used, list) {
- if (off < 255)
- off += snprintf(buf + off, 255-off, "{%d,%d}",
- p->base, p->base + p->num - 1);
- }
- pr_info("%s\n", buf);
-}
-#else
-#define DPRINT(x...)
-#define DUMP(a)
-#endif
-
-int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
- u32 count, u32 align, int partial)
-{
- struct dpaa_resource_node *i = NULL, *next_best = NULL,
- *used_node = NULL;
- u32 base, next_best_base = 0, num = 0, next_best_num = 0;
- struct dpaa_resource_node *margin_left, *margin_right;
-
- *result = (u32)-1;
- DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
- DUMP(alloc);
- /* If 'align' is 0, it should behave as though it was 1 */
- if (!align)
- align = 1;
- margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
- if (!margin_left)
- goto err;
- margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
- if (!margin_right) {
- kfree(margin_left);
- goto err;
- }
- spin_lock_irq(&alloc->lock);
- list_for_each_entry(i, &alloc->free, list) {
- base = (i->base + align - 1) / align;
- base *= align;
- if ((base - i->base) >= i->num)
- /* alignment is impossible, regardless of count */
- continue;
- num = i->num - (base - i->base);
- if (num >= count) {
- /* this one will do nicely */
- num = count;
- goto done;
- }
- if (num > next_best_num) {
- next_best = i;
- next_best_base = base;
- next_best_num = num;
- }
- }
- if (partial && next_best) {
- i = next_best;
- base = next_best_base;
- num = next_best_num;
- } else
- i = NULL;
-done:
- if (i) {
- if (base != i->base) {
- margin_left->base = i->base;
- margin_left->num = base - i->base;
- list_add_tail(&margin_left->list, &i->list);
- } else
- kfree(margin_left);
- if ((base + num) < (i->base + i->num)) {
- margin_right->base = base + num;
- margin_right->num = (i->base + i->num) -
- (base + num);
- list_add(&margin_right->list, &i->list);
- } else
- kfree(margin_right);
- list_del(&i->list);
- kfree(i);
- *result = base;
- }
- spin_unlock_irq(&alloc->lock);
-err:
- DPRINT("returning %d\n", i ? num : -ENOMEM);
- DUMP(alloc);
- if (!i)
- return -ENOMEM;
-
- /* Add the allocation to the used list with a refcount of 1 */
- used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
- if (!used_node)
- return -ENOMEM;
- used_node->base = *result;
- used_node->num = num;
- used_node->refcount = 1;
- used_node->is_alloced = 1;
- list_add_tail(&used_node->list, &alloc->used);
- return (int)num;
-}
-EXPORT_SYMBOL(dpaa_resource_new);
-
-/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
- * forcing error-handling on to users in the deallocation path. */
-static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
- u32 count)
-{
- struct dpaa_resource_node *i,
- *node = kmalloc(sizeof(*node), GFP_ATOMIC);
-
- BUG_ON(!node);
- DPRINT("release_range(%d,%d)\n", base_id, count);
- DUMP(alloc);
- BUG_ON(!count);
- spin_lock_irq(&alloc->lock);
-
- node->base = base_id;
- node->num = count;
- list_for_each_entry(i, &alloc->free, list) {
- if (i->base >= node->base) {
- /* BUG_ON(any overlapping) */
- BUG_ON(i->base < (node->base + node->num));
- list_add_tail(&node->list, &i->list);
- goto done;
- }
- }
- list_add_tail(&node->list, &alloc->free);
-done:
- /* Merge to the left */
- i = list_entry(node->list.prev, struct dpaa_resource_node, list);
- if (node->list.prev != &alloc->free) {
- BUG_ON((i->base + i->num) > node->base);
- if ((i->base + i->num) == node->base) {
- node->base = i->base;
- node->num += i->num;
- list_del(&i->list);
- kfree(i);
- }
- }
- /* Merge to the right */
- i = list_entry(node->list.next, struct dpaa_resource_node, list);
- if (node->list.next != &alloc->free) {
- BUG_ON((node->base + node->num) > i->base);
- if ((node->base + node->num) == i->base) {
- node->num += i->num;
- list_del(&i->list);
- kfree(i);
- }
- }
- spin_unlock_irq(&alloc->lock);
- DUMP(alloc);
-}
-
-static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
- u32 count)
-{
- struct dpaa_resource_node *i = NULL;
-
- spin_lock_irq(&alloc->lock);
-
- /* First find the node in the used list and decrement its ref count */
- list_for_each_entry(i, &alloc->used, list) {
- if (i->base == base_id && i->num == count) {
- --i->refcount;
- if (i->refcount == 0) {
- list_del(&i->list);
- spin_unlock_irq(&alloc->lock);
- if (i->is_alloced)
- _dpaa_resource_free(alloc, base_id,
- count);
- kfree(i);
- return;
- }
- spin_unlock_irq(&alloc->lock);
- return;
- }
- }
- /* Couldn't find the allocation */
- pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
- base_id, count);
- spin_unlock_irq(&alloc->lock);
-}
-
-/* Same as free but no previous allocation checking is needed */
-void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count)
-{
- _dpaa_resource_free(alloc, base_id, count);
-}
-EXPORT_SYMBOL(dpaa_resource_seed);
-
-/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
- * desired range is not available, or 0 for success
- */
-int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num)
-{
- struct dpaa_resource_node *i = NULL, *used_node;
-
- DPRINT("alloc_reserve(%d,%d)\n", base, num);
- DUMP(alloc);
-
- spin_lock_irq(&alloc->lock);
-
- /* Check for the node in the used list.
- If found, increase it's refcount */
- list_for_each_entry(i, &alloc->used, list) {
- if ((i->base == base) && (i->num == num)) {
- ++i->refcount;
- spin_unlock_irq(&alloc->lock);
- return 0;
- }
- if ((base >= i->base) && (base < (i->base + i->num))) {
- /* This is an attempt to reserve a region that was
- already reserved or alloced with a different
- base or num */
- pr_err("Cannot reserve %d - %d, it overlaps with"
- " existing reservation from %d - %d\n",
- base, base + num - 1, i->base,
- i->base + i->num - 1);
- spin_unlock_irq(&alloc->lock);
- return -1;
- }
- }
- /* Check to make sure this ID isn't in the free list */
- list_for_each_entry(i, &alloc->free, list) {
- if ((base >= i->base) && (base < (i->base + i->num))) {
- /* yep, the reservation is within this node */
- pr_err("Cannot reserve %d - %d, it overlaps with"
- " free range %d - %d and must be alloced\n",
- base, base + num - 1,
- i->base, i->base + i->num - 1);
- spin_unlock_irq(&alloc->lock);
- return -1;
- }
- }
- /* Add the allocation to the used list with a refcount of 1 */
- used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
- if (!used_node) {
- spin_unlock_irq(&alloc->lock);
- return -ENOMEM;
-
- }
- used_node->base = base;
- used_node->num = num;
- used_node->refcount = 1;
- used_node->is_alloced = 0;
- list_add_tail(&used_node->list, &alloc->used);
- spin_unlock_irq(&alloc->lock);
- return 0;
-}
-EXPORT_SYMBOL(dpaa_resource_reserve);
-
-/* This is a sort-of-conditional dpaa_resource_free() routine. Eg. when
- * releasing FQIDs (probably from user-space), it can filter out those
- * that aren't in the OOS state (better to leak a h/w resource than to
- * crash). This function returns the number of invalid IDs that were not
- * released.
-*/
-u32 dpaa_resource_release(struct dpaa_resource *alloc,
- u32 id, u32 count, int (*is_valid)(u32 id))
-{
- int valid_mode = 0;
- u32 loop = id, total_invalid = 0;
-
- while (loop < (id + count)) {
- int isvalid = is_valid ? is_valid(loop) : 1;
-
- if (!valid_mode) {
- /* We're looking for a valid ID to terminate an invalid
- * range */
- if (isvalid) {
- /* We finished a range of invalid IDs, a valid
- * range is now underway */
- valid_mode = 1;
- count -= (loop - id);
- id = loop;
- } else
- total_invalid++;
- } else {
- /* We're looking for an invalid ID to terminate a
- * valid range */
- if (!isvalid) {
- /* Release the range of valid IDs, an unvalid
- * range is now underway */
- if (loop > id)
- dpaa_resource_free(alloc, id,
- loop - id);
- valid_mode = 0;
- }
- }
- loop++;
- }
- /* Release any unterminated range of valid IDs */
- if (valid_mode && count)
- dpaa_resource_free(alloc, id, count);
- return total_invalid;
-}
-EXPORT_SYMBOL(dpaa_resource_release);
-#endif /* CONFIG_FSL_*MAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_sys.h b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
index 85f87800..0e897026 100644
--- a/linux/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,23 +31,19 @@
#ifndef __DPAA_SYS_H
#define __DPAA_SYS_H
+#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_reserved_mem.h>
#include <linux/kthread.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
-#include <linux/ctype.h>
-#ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpu.h>
-#endif
-
-#include <asm/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
#ifdef __rtems__
#include <asm/cache.h>
#include <asm/mpc85xx.h>
@@ -55,238 +51,91 @@
#include <linux/io.h>
#include <linux/rbtree.h>
#include <bsp/linker-symbols.h>
+
#define DPAA_NOCACHENOLOAD_ALIGNED_REGION(designator, size) \
BSP_NOCACHENOLOAD_SUBSECTION(designator) __aligned(size) \
uint8_t designator[size]
-#endif /* __rtems__ */
-
-struct dpaa_resource {
- struct list_head free;
- spinlock_t lock;
- struct list_head used;
-};
-#define DECLARE_DPAA_RESOURCE(name) \
-struct dpaa_resource name = { \
- .free = { \
- .prev = &name.free, \
- .next = &name.free \
- }, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .used = { \
- .prev = &name.used, \
- .next = &name.used \
- } \
-}
-
-int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
- u32 count, u32 align, int partial);
-u32 dpaa_resource_release(struct dpaa_resource *alloc,
- u32 id, u32 count, int (*is_valid)(u32 id));
-void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count);
-int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num);
+#ifdef __PPC_CPU_E6500__
+#define dma_wmb() ppc_light_weight_synchronize()
+#else
+#define dma_wmb() ppc_enforce_in_order_execution_of_io()
+#endif
-/* When copying aligned words or shorts, try to avoid memcpy() */
-#define CONFIG_TRY_BETTER_MEMCPY
+#define prefetch(x) ppc_data_cache_block_touch(x)
+#endif /* __rtems__ */
/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
-#define DPA_PORTAL_CE 0
-#define DPA_PORTAL_CI 1
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
-/* Misc inline assists */
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
-/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
- * barriers and that dcb*() won't fall victim to compiler or execution
- * reordering with respect to other code/instructions that manipulate the same
- * cacheline. */
-#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+static inline void dpaa_flush(void *p)
+{
#ifndef __rtems__
-#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM32)
+ __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+ __flush_dcache_area(p, 64);
+#endif
#else /* __rtems__ */
- #ifdef __PPC_CPU_E6500__
- #define lwsync() ppc_light_weight_synchronize()
- #else
- #define lwsync() ppc_synchronize_data()
- #endif
-#endif /* __rtems__ */
-#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
-#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
-#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
-#define dcbi(p) dcbf(p)
-#ifdef CONFIG_PPC_E500MC
-#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
-#define dcbz_64(p) dcbzl(p)
-#define dcbf_64(p) dcbf(p)
-/* Commonly used combo */
-#define dcbit_ro(p) \
- do { \
- dcbi(p); \
- dcbt_ro(p); \
- } while (0)
+#ifdef __PPC_CPU_E6500__
+ ppc_data_cache_block_flush(p);
#else
-#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
-#define dcbz_64(p) \
- do { \
- dcbz((u32)p + 32); \
- dcbz(p); \
- } while (0)
-#define dcbf_64(p) \
- do { \
- dcbf((u32)p + 32); \
- dcbf(p); \
- } while (0)
-/* Commonly used combo */
-#define dcbit_ro(p) \
- do { \
- dcbi(p); \
- dcbi((u32)p + 32); \
- dcbt_ro(p); \
- dcbt_ro((u32)p + 32); \
- } while (0)
-#endif /* CONFIG_PPC_E500MC */
-
-static inline u64 mfatb(void)
-{
- u32 hi, lo, chk;
-
- do {
- hi = mfspr(SPRN_ATBU);
- lo = mfspr(SPRN_ATBL);
- chk = mfspr(SPRN_ATBU);
- } while (unlikely(hi != chk));
- return ((u64)hi << 32) | (u64)lo;
+#error "Unsupported platform"
+#endif
+#endif /* __rtems__ */
}
-#ifdef CONFIG_FSL_DPA_CHECKING
-#define DPA_ASSERT(x) WARN_ON(!(x))
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#ifndef __rtems__
+#define dpaa_zero(p) memset(p, 0, 64)
+#else /* __rtems__ */
+#ifdef __PPC_CPU_E6500__
+#define dpaa_zero(p) ppc_data_cache_block_clear_to_zero(p)
#else
-#define DPA_ASSERT(x)
+#define dpaa_zero(p) memset(p, 0, 64)
#endif
+#endif /* __rtems__ */
-#ifdef CONFIG_TRY_BETTER_MEMCPY
-static inline void copy_words(void *dest, const void *src, size_t sz)
+static inline void dpaa_touch_ro(void *p)
{
- u32 *__dest = dest;
- const u32 *__src = src;
- size_t __sz = sz >> 2;
-
- BUG_ON((unsigned long)dest & 0x3);
- BUG_ON((unsigned long)src & 0x3);
- BUG_ON(sz & 0x3);
- while (__sz--)
- *(__dest++) = *(__src++);
-}
-#else
-#define copy_words memcpy
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
#endif
+ prefetch(p);
+}
-/* RB-trees */
-
-/* We encapsulate RB-trees so that its easier to use non-linux forms in
- * non-linux systems. This also encapsulates the extra plumbing that linux code
- * usually provides when using RB-trees. This encapsulation assumes that the
- * data type held by the tree is u32. */
-
-struct dpa_rbtree {
- struct rb_root root;
-};
-#define DPA_RBTREE { .root = RB_ROOT }
-
-static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
{
- tree->root = RB_ROOT;
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
}
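/* Illustrative sketch (not part of this patch): how the helpers above are
 * typically paired - flush after the CPU writes a cacheline the hardware will
 * read, invalidate+touch before the CPU re-reads a cacheline the hardware may
 * have updated. Treating 'entry' as a 64-byte ring slot is an assumption. */
static inline void example_publish_entry(void *entry)
{
	dpaa_flush(entry);			/* push the CPU's write out to h/w */
}

static inline void example_prepare_to_poll(void *entry)
{
	dpaa_invalidate_touch_ro(entry);	/* drop stale data, prefetch fresh */
}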
-#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
-static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
-{ \
- struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
- while (*p) { \
- u32 item; \
- parent = *p; \
- item = rb_entry(parent, type, node_field)->val_field; \
- if (obj->val_field < item) \
- p = &parent->rb_left; \
- else if (obj->val_field > item) \
- p = &parent->rb_right; \
- else \
- return -EBUSY; \
- } \
- rb_link_node(&obj->node_field, parent, p); \
- rb_insert_color(&obj->node_field, &tree->root); \
- return 0; \
-} \
-static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
-{ \
- rb_erase(&obj->node_field, &tree->root); \
-} \
-static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
-{ \
- type *ret; \
- struct rb_node *p = tree->root.rb_node; \
- while (p) { \
- ret = rb_entry(p, type, node_field); \
- if (val < ret->val_field) \
- p = p->rb_left; \
- else if (val > ret->val_field) \
- p = p->rb_right; \
- else \
- return ret; \
- } \
- return NULL; \
-}
-#ifndef __rtems__
-/* Bootargs */
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
-/* QMan has "qportals=" and BMan has "bportals=", they use the same syntax
- * though; a comma-separated list of items, each item being a cpu index and/or a
- * range of cpu indices, and each item optionally be prefixed by "s" to indicate
- * that the portal associated with that cpu should be shared. See bman_driver.c
- * for more specifics. */
-static int __parse_portals_cpu(const char **s, unsigned int *cpu)
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
{
- *cpu = 0;
- if (!isdigit(**s))
- return -EINVAL;
- while (isdigit(**s))
- *cpu = *cpu * 10 + (*((*s)++) - '0');
- return 0;
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
}
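/* Illustrative worked example (not part of this patch): with an 8-entry ring,
 * dpaa_cyc_diff(8, first, last) counts the entries from 'first' up to but not
 * including 'last', wrapping past the end of the ring when needed:
 *   dpaa_cyc_diff(8, 2, 5) == 3    entries 2, 3, 4
 *   dpaa_cyc_diff(8, 6, 1) == 3    entries 6, 7, 0 (wraps)
 */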
-static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
- struct cpumask *want_unshared,
- const char *argname)
-{
- const char *s = str;
- unsigned int shared, cpu1, cpu2, loop;
-keep_going:
- if (*s == 's') {
- shared = 1;
- s++;
- } else
- shared = 0;
- if (__parse_portals_cpu(&s, &cpu1))
- goto err;
- if (*s == '-') {
- s++;
- if (__parse_portals_cpu(&s, &cpu2))
- goto err;
- if (cpu2 < cpu1)
- goto err;
- } else
- cpu2 = cpu1;
- for (loop = cpu1; loop <= cpu2; loop++)
- cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
- if (*s == ',') {
- s++;
- goto keep_going;
- } else if ((*s == '\0') || isspace(*s))
- return 0;
-err:
- pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
- (unsigned long)s - (unsigned long)str);
- return -EINVAL;
-}
-#endif /* __rtems__ */
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
+
#endif /* __DPAA_SYS_H */
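/* Illustrative sketch (not part of this patch): the intended use of
 * DPAA_GENALLOC_OFF above - IDs are seeded into a genalloc pool with the
 * offset added so a valid ID of 0 cannot be mistaken for gen_pool_alloc()'s
 * zero-on-failure return, and the offset is masked off again on allocation.
 * The pool handle and helper name are assumptions for illustration only. */
static inline int example_alloc_id(struct gen_pool *pool, u32 *result)
{
	unsigned long addr = gen_pool_alloc(pool, 1);

	if (!addr)
		return -ENOMEM;
	*result = addr & ~DPAA_GENALLOC_OFF;
	return 0;
}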
diff --git a/linux/drivers/soc/fsl/qbman/qman.c b/linux/drivers/soc/fsl/qbman/qman.c
index aa188888..244cf164 100644
--- a/linux/drivers/soc/fsl/qbman/qman.c
+++ b/linux/drivers/soc/fsl/qbman/qman.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,1074 +33,2875 @@
*/
#include "qman_priv.h"
+#ifdef __rtems__
+#undef dev_crit
+#undef dev_dbg
+#undef dev_err
+#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
+#define dev_dbg dev_crit
+#define dev_err dev_crit
+#endif /* __rtems__ */
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile
+ */
-#include <asm/cacheflush.h>
-
-/* Last updated for v00.800 of the BG */
-
-/* Register offsets */
-#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
-#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
-#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
-#define REG_DD_CFG 0x0200
-#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
-#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
-#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
-#define REG_PFDR_FPC 0x0400
-#define REG_PFDR_FP_HEAD 0x0404
-#define REG_PFDR_FP_TAIL 0x0408
-#define REG_PFDR_FP_LWIT 0x0410
-#define REG_PFDR_CFG 0x0414
-#define REG_SFDR_CFG 0x0500
-#define REG_SFDR_IN_USE 0x0504
-#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
-#define REG_WQ_DEF_ENC_WQID 0x0630
-#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
-#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
-#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
-#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
-#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
-#define REG_CM_CFG 0x0800
-#define REG_ECSR 0x0a00
-#define REG_ECIR 0x0a04
-#define REG_EADR 0x0a08
-#define REG_ECIR2 0x0a0c
-#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
-#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
-#define REG_MCR 0x0b00
-#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
-#define REG_MISC_CFG 0x0be0
-#define REG_HID_CFG 0x0bf0
-#define REG_IDLE_STAT 0x0bf4
-#define REG_IP_REV_1 0x0bf8
-#define REG_IP_REV_2 0x0bfc
-#define REG_FQD_BARE 0x0c00
-#define REG_PFDR_BARE 0x0c20
-#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
-#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
-#define REG_QCSP_BARE 0x0c80
-#define REG_QCSP_BAR 0x0c84
-#define REG_CI_SCHED_CFG 0x0d00
-#define REG_SRCIDR 0x0d04
-#define REG_LIODNR 0x0d08
-#define REG_CI_RLM_AVG 0x0d14
-#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
-#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
-#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
-#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
-
-/* Assists for QMAN_MCR */
-#define MCR_INIT_PFDR 0x01000000
-#define MCR_get_rslt(v) (u8)((v) >> 24)
-#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0))
-#define MCR_rslt_ok(r) (rslt == 0xf0)
-#define MCR_rslt_eaccess(r) (rslt == 0xf8)
-#define MCR_rslt_inval(r) (rslt == 0xff)
-
-/* Corenet initiator settings. Stash request queues are 4-deep to match cores
- ability to snarf. Stash priority is 3, other priorities are 2. */
-#define FSL_QMAN_CI_SCHED_CFG_SRCCIV 4
-#define FSL_QMAN_CI_SCHED_CFG_SRQ_W 3
-#define FSL_QMAN_CI_SCHED_CFG_RW_W 2
-#define FSL_QMAN_CI_SCHED_CFG_BMAN_W 2
-
-struct qman;
-
-/* Follows WQ_CS_CFG0-5 */
-enum qm_wq_class {
- qm_wq_portal = 0,
- qm_wq_pool = 1,
- qm_wq_fman0 = 2,
- qm_wq_fman1 = 3,
- qm_wq_caam = 4,
- qm_wq_pme = 5,
- qm_wq_first = qm_wq_portal,
- qm_wq_last = qm_wq_pme
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * dmode == h/w dequeue mode.
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
};
-/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
-enum qm_memory {
- qm_memory_fqd,
- qm_memory_pfdr
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
};
-/* Used by all error interrupt registers except 'inhibit' */
-#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
-#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
-#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
-#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
-#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
-#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
-#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
-#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
-#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
-#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
-#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
-#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
-#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
-#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
-#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
-#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
-#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
-#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
-
-/* QMAN_ECIR valid error bit */
-#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
- QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
- QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
-#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
- QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
- QM_EIRQ_IFSI)
-
-union qman_ecir {
- u32 ecir_raw;
- struct {
- u32 __reserved:2;
- u32 portal_type:1;
- u32 portal_num:5;
- u32 fqid:24;
- } __packed info;
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
};
-union qman_ecir2 {
- u32 ecir2_raw;
- struct {
- u32 portal_type:1;
- u32 __reserved:21;
- u32 portal_num:10;
- } __packed info;
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
};
-union qman_eadr {
- u32 eadr_raw;
- struct {
- u32 __reserved1:4;
- u32 memid:4;
- u32 __reserved2:12;
- u32 eadr:12;
- } __packed info;
- struct {
- u32 __reserved1:3;
- u32 memid:5;
- u32 __reserved:8;
- u32 eadr:16;
- } __packed info_rev3;
+/* MC (Management Command) command */
+/* "FQ" command layout */
+struct qm_mcc_fq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ __be32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+/* "CGR" command layout */
+struct qm_mcc_cgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
};
-struct qman_hwerr_txt {
- u32 mask;
- const char *txt;
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
};
-#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
-
-static const struct qman_hwerr_txt qman_hwerr_txts[] = {
- QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
- QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
- QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
- QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
- QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
- QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
- QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
- QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
- QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
- QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
- QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
- QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
- QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
- QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
- QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
- QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
- QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
- QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
};
-#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
-struct qman_error_info_mdata {
- u16 addr_mask;
- u16 bits;
- const char *txt;
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
};
-#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
-static const struct qman_error_info_mdata error_mdata[] = {
- QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
- QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
- QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
- QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
- QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
- QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
- QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
- QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
- QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
- QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
};
-#define QMAN_ERR_MDATA_COUNT \
- (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
-/* Add this in Kconfig */
-#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for a concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+}
-/**
- * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
- * @v: for accessors that write values, this is the 32-bit value
- *
- * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
- * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
- * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
- * "write the enable register" rather than "enable the write register"!
- */
-#define qm_err_isr_status_read(qm) \
- __qm_err_isr_read(qm, qm_isr_status)
-#define qm_err_isr_status_clear(qm, m) \
- __qm_err_isr_write(qm, qm_isr_status, m)
-#define qm_err_isr_enable_read(qm) \
- __qm_err_isr_read(qm, qm_isr_enable)
-#define qm_err_isr_enable_write(qm, v) \
- __qm_err_isr_write(qm, qm_isr_enable, v)
-#define qm_err_isr_disable_read(qm) \
- __qm_err_isr_read(qm, qm_isr_disable)
-#define qm_err_isr_disable_write(qm, v) \
- __qm_err_isr_write(qm, qm_isr_disable, v)
-#define qm_err_isr_inhibit(qm) \
- __qm_err_isr_write(qm, qm_isr_inhibit, 1)
-#define qm_err_isr_uninhibit(qm) \
- __qm_err_isr_write(qm, qm_isr_inhibit, 0)
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+}
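/* Illustrative sketch (not part of this patch): the accessors above are meant
 * to be used with the QM_REG_* cache-inhibited offsets defined earlier, e.g.
 * programming and reading back the interrupt enable register. The helper name
 * is an assumption for illustration only. */
static inline u32 example_set_ier(struct qm_portal *p, u32 mask)
{
	qm_out(p, QM_REG_IER, mask);	/* big-endian write via the CI window */
	return qm_in(p, QM_REG_IER);	/* read back, byte-swapped to CPU order */
}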
-/*
- * TODO: unimplemented registers
- *
- * Keeping a list here of QMan registers I have not yet covered;
- * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
- * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
- * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
- */
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
-/* Encapsulate "struct qman *" as a cast of the register space address. */
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
-static struct qman *qm_create(void *regs)
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return (struct qman *)regs;
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
-static inline u32 __qm_in(struct qman *qm, u32 offset)
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
- return ioread32be((void *)qm + offset);
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
}
-static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
- iowrite32be(val, (void *)qm + offset);
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}
-#define qm_in(reg) __qm_in(qm, REG_##reg)
-#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
-static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
- return __qm_in(qm, REG_ERR_ISR + (n << 2));
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}
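+
+/*
+ * Each lap of the ring flips the "valid" bit that the producer writes into
+ * the verb byte of every entry. The consumer (hardware for EQCR, software
+ * for DQRR/MR) accepts an entry only while its valid bit matches the
+ * current lap, so new entries can be detected without a shared count.
+ */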
-static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
{
- __qm_out(qm, REG_ERR_ISR + (n << 2), val);
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
}
-static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
- int ed, u8 sernd)
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
- DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
- (portal == qm_dc_portal_fman1));
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
- else
- qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
+ return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}
-static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
- u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
- u8 csw6, u8 csw7)
+static inline void qm_eqcr_finish(struct qm_portal *portal)
{
- qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
- ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
- ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
- ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
}
-static void qm_set_hid(struct qman *qm)
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
{
- qm_out(HID_CFG, 0);
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
}
-static void qm_set_corenet_initiator(struct qman *qm)
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
{
- qm_out(CI_SCHED_CFG,
- 0x80000000 | /* write srcciv enable */
- (FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
- (FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
- (FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
- FSL_QMAN_CI_SCHED_CFG_BMAN_W);
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
}
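+
+/*
+ * qm_eqcr_start_stash() only refreshes the consumer index when 'available'
+ * has dropped to zero, and it does so from the cache-enabled copy of
+ * EQCR_CI rather than the cache-inhibited register, so the common case of
+ * starting an enqueue avoids a cache-inhibited register read entirely.
+ */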
-static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor)
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
- u32 v = qm_in(IP_REV_1);
- *id = (v >> 16);
- *major = (v >> 8) & 0xff;
- *minor = v & 0xff;
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
+ DPAA_ASSERT(eqcr->available >= 1);
}
-static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
- int enable, int prio, int stash, u32 size)
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
- u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
- u32 exp = ilog2(size);
- /* choke if size isn't within range */
- DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
- is_power_of_2(size));
- /* choke if 'ba' has lower-alignment than 'size' */
- DPA_ASSERT(!(ba & (size - 1)));
- __qm_out(qm, offset, upper_32_bits(ba));
- __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
- __qm_out(qm, offset + REG_offset_AR,
- (enable ? 0x80000000 : 0) |
- (prio ? 0x40000000 : 0) |
- (stash ? 0x20000000 : 0) |
- (exp - 1));
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
}
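+
+/*
+ * The ordering in qm_eqcr_pvb_commit() is what makes PVB ("valid-bit")
+ * enqueues safe: the entry body is written first, dma_wmb() orders those
+ * stores before the verb byte (which carries the valid bit), and
+ * dpaa_flush() pushes the completed cacheline out, so QMan only ever sees
+ * a fully populated entry when the valid bit flips.
+ */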
-static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
- qm_out(PFDR_FP_LWIT, th & 0xffffff);
- qm_out(PFDR_CFG, k);
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}
-static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
- qm_out(SFDR_CFG, th & 0x3ff);
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
}
-static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
- u8 rslt = MCR_get_rslt(qm_in(MCR));
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
- DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
- /* Make sure the command interface is 'idle' */
- if (!MCR_rslt_idle(rslt))
- panic("QMAN_MCR isn't idle");
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
- /* Write the MCR command params then the verb */
- qm_out(MCP(0), pfdr_start);
- /* TODO: remove this - it's a workaround for a model bug that is
- * corrected in more recent versions. We use the workaround until
- * everyone has upgraded. */
- qm_out(MCP(1), (pfdr_start + num - 16));
- lwsync();
- qm_out(MCR, MCR_INIT_PFDR);
- /* Poll for the result */
- do {
- rslt = MCR_get_rslt(qm_in(MCR));
- } while (!MCR_rslt_idle(rslt));
- if (MCR_rslt_ok(rslt))
- return 0;
- if (MCR_rslt_eaccess(rslt))
- return -EACCES;
- if (MCR_rslt_inval(rslt))
- return -EINVAL;
- pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
- return -ENODEV;
+ return eqcr->available;
}
-/*****************/
-/* Config driver */
-/*****************/
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
-/* We support only one of these */
-static struct qman *qm;
-#ifndef __rtems__
-static struct device_node *qm_node;
-#endif /* __rtems__ */
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
-/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
- * during qman_init_ccsr(). */
-#ifndef __rtems__
-static dma_addr_t fqd_a, pfdr_a;
-static size_t fqd_sz, pfdr_sz;
+/* --- DQRR API --- */
-static int qman_fqd(struct reserved_mem *rmem)
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
{
- fqd_a = rmem->base;
- fqd_sz = rmem->size;
+ uintptr_t addr = (uintptr_t)p;
- WARN_ON(!(fqd_a && fqd_sz));
+ addr &= ~DQRR_CARRY;
- return 0;
+ return (const struct qm_dqrr_entry *)addr;
}
-RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-#else /* __rtems__ */
-static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
-#define fqd_a ((uintptr_t)&fqd[0])
-#define fqd_sz sizeof(fqd)
-static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432);
-#define pfdr_a ((uintptr_t)&pfdr[0])
-#define pfdr_sz sizeof(pfdr)
-#endif /* __rtems__ */
-size_t qman_fqd_size(void)
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
- return fqd_sz;
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}
-#ifndef __rtems__
-static int qman_pfdr(struct reserved_mem *rmem)
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
- pfdr_a = rmem->base;
- pfdr_sz = rmem->size;
+ return dqrr_carryclear(e + 1);
+}
- WARN_ON(!(pfdr_a && pfdr_sz));
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
return 0;
}
-RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
-/* Parse the <name> property to extract the memory location and size and
- * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default
- * size. Also flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static __init int parse_mem_property(struct device_node *node,
- dma_addr_t *addr, size_t *sz, int zero)
+static inline void qm_dqrr_finish(struct qm_portal *portal)
{
- int ret;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
- /* If using a "zero-pma", don't try to zero it, even if you asked */
- if (zero && of_find_property(node, "zero-pma", &ret)) {
- pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
- zero = 0;
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
}
+}
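+
+/*
+ * qm_dqrr_pvb_update() is how software notices that QMan has produced new
+ * dequeue results: it peeks at the entry the producer index points to and,
+ * if that entry's valid bit matches the current lap, advances 'pi'
+ * (toggling the expected valid bit on wrap) and bumps 'fill'. Nothing is
+ * consumed here; qm_dqrr_current()/qm_dqrr_next() walk the entries later.
+ */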
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
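+
+/*
+ * With discrete consumption (CDC mode) the driver acknowledges DQRR
+ * entries through DQRR_DCAP rather than by moving a consumer index:
+ * qm_dqrr_cdc_consume_1ptr() retires a single entry by ring index (and can
+ * ask QMan to park the FQ), while qm_dqrr_cdc_consume_n() retires an
+ * arbitrary set of entries in one shot via a bitmask.
+ */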
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
- if (zero) {
- /* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(*addr, *sz, 0);
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
- memset_io(tmpp, 0, *sz);
- flush_dcache_range((unsigned long)tmpp,
- (unsigned long)tmpp + *sz);
- iounmap(tmpp);
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
}
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
return 0;
}
-#else /* __rtems__ */
-#include <bsp/qoriq.h>
-#endif /* __rtems__ */
-/* TODO:
- * - there is obviously no handling of errors,
- * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
- * both memory resources to zero.
- */
-static int __init fsl_qman_init(struct device_node *node)
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
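+
+/*
+ * All of the management commands issued below follow the same pattern
+ * (sketch only; the real callers add their own locking and error handling):
+ *
+ *	mcc = qm_mc_start(&p->p);		claim and zero the command
+ *	qm_fqid_set(&mcc->fq, fqid);		fill in the command fields
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_...);	write the verb + valid bit
+ *	if (!qm_mc_result_timeout(&p->p, &mcr))
+ *		...				command timed out
+ *
+ * The response registers are used alternately (mc->rridx flips after each
+ * completed command); a non-zero verb in the active one signals that the
+ * result in *mcr is ready.
+ */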
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
+ clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
#ifndef __rtems__
- struct resource res;
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
#endif /* __rtems__ */
- u32 __iomem *regs;
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
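+
+/*
+ * The lookup table is sized at two entries per FQID: even indices hold
+ * full-service frame queue objects, odd indices hold enqueue-only
+ * (QMAN_FQ_FLAG_NO_MODIFY) references to the same FQID, as set up in
+ * qman_create_fq() below.
+ */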
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
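+
+/*
+ * The 'tag' stored in an FQD's context_b field is only 32 bits wide, so on
+ * 64-bit builds it cannot hold a kernel pointer; the table index is stored
+ * instead and tag_to_fq() looks the object back up. On 32-bit builds the
+ * pointer itself is used directly as the tag.
+ */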
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+
+ u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
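+
+/*
+ * The portal ISR keeps only DQRR processing inline (__poll_portal_fast());
+ * congestion-state changes and message-ring entries are pushed onto the
+ * dedicated qm_portal_wq workqueue by __poll_portal_slow(), so the
+ * interrupt handler itself stays short.
+ */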
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = jiffies;
+
+ do {
+ now = jiffies;
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
#ifndef __rtems__
- const char *s;
- int ret, standby = 0;
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
#endif /* __rtems__ */
- u16 id;
- u8 major, minor;
-#ifndef __rtems__
- ret = of_address_to_resource(node, 0, &res);
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
if (ret) {
- pr_err("Can't get %s property 'reg'\n", node->full_name);
- return ret;
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
}
- s = of_get_property(node, "fsl,hv-claimable", &ret);
- if (s && !strcmp(s, "standby"))
- standby = 1;
- if (!standby) {
- ret = parse_mem_property(node, &fqd_a, &fqd_sz, 1);
- BUG_ON(ret);
- ret = parse_mem_property(node, &pfdr_a, &pfdr_sz, 0);
- BUG_ON(ret);
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_err(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
}
-#else /* __rtems__ */
- memset((void *)fqd_a, 0, fqd_sz);
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISDR, 0);
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+#ifndef __rtems__
+fail_affinity:
#endif /* __rtems__ */
- /* Global configuration */
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
#ifndef __rtems__
- regs = ioremap(res.start, res.end - res.start + 1);
-#else /* __rtems__ */
- regs = (u32 __iomem *)&qoriq.qman;
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
#endif /* __rtems__ */
- qm = qm_create(regs);
+ affine_portals[c->cpu] = portal;
#ifndef __rtems__
- qm_node = node;
+ spin_unlock(&affine_mask_lock);
#endif /* __rtems__ */
- qm_get_version(qm, &id, &major, &minor);
- pr_info("Ver: %04x,%02x,%02x\n", id, major, minor);
- if (!qman_ip_rev) {
- if ((major == 1) && (minor == 0)) {
- pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
#ifndef __rtems__
- iounmap(regs);
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+#else /* __rtems__ */
+ (void)cpu;
#endif /* __rtems__ */
- return -ENODEV;
- } else if ((major == 1) && (minor == 1))
- qman_ip_rev = QMAN_REV11;
- else if ((major == 1) && (minor == 2))
- qman_ip_rev = QMAN_REV12;
- else if ((major == 2) && (minor == 0))
- qman_ip_rev = QMAN_REV20;
- else if ((major == 3) && (minor == 0))
- qman_ip_rev = QMAN_REV30;
- else if ((major == 3) && (minor == 1))
- qman_ip_rev = QMAN_REV31;
- else {
- pr_warn("Unknown version, default to rev1.1\n");
- qman_ip_rev = QMAN_REV11;
- }
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
}
+}
-#ifndef __rtems__
- if (standby) {
- pr_info(" -> in standby mode\n");
- return 0;
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ return;
}
-#endif /* __rtems__ */
- return 0;
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
}
-int qman_have_ccsr(void)
+static void qm_mr_process_task(struct work_struct *work)
{
- return qm ? 1 : 0;
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+			/* It's a software ERN */
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ preempt_enable();
}
-#ifndef __rtems__
-__init void qman_init_early(void)
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
- struct device_node *dn;
- int ret;
+ if (is & QM_PIRQ_CSCI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
- for_each_compatible_node(dn, NULL, "fsl,qman") {
- if (qm)
- pr_err("%s: only one 'fsl,qman' allowed\n",
- dn->full_name);
- else {
- if (!of_device_is_available(dn))
- continue;
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
- ret = fsl_qman_init(dn);
- BUG_ON(ret);
- }
+ if (is & QM_PIRQ_MRI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
}
+
+ return is;
}
-#else /* __rtems__ */
-#include <bsp/fdt.h>
-static void
-qman_sysinit(void)
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
- const char *fdt = bsp_fdt_get();
- struct device_node dn;
- const char *name;
- int node;
- int ret;
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
- memset(&dn, 0, sizeof(dn));
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+			 * consuming this entry or advancing to the next one.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
- name = "fsl,qman";
- node = fdt_node_offset_by_compatible(fdt, 0, name);
- if (node < 0)
- panic("qman: no qman in FDT");
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
- dn.full_name = name;
- dn.offset = node;
+ local_irq_save(irqflags);
+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
- ret = fsl_qman_init(&dn);
- if (ret != 0)
- panic("qman: init 1 failed");
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
- ret = qman_init_ccsr(&dn);
- if (ret != 0)
- panic("qman: init CCSR failed");
+#ifndef __rtems__
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
- ret = qman_init(&dn);
- if (ret != 0)
- panic("qman: init 2 failed");
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
- ret = qman_resource_init();
- if (ret != 0)
- panic("qman: resource init failed");
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
}
-SYSINIT(qman_sysinit, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
+EXPORT_SYMBOL(qman_affine_channel);
#endif /* __rtems__ */
-static void log_edata_bits(u32 bit_count)
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
- u32 i, j, mask = 0xffffffff;
+ return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
- pr_warn("ErrInt, EDATA:\n");
- i = bit_count/32;
- if (bit_count%32) {
- i++;
- mask = ~(mask << bit_count%32);
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
}
- j = 16-i;
- pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
- j++;
- for (; j < 16; j++)
- pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
+ return "<unknown MCR result>";
}
-static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
- union qman_ecir ecir_val;
- union qman_eadr eadr_val;
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
- ecir_val.ecir_raw = qm_in(ECIR);
- /* Is portal info valid */
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
- union qman_ecir2 ecir2_val;
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
- ecir2_val.ecir2_raw = qm_in(ECIR2);
- if (ecsr_val & PORTAL_ECSR_ERR) {
- pr_warn("ErrInt: %s id %d\n",
- ecir2_val.info.portal_type ? "DCP" : "SWP",
- ecir2_val.info.portal_num);
- }
- if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
- pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
-
- if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
- eadr_val.eadr_raw = qm_in(EADR);
- pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
- error_mdata[eadr_val.info_rev3.memid].txt,
- error_mdata[eadr_val.info_rev3.memid].addr_mask
- & eadr_val.info_rev3.eadr);
- log_edata_bits(
- error_mdata[eadr_val.info_rev3.memid].bits);
- }
- } else {
- if (ecsr_val & PORTAL_ECSR_ERR) {
- pr_warn("ErrInt: %s id %d\n",
- ecir_val.info.portal_type ? "DCP" : "SWP",
- ecir_val.info.portal_num);
- }
- if (ecsr_val & FQID_ECSR_ERR)
- pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
-
- if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
- eadr_val.eadr_raw = qm_in(EADR);
- pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
- error_mdata[eadr_val.info.memid].txt,
- error_mdata[eadr_val.info.memid].addr_mask
- & eadr_val.info.eadr);
- log_edata_bits(error_mdata[eadr_val.info.memid].bits);
- }
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
}
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
+EXPORT_SYMBOL(qman_destroy_fq);
-/* QMan interrupt handler */
-static irqreturn_t qman_isr(int irq, void *ptr)
+u32 qman_fq_fqid(struct qman_fq *fq)
{
- u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
- ier_val = qm_err_isr_enable_read(qm);
- isr_val = qm_err_isr_status_read(qm);
- ecsr_val = qm_in(ECSR);
- isr_mask = isr_val & ier_val;
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
- if (!isr_mask)
- return IRQ_NONE;
- for (i = 0; i < QMAN_HWE_COUNT; i++) {
- if (qman_hwerr_txts[i].mask & isr_mask) {
- pr_warn("ErrInt: %s\n", qman_hwerr_txts[i].txt);
- if (qman_hwerr_txts[i].mask & ecsr_val) {
- log_additional_error_info(isr_mask, ecsr_val);
- /* Re-arm error capture registers */
- qm_out(ECSR, ecsr_val);
- }
- if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
- pr_devel("Un-enabling error 0x%x\n",
- qman_hwerr_txts[i].mask);
- ier_val &= ~qman_hwerr_txts[i].mask;
- qm_err_isr_enable_write(qm, ier_val);
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+#ifndef __rtems__
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
}
+#else /* __rtems__ */
+ phys_fq = (dma_addr_t)fq;
+#endif /* __rtems__ */
+
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
- qm_err_isr_status_clear(qm, isr_val);
- return IRQ_HANDLED;
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
}
+EXPORT_SYMBOL(qman_init_fq);
-static int __bind_irq(struct device_node *node)
+int qman_schedule_fq(struct qman_fq *fq)
{
- int ret, err_irq;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
- err_irq = of_irq_to_resource(node, 0, NULL);
- if (err_irq == NO_IRQ) {
- pr_info("Can't get %s property 'interrupts'\n",
- node->full_name);
- return -ENODEV;
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
}
- ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", node);
- if (ret) {
- pr_err("request_irq() failed %d for '%s'\n",
- ret, node->full_name);
- return -ENODEV;
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
}
- /* Write-to-clear any stale bits, (eg. starvation being asserted prior
- * to resource allocation during driver init). */
- qm_err_isr_status_clear(qm, 0xffffffff);
- /* Enable Error Interrupts */
- qm_err_isr_enable_write(qm, 0xffffffff);
- return 0;
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
}
+EXPORT_SYMBOL(qman_schedule_fq);
-int qman_init_ccsr(struct device_node *node)
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
int ret;
+ u8 res;
-#ifndef __rtems__
- if (!qman_have_ccsr())
- return 0;
- if (node != qm_node)
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
return -EINVAL;
-#endif /* __rtems__ */
- /* FQD memory */
- qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
- /* PFDR memory */
- qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
- qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
- /* thresholds */
- qm_set_pfdr_threshold(qm, 512, 64);
- qm_set_sfdr_threshold(qm, 128);
- /* clear stale PEBI bit from interrupt status register */
- qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
- /* corenet initiator settings */
- qm_set_corenet_initiator(qm);
- /* HID settings */
- qm_set_hid(qm);
- /* Set scheduling weights to defaults */
- for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
- qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
- /* We are not prepared to accept ERNs for hardware enqueues */
- qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
- qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
- /* Initialise Error Interrupt Handler */
- ret = __bind_irq(node);
- if (ret)
- return ret;
- return 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
}
+EXPORT_SYMBOL(qman_retire_fq);
-#define LIO_CFG_LIODN_MASK 0x0fff0000
-void qman_liodn_fixup(u16 channel)
+int qman_oos_fq(struct qman_fq *fq)
{
- static int done;
- static u32 liodn_offset;
- u32 before, after;
- int idx = channel - QM_CHANNEL_SWPORTAL0;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
- if (!qman_have_ccsr())
- return;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- before = qm_in(REV3_QCSP_LIO_CFG(idx));
- else
- before = qm_in(QCSP_LIO_CFG(idx));
- if (!done) {
- liodn_offset = before & LIO_CFG_LIODN_MASK;
- done = 1;
- return;
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
}
- after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- qm_out(REV3_QCSP_LIO_CFG(idx), after);
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
else
- qm_out(QCSP_LIO_CFG(idx), after);
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
}
-#define IO_CFG_SDEST_MASK 0x00ff0000
-int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+static int qman_query_fq_np(struct qman_fq *fq,
+ struct qm_mcr_queryfq_np *np)
{
- int idx = channel - QM_CHANNEL_SWPORTAL0;
- u32 before, after;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
- if (!qman_have_ccsr())
- return -ENODEV;
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
- before = qm_in(REV3_QCSP_IO_CFG(idx));
- /* Each pair of vcpu share the same SRQ(SDEST) */
- cpu_idx /= 2;
- after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
- qm_out(REV3_QCSP_IO_CFG(idx), after);
- } else {
- before = qm_in(QCSP_IO_CFG(idx));
- after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
- qm_out(QCSP_IO_CFG(idx), after);
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
}
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
return 0;
}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
-#define MISC_CFG_WPM_MASK 0x00000002
-int qm_set_wpm(int wpm)
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
{
- u32 before;
- u32 after;
+ int ret = 0;
- if (!qman_have_ccsr())
- return -ENODEV;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
- before = qm_in(MISC_CFG);
- after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
- qm_out(MISC_CFG, after);
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
return 0;
}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
-int qm_get_wpm(int *wpm)
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
- u32 before;
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ goto out;
- if (!qman_have_ccsr())
- return -ENODEV;
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
+ eq->fd = *fd;
- before = qm_in(MISC_CFG);
- *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
return 0;
}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
-#ifdef CONFIG_SYSFS
+out:
+ put_affine_portal();
+ return ret;
+}
-#define DRV_NAME "fsl-qman"
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
-static ssize_t show_pfdr_fpc(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
-};
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
-static ssize_t show_dlm_avg(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
- u32 data;
- int i;
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
- if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
- return -EINVAL;
- data = qm_in(DCP_DLM_AVG(i));
- return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
- (data & 0x000000ff)*390625);
-};
+static u8 qman_cgr_cpus[CGR_NUM];
-static ssize_t set_dlm_avg(struct device *dev,
- struct device_attribute *dev_attr, const char *buf, size_t count)
+void qman_init_cgr_all(void)
{
- unsigned long val;
- int i;
+ struct qman_cgr cgr;
+ int err_cnt = 0;
- if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
- return -EINVAL;
- if (kstrtoul(buf, 0, &val)) {
- dev_dbg(dev, "invalid input %s\n", buf);
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+
+ /* send init if flags indicate so */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
}
- qm_out(DCP_DLM_AVG(i), val);
- return count;
-};
-static ssize_t show_pfdr_cfg(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+ dev_err(p->config->dev, "CGR not owned by current portal");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
};
-static ssize_t set_pfdr_cfg(struct device *dev,
- struct device_attribute *dev_attr, const char *buf, size_t count)
+static int qman_delete_cgr_thread(void *p)
{
- unsigned long val;
+ struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+ int ret;
- if (kstrtoul(buf, 0, &val)) {
- dev_dbg(dev, "invalid input %s\n", buf);
- return -EINVAL;
- }
- qm_out(PFDR_CFG, val);
- return count;
-};
+ ret = qman_delete_cgr(cgr_comp->cgr);
+ complete(&cgr_comp->completion);
-static ssize_t show_sfdr_in_use(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+ return ret;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
-};
+ struct task_struct *thread;
+ struct cgr_comp cgr_comp;
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ init_completion(&cgr_comp.completion);
+ cgr_comp.cgr = cgr;
+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+ "cgr_del");
+
+ if (IS_ERR(thread))
+ goto out;
+
+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+ wake_up_process(thread);
+ wait_for_completion(&cgr_comp.completion);
+ preempt_enable();
+ return;
+ }
+out:
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
-static ssize_t show_idle_stat(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
-};
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
-static ssize_t show_ci_rlm_avg(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
{
- u32 data = qm_in(CI_RLM_AVG);
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
- return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
- (data & 0x000000ff)*390625);
-};
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
-static ssize_t set_ci_rlm_avg(struct device *dev,
- struct device_attribute *dev_attr, const char *buf, size_t count)
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
{
- unsigned long val;
+ struct qman_portal *p;
+#ifndef __rtems__
+ struct device *dev;
+#endif /* __rtems__ */
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, wq, res;
+ u8 state;
- if (kstrtoul(buf, 0, &val)) {
- dev_dbg(dev, "invalid input %s\n", buf);
- return -EINVAL;
+ p = get_affine_portal();
+#ifndef __rtems__
+ dev = p->config->dev;
+#endif /* __rtems__ */
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
}
- qm_out(CI_RLM_AVG, val);
- return count;
-};
-static ssize_t show_err_isr(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+		goto out; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			dev_err(dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+#ifdef __rtems__
+ (void)dequeue_wq;
+#endif /* __rtems__ */
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+				/* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&p->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&p->p, FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+ qm_dqrr_sdqcr_set(&p->p, 0);
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
-};
+ return portal->config;
+}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
-static ssize_t show_sbec(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
- int i;
+ unsigned long addr;
- if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
- return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
-};
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
-static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
-static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
-static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
-static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
- show_ci_rlm_avg, set_ci_rlm_avg);
-static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
-static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
-
-static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-
-static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
-static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
-
-static struct attribute *qman_dev_attributes[] = {
- &dev_attr_pfdr_fpc.attr,
- &dev_attr_pfdr_cfg.attr,
- &dev_attr_idle_stat.attr,
- &dev_attr_ci_rlm_avg.attr,
- &dev_attr_err_isr.attr,
- &dev_attr_dcp0_dlm_avg.attr,
- &dev_attr_dcp1_dlm_avg.attr,
- &dev_attr_dcp2_dlm_avg.attr,
- &dev_attr_dcp3_dlm_avg.attr,
- /* sfdr_in_use will be added if necessary */
- NULL
-};
+ *result = addr & ~DPAA_GENALLOC_OFF;
-static struct attribute *qman_dev_ecr_attributes[] = {
- &dev_attr_sbec_0.attr,
- &dev_attr_sbec_1.attr,
- &dev_attr_sbec_2.attr,
- &dev_attr_sbec_3.attr,
- &dev_attr_sbec_4.attr,
- &dev_attr_sbec_5.attr,
- &dev_attr_sbec_6.attr,
- &dev_attr_sbec_7.attr,
- &dev_attr_sbec_8.attr,
- &dev_attr_sbec_9.attr,
- &dev_attr_sbec_10.attr,
- &dev_attr_sbec_11.attr,
- &dev_attr_sbec_12.attr,
- &dev_attr_sbec_13.attr,
- &dev_attr_sbec_14.attr,
- NULL
-};
+ return 0;
+}
-/* root level */
-static const struct attribute_group qman_dev_attr_grp = {
- .name = NULL,
- .attrs = qman_dev_attributes
-};
-static const struct attribute_group qman_dev_ecr_grp = {
- .name = "error_capture",
- .attrs = qman_dev_ecr_attributes
-};
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
-static int of_fsl_qman_remove(struct platform_device *ofdev)
+int qman_alloc_pool_range(u32 *result, u32 count)
{
- sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
return 0;
-};
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+	/*
+	 * Query all FQDs starting from FQID 1 until we get an "invalid FQID"
+	 * error, looking for non-OOS FQDs whose destination channel is the
+	 * pool-channel being released. When a non-OOS FQD is found, we
+	 * attempt to clean it up.
+	 */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
-static int of_fsl_qman_probe(struct platform_device *ofdev)
+int qman_release_pool(u32 qp)
{
int ret;
- struct device *dev = &ofdev->dev;
- ret = sysfs_create_group(&dev->kobj, &qman_dev_attr_grp);
- if (ret)
- goto done;
- ret = sysfs_add_file_to_group(&dev->kobj,
- &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
- if (ret)
- goto del_group_0;
- ret = sysfs_create_group(&dev->kobj, &qman_dev_ecr_grp);
- if (ret)
- goto del_group_0;
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
- goto done;
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
-del_group_0:
- sysfs_remove_group(&dev->kobj, &qman_dev_attr_grp);
-done:
- if (ret)
- dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
- return ret;
-};
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
-static const struct of_device_id of_fsl_qman_ids[] = {
- {
- .compatible = "fsl,qman",
- },
- {}
-};
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+ fqd.cgid == cgrid) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
-static struct platform_driver of_fsl_qman_driver = {
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_fsl_qman_ids,
- },
- .probe = of_fsl_qman_probe,
- .remove = of_fsl_qman_remove,
-};
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
-builtin_platform_driver(of_fsl_qman_driver);
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
-#endif /* CONFIG_SYSFS */
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/linux/drivers/soc/fsl/qbman/qman.h b/linux/drivers/soc/fsl/qbman/qman.h
deleted file mode 100644
index 331db7c7..00000000
--- a/linux/drivers/soc/fsl/qbman/qman.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "qman_priv.h"
-
-/* Portal register assists */
-
-/* Cache-inhibited register offsets */
-#define QM_REG_EQCR_PI_CINH 0x0000
-#define QM_REG_EQCR_CI_CINH 0x0004
-#define QM_REG_EQCR_ITR 0x0008
-#define QM_REG_DQRR_PI_CINH 0x0040
-#define QM_REG_DQRR_CI_CINH 0x0044
-#define QM_REG_DQRR_ITR 0x0048
-#define QM_REG_DQRR_DCAP 0x0050
-#define QM_REG_DQRR_SDQCR 0x0054
-#define QM_REG_DQRR_VDQCR 0x0058
-#define QM_REG_DQRR_PDQCR 0x005c
-#define QM_REG_MR_PI_CINH 0x0080
-#define QM_REG_MR_CI_CINH 0x0084
-#define QM_REG_MR_ITR 0x0088
-#define QM_REG_CFG 0x0100
-#define QM_REG_ISR 0x0e00
-#define QM_REG_IIR 0x0e0c
-#define QM_REG_ITPR 0x0e14
-
-/* Cache-enabled register offsets */
-#define QM_CL_EQCR 0x0000
-#define QM_CL_DQRR 0x1000
-#define QM_CL_MR 0x2000
-#define QM_CL_EQCR_PI_CENA 0x3000
-#define QM_CL_EQCR_CI_CENA 0x3100
-#define QM_CL_DQRR_PI_CENA 0x3200
-#define QM_CL_DQRR_CI_CENA 0x3300
-#define QM_CL_MR_PI_CENA 0x3400
-#define QM_CL_MR_CI_CENA 0x3500
-#define QM_CL_CR 0x3800
-#define QM_CL_RR0 0x3900
-#define QM_CL_RR1 0x3940
-
-/* BTW, the drivers (and h/w programming model) already obtain the required
- * synchronisation for portal accesses via lwsync(), hwsync(), and
- * data-dependencies. Use of barrier()s or other order-preserving primitives
- * simply degrade performance. Hence the use of the __raw_*() interfaces, which
- * simply ensure that the compiler treats the portal registers as volatile (ie.
- * non-coherent). */
-
-/* Cache-inhibited register access. */
-#define __qm_in(qm, o) __raw_readl((qm)->addr_ci + (o))
-#define __qm_out(qm, o, val) __raw_writel((val), (qm)->addr_ci + (o))
-#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
-#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
-
-/* Cache-enabled (index) register access */
-#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
-#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
-#define __qm_cl_in(qm, o) __raw_readl((qm)->addr_ce + (o))
-#define __qm_cl_out(qm, o, val) \
- do { \
- u32 *__tmpclout = (qm)->addr_ce + (o); \
- __raw_writel((val), __tmpclout); \
- dcbf(__tmpclout); \
- } while (0)
-#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
-#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
-#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
-#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
-#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
-#define qm_cl_invalidate(reg)\
- __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
-
-/* Cache-enabled ring access */
-#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
-
-/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
- * analysis, look at using the "extra" bit in the ring index registers to avoid
- * cyclic issues. */
-static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
-{
- /* 'first' is included, 'last' is excluded */
- if (first <= last)
- return last - first;
- return ringsize + last - first;
-}
-
-/* Portal modes.
- * Enum types;
- * pmode == production mode
- * cmode == consumption mode,
- * dmode == h/w dequeue mode.
- * Enum values use 3 letter codes. First letter matches the portal mode,
- * remaining two letters indicate;
- * ci == cache-inhibited portal register
- * ce == cache-enabled portal register
- * vb == in-band valid-bit (cache-enabled)
- * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
- * As for "enum qm_dqrr_dmode", it should be self-explanatory.
- */
-enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
- qm_eqcr_pci = 0, /* PI index, cache-inhibited */
- qm_eqcr_pce = 1, /* PI index, cache-enabled */
- qm_eqcr_pvb = 2 /* valid-bit */
-};
-enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
- qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
- qm_dqrr_dpull = 1 /* PDQCR */
-};
-enum qm_dqrr_pmode { /* s/w-only */
- qm_dqrr_pci, /* reads DQRR_PI_CINH */
- qm_dqrr_pce, /* reads DQRR_PI_CENA */
- qm_dqrr_pvb /* reads valid-bit */
-};
-enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
- qm_dqrr_cci = 0, /* CI index, cache-inhibited */
- qm_dqrr_cce = 1, /* CI index, cache-enabled */
- qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
-};
-enum qm_mr_pmode { /* s/w-only */
- qm_mr_pci, /* reads MR_PI_CINH */
- qm_mr_pce, /* reads MR_PI_CENA */
- qm_mr_pvb /* reads valid-bit */
-};
-enum qm_mr_cmode { /* matches QCSP_CFG::MM */
- qm_mr_cci = 0, /* CI index, cache-inhibited */
- qm_mr_cce = 1 /* CI index, cache-enabled */
-};
-
-/* --- Portal structures --- */
-
-#define QM_EQCR_SIZE 8
-#define QM_DQRR_SIZE 16
-#define QM_MR_SIZE 8
-
-struct qm_eqcr {
- struct qm_eqcr_entry *ring, *cursor;
- u8 ci, available, ithresh, vbit;
-#ifdef CONFIG_FSL_DPA_CHECKING
- u32 busy;
- enum qm_eqcr_pmode pmode;
-#endif
-};
-
-struct qm_dqrr {
- const struct qm_dqrr_entry *ring, *cursor;
- u8 pi, ci, fill, ithresh, vbit;
-#ifdef CONFIG_FSL_DPA_CHECKING
- enum qm_dqrr_dmode dmode;
- enum qm_dqrr_pmode pmode;
- enum qm_dqrr_cmode cmode;
-#endif
-};
-
-struct qm_mr {
- const struct qm_mr_entry *ring, *cursor;
- u8 pi, ci, fill, ithresh, vbit;
-#ifdef CONFIG_FSL_DPA_CHECKING
- enum qm_mr_pmode pmode;
- enum qm_mr_cmode cmode;
-#endif
-};
-
-struct qm_mc {
- struct qm_mc_command *cr;
- struct qm_mc_result *rr;
- u8 rridx, vbit;
-#ifdef CONFIG_FSL_DPA_CHECKING
- enum {
- /* Can be _mc_start()ed */
- qman_mc_idle,
- /* Can be _mc_commit()ed or _mc_abort()ed */
- qman_mc_user,
- /* Can only be _mc_retry()ed */
- qman_mc_hw
- } state;
-#endif
-};
-
-#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
-
-struct qm_addr {
- void __iomem *addr_ce; /* cache-enabled */
- void __iomem *addr_ci; /* cache-inhibited */
-};
-
-struct qm_portal {
- /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
- * and including 'mc' fits within a cacheline (yay!). The 'config' part
- * is setup-only, so isn't a cause for a concern. In other words, don't
- * rearrange this structure on a whim, there be dragons ... */
- struct qm_addr addr;
- struct qm_eqcr eqcr;
- struct qm_dqrr dqrr;
- struct qm_mr mr;
- struct qm_mc mc;
-} QM_PORTAL_ALIGNMENT;
-
-/* --- EQCR API --- */
-
-/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
-#define EQCR_CARRYCLEAR(p) \
- (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
-
-/* Bit-wise logic to convert a ring pointer to a ring index */
-static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
-{
- return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
-}
-
-/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
-static inline void EQCR_INC(struct qm_eqcr *eqcr)
-{
- /* NB: this is odd-looking, but experiments show that it generates fast
- * code with essentially no branching overheads. We increment to the
- * next EQCR pointer and handle overflow and 'vbit'. */
- struct qm_eqcr_entry *partial = eqcr->cursor + 1;
-
- eqcr->cursor = EQCR_CARRYCLEAR(partial);
- if (partial != eqcr->cursor)
- eqcr->vbit ^= QM_EQCR_VERB_VBIT;
-}
-
-static inline int qm_eqcr_init(struct qm_portal *portal,
- enum qm_eqcr_pmode pmode,
- unsigned int eq_stash_thresh,
- int eq_stash_prio)
-{
- /* This use of 'register', as well as all other occurrences, is because
- * it has been observed to generate much faster code with gcc than is
- * otherwise the case. */
- register struct qm_eqcr *eqcr = &portal->eqcr;
- u32 cfg;
- u8 pi;
-
- eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
- eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
- qm_cl_invalidate(EQCR_CI);
- pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
- eqcr->cursor = eqcr->ring + pi;
- eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
- QM_EQCR_VERB_VBIT : 0;
- eqcr->available = QM_EQCR_SIZE - 1 -
- qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
- eqcr->ithresh = qm_in(EQCR_ITR);
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 0;
- eqcr->pmode = pmode;
-#endif
- cfg = (qm_in(CFG) & 0x00ffffff) |
- (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
- (eq_stash_prio << 26) | /* QCSP_CFG: EP */
- ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
- qm_out(CFG, cfg);
- return 0;
-}
-
-static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
-{
- return (qm_in(CFG) >> 28) & 0x7;
-}
-
-static inline void qm_eqcr_finish(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
- u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
- u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
-
- DPA_ASSERT(!eqcr->busy);
- if (pi != EQCR_PTR2IDX(eqcr->cursor))
- pr_crit("losing uncommited EQCR entries\n");
- if (ci != eqcr->ci)
- pr_crit("missing existing EQCR completions\n");
- if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
- pr_crit("EQCR destroyed unquiesced\n");
-}
-
-static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
- *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- DPA_ASSERT(!eqcr->busy);
- if (!eqcr->available)
- return NULL;
-
-
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 1;
-#endif
- dcbz_64(eqcr->cursor);
- return eqcr->cursor;
-}
-
-static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
- *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
- u8 diff, old_ci;
-
- DPA_ASSERT(!eqcr->busy);
- if (!eqcr->available) {
- old_ci = eqcr->ci;
- eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
- diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
- eqcr->available += diff;
- if (!diff)
- return NULL;
- }
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 1;
-#endif
- dcbz_64(eqcr->cursor);
- return eqcr->cursor;
-}
-
-static inline void qm_eqcr_abort(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-
- DPA_ASSERT(eqcr->busy);
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 0;
-#endif
-}
-
-static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
- struct qm_portal *portal, u8 myverb)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- DPA_ASSERT(eqcr->busy);
- DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
- if (eqcr->available == 1)
- return NULL;
- eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
- dcbf(eqcr->cursor);
- EQCR_INC(eqcr);
- eqcr->available--;
- dcbz_64(eqcr->cursor);
- return eqcr->cursor;
-}
-
-#define EQCR_COMMIT_CHECKS(eqcr) \
-do { \
- DPA_ASSERT(eqcr->busy); \
- DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
- DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
-} while (0)
-
-static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- EQCR_COMMIT_CHECKS(eqcr);
- DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
- eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
- EQCR_INC(eqcr);
- eqcr->available--;
- dcbf(eqcr->cursor);
- hwsync();
- qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 0;
-#endif
-}
-
-static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-
- DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
- qm_cl_invalidate(EQCR_PI);
- qm_cl_touch_rw(EQCR_PI);
-}
-
-static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- EQCR_COMMIT_CHECKS(eqcr);
- DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
- eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
- EQCR_INC(eqcr);
- eqcr->available--;
- dcbf(eqcr->cursor);
- lwsync();
- qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 0;
-#endif
-}
-
-static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
- struct qm_eqcr_entry *eqcursor;
-
- EQCR_COMMIT_CHECKS(eqcr);
- DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
- lwsync();
- eqcursor = eqcr->cursor;
- eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
- dcbf(eqcursor);
- EQCR_INC(eqcr);
- eqcr->available--;
-#ifdef CONFIG_FSL_DPA_CHECKING
- eqcr->busy = 0;
-#endif
-}
-
-static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
- u8 diff, old_ci = eqcr->ci;
-
- eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
- diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
- eqcr->available += diff;
- return diff;
-}
-
-static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-
- qm_cl_touch_ro(EQCR_CI);
-}
-
-static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
- u8 diff, old_ci = eqcr->ci;
-
- eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
- qm_cl_invalidate(EQCR_CI);
- diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
- eqcr->available += diff;
- return diff;
-}
-
-static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- return eqcr->ithresh;
-}
-
-static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- eqcr->ithresh = ithresh;
- qm_out(EQCR_ITR, ithresh);
-}
-
-static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- return eqcr->available;
-}
-
-static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
-{
- register struct qm_eqcr *eqcr = &portal->eqcr;
-
- return QM_EQCR_SIZE - 1 - eqcr->available;
-}
-
-/* --- DQRR API --- */
-
-/* FIXME: many possible improvements;
- * - look at changing the API to use pointer rather than index parameters now
- * that 'cursor' is a pointer,
- * - consider moving other parameters to pointer if it could help (ci)
- */
-
-#define DQRR_CARRYCLEAR(p) \
- (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
-
-static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
-{
- return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
-}
-
-static inline const struct qm_dqrr_entry *DQRR_INC(
- const struct qm_dqrr_entry *e)
-{
- return DQRR_CARRYCLEAR(e + 1);
-}
-
-static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
-{
- qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
- ((mf & (QM_DQRR_SIZE - 1)) << 20));
-}
-
-static inline int qm_dqrr_init(struct qm_portal *portal,
- const struct qm_portal_config *config,
- enum qm_dqrr_dmode dmode,
- __maybe_unused enum qm_dqrr_pmode pmode,
- enum qm_dqrr_cmode cmode, u8 max_fill)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
- u32 cfg;
-
- /* Make sure the DQRR will be idle when we enable */
- qm_out(DQRR_SDQCR, 0);
- qm_out(DQRR_VDQCR, 0);
- qm_out(DQRR_PDQCR, 0);
- dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
- dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
- dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
- dqrr->cursor = dqrr->ring + dqrr->ci;
- dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
- dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
- QM_DQRR_VERB_VBIT : 0;
- dqrr->ithresh = qm_in(DQRR_ITR);
-#ifdef CONFIG_FSL_DPA_CHECKING
- dqrr->dmode = dmode;
- dqrr->pmode = pmode;
- dqrr->cmode = cmode;
-#endif
- /* Invalidate every ring entry before beginning */
- for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
- dcbi(qm_cl(dqrr->ring, cfg));
- cfg = (qm_in(CFG) & 0xff000f00) |
- ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
- ((dmode & 1) << 18) | /* DP */
- ((cmode & 3) << 16) | /* DCM */
-#ifndef __rtems__
- 0xa0 | /* RE+SE */
-#endif /* __rtems__ */
- (0 ? 0x40 : 0) | /* Ignore RP */
- (0 ? 0x10 : 0); /* Ignore SP */
- qm_out(CFG, cfg);
- qm_dqrr_set_maxfill(portal, max_fill);
- return 0;
-}
-
-static inline void qm_dqrr_finish(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-#ifdef CONFIG_FSL_DPA_CHECKING
- if ((dqrr->cmode != qm_dqrr_cdc) &&
- (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
- pr_crit("Ignoring completed DQRR entries\n");
-#endif
-}
-
-static inline const struct qm_dqrr_entry *qm_dqrr_current(
- struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- if (!dqrr->fill)
- return NULL;
- return dqrr->cursor;
-}
-
-static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- return DQRR_PTR2IDX(dqrr->cursor);
-}
-
-static inline u8 qm_dqrr_next(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->fill);
- dqrr->cursor = DQRR_INC(dqrr->cursor);
- return --dqrr->fill;
-}
-
-static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
- u8 diff, old_pi = dqrr->pi;
-
- DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
- dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
- diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
- dqrr->fill += diff;
- return diff;
-}
-
-static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
- qm_cl_invalidate(DQRR_PI);
- qm_cl_touch_ro(DQRR_PI);
-}
-
-static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
- u8 diff, old_pi = dqrr->pi;
-
- DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
- dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
- diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
- dqrr->fill += diff;
- return diff;
-}
-
-static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
- const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
-
- DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
- /* when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads". */
-#ifdef __rtems__
- dcbi(res);
-#endif /* __rtems__ */
- if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
- dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
- if (!dqrr->pi)
- dqrr->vbit ^= QM_DQRR_VERB_VBIT;
- dqrr->fill++;
- }
-}
-
-static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
- dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
- qm_out(DQRR_CI_CINH, dqrr->ci);
-}
-
-static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
- dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
- qm_out(DQRR_CI_CINH, dqrr->ci);
-}
-
-static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
- qm_cl_invalidate(DQRR_CI);
- qm_cl_touch_rw(DQRR_CI);
-}
-
-static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
- dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
- qm_cl_out(DQRR_CI, dqrr->ci);
-}
-
-static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
- dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
- qm_cl_out(DQRR_CI, dqrr->ci);
-}
-
-static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
- int park)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- DPA_ASSERT(idx < QM_DQRR_SIZE);
- qm_out(DQRR_DCAP, (0 << 8) | /* S */
- ((park ? 1 : 0) << 6) | /* PK */
- idx); /* DCAP_CI */
-}
-
-static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
- const struct qm_dqrr_entry *dq,
- int park)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
- u8 idx = DQRR_PTR2IDX(dq);
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- DPA_ASSERT((dqrr->ring + idx) == dq);
- DPA_ASSERT(idx < QM_DQRR_SIZE);
- qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
- ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
- idx); /* DQRR_DCAP::DCAP_CI */
-}
-
-static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
- ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
-}
-
-static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
-}
-
-static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- qm_cl_invalidate(DQRR_CI);
- qm_cl_touch_ro(DQRR_CI);
-}
-
-static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
- return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
-}
-
-static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
- return dqrr->ci;
-}
-
-static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
-{
- __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
- qm_out(DQRR_DCAP, (0 << 8) | /* S */
- (1 << 6) | /* PK */
- (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
-}
-
-static inline void qm_dqrr_park_current(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
- qm_out(DQRR_DCAP, (0 << 8) | /* S */
- (1 << 6) | /* PK */
- DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
-}
-
-static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
-{
- qm_out(DQRR_SDQCR, sdqcr);
-}
-
-static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
-{
- return qm_in(DQRR_SDQCR);
-}
-
-static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
-{
- qm_out(DQRR_VDQCR, vdqcr);
-}
-
-static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
-{
- return qm_in(DQRR_VDQCR);
-}
-
-static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
-{
- qm_out(DQRR_PDQCR, pdqcr);
-}
-
-static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
-{
- return qm_in(DQRR_PDQCR);
-}
-
-static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
-{
- register struct qm_dqrr *dqrr = &portal->dqrr;
-
- return dqrr->ithresh;
-}
-
-static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-{
- qm_out(DQRR_ITR, ithresh);
-}
-
-static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
-{
- return (qm_in(CFG) & 0x00f00000) >> 20;
-}
-
-/* --- MR API --- */
-
-#define MR_CARRYCLEAR(p) \
- (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
-
-static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
-{
- return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
-}
-
-static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
-{
- return MR_CARRYCLEAR(e + 1);
-}
-
-static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
- enum qm_mr_cmode cmode)
-{
- register struct qm_mr *mr = &portal->mr;
- u32 cfg;
-
- mr->ring = portal->addr.addr_ce + QM_CL_MR;
- mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
- mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
- mr->cursor = mr->ring + mr->ci;
- mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
- mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
- mr->ithresh = qm_in(MR_ITR);
-#ifdef CONFIG_FSL_DPA_CHECKING
- mr->pmode = pmode;
- mr->cmode = cmode;
-#endif
- cfg = (qm_in(CFG) & 0xfffff0ff) |
- ((cmode & 1) << 8); /* QCSP_CFG:MM */
- qm_out(CFG, cfg);
- return 0;
-}
-
-static inline void qm_mr_finish(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- if (mr->ci != MR_PTR2IDX(mr->cursor))
- pr_crit("Ignoring completed MR entries\n");
-}
-
-static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- if (!mr->fill)
- return NULL;
- return mr->cursor;
-}
-
-static inline u8 qm_mr_cursor(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- return MR_PTR2IDX(mr->cursor);
-}
-
-static inline u8 qm_mr_next(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->fill);
- mr->cursor = MR_INC(mr->cursor);
- return --mr->fill;
-}
-
-static inline u8 qm_mr_pci_update(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
- u8 diff, old_pi = mr->pi;
-
- DPA_ASSERT(mr->pmode == qm_mr_pci);
- mr->pi = qm_in(MR_PI_CINH);
- diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
- mr->fill += diff;
- return diff;
-}
-
-static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->pmode == qm_mr_pce);
- qm_cl_invalidate(MR_PI);
- qm_cl_touch_ro(MR_PI);
-}
-
-static inline u8 qm_mr_pce_update(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
- u8 diff, old_pi = mr->pi;
-
- DPA_ASSERT(mr->pmode == qm_mr_pce);
- mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
- diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
- mr->fill += diff;
- return diff;
-}
-
-static inline void qm_mr_pvb_update(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
- const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
-
- DPA_ASSERT(mr->pmode == qm_mr_pvb);
- /* when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads". */
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
- mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
- if (!mr->pi)
- mr->vbit ^= QM_MR_VERB_VBIT;
- mr->fill++;
- res = MR_INC(res);
- }
- dcbit_ro(res);
-}
-
-static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
-{
- register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->cmode == qm_mr_cci);
- mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
- qm_out(MR_CI_CINH, mr->ci);
-}
-
-static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->cmode == qm_mr_cci);
- mr->ci = MR_PTR2IDX(mr->cursor);
- qm_out(MR_CI_CINH, mr->ci);
-}
-
-static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->cmode == qm_mr_cce);
- qm_cl_invalidate(MR_CI);
- qm_cl_touch_rw(MR_CI);
-}
-
-static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
-{
- register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->cmode == qm_mr_cce);
- mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
- qm_cl_out(MR_CI, mr->ci);
-}
-
-static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- DPA_ASSERT(mr->cmode == qm_mr_cce);
- mr->ci = MR_PTR2IDX(mr->cursor);
- qm_cl_out(MR_CI, mr->ci);
-}
-
-static inline u8 qm_mr_get_ci(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- return mr->ci;
-}
-
-static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
-{
- register struct qm_mr *mr = &portal->mr;
-
- return mr->ithresh;
-}
-
-static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-{
- qm_out(MR_ITR, ithresh);
-}
-
-/* --- Management command API --- */
-
-static inline int qm_mc_init(struct qm_portal *portal)
-{
- register struct qm_mc *mc = &portal->mc;
-
- mc->cr = portal->addr.addr_ce + QM_CL_CR;
- mc->rr = portal->addr.addr_ce + QM_CL_RR0;
- mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
- QM_MCC_VERB_VBIT) ? 0 : 1;
- mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
-#ifdef CONFIG_FSL_DPA_CHECKING
- mc->state = qman_mc_idle;
-#endif
- return 0;
-}
-
-static inline void qm_mc_finish(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_mc *mc = &portal->mc;
-
- DPA_ASSERT(mc->state == qman_mc_idle);
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (mc->state != qman_mc_idle)
- pr_crit("Losing incomplete MC command\n");
-#endif
-}
-
-static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
-{
- register struct qm_mc *mc = &portal->mc;
-
- DPA_ASSERT(mc->state == qman_mc_idle);
-#ifdef CONFIG_FSL_DPA_CHECKING
- mc->state = qman_mc_user;
-#endif
- dcbz_64(mc->cr);
- return mc->cr;
-}
-
-static inline void qm_mc_abort(struct qm_portal *portal)
-{
- __maybe_unused register struct qm_mc *mc = &portal->mc;
-
- DPA_ASSERT(mc->state == qman_mc_user);
-#ifdef CONFIG_FSL_DPA_CHECKING
- mc->state = qman_mc_idle;
-#endif
-}
-
-static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
-{
- register struct qm_mc *mc = &portal->mc;
- struct qm_mc_result *rr = mc->rr + mc->rridx;
-
- DPA_ASSERT(mc->state == qman_mc_user);
- lwsync();
- mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
- dcbf(mc->cr);
- dcbit_ro(rr);
-#ifdef CONFIG_FSL_DPA_CHECKING
- mc->state = qman_mc_hw;
-#endif
-}
-
-static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
-{
- register struct qm_mc *mc = &portal->mc;
- struct qm_mc_result *rr = mc->rr + mc->rridx;
-
- DPA_ASSERT(mc->state == qman_mc_hw);
- /* The inactive response register's verb byte always returns zero until
- * its command is submitted and completed. This includes the valid-bit,
- * in case you were wondering... */
- if (!__raw_readb(&rr->verb)) {
- dcbit_ro(rr);
- return NULL;
- }
- mc->rridx ^= 1;
- mc->vbit ^= QM_MCC_VERB_VBIT;
-#ifdef CONFIG_FSL_DPA_CHECKING
- mc->state = qman_mc_idle;
-#endif
- return rr;
-}
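Taken together, the qm_mc_*() helpers implement a simple start/fill/commit/poll protocol; the calling pattern used throughout the rest of this file is:

	mcc = qm_mc_start(&p->p);		/* returns a zeroed command buffer */
	mcc->queryfq.fqid = fqid;		/* fill in the command-specific fields */
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();			/* poll until the response verb becomes non-zero */

qm_mc_result() ping-pongs between the two response registers (rridx) and toggles its valid-bit expectation on each completion, which is why callers can simply spin on it.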
-
-/* --- Portal interrupt register API --- */
-
-static inline int qm_isr_init(__always_unused struct qm_portal *portal)
-{
- return 0;
-}
-
-static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
-{
-}
-
-static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
-{
- qm_out(ITPR, iperiod);
-}
-
-static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
-{
- return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
-}
-
-static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
- u32 val)
-{
- __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
-}
diff --git a/linux/drivers/soc/fsl/qbman/qman_api.c b/linux/drivers/soc/fsl/qbman/qman_api.c
deleted file mode 100644
index e838d08f..00000000
--- a/linux/drivers/soc/fsl/qbman/qman_api.c
+++ /dev/null
@@ -1,3026 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "qman.h"
-
-/* Compilation constants */
-#define DQRR_MAXFILL 15
-#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
-#define IRQNAME "QMan portal %d"
-#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
-#define QMAN_POLL_LIMIT 32
-#define QMAN_PIRQ_DQRR_ITHRESH 12
-#define QMAN_PIRQ_MR_ITHRESH 4
-#define QMAN_PIRQ_IPERIOD 100
-#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
-/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
- * positive, and rounding to the closest value if it's zero. NB, this macro
- * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
- * that are compatible with this. NB, these arguments should not be expressions
- * unless it is safe for them to be evaluated multiple times. Eg. do not pass
- * in "some_value++" as a parameter to the macro! */
-#define ROUNDING(n, d, r) \
- (((r) < 0) ? div64_u64((n), (d)) : \
- (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
- div64_u64(((n) + ((d) / 2)), (d))))
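As a concrete illustration of the three rounding modes (plain numbers, with ordinary 64-bit division standing in for div64_u64()):

	/* n = 7, d = 2:
	 *   ROUNDING(7, 2, -1) = 7 / 2           = 3	(round down)
	 *   ROUNDING(7, 2,  1) = (7 + 2 - 1) / 2 = 4	(round up)
	 *   ROUNDING(7, 2,  0) = (7 + 2 / 2) / 2 = 4	(round to closest)
	 */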
-
-/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
- * inter-processor locking only. Note, FQLOCK() is always called either under a
- * local_irq_save() or from interrupt context - hence there's no need for irq
- * protection (and indeed, attempting to nest irq-protection doesn't work, as
- * the "irq en/disable" machinery isn't recursive...). */
-#define FQLOCK(fq) \
- do { \
- struct qman_fq *__fq478 = (fq); \
- if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
- spin_lock(&__fq478->fqlock); \
- } while (0)
-#define FQUNLOCK(fq) \
- do { \
- struct qman_fq *__fq478 = (fq); \
- if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
- spin_unlock(&__fq478->fqlock); \
- } while (0)
-
-static inline void fq_set(struct qman_fq *fq, u32 mask)
-{
-	set_bits(mask, &fq->flags);
-}
-
-static inline void fq_clear(struct qman_fq *fq, u32 mask)
-{
-	clear_bits(mask, &fq->flags);
-}
-
-static inline int fq_isset(struct qman_fq *fq, u32 mask)
-{
-	return fq->flags & mask;
-}
-
-static inline int fq_isclear(struct qman_fq *fq, u32 mask)
-{
-	return !(fq->flags & mask);
-}
-
-struct qman_portal {
- struct qm_portal p;
- unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
- unsigned long irq_sources;
- u32 use_eqcr_ci_stashing;
- u32 slowpoll; /* only used when interrupts are off */
- struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
-#endif
-#ifdef FSL_DPA_PORTAL_SHARE
- raw_spinlock_t sharing_lock; /* only used if is_shared */
-#ifndef __rtems__
- int is_shared;
- struct qman_portal *sharing_redirect;
-#endif /* __rtems__ */
-#endif
- u32 sdqcr;
- int dqrr_disable_ref;
- /* A portal-specific handler for DCP ERNs. If this is NULL, the global
- * handler is called instead. */
- qman_cb_dc_ern cb_dc_ern;
- /* When the cpu-affine portal is activated, this is non-NULL */
- const struct qm_portal_config *config;
-#ifndef __rtems__
- /* This is needed for providing a non-NULL device to dma_map_***() */
- struct platform_device *pdev;
-#endif /* __rtems__ */
- struct dpa_rbtree retire_table;
- char irqname[MAX_IRQNAME];
- /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
- struct qman_cgrs *cgrs;
- /* linked-list of CSCN handlers. */
- struct list_head cgr_cbs;
- /* list lock */
- spinlock_t cgr_lock;
- /* track if memory was allocated by the driver */
- u8 alloced;
-};
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
-#define PORTAL_IRQ_LOCK(p, irqflags) \
- do { \
- if ((p)->is_shared) \
- raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
- else \
- local_irq_save(irqflags); \
- } while (0)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) \
- do { \
- if ((p)->is_shared) \
- raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
- irqflags); \
- else \
- local_irq_restore(irqflags); \
- } while (0)
-#else /* __rtems__ */
-#define PORTAL_IRQ_LOCK(p, irqflags) \
- raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) \
- raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
-#endif /* __rtems__ */
-#else
-#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
-#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
-#endif
-
-/* Global handler for DCP ERNs. Used when the portal receiving the message does
- * not have a portal-specific handler. */
-static qman_cb_dc_ern cb_dc_ern;
-
-#ifndef __rtems__
-static cpumask_t affine_mask;
-static DEFINE_SPINLOCK(affine_mask_lock);
-static u16 affine_channels[NR_CPUS];
-#endif /* __rtems__ */
-static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
-#ifndef __rtems__
-void *affine_portals[NR_CPUS];
-#endif /* __rtems__ */
-
-/* "raw" gets the cpu-local struct whether it's a redirect or not. */
-static inline struct qman_portal *get_raw_affine_portal(void)
-{
- return &get_cpu_var(qman_affine_portal);
-}
-/* For ops that can redirect, this obtains the portal to use */
-#ifdef FSL_DPA_PORTAL_SHARE
-static inline struct qman_portal *get_affine_portal(void)
-{
- struct qman_portal *p = get_raw_affine_portal();
-
-#ifndef __rtems__
- if (p->sharing_redirect)
- return p->sharing_redirect;
-#endif /* __rtems__ */
- return p;
-}
-#else
-#define get_affine_portal() get_raw_affine_portal()
-#endif
-/* For every "get", there must be a "put" */
-static inline void put_affine_portal(void)
-{
- put_cpu_var(qman_affine_portal);
-}
-/* Exception: poll functions assume the caller is cpu-affine and in no risk of
- * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
- * semantic - ie. to disable pre-emption. Some use-cases expect the execution
- * context to remain as non-atomic during poll-triggered callbacks as it was
- * when the poll API was first called (eg. NAPI), so we go out of our way in
- * this case to not disable pre-emption. */
-static inline struct qman_portal *get_poll_portal(void)
-{
- return this_cpu_ptr(&qman_affine_portal);
-}
-#define put_poll_portal()
-
-/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
- * retirement notifications (the fact they are sometimes h/w-consumed means that
- * contextB isn't always a s/w demux - and as we can't know which case it is
- * when looking at the notification, we have to use the slow lookup for all of
- * them). NB, it's possible to have multiple FQ objects refer to the same FQID
- * (though at most one of them should be the consumer), so this table isn't for
- * all FQs - FQs are added when retirement commands are issued, and removed when
- * they complete, which also massively reduces the size of this table. */
-IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
-
-/* This is what everything can wait on, even if it migrates to a different cpu
- * to the one whose affine portal it is waiting on. */
-static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
-
-static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
-{
- int ret = fqtree_push(&p->retire_table, fq);
-
- if (ret)
- pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
- return ret;
-}
-
-static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
-{
- fqtree_del(&p->retire_table, fq);
-}
-
-static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
-{
- return fqtree_find(&p->retire_table, fqid);
-}
-
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-static void **qman_fq_lookup_table;
-static size_t qman_fq_lookup_table_size;
-
-int qman_setup_fq_lookup_table(size_t num_entries)
-{
-	/* Allocate 1 more entry since the first entry is not used */
-	num_entries++;
- qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
- if (!qman_fq_lookup_table)
- return -ENOMEM;
- qman_fq_lookup_table_size = num_entries;
- pr_info("Allocated lookup table at %p, entry count %lu\n",
- qman_fq_lookup_table, (unsigned long)qman_fq_lookup_table_size);
- return 0;
-}
-
-/* global structure that maintains fq object mapping */
-static DEFINE_SPINLOCK(fq_hash_table_lock);
-
-static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
-{
- u32 i;
-
- spin_lock(&fq_hash_table_lock);
- /* Can't use index zero because this has special meaning
-	 * in the context_b field. */
- for (i = 1; i < qman_fq_lookup_table_size; i++) {
- if (qman_fq_lookup_table[i] == NULL) {
- *entry = i;
- qman_fq_lookup_table[i] = fq;
- spin_unlock(&fq_hash_table_lock);
- return 0;
- }
- }
- spin_unlock(&fq_hash_table_lock);
- return -ENOMEM;
-}
-
-static void clear_fq_table_entry(u32 entry)
-{
- spin_lock(&fq_hash_table_lock);
- BUG_ON(entry >= qman_fq_lookup_table_size);
- qman_fq_lookup_table[entry] = NULL;
- spin_unlock(&fq_hash_table_lock);
-}
-
-static inline struct qman_fq *get_fq_table_entry(u32 entry)
-{
- BUG_ON(entry >= qman_fq_lookup_table_size);
- return qman_fq_lookup_table[entry];
-}
-#endif
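The lookup table exists because an FQ object pointer may not fit in the 32-bit context_b/contextB field on 64-bit systems, so a small table index is stored there instead. The round trip, pieced together from the code below, is roughly:

	/* qman_create_fq():   find_empty_fq_table_entry(&fq->key, fq);
	 * qman_init_fq():     mcc->initfq.fqd.context_b = fq->key;
	 * DQRR/MR demux:      fq = get_fq_table_entry(dq->contextB);
	 * qman_destroy_fq():  clear_fq_table_entry(fq->key);
	 */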
-
-/* In the case that slow- and fast-path handling are both done by qman_poll()
- * (ie. because there is no interrupt handling), we ought to balance how often
- * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
- * sources, so we call the fast poll 'n' times before calling the slow poll
- * once. The idle decrementer constant is used when the last slow-poll detected
- * no work to do, and the busy decrementer constant when the last slow-poll had
- * work to do. */
-#define SLOW_POLL_IDLE 1000
-#define SLOW_POLL_BUSY 10
-static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
-static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit);
-
-/* Portal interrupt handler */
-static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
-{
- struct qman_portal *p = ptr;
- /*
- * The CSCI source is cleared inside __poll_portal_slow(), because
- * it could race against a Query Congestion State command also given
- * as part of the handling of this interrupt source. We mustn't
- * clear it a second time in this top-level function.
- */
- u32 clear = QM_DQAVAIL_MASK;
- u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
- /* DQRR-handling if it's interrupt-driven */
- if (is & QM_PIRQ_DQRI) {
- clear |= QM_PIRQ_DQRI;
- __poll_portal_fast(p, QMAN_POLL_LIMIT);
- }
- /* Handling of anything else that's interrupt-driven */
- clear |= __poll_portal_slow(p, is);
- qm_isr_status_clear(&p->p, clear);
- return IRQ_HANDLED;
-}
-
-/* This inner version is used privately by qman_create_affine_portal(), as well
- * as by the exported qman_stop_dequeues(). */
-static inline void qman_stop_dequeues_ex(struct qman_portal *p)
-{
- unsigned long irqflags __maybe_unused;
- PORTAL_IRQ_LOCK(p, irqflags);
- if (!(p->dqrr_disable_ref++))
- qm_dqrr_set_maxfill(&p->p, 0);
- PORTAL_IRQ_UNLOCK(p, irqflags);
-}
-
-static int drain_mr_fqrni(struct qm_portal *p)
-{
- const struct qm_mr_entry *msg;
-loop:
- msg = qm_mr_current(p);
- if (!msg) {
- /* if MR was full and h/w had other FQRNI entries to produce, we
- * need to allow it time to produce those entries once the
- * existing entries are consumed. A worst-case situation
- * (fully-loaded system) means h/w sequencers may have to do 3-4
- * other things before servicing the portal's MR pump, each of
- * which (if slow) may take ~50 qman cycles (which is ~200
- * processor cycles). So rounding up and then multiplying this
- * worst-case estimate by a factor of 10, just to be
- * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
- * one entry at a time, so h/w has an opportunity to produce new
- * entries well before the ring has been fully consumed, so
- * we're being *really* paranoid here. */
- u64 now, then = mfatb();
-
- do {
- now = mfatb();
- } while ((then + 10000) > now);
- msg = qm_mr_current(p);
- if (!msg)
- return 0;
- }
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
- /* We aren't draining anything but FQRNIs */
- pr_err("Found verb 0x%x in MR\n", msg->verb);
- return -1;
- }
- qm_mr_next(p);
- qm_mr_cci_consume(p, 1);
- goto loop;
-}
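Concretely, the worst case sketched in the comment above is roughly 4 tasks x ~200 processor cycles, or about 800 cycles; rounding that up to 1,000 and applying the x10 paranoia factor gives the 10,000-cycle bound used in the busy-wait.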
-
-struct qman_portal *qman_create_portal(
- struct qman_portal *portal,
- const struct qm_portal_config *config,
- const struct qman_cgrs *cgrs)
-{
- struct qm_portal *__p;
-#ifndef __rtems__
- char buf[16];
-#endif /* __rtems__ */
- int ret;
- u32 isdr;
-
- if (!portal) {
- portal = kmalloc(sizeof(*portal), GFP_KERNEL);
- if (!portal)
- return portal;
- portal->alloced = 1;
- } else
- portal->alloced = 0;
-
- __p = &portal->p;
-
-#ifndef __rtems__
- portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
- 1 : 0);
-#else /* __rtems__ */
- portal->use_eqcr_ci_stashing = 0;
-#endif /* __rtems__ */
-
- /* prep the low-level portal struct with the mapped addresses from the
- * config, everything that follows depends on it and "config" is more
- * for (de)reference... */
- __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
- __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
- /*
- * If CI-stashing is used, the current defaults use a threshold of 3,
-	 * and stash with higher-than-DQRR priority.
- */
- if (qm_eqcr_init(__p, qm_eqcr_pvb,
- portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
- pr_err("EQCR initialisation failed\n");
- goto fail_eqcr;
- }
- if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
- qm_dqrr_cdc, DQRR_MAXFILL)) {
- pr_err("DQRR initialisation failed\n");
- goto fail_dqrr;
- }
- if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
- pr_err("MR initialisation failed\n");
- goto fail_mr;
- }
- if (qm_mc_init(__p)) {
- pr_err("MC initialisation failed\n");
- goto fail_mc;
- }
- if (qm_isr_init(__p)) {
- pr_err("ISR initialisation failed\n");
- goto fail_isr;
- }
- /* static interrupt-gating controls */
- qm_dqrr_set_ithresh(__p, QMAN_PIRQ_DQRR_ITHRESH);
- qm_mr_set_ithresh(__p, QMAN_PIRQ_MR_ITHRESH);
- qm_isr_set_iperiod(__p, QMAN_PIRQ_IPERIOD);
- portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
- if (!portal->cgrs)
- goto fail_cgrs;
- /* initial snapshot is no-depletion */
- qman_cgrs_init(&portal->cgrs[1]);
- if (cgrs)
- portal->cgrs[0] = *cgrs;
- else
- /* if the given mask is NULL, assume all CGRs can be seen */
- qman_cgrs_fill(&portal->cgrs[0]);
- INIT_LIST_HEAD(&portal->cgr_cbs);
- spin_lock_init(&portal->cgr_lock);
- portal->bits = 0;
- portal->slowpoll = 0;
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- portal->eqci_owned = NULL;
-#endif
-#ifdef FSL_DPA_PORTAL_SHARE
- raw_spin_lock_init(&portal->sharing_lock);
-#ifndef __rtems__
- portal->is_shared = config->public_cfg.is_shared;
- portal->sharing_redirect = NULL;
-#endif /* __rtems__ */
-#endif
- portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
- QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
- QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
- portal->dqrr_disable_ref = 0;
- portal->cb_dc_ern = NULL;
-#ifndef __rtems__
- sprintf(buf, "qportal-%d", config->public_cfg.channel);
- portal->pdev = platform_device_alloc(buf, -1);
- if (!portal->pdev)
- goto fail_devalloc;
- if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
- goto fail_devadd;
- ret = platform_device_add(portal->pdev);
- if (ret)
- goto fail_devadd;
-#endif /* __rtems__ */
- dpa_rbtree_init(&portal->retire_table);
- isdr = 0xffffffff;
- qm_isr_disable_write(__p, isdr);
- portal->irq_sources = 0;
- qm_isr_enable_write(__p, portal->irq_sources);
- qm_isr_status_clear(__p, 0xffffffff);
- snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
- if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
- portal)) {
- pr_err("request_irq() failed\n");
- goto fail_irq;
- }
-#ifndef __rtems__
- if ((config->public_cfg.cpu != -1) &&
- irq_can_set_affinity(config->public_cfg.irq) &&
- irq_set_affinity(config->public_cfg.irq,
- cpumask_of(config->public_cfg.cpu))) {
- pr_err("irq_set_affinity() failed\n");
- goto fail_affinity;
- }
-#endif /* __rtems__ */
-
- /* Need EQCR to be empty before continuing */
- isdr ^= QM_PIRQ_EQCI;
- qm_isr_disable_write(__p, isdr);
- ret = qm_eqcr_get_fill(__p);
- if (ret) {
- pr_err("EQCR unclean\n");
- goto fail_eqcr_empty;
- }
- isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
- qm_isr_disable_write(__p, isdr);
- if (qm_dqrr_current(__p) != NULL) {
- pr_err("DQRR unclean\n");
- qm_dqrr_cdc_consume_n(__p, 0xffff);
- }
- if (qm_mr_current(__p) != NULL) {
- /* special handling, drain just in case it's a few FQRNIs */
- if (drain_mr_fqrni(__p)) {
- const struct qm_mr_entry *e = qm_mr_current(__p);
-
-			pr_err("MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
-			       e->verb, e->ern.rc, e->ern.fd.addr_lo);
- goto fail_dqrr_mr_empty;
- }
- }
- /* Success */
- portal->config = config;
- qm_isr_disable_write(__p, 0);
- qm_isr_uninhibit(__p);
- /* Write a sane SDQCR */
- qm_dqrr_sdqcr_set(__p, portal->sdqcr);
- return portal;
-fail_dqrr_mr_empty:
-fail_eqcr_empty:
-#ifndef __rtems__
-fail_affinity:
-#endif /* __rtems__ */
- free_irq(config->public_cfg.irq, portal);
-fail_irq:
-#ifndef __rtems__
- platform_device_del(portal->pdev);
-fail_devadd:
- platform_device_put(portal->pdev);
-fail_devalloc:
-#endif /* __rtems__ */
- kfree(portal->cgrs);
-fail_cgrs:
- qm_isr_finish(__p);
-fail_isr:
- qm_mc_finish(__p);
-fail_mc:
- qm_mr_finish(__p);
-fail_mr:
- qm_dqrr_finish(__p);
-fail_dqrr:
- qm_eqcr_finish(__p);
-fail_eqcr:
- return NULL;
-}
-
-struct qman_portal *qman_create_affine_portal(
- const struct qm_portal_config *config,
- const struct qman_cgrs *cgrs)
-{
- struct qman_portal *res;
- struct qman_portal *portal;
-
- portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
- res = qman_create_portal(portal, config, cgrs);
- if (res) {
-#ifndef __rtems__
- spin_lock(&affine_mask_lock);
- cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
- affine_channels[config->public_cfg.cpu] =
- config->public_cfg.channel;
- affine_portals[config->public_cfg.cpu] = portal;
- spin_unlock(&affine_mask_lock);
-#endif /* __rtems__ */
- }
- return res;
-}
-
-#ifndef __rtems__
-/* These checks are BUG_ON()s because the driver is already supposed to avoid
- * these cases. */
-struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
- int cpu)
-{
-#ifdef FSL_DPA_PORTAL_SHARE
- struct qman_portal *p = &per_cpu(qman_affine_portal, cpu);
-
- /* Check that we don't already have our own portal */
- BUG_ON(p->config);
- /* Check that we aren't already slaving to another portal */
- BUG_ON(p->is_shared);
- /* Check that 'redirect' is prepared to have us */
- BUG_ON(!redirect->config->public_cfg.is_shared);
- /* These are the only elements to initialise when redirecting */
- p->irq_sources = 0;
- p->sharing_redirect = redirect;
- affine_portals[cpu] = p;
- return p;
-#else
- BUG();
- return NULL;
-#endif
-}
-#endif /* __rtems__ */
-
-void qman_destroy_portal(struct qman_portal *qm)
-{
- const struct qm_portal_config *pcfg;
-
- /* Stop dequeues on the portal */
- qm_dqrr_sdqcr_set(&qm->p, 0);
-
- /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
- * something related to QM_PIRQ_EQCI, this may need fixing.
- * Also, due to the prefetching model used for CI updates in the enqueue
- * path, this update will only invalidate the CI cacheline *after*
- * working on it, so we need to call this twice to ensure a full update
- * irrespective of where the enqueue processing was at when the teardown
- * began. */
- qm_eqcr_cce_update(&qm->p);
- qm_eqcr_cce_update(&qm->p);
- pcfg = qm->config;
-
- free_irq(pcfg->public_cfg.irq, qm);
-
- kfree(qm->cgrs);
- qm_isr_finish(&qm->p);
- qm_mc_finish(&qm->p);
- qm_mr_finish(&qm->p);
- qm_dqrr_finish(&qm->p);
- qm_eqcr_finish(&qm->p);
-
-#ifndef __rtems__
- platform_device_del(qm->pdev);
- platform_device_put(qm->pdev);
-#endif /* __rtems__ */
-
- qm->config = NULL;
- if (qm->alloced)
- kfree(qm);
-}
-
-const struct qm_portal_config *qman_destroy_affine_portal(void)
-{
- /* We don't want to redirect if we're a slave, use "raw" */
- struct qman_portal *qm = get_raw_affine_portal();
- const struct qm_portal_config *pcfg;
-#ifndef __rtems__
- int cpu;
-#endif /* __rtems__ */
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (qm->sharing_redirect) {
- qm->sharing_redirect = NULL;
- put_affine_portal();
- return NULL;
- }
- qm->is_shared = 0;
-#endif /* __rtems__ */
-#endif
- pcfg = qm->config;
-#ifndef __rtems__
- cpu = pcfg->public_cfg.cpu;
-#endif /* __rtems__ */
-
- qman_destroy_portal(qm);
-
-#ifndef __rtems__
- spin_lock(&affine_mask_lock);
- cpumask_clear_cpu(cpu, &affine_mask);
- spin_unlock(&affine_mask_lock);
-#endif /* __rtems__ */
- put_affine_portal();
- return pcfg;
-}
-
-const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
-{
- return &p->config->public_cfg;
-}
-EXPORT_SYMBOL(qman_p_get_portal_config);
-
-const struct qman_portal_config *qman_get_portal_config(void)
-{
- struct qman_portal *p = get_affine_portal();
- const struct qman_portal_config *ret = qman_p_get_portal_config(p);
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_get_portal_config);
-
-/* Inline helper to reduce nesting in __poll_portal_slow() */
-static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_mr_entry *msg, u8 verb)
-{
- FQLOCK(fq);
- switch (verb) {
- case QM_MR_VERB_FQRL:
- DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
- fq_clear(fq, QMAN_FQ_STATE_ORL);
- table_del_fq(p, fq);
- break;
- case QM_MR_VERB_FQRN:
- DPA_ASSERT((fq->state == qman_fq_state_parked) ||
- (fq->state == qman_fq_state_sched));
- DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
- fq_clear(fq, QMAN_FQ_STATE_CHANGING);
- if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
- fq_set(fq, QMAN_FQ_STATE_NE);
- if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
- fq_set(fq, QMAN_FQ_STATE_ORL);
- else
- table_del_fq(p, fq);
- fq->state = qman_fq_state_retired;
- break;
- case QM_MR_VERB_FQPN:
- DPA_ASSERT(fq->state == qman_fq_state_sched);
- DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
- fq->state = qman_fq_state_parked;
- }
- FQUNLOCK(fq);
-}
-
-static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
-{
- const struct qm_mr_entry *msg;
-
- if (is & QM_PIRQ_CSCI) {
- struct qman_cgrs rr, c;
- struct qm_mc_result *mcr;
- struct qman_cgr *cgr;
- unsigned long irqflags __maybe_unused;
-
- spin_lock_irqsave(&p->cgr_lock, irqflags);
- /*
- * The CSCI bit must be cleared _before_ issuing the
- * Query Congestion State command, to ensure that a long
- * CGR State Change callback cannot miss an intervening
- * state change.
- */
- qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
- qm_mc_start(&p->p);
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- /* mask out the ones I'm not interested in */
- qman_cgrs_and(&rr, (const struct qman_cgrs *)
- &mcr->querycongestion.state, &p->cgrs[0]);
- /* check previous snapshot for delta, enter/exit congestion */
- qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
- /* update snapshot */
- qman_cgrs_cp(&p->cgrs[1], &rr);
- /* Invoke callback */
- list_for_each_entry(cgr, &p->cgr_cbs, node)
- if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
- cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
- }
-
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (is & QM_PIRQ_EQCI) {
- unsigned long irqflags;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- p->eqci_owned = NULL;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- wake_up(&affine_queue);
- }
-#endif
-
- if (is & QM_PIRQ_EQRI) {
- unsigned long irqflags __maybe_unused;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- qm_eqcr_cce_update(&p->p);
- qm_eqcr_set_ithresh(&p->p, 0);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- wake_up(&affine_queue);
- }
-
- if (is & QM_PIRQ_MRI) {
- struct qman_fq *fq;
- u8 verb, num = 0;
-mr_loop:
- qm_mr_pvb_update(&p->p);
- msg = qm_mr_current(&p->p);
- if (!msg)
- goto mr_done;
- verb = msg->verb & QM_MR_VERB_TYPE_MASK;
-		/* The message is a software ERN iff the 0x20 bit is clear */
- if (verb & 0x20) {
- switch (verb) {
- case QM_MR_VERB_FQRNI:
- /* nada, we drop FQRNIs on the floor */
- break;
- case QM_MR_VERB_FQRN:
- case QM_MR_VERB_FQRL:
- /* Lookup in the retirement table */
- fq = table_find_fq(p, msg->fq.fqid);
- BUG_ON(!fq);
- fq_state_change(p, fq, msg, verb);
- if (fq->cb.fqs)
- fq->cb.fqs(p, fq, msg);
- break;
- case QM_MR_VERB_FQPN:
- /* Parked */
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = get_fq_table_entry(msg->fq.contextB);
-#else
- fq = (void *)(uintptr_t)msg->fq.contextB;
-#endif
- fq_state_change(p, fq, msg, verb);
- if (fq->cb.fqs)
- fq->cb.fqs(p, fq, msg);
- break;
- case QM_MR_VERB_DC_ERN:
- /* DCP ERN */
- if (p->cb_dc_ern)
- p->cb_dc_ern(p, msg);
- else if (cb_dc_ern)
- cb_dc_ern(p, msg);
- else
- pr_crit_once("Leaking DCP ERNs!\n");
- break;
- default:
- pr_crit("Invalid MR verb 0x%02x\n", verb);
- }
- } else {
-			/* It's a software ERN */
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = get_fq_table_entry(msg->ern.tag);
-#else
- fq = (void *)(uintptr_t)msg->ern.tag;
-#endif
- fq->cb.ern(p, fq, msg);
- }
- num++;
- qm_mr_next(&p->p);
- goto mr_loop;
-mr_done:
- qm_mr_cci_consume(&p->p, num);
- }
- /*
- * QM_PIRQ_CSCI has already been cleared, as part of its specific
- * processing. If that interrupt source has meanwhile been re-asserted,
- * we mustn't clear it here (or in the top-level interrupt handler).
- */
- return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
-}
-
-/* remove some slowish-path stuff from the "fast path" and make sure it isn't
- * inlined. */
-static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
-{
- p->vdqcr_owned = NULL;
- FQLOCK(fq);
- fq_clear(fq, QMAN_FQ_STATE_VDQCR);
- FQUNLOCK(fq);
- wake_up(&affine_queue);
-}
-
-/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
- * that would conflict with other things if they ran at the same time on the
- * same cpu are:
- *
- * (i) setting/clearing vdqcr_owned, and
- * (ii) clearing the NE (Not Empty) flag.
- *
- * Both are safe, because:
- *
- * (i) this clearing can only occur after qman_volatile_dequeue() has set the
- * vdqcr_owned field (which it does before setting VDQCR), and
- * qman_volatile_dequeue() blocks interrupts and preemption while this is
- * done so that we can't interfere.
- * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
- * with (i) that API prevents us from interfering until it's safe.
- *
- * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
- * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
- * advantage comes from this function not having to "lock" anything at all.
- *
- * Note also that the callbacks are invoked at points which are safe against the
- * above potential conflicts, but that this function itself is not re-entrant
- * (this is because the function tracks one end of each FIFO in the portal and
- * we do *not* want to lock that). So the consequence is that it is safe for
- * user callbacks to call into any QMan API *except* qman_poll() (as that's the
- * sole API that could be invoking the callback through this function).
- */
-static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit)
-{
- const struct qm_dqrr_entry *dq;
- struct qman_fq *fq;
- enum qman_cb_dqrr_result res;
- unsigned int limit = 0;
-
-loop:
- qm_dqrr_pvb_update(&p->p);
- dq = qm_dqrr_current(&p->p);
- if (!dq)
- goto done;
- if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
- /* VDQCR: don't trust contextB as the FQ may have been
- * configured for h/w consumption and we're draining it
- * post-retirement. */
- fq = p->vdqcr_owned;
- /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
- * to check for clearing it when doing volatile dequeues. It's
- * one less thing to check in the critical path (SDQCR). */
- if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
- fq_clear(fq, QMAN_FQ_STATE_NE);
- /* this is duplicated from the SDQCR code, but we have stuff to
- * do before *and* after this callback, and we don't want
- * multiple if()s in the critical path (SDQCR). */
- res = fq->cb.dqrr(p, fq, dq);
- if (res == qman_cb_dqrr_stop)
- goto done;
- /* Check for VDQCR completion */
- if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
- clear_vdqcr(p, fq);
- } else {
- /* SDQCR: contextB points to the FQ */
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = get_fq_table_entry(dq->contextB);
-#else
- fq = (void *)(uintptr_t)dq->contextB;
-#endif
- /* Now let the callback do its stuff */
- res = fq->cb.dqrr(p, fq, dq);
- /* The callback can request that we exit without consuming this
-		 * entry or advancing. */
- if (res == qman_cb_dqrr_stop)
- goto done;
- }
- /* Interpret 'dq' from a driver perspective. */
- /* Parking isn't possible unless HELDACTIVE was set. NB,
- * FORCEELIGIBLE implies HELDACTIVE, so we only need to
- * check for HELDACTIVE to cover both. */
- DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
- (res != qman_cb_dqrr_park));
- /* Defer just means "skip it, I'll consume it myself later on" */
- if (res != qman_cb_dqrr_defer)
- qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
- /* Move forward */
- qm_dqrr_next(&p->p);
- /* Entry processed and consumed, increment our counter. The callback can
- * request that we exit after consuming the entry, and we also exit if
- * we reach our processing limit, so loop back only if neither of these
- * conditions is met. */
- if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
- goto loop;
-done:
- return limit;
-}
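For reference, consumers hook into this loop through fq->cb.dqrr. A minimal sketch of such a callback (the handler body and process_frame() are hypothetical; the signature and return values are the ones used above):

static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *portal,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dq)
{
	/* Handle the dequeued frame descriptor. */
	process_frame(fq, &dq->fd);	/* hypothetical helper */
	return qman_cb_dqrr_consume;	/* let the poll loop CDC-consume the entry */
}

Returning qman_cb_dqrr_defer instead leaves consumption to the caller, and qman_cb_dqrr_stop exits the loop without consuming the entry, exactly as handled in __poll_portal_fast() above.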
-
-u32 qman_irqsource_get(void)
-{
-	/* "irqsource" and "poll" APIs mustn't redirect when sharing; they
- * should shut the user out if they are not the primary CPU hosting the
- * portal. That's why we use the "raw" interface. */
- struct qman_portal *p = get_raw_affine_portal();
- u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_irqsource_get);
-
-int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
-{
- __maybe_unused unsigned long irqflags;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (p->sharing_redirect)
- return -EINVAL;
-#endif /* __rtems__ */
-#endif
- PORTAL_IRQ_LOCK(p, irqflags);
- set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
- qm_isr_enable_write(&p->p, p->irq_sources);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- return 0;
-}
-EXPORT_SYMBOL(qman_p_irqsource_add);
-
-int qman_irqsource_add(u32 bits __maybe_unused)
-{
- struct qman_portal *p = get_raw_affine_portal();
- int ret;
-
- ret = qman_p_irqsource_add(p, bits);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_irqsource_add);
-
-int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
-{
- __maybe_unused unsigned long irqflags;
- u32 ier;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (p->sharing_redirect) {
- put_affine_portal();
- return -EINVAL;
- }
-#endif /* __rtems__ */
-#endif
- /* Our interrupt handler only processes+clears status register bits that
- * are in p->irq_sources. As we're trimming that mask, if one of them
- * were to assert in the status register just before we remove it from
- * the enable register, there would be an interrupt-storm when we
- * release the IRQ lock. So we wait for the enable register update to
- * take effect in h/w (by reading it back) and then clear all other bits
- * in the status register. Ie. we clear them from ISR once it's certain
- * IER won't allow them to reassert. */
- PORTAL_IRQ_LOCK(p, irqflags);
- bits &= QM_PIRQ_VISIBLE;
- clear_bits(bits, &p->irq_sources);
- qm_isr_enable_write(&p->p, p->irq_sources);
- ier = qm_isr_enable_read(&p->p);
- /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
- * data-dependency, ie. to protect against re-ordering. */
- qm_isr_status_clear(&p->p, ~ier);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- return 0;
-}
-EXPORT_SYMBOL(qman_p_irqsource_remove);
-
-int qman_irqsource_remove(u32 bits)
-{
- struct qman_portal *p = get_raw_affine_portal();
- int ret;
-
- ret = qman_p_irqsource_remove(p, bits);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_irqsource_remove);
-
-#ifndef __rtems__
-const cpumask_t *qman_affine_cpus(void)
-{
- return &affine_mask;
-}
-EXPORT_SYMBOL(qman_affine_cpus);
-
-u16 qman_affine_channel(int cpu)
-{
- if (cpu < 0) {
- struct qman_portal *portal = get_raw_affine_portal();
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- BUG_ON(portal->sharing_redirect);
-#endif /* __rtems__ */
-#endif
- cpu = portal->config->public_cfg.cpu;
- put_affine_portal();
- }
- BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
- return affine_channels[cpu];
-}
-EXPORT_SYMBOL(qman_affine_channel);
-#endif /* __rtems__ */
-
-void *qman_get_affine_portal(int cpu)
-{
-#ifndef __rtems__
- return affine_portals[cpu];
-#else /* __rtems__ */
- return &per_cpu(qman_affine_portal, cpu);
-#endif /* __rtems__ */
-}
-EXPORT_SYMBOL(qman_get_affine_portal);
-
-int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
-{
- int ret;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (unlikely(p->sharing_redirect))
- ret = -EINVAL;
- else
-#endif /* __rtems__ */
-#endif
- {
- BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
- ret = __poll_portal_fast(p, limit);
- }
- return ret;
-}
-EXPORT_SYMBOL(qman_p_poll_dqrr);
-
-int qman_poll_dqrr(unsigned int limit)
-{
- struct qman_portal *p = get_poll_portal();
- int ret;
-
- ret = qman_p_poll_dqrr(p, limit);
- put_poll_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_poll_dqrr);
-
-u32 qman_p_poll_slow(struct qman_portal *p)
-{
- u32 ret;
-
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (unlikely(p->sharing_redirect))
- ret = (u32)-1;
- else
-#endif /* __rtems__ */
-#endif
- {
- u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
-
- ret = __poll_portal_slow(p, is);
- qm_isr_status_clear(&p->p, ret);
- }
- return ret;
-}
-EXPORT_SYMBOL(qman_p_poll_slow);
-
-u32 qman_poll_slow(void)
-{
- struct qman_portal *p = get_poll_portal();
- u32 ret;
-
- ret = qman_p_poll_slow(p);
- put_poll_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_poll_slow);
-
-/* Legacy wrapper */
-void qman_p_poll(struct qman_portal *p)
-{
-#ifdef FSL_DPA_PORTAL_SHARE
-#ifndef __rtems__
- if (unlikely(p->sharing_redirect))
- return;
-#endif /* __rtems__ */
-#endif
- if ((~p->irq_sources) & QM_PIRQ_SLOW) {
- if (!(p->slowpoll--)) {
- u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
- u32 active = __poll_portal_slow(p, is);
-
- if (active) {
- qm_isr_status_clear(&p->p, active);
- p->slowpoll = SLOW_POLL_BUSY;
- } else
- p->slowpoll = SLOW_POLL_IDLE;
- }
- }
- if ((~p->irq_sources) & QM_PIRQ_DQRI)
- __poll_portal_fast(p, QMAN_POLL_LIMIT);
-}
-EXPORT_SYMBOL(qman_p_poll);
-
-void qman_poll(void)
-{
- struct qman_portal *p = get_poll_portal();
-
- qman_p_poll(p);
- put_poll_portal();
-}
-EXPORT_SYMBOL(qman_poll);
-
-void qman_p_stop_dequeues(struct qman_portal *p)
-{
- qman_stop_dequeues_ex(p);
-}
-EXPORT_SYMBOL(qman_p_stop_dequeues);
-
-void qman_stop_dequeues(void)
-{
- struct qman_portal *p = get_affine_portal();
-
- qman_p_stop_dequeues(p);
- put_affine_portal();
-}
-EXPORT_SYMBOL(qman_stop_dequeues);
-
-void qman_p_start_dequeues(struct qman_portal *p)
-{
- unsigned long irqflags __maybe_unused;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- DPA_ASSERT(p->dqrr_disable_ref > 0);
- if (!(--p->dqrr_disable_ref))
- qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
- PORTAL_IRQ_UNLOCK(p, irqflags);
-}
-EXPORT_SYMBOL(qman_p_start_dequeues);
-
-void qman_start_dequeues(void)
-{
- struct qman_portal *p = get_affine_portal();
-
- qman_p_start_dequeues(p);
- put_affine_portal();
-}
-EXPORT_SYMBOL(qman_start_dequeues);
-
-void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
-{
- unsigned long irqflags __maybe_unused;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- pools &= p->config->public_cfg.pools;
- p->sdqcr |= pools;
- qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
- PORTAL_IRQ_UNLOCK(p, irqflags);
-}
-EXPORT_SYMBOL(qman_p_static_dequeue_add);
-
-void qman_static_dequeue_add(u32 pools)
-{
- struct qman_portal *p = get_affine_portal();
-
- qman_p_static_dequeue_add(p, pools);
- put_affine_portal();
-}
-EXPORT_SYMBOL(qman_static_dequeue_add);
-
-void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
-{
- unsigned long irqflags __maybe_unused;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- pools &= p->config->public_cfg.pools;
- p->sdqcr &= ~pools;
- qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
- PORTAL_IRQ_UNLOCK(p, irqflags);
-}
-EXPORT_SYMBOL(qman_p_static_dequeue_del);
-
-void qman_static_dequeue_del(u32 pools)
-{
- struct qman_portal *p = get_affine_portal();
-
- qman_p_static_dequeue_del(p, pools);
- put_affine_portal();
-}
-EXPORT_SYMBOL(qman_static_dequeue_del);
-
-u32 qman_p_static_dequeue_get(struct qman_portal *p)
-{
- return p->sdqcr;
-}
-EXPORT_SYMBOL(qman_p_static_dequeue_get);
-
-u32 qman_static_dequeue_get(void)
-{
- struct qman_portal *p = get_affine_portal();
- u32 ret = qman_p_static_dequeue_get(p);
-
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_static_dequeue_get);
-
-void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
- int park_request)
-{
- qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
-}
-EXPORT_SYMBOL(qman_p_dca);
-
-void qman_dca(struct qm_dqrr_entry *dq, int park_request)
-{
- struct qman_portal *p = get_affine_portal();
-
- qman_p_dca(p, dq, park_request);
- put_affine_portal();
-}
-EXPORT_SYMBOL(qman_dca);
-
-/* Frame queue API */
-
-static const char *mcr_result_str(u8 result)
-{
- switch (result) {
- case QM_MCR_RESULT_NULL:
- return "QM_MCR_RESULT_NULL";
- case QM_MCR_RESULT_OK:
- return "QM_MCR_RESULT_OK";
- case QM_MCR_RESULT_ERR_FQID:
- return "QM_MCR_RESULT_ERR_FQID";
- case QM_MCR_RESULT_ERR_FQSTATE:
- return "QM_MCR_RESULT_ERR_FQSTATE";
- case QM_MCR_RESULT_ERR_NOTEMPTY:
- return "QM_MCR_RESULT_ERR_NOTEMPTY";
- case QM_MCR_RESULT_PENDING:
- return "QM_MCR_RESULT_PENDING";
- case QM_MCR_RESULT_ERR_BADCOMMAND:
- return "QM_MCR_RESULT_ERR_BADCOMMAND";
- }
- return "<unknown MCR result>";
-}
-
-int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
-{
- struct qm_fqd fqd;
- struct qm_mcr_queryfq_np np;
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
-
- if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
- int ret = qman_alloc_fqid(&fqid);
-
- if (ret)
- return ret;
- }
- spin_lock_init(&fq->fqlock);
- fq->fqid = fqid;
- fq->flags = flags;
- fq->state = qman_fq_state_oos;
- fq->cgr_groupid = 0;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
- return -ENOMEM;
-#endif
- if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
- return 0;
- /* Everything else is AS_IS support */
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
- if (mcr->result != QM_MCR_RESULT_OK) {
- pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
- goto err;
- }
- fqd = mcr->queryfq.fqd;
- mcc = qm_mc_start(&p->p);
- mcc->queryfq_np.fqid = fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
- if (mcr->result != QM_MCR_RESULT_OK) {
- pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
- goto err;
- }
- np = mcr->queryfq_np;
- /* Phew, have queryfq and queryfq_np results, stitch together
- * the FQ object from those. */
- fq->cgr_groupid = fqd.cgid;
- switch (np.state & QM_MCR_NP_STATE_MASK) {
- case QM_MCR_NP_STATE_OOS:
- break;
- case QM_MCR_NP_STATE_RETIRED:
- fq->state = qman_fq_state_retired;
- if (np.frm_cnt)
- fq_set(fq, QMAN_FQ_STATE_NE);
- break;
- case QM_MCR_NP_STATE_TEN_SCHED:
- case QM_MCR_NP_STATE_TRU_SCHED:
- case QM_MCR_NP_STATE_ACTIVE:
- fq->state = qman_fq_state_sched;
- if (np.state & QM_MCR_NP_STATE_R)
- fq_set(fq, QMAN_FQ_STATE_CHANGING);
- break;
- case QM_MCR_NP_STATE_PARKED:
- fq->state = qman_fq_state_parked;
- break;
- default:
- DPA_ASSERT(NULL == "invalid FQ state");
- }
- if (fqd.fq_ctrl & QM_FQCTRL_CGE)
- fq->state |= QMAN_FQ_STATE_CGR_EN;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return 0;
-err:
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
- qman_release_fqid(fqid);
- return -EIO;
-}
-EXPORT_SYMBOL(qman_create_fq);
-
-void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
-{
-
- /* We don't need to lock the FQ as it is a pre-condition that the FQ be
- * quiesced. Instead, run some checks. */
- switch (fq->state) {
- case qman_fq_state_parked:
- DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
- case qman_fq_state_oos:
- if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
- qman_release_fqid(fq->fqid);
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- clear_fq_table_entry(fq->key);
-#endif
- return;
- default:
- break;
- }
-	DPA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
-}
-EXPORT_SYMBOL(qman_destroy_fq);
-
-u32 qman_fq_fqid(struct qman_fq *fq)
-{
- return fq->fqid;
-}
-EXPORT_SYMBOL(qman_fq_fqid);
-
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
-{
- if (state)
- *state = fq->state;
- if (flags)
- *flags = fq->flags;
-}
-EXPORT_SYMBOL(qman_fq_state);
-
-int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
- QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
-
- if ((fq->state != qman_fq_state_oos) &&
- (fq->state != qman_fq_state_parked))
- return -EINVAL;
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
- return -EINVAL;
-#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
-		/* OAC can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
- return -EINVAL;
- }
- /* Issue an INITFQ_[PARKED|SCHED] management command */
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- FQLOCK(fq);
- if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
- ((fq->state != qman_fq_state_oos) &&
- (fq->state != qman_fq_state_parked)))) {
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return -EBUSY;
- }
- mcc = qm_mc_start(&p->p);
- if (opts)
- mcc->initfq = *opts;
- mcc->initfq.fqid = fq->fqid;
- mcc->initfq.count = 0;
- /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
- * demux pointer. Otherwise, the caller-provided value is allowed to
- * stand, don't overwrite it. */
- if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
- dma_addr_t phys_fq;
-
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- mcc->initfq.fqd.context_b = fq->key;
-#else
- mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
-#endif
- /* and the physical address - NB, if the user wasn't trying to
- * set CONTEXTA, clear the stashing settings. */
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- memset(&mcc->initfq.fqd.context_a, 0,
- sizeof(mcc->initfq.fqd.context_a));
- } else {
-#ifndef __rtems__
- phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
- DMA_TO_DEVICE);
-#else /* __rtems__ */
- phys_fq = (dma_addr_t)fq;
-#endif /* __rtems__ */
- qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
- }
- }
- if (flags & QMAN_INITFQ_FLAG_LOCAL) {
- mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
- mcc->initfq.fqd.dest.wq = 4;
- }
- }
- qm_mc_commit(&p->p, myverb);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
- res = mcr->result;
- if (res != QM_MCR_RESULT_OK) {
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return -EIO;
- }
- if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
- fq_set(fq, QMAN_FQ_STATE_CGR_EN);
- else
- fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
- }
- if (opts->we_mask & QM_INITFQ_WE_CGID)
- fq->cgr_groupid = opts->fqd.cgid;
- }
- fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
- qman_fq_state_sched : qman_fq_state_parked;
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return 0;
-}
-EXPORT_SYMBOL(qman_init_fq);
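A typical caller pairs qman_create_fq() with qman_init_fq(). A minimal sketch, assuming dynamic FQID allocation is available, reusing the hypothetical my_dqrr_cb handler from the earlier sketch, and with error handling reduced to the bare minimum:

	struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);

	fq->cb.dqrr = my_dqrr_cb;	/* hypothetical callback from the earlier sketch */
	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
		return -EIO;
	/* Schedule to this cpu's portal channel; a NULL opts pointer is accepted. */
	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL))
		return -EIO;

Because TO_DCPORTAL is not set, qman_init_fq() fills context_b with the demux key itself, so DQRR entries for this FQ are routed back to the callback by __poll_portal_fast().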
-
-int qman_schedule_fq(struct qman_fq *fq)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- int ret = 0;
- u8 res;
-
- if (fq->state != qman_fq_state_parked)
- return -EINVAL;
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
- return -EINVAL;
-#endif
-	/* Issue an ALTERFQ_SCHED management command */
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- FQLOCK(fq);
- if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
- (fq->state != qman_fq_state_parked))) {
- ret = -EBUSY;
- goto out;
- }
- mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
- res = mcr->result;
- if (res != QM_MCR_RESULT_OK) {
- ret = -EIO;
- goto out;
- }
- fq->state = qman_fq_state_sched;
-out:
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_schedule_fq);
-
-int qman_retire_fq(struct qman_fq *fq, u32 *flags)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- int rval;
- u8 res;
-
- if ((fq->state != qman_fq_state_parked) &&
- (fq->state != qman_fq_state_sched))
- return -EINVAL;
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
- return -EINVAL;
-#endif
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- FQLOCK(fq);
- if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
- (fq->state == qman_fq_state_retired) ||
- (fq->state == qman_fq_state_oos))) {
- rval = -EBUSY;
- goto out;
- }
- rval = table_push_fq(p, fq);
- if (rval)
- goto out;
- mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
- res = mcr->result;
- /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
- * and defer the flags until FQRNI or FQRN (respectively) show up. But
- * "Friendly" is to process OK immediately, and not set CHANGING. We do
- * friendly, otherwise the caller doesn't necessarily have a fully
- * "retired" FQ on return even if the retirement was immediate. However
- * this does mean some code duplication between here and
- * fq_state_change(). */
- if (likely(res == QM_MCR_RESULT_OK)) {
- rval = 0;
- /* Process 'fq' right away, we'll ignore FQRNI */
- if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
- fq_set(fq, QMAN_FQ_STATE_NE);
- if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
- fq_set(fq, QMAN_FQ_STATE_ORL);
- else
- table_del_fq(p, fq);
- if (flags)
- *flags = fq->flags;
- fq->state = qman_fq_state_retired;
- if (fq->cb.fqs) {
- /* Another issue with supporting "immediate" retirement
- * is that we're forced to drop FQRNIs, because by the
- * time they're seen it may already be "too late" (the
- * fq may have been OOS'd and free()'d already). But if
- * the upper layer wants a callback whether it's
-			 * immediate or not, we have to fake an "MR" entry to
- * look like an FQRNI... */
- struct qm_mr_entry msg;
-
- msg.verb = QM_MR_VERB_FQRNI;
- msg.fq.fqs = mcr->alterfq.fqs;
- msg.fq.fqid = fq->fqid;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- msg.fq.contextB = fq->key;
-#else
- msg.fq.contextB = (u32)(uintptr_t)fq;
-#endif
- fq->cb.fqs(p, fq, &msg);
- }
- } else if (res == QM_MCR_RESULT_PENDING) {
- rval = 1;
- fq_set(fq, QMAN_FQ_STATE_CHANGING);
- } else {
- rval = -EIO;
- table_del_fq(p, fq);
- }
-out:
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return rval;
-}
-EXPORT_SYMBOL(qman_retire_fq);
-
-int qman_oos_fq(struct qman_fq *fq)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- int ret = 0;
- u8 res;
-
- if (fq->state != qman_fq_state_retired)
- return -EINVAL;
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
- return -EINVAL;
-#endif
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- FQLOCK(fq);
- if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
- (fq->state != qman_fq_state_retired))) {
- ret = -EBUSY;
- goto out;
- }
- mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
- res = mcr->result;
- if (res != QM_MCR_RESULT_OK) {
- ret = -EIO;
- goto out;
- }
- fq->state = qman_fq_state_oos;
-out:
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_oos_fq);
-
-int qman_fq_flow_control(struct qman_fq *fq, int xon)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- int ret = 0;
- u8 res;
- u8 myverb;
-
- if ((fq->state == qman_fq_state_oos) ||
- (fq->state == qman_fq_state_retired) ||
- (fq->state == qman_fq_state_parked))
- return -EINVAL;
-
-#ifdef CONFIG_FSL_DPA_CHECKING
- if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
- return -EINVAL;
-#endif
-	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- FQLOCK(fq);
- if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
- (fq->state == qman_fq_state_parked) ||
- (fq->state == qman_fq_state_oos) ||
- (fq->state == qman_fq_state_retired))) {
- ret = -EBUSY;
- goto out;
- }
- mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
- mcc->alterfq.count = 0;
- myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
-
- qm_mc_commit(&p->p, myverb);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-
- res = mcr->result;
- if (res != QM_MCR_RESULT_OK) {
- ret = -EIO;
- goto out;
- }
-out:
- FQUNLOCK(fq);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_fq_flow_control);
-
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *fqd = mcr->queryfq.fqd;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res != QM_MCR_RESULT_OK)
- return -EIO;
- return 0;
-}
-EXPORT_SYMBOL(qman_query_fq);
-
-int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *np = mcr->queryfq_np;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res == QM_MCR_RESULT_ERR_FQID)
- return -ERANGE;
- else if (res != QM_MCR_RESULT_OK)
- return -EIO;
- return 0;
-}
-EXPORT_SYMBOL(qman_query_fq_np);
-
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res, myverb;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
- QM_MCR_VERB_QUERYWQ;
- mcc = qm_mc_start(&p->p);
- mcc->querywq.channel.id = wq->channel.id;
- qm_mc_commit(&p->p, myverb);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *wq = mcr->querywq;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res != QM_MCR_RESULT_OK) {
- pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
- return -EIO;
- }
- return 0;
-}
-EXPORT_SYMBOL(qman_query_wq);
-
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
- struct qm_mcr_cgrtestwrite *result)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- mcc->cgrtestwrite.cgid = cgr->cgrid;
- mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
- mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
- qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *result = mcr->cgrtestwrite;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res != QM_MCR_RESULT_OK) {
- pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
- return -EIO;
- }
- return 0;
-}
-EXPORT_SYMBOL(qman_testwrite_cgr);
-
-int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- mcc->querycgr.cgid = cgr->cgrid;
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *cgrd = mcr->querycgr;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res != QM_MCR_RESULT_OK) {
- pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
- return -EIO;
- }
- return 0;
-}
-EXPORT_SYMBOL(qman_query_cgr);
-
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
-{
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- qm_mc_start(&p->p);
- qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
- QM_MCC_VERB_QUERYCONGESTION);
- res = mcr->result;
- if (res == QM_MCR_RESULT_OK)
- *congestion = mcr->querycongestion;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- if (res != QM_MCR_RESULT_OK) {
- pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
- return -EIO;
- }
- return 0;
-}
-EXPORT_SYMBOL(qman_query_congestion);
-
-/* internal function used as a wait_event() expression */
-static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
-{
- unsigned long irqflags __maybe_unused;
- int ret = -EBUSY;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- if (!p->vdqcr_owned) {
- FQLOCK(fq);
- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
- goto escape;
- fq_set(fq, QMAN_FQ_STATE_VDQCR);
- FQUNLOCK(fq);
- p->vdqcr_owned = fq;
- ret = 0;
- }
-escape:
- PORTAL_IRQ_UNLOCK(p, irqflags);
- if (!ret)
- qm_dqrr_vdqcr_set(&p->p, vdqcr);
- return ret;
-}
-
-static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
-{
- int ret;
-
- *p = get_affine_portal();
- ret = set_p_vdqcr(*p, fq, vdqcr);
- put_affine_portal();
- return ret;
-}
-
-#ifdef FSL_DPA_CAN_WAIT
-static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
- u32 vdqcr, u32 flags)
-{
- int ret = 0;
-
-#ifndef __rtems__
- if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
- ret = wait_event_interruptible(affine_queue,
- !(ret = set_p_vdqcr(p, fq, vdqcr)));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
- return ret;
-}
-
-static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
- u32 vdqcr, u32 flags)
-{
- int ret = 0;
-
-#ifndef __rtems__
- if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
- ret = wait_event_interruptible(affine_queue,
- !(ret = set_vdqcr(p, fq, vdqcr)));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
- return ret;
-}
-#endif
-
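-/* Issue a volatile dequeue command on this portal. The FQ must be parked or
- * retired; its FQID is folded into the VDQCR word. Depending on the WAIT and
- * FINISH flags, the caller may block until the command slot is claimed and/or
- * until the volatile dequeue has completed. */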
-int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
- u32 flags __maybe_unused, u32 vdqcr)
-{
- int ret;
-
- if ((fq->state != qman_fq_state_parked) &&
- (fq->state != qman_fq_state_retired))
- return -EINVAL;
- if (vdqcr & QM_VDQCR_FQID_MASK)
- return -EINVAL;
- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
- return -EBUSY;
- vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_VOLATILE_FLAG_WAIT)
- ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
- else
-#endif
- ret = set_p_vdqcr(p, fq, vdqcr);
- if (ret)
- return ret;
- /* VDQCR is set */
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_VOLATILE_FLAG_FINISH) {
-#ifndef __rtems__
- if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
- /* NB: don't propagate any error - the caller wouldn't
- * know whether the VDQCR was issued or not. A signal
- * could arrive after returning anyway, so the caller
- * can check signal_pending() if that's an issue. */
- wait_event_interruptible(affine_queue,
- !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue,
- !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_p_volatile_dequeue);
-
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
- u32 vdqcr)
-{
- struct qman_portal *p;
- int ret;
-
- if ((fq->state != qman_fq_state_parked) &&
- (fq->state != qman_fq_state_retired))
- return -EINVAL;
- if (vdqcr & QM_VDQCR_FQID_MASK)
- return -EINVAL;
- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
- return -EBUSY;
- vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_VOLATILE_FLAG_WAIT)
- ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
- else
-#endif
- ret = set_vdqcr(&p, fq, vdqcr);
- if (ret)
- return ret;
- /* VDQCR is set */
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_VOLATILE_FLAG_FINISH) {
-#ifndef __rtems__
- if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
- /* NB: don't propagate any error - the caller wouldn't
- * know whether the VDQCR was issued or not. A signal
- * could arrive after returning anyway, so the caller
- * can check signal_pending() if that's an issue. */
- wait_event_interruptible(affine_queue,
- !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue,
- !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_volatile_dequeue);
-
-static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
-{
- if (avail)
- qm_eqcr_cce_prefetch(&p->p);
- else
- qm_eqcr_cce_update(&p->p);
-}
-
-int qman_eqcr_is_empty(void)
-{
- unsigned long irqflags __maybe_unused;
- struct qman_portal *p = get_affine_portal();
- u8 avail;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- update_eqcr_ci(p, 0);
- avail = qm_eqcr_get_fill(&p->p);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return avail == 0;
-}
-EXPORT_SYMBOL(qman_eqcr_is_empty);
-
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
-{
- if (affine) {
- unsigned long irqflags __maybe_unused;
- struct qman_portal *p = get_affine_portal();
-
- PORTAL_IRQ_LOCK(p, irqflags);
- p->cb_dc_ern = handler;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- } else
- cb_dc_ern = handler;
-}
-EXPORT_SYMBOL(qman_set_dc_ern);
-
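-/* Common EQCR-entry setup used by the enqueue variants below: take the portal
- * lock, optionally claim "sync wait" ownership, reserve an EQCR entry (with or
- * without CI stashing), and fill in the DCA, FQID, tag and frame descriptor.
- * Returns NULL (with the lock released) if no EQCR entry is available. */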
-static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq;
- u8 avail;
-
- PORTAL_IRQ_LOCK(p, (*irqflags));
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
- if (p->eqci_owned) {
- PORTAL_IRQ_UNLOCK(p, (*irqflags));
- return NULL;
- }
- p->eqci_owned = fq;
- }
-#endif
- if (p->use_eqcr_ci_stashing) {
- /*
- * The stashing case is easy, only update if we need to in
- * order to try and liberate ring entries.
- */
- eq = qm_eqcr_start_stash(&p->p);
- } else {
- /*
- * The non-stashing case is harder, need to prefetch ahead of
- * time.
- */
- avail = qm_eqcr_get_avail(&p->p);
- if (avail < 2)
- update_eqcr_ci(p, avail);
- eq = qm_eqcr_start_no_stash(&p->p);
- }
-
- if (unlikely(!eq)) {
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
- p->eqci_owned = NULL;
-#endif
- PORTAL_IRQ_UNLOCK(p, (*irqflags));
- return NULL;
- }
- if (flags & QMAN_ENQUEUE_FLAG_DCA)
- eq->dca = QM_EQCR_DCA_ENABLE |
- ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
- QM_EQCR_DCA_PARK : 0) |
- ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
- eq->fqid = fq->fqid;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- eq->tag = fq->key;
-#else
- eq->tag = (u32)(uintptr_t)fq;
-#endif
- eq->fd = *fd;
- return eq;
-}
-
-static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq;
-
- *p = get_affine_portal();
- eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
- if (!eq)
- put_affine_portal();
- return eq;
-}
-
-#ifdef FSL_DPA_CAN_WAIT
-static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
-
- if (!eq)
- qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
- return eq;
-}
-static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq;
-
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue,
- (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
- return eq;
-}
-static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
-
- if (!eq)
- qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
- return eq;
-}
-static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
- unsigned long *irqflags __maybe_unused,
- struct qman_fq *fq,
- const struct qm_fd *fd,
- u32 flags)
-{
- struct qm_eqcr_entry *eq;
-
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue,
- (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
- return eq;
-}
-#endif
-
-int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags)
-{
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- /* Factor the below out, it's used from qman_enqueue_orp() too */
- PORTAL_IRQ_UNLOCK(p, irqflags);
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_p_enqueue);
-
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
-{
- struct qman_portal *p;
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_eq_start(&p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- /* Factor the below out, it's used from qman_enqueue_orp() too */
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_enqueue);
-
-int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags,
- struct qman_fq *orp, u16 orp_seqnum)
-{
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* Process ORP-specifics here */
- if (flags & QMAN_ENQUEUE_FLAG_NLIS)
- orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
- else {
- orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
- if (flags & QMAN_ENQUEUE_FLAG_NESN)
- orp_seqnum |= QM_EQCR_SEQNUM_NESN;
- else
-			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
- orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
- }
- eq->seqnum = orp_seqnum;
- eq->orp = orp->fqid;
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
- ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
- 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- PORTAL_IRQ_UNLOCK(p, irqflags);
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_p_enqueue_orp);
-
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
- struct qman_fq *orp, u16 orp_seqnum)
-{
- struct qman_portal *p;
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_eq_start(&p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* Process ORP-specifics here */
- if (flags & QMAN_ENQUEUE_FLAG_NLIS)
- orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
- else {
- orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
- if (flags & QMAN_ENQUEUE_FLAG_NESN)
- orp_seqnum |= QM_EQCR_SEQNUM_NESN;
- else
-			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
- orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
- }
- eq->seqnum = orp_seqnum;
- eq->orp = orp->fqid;
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
- ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
- 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_enqueue_orp);
-
-int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags,
- qman_cb_precommit cb, void *cb_arg)
-{
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* invoke user supplied callback function before writing commit verb */
- if (cb(cb_arg)) {
- PORTAL_IRQ_UNLOCK(p, irqflags);
- return -EINVAL;
- }
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- /* Factor the below out, it's used from qman_enqueue_orp() too */
- PORTAL_IRQ_UNLOCK(p, irqflags);
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_p_enqueue_precommit);
-
-int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
- u32 flags, qman_cb_precommit cb, void *cb_arg)
-{
- struct qman_portal *p;
- struct qm_eqcr_entry *eq;
- unsigned long irqflags __maybe_unused;
-
-#ifdef FSL_DPA_CAN_WAIT
- if (flags & QMAN_ENQUEUE_FLAG_WAIT)
- eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
- else
-#endif
- eq = try_eq_start(&p, &irqflags, fq, fd, flags);
- if (!eq)
- return -EBUSY;
- /* invoke user supplied callback function before writing commit verb */
- if (cb(cb_arg)) {
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return -EINVAL;
- }
- /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
- qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
- (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
- /* Factor the below out, it's used from qman_enqueue_orp() too */
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
-#ifdef FSL_DPA_CAN_WAIT_SYNC
- if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
- (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-#ifndef __rtems__
- if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
- wait_event_interruptible(affine_queue,
- (p->eqci_owned != fq));
- else
-#endif /* __rtems__ */
- wait_event(affine_queue, (p->eqci_owned != fq));
- }
-#endif
- return 0;
-}
-EXPORT_SYMBOL(qman_enqueue_precommit);
-
-int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
- struct qm_mcc_initcgr *opts)
-{
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- struct qman_portal *p = get_affine_portal();
- unsigned long irqflags __maybe_unused;
- u8 res;
- u8 verb = QM_MCC_VERB_MODIFYCGR;
-
- PORTAL_IRQ_LOCK(p, irqflags);
- mcc = qm_mc_start(&p->p);
- if (opts)
- mcc->initcgr = *opts;
- mcc->initcgr.cgid = cgr->cgrid;
- if (flags & QMAN_CGR_FLAG_USE_INIT)
- verb = QM_MCC_VERB_INITCGR;
- qm_mc_commit(&p->p, verb);
- while (!(mcr = qm_mc_result(&p->p)))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
- res = mcr->result;
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
-}
-EXPORT_SYMBOL(qman_modify_cgr);
-
-#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
- QM_CHANNEL_SWPORTAL0))
-#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
-
-static u8 qman_cgr_cpus[__CGR_NUM];
-
-int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
- struct qm_mcc_initcgr *opts)
-{
- unsigned long irqflags __maybe_unused;
- struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts;
- int ret;
- struct qman_portal *p;
-
- /* We have to check that the provided CGRID is within the limits of the
- * data-structures, for obvious reasons. However we'll let h/w take
- * care of determining whether it's within the limits of what exists on
- * the SoC. */
- if (cgr->cgrid >= __CGR_NUM)
- return -EINVAL;
-
- preempt_disable();
- p = get_affine_portal();
- qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
- preempt_enable();
-
- memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- cgr->chan = p->config->public_cfg.channel;
- spin_lock_irqsave(&p->cgr_lock, irqflags);
-
- /* if no opts specified, just add it to the list */
- if (!opts)
- goto add_list;
-
- ret = qman_query_cgr(cgr, &cgr_state);
- if (ret)
- goto release_lock;
- if (opts)
- local_opts = *opts;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
- else
- /* Overwrite TARG */
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
- TARG_MASK(p);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-
- /* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
- ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
- else
- ret = qman_modify_cgr(cgr, 0, &local_opts);
- if (ret)
- goto release_lock;
-add_list:
- list_add(&cgr->node, &p->cgr_cbs);
-
- /* Determine if newly added object requires its callback to be called */
- ret = qman_query_cgr(cgr, &cgr_state);
- if (ret) {
-		/* we can't go back, so proceed and return success, but scream
-		 * and wail to the log file */
- pr_crit("CGR HW state partially modified\n");
- ret = 0;
- goto release_lock;
- }
- if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
- cgr->cgrid))
- cgr->cb(p, cgr, 1);
-release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_create_cgr);
-
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
- struct qm_mcc_initcgr *opts)
-{
- unsigned long irqflags __maybe_unused;
- struct qm_mcc_initcgr local_opts;
- int ret;
-
- if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
-		pr_warn("This version doesn't support sending CSCN to a DCP portal\n");
- return -EINVAL;
- }
- /* We have to check that the provided CGRID is within the limits of the
- * data-structures, for obvious reasons. However we'll let h/w take
- * care of determining whether it's within the limits of what exists on
- * the SoC.
- */
- if (cgr->cgrid >= __CGR_NUM)
- return -EINVAL;
-
- memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- if (opts)
- local_opts = *opts;
-
- local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
- QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-
- /* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
- ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
- &local_opts);
- else
- ret = qman_modify_cgr(cgr, 0, &local_opts);
-
- return ret;
-}
-EXPORT_SYMBOL(qman_create_cgr_to_dcp);
-
-int qman_delete_cgr(struct qman_cgr *cgr)
-{
- unsigned long irqflags __maybe_unused;
- struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts;
- int ret = 0;
- struct qman_cgr *i;
- struct qman_portal *p = get_affine_portal();
-
- if (cgr->chan != p->config->public_cfg.channel) {
-		pr_crit("Attempting to delete cgr from a different portal "
-			"than it was created on: create 0x%x, delete 0x%x\n",
- cgr->chan, p->config->public_cfg.channel);
- ret = -EINVAL;
- goto put_portal;
- }
- memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- spin_lock_irqsave(&p->cgr_lock, irqflags);
- list_del(&cgr->node);
- /*
- * If there are no other CGR objects for this CGRID in the list, update
- * CSCN_TARG accordingly
- */
- list_for_each_entry(i, &p->cgr_cbs, node)
- if ((i->cgrid == cgr->cgrid) && i->cb)
- goto release_lock;
- ret = qman_query_cgr(cgr, &cgr_state);
- if (ret) {
- /* add back to the list */
- list_add(&cgr->node, &p->cgr_cbs);
- goto release_lock;
- }
- /* Overwrite TARG */
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
- else
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
- ~(TARG_MASK(p));
- ret = qman_modify_cgr(cgr, 0, &local_opts);
- if (ret)
- /* add back to the list */
- list_add(&cgr->node, &p->cgr_cbs);
-release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-put_portal:
- put_affine_portal();
- return ret;
-}
-EXPORT_SYMBOL(qman_delete_cgr);
-
-#ifndef __rtems__
-struct cgr_comp {
- struct qman_cgr *cgr;
- struct completion completion;
-};
-
-static int qman_delete_cgr_thread(void *p)
-{
- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
- int res;
-
- res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
- complete(&cgr_comp->completion);
-
- return res;
-}
-
-void qman_delete_cgr_safe(struct qman_cgr *cgr)
-{
- struct task_struct *thread;
- struct cgr_comp cgr_comp;
-
- preempt_disable();
- if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
- init_completion(&cgr_comp.completion);
- cgr_comp.cgr = cgr;
- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
- "cgr_del");
-
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
- wake_up_process(thread);
- wait_for_completion(&cgr_comp.completion);
- preempt_enable();
- return;
- }
- }
- qman_delete_cgr(cgr);
- preempt_enable();
-}
-EXPORT_SYMBOL(qman_delete_cgr_safe);
-#endif /* __rtems__ */
-
-int qman_set_wpm(int wpm_enable)
-{
- return qm_set_wpm(wpm_enable);
-}
-EXPORT_SYMBOL(qman_set_wpm);
-
-int qman_get_wpm(int *wpm_enable)
-{
- return qm_get_wpm(wpm_enable);
-}
-EXPORT_SYMBOL(qman_get_wpm);
-
-/* Cleanup FQs */
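-/* Forced shutdown of a frame queue regardless of its current state: query the
- * state, retire the FQ if it is still scheduled/parked/active, drain it via
- * SDQCR/VDQCR and the message ring until the FQ and its ORL are empty, then
- * take it out of service. */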
-static int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
- u32 fqid)
-{
-
- struct qm_mc_command *mcc;
- struct qm_mc_result *mcr;
- u8 state;
- int orl_empty, fq_empty, i, drain = 0;
- u32 result;
- u32 channel, wq;
-
- /* Determine the state of the FQID */
- mcc = qm_mc_start(portal[0]);
- mcc->queryfq_np.fqid = fqid;
- qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
- while (!(mcr = qm_mc_result(portal[0])))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
- state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
- if (state == QM_MCR_NP_STATE_OOS)
-		return 0; /* Already OOS, no need to do any more checks */
-
- /* Query which channel the FQ is using */
- mcc = qm_mc_start(portal[0]);
- mcc->queryfq.fqid = fqid;
- qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
- while (!(mcr = qm_mc_result(portal[0])))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-
- /* Need to store these since the MCR gets reused */
- channel = mcr->queryfq.fqd.dest.channel;
- wq = mcr->queryfq.fqd.dest.wq;
-
- switch (state) {
- case QM_MCR_NP_STATE_TEN_SCHED:
- case QM_MCR_NP_STATE_TRU_SCHED:
- case QM_MCR_NP_STATE_ACTIVE:
- case QM_MCR_NP_STATE_PARKED:
- orl_empty = 0;
- mcc = qm_mc_start(portal[0]);
- mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
- while (!(mcr = qm_mc_result(portal[0])))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
- QM_MCR_VERB_ALTER_RETIRE);
- result = mcr->result; /* Make a copy as we reuse MCR below */
-
- if (result == QM_MCR_RESULT_PENDING) {
- /* Need to wait for the FQRN in the message ring, which
- will only occur once the FQ has been drained. In
-			   order for the FQ to drain, the portal needs to be set
- to dequeue from the channel the FQ is scheduled on */
- const struct qm_mr_entry *msg;
- const struct qm_dqrr_entry *dqrr = NULL;
- int found_fqrn = 0;
- u16 dequeue_wq = 0;
-
- /* Flag that we need to drain FQ */
- drain = 1;
-
- if (channel >= qm_channel_pool1 &&
- channel < (qm_channel_pool1 + 15)) {
- /* Pool channel, enable the bit in the portal */
- dequeue_wq = (channel -
- qm_channel_pool1 + 1)<<4 | wq;
- } else if (channel < qm_channel_pool1) {
- /* Dedicated channel */
- dequeue_wq = wq;
- } else {
- pr_info("Cannot recover FQ 0x%x, it is "
- "scheduled on channel 0x%x",
- fqid, channel);
- return -EBUSY;
- }
- /* Set the sdqcr to drain this channel */
- if (channel < qm_channel_pool1)
- for (i = 0; i < portal_count; i++)
- qm_dqrr_sdqcr_set(portal[i],
- QM_SDQCR_TYPE_ACTIVE |
- QM_SDQCR_CHANNELS_DEDICATED);
- else
- for (i = 0; i < portal_count; i++)
- qm_dqrr_sdqcr_set(
- portal[i],
- QM_SDQCR_TYPE_ACTIVE |
- QM_SDQCR_CHANNELS_POOL_CONV
- (channel));
- while (!found_fqrn) {
-				/* Keep draining DQRR while checking the MR */
- for (i = 0; i < portal_count; i++) {
- qm_dqrr_pvb_update(portal[i]);
- dqrr = qm_dqrr_current(portal[i]);
- while (dqrr) {
- qm_dqrr_cdc_consume_1ptr(
- portal[i], dqrr, 0);
- qm_dqrr_pvb_update(portal[i]);
- qm_dqrr_next(portal[i]);
- dqrr = qm_dqrr_current(
- portal[i]);
- }
- /* Process message ring too */
- qm_mr_pvb_update(portal[i]);
- msg = qm_mr_current(portal[i]);
- while (msg) {
- if ((msg->verb &
- QM_MR_VERB_TYPE_MASK)
- == QM_MR_VERB_FQRN)
- found_fqrn = 1;
- qm_mr_next(portal[i]);
- qm_mr_cci_consume_to_current(
- portal[i]);
- qm_mr_pvb_update(portal[i]);
- msg = qm_mr_current(portal[i]);
- }
- cpu_relax();
- }
- }
- }
- if (result != QM_MCR_RESULT_OK &&
- result != QM_MCR_RESULT_PENDING) {
- /* error */
- pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
- fqid, result);
- return -1;
- }
- if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
- /* ORL had no entries, no need to wait until the
- ERNs come in */
- orl_empty = 1;
- }
- /* Retirement succeeded, check to see if FQ needs
- to be drained */
- if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
- /* FQ is Not Empty, drain using volatile DQ commands */
- fq_empty = 0;
- do {
- const struct qm_dqrr_entry *dqrr = NULL;
- u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
- qm_dqrr_vdqcr_set(portal[0], vdqcr);
-
- /* Wait for a dequeue to occur */
- while (dqrr == NULL) {
- qm_dqrr_pvb_update(portal[0]);
- dqrr = qm_dqrr_current(portal[0]);
- if (!dqrr)
- cpu_relax();
- }
- /* Process the dequeues, making sure to
- empty the ring completely */
- while (dqrr) {
- if (dqrr->fqid == fqid &&
- dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
- fq_empty = 1;
- qm_dqrr_cdc_consume_1ptr(portal[0],
- dqrr, 0);
- qm_dqrr_pvb_update(portal[0]);
- qm_dqrr_next(portal[0]);
- dqrr = qm_dqrr_current(portal[0]);
- }
- } while (fq_empty == 0);
- }
- for (i = 0; i < portal_count; i++)
- qm_dqrr_sdqcr_set(portal[i], 0);
-
- /* Wait for the ORL to have been completely drained */
- while (orl_empty == 0) {
- const struct qm_mr_entry *msg;
-
- qm_mr_pvb_update(portal[0]);
- msg = qm_mr_current(portal[0]);
- while (msg) {
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
- QM_MR_VERB_FQRL)
- orl_empty = 1;
- qm_mr_next(portal[0]);
- qm_mr_cci_consume_to_current(portal[0]);
- qm_mr_pvb_update(portal[0]);
- msg = qm_mr_current(portal[0]);
- }
- cpu_relax();
- }
- mcc = qm_mc_start(portal[0]);
- mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
- while (!(mcr = qm_mc_result(portal[0])))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
- QM_MCR_VERB_ALTER_OOS);
- if (mcr->result != QM_MCR_RESULT_OK) {
- pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
- fqid, mcr->result);
- return -1;
- }
- return 0;
- case QM_MCR_NP_STATE_RETIRED:
- /* Send OOS Command */
- mcc = qm_mc_start(portal[0]);
- mcc->alterfq.fqid = fqid;
- qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
- while (!(mcr = qm_mc_result(portal[0])))
- cpu_relax();
- DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
- QM_MCR_VERB_ALTER_OOS);
- if (mcr->result) {
- pr_err("OOS Failed on FQID 0x%x\n", fqid);
- return -1;
- }
- return 0;
- case QM_MCR_NP_STATE_OOS:
- /* Done */
- return 0;
- }
- return -1;
-}
-
-int qman_shutdown_fq(u32 fqid)
-{
- struct qman_portal *p;
- unsigned long irqflags __maybe_unused;
- int ret;
- struct qm_portal *low_p;
-
- p = get_affine_portal();
- PORTAL_IRQ_LOCK(p, irqflags);
- low_p = &p->p;
- ret = qm_shutdown_fq(&low_p, 1, fqid);
- PORTAL_IRQ_UNLOCK(p, irqflags);
- put_affine_portal();
- return ret;
-}
-
-const struct qm_portal_config *qman_get_qm_portal_config(
- struct qman_portal *portal)
-{
-#ifndef __rtems__
- return portal->sharing_redirect ? NULL : portal->config;
-#else /* __rtems__ */
- return portal->config;
-#endif /* __rtems__ */
-}
diff --git a/linux/drivers/soc/fsl/qbman/qman_ccsr.c b/linux/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 00000000..7def3431
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,883 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+#ifdef __rtems__
+#undef dev_crit
+#undef dev_dbg
+#undef dev_err
+#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
+#define dev_dbg dev_crit
+#define dev_err dev_crit
+#endif /* __rtems__ */
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
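+/*
+ * CCSR accessors: 'offset' is a byte offset taken from the register
+ * definitions above, while qm_ccsr_start is a u32 pointer, hence the
+ * divide by 4. All QMan CCSR registers are accessed big-endian.
+ */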
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
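+/*
+ * Program one of the private-memory windows (FQD or PFDR): the 64-bit base
+ * address goes into the BARE/BAR pair and the AR register enables the window
+ * with its size encoded as log2(size) - 1. The size must be a power of two
+ * between 4 KiB and 1 GiB, and the base must be size-aligned.
+ */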
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
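+/*
+ * Seed the PFDR free pool through the management command interface: write the
+ * command parameters to MCP0/MCP1, issue MCR_INIT_PFDR, poll MCR until the
+ * result field goes idle, then translate the result code into an errno.
+ */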
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+#ifndef __rtems__
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
+#define fqd_a ((uintptr_t)&fqd[0])
+#define fqd_sz sizeof(fqd)
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432);
+#define pfdr_a ((uintptr_t)&pfdr[0])
+#define pfdr_sz sizeof(pfdr)
+#endif /* __rtems__ */
+
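+/*
+ * Each frame-queue descriptor occupies 64 bytes of the FQD memory (hence the
+ * divide by 64), so the size of the reserved FQD region bounds the number of
+ * usable FQIDs.
+ */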
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
+
+/*
+ * Flush this memory range from the data cache so that QMan-originated
+ * transactions for this memory region can be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+ phys_addr_t addr, size_t sz)
+{
+#ifndef __rtems__
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+#else /* __rtems__ */
+ memset((void *)(uintptr_t)addr, 0, sz);
+#endif /* __rtems__ */
+ return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
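+/*
+ * Decode the error capture registers: ECIR2 carries the portal number on rev
+ * 3.0 and later parts, ECIR carries it on older revisions, and EADR/EDATA
+ * identify the internal memory and address behind ECC errors.
+ */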
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
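+/*
+ * Error interrupt handler: report every enabled error asserted in ERR_ISR,
+ * dump and re-arm the capture registers when they are valid, mask the errors
+ * listed in QMAN_ERRS_TO_DISABLE so they cannot storm, then acknowledge the
+ * status register.
+ */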
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+#ifndef __rtems__
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+#endif /* __rtems__ */
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
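+/*
+ * The first portal to be fixed up donates its LIODN offset; every later call
+ * writes that same offset into the portal's LIO_CFG register, presumably so
+ * that all software portals end up sharing one LIODN base.
+ */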
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
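+/*
+ * Seed the allocator pools: the pool-channel and CGR counts depend on the
+ * QMan IP revision, FQIDs are bounded by the FQD memory size, and the pool
+ * channels are also folded into the static SDQCR mask.
+ */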
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < cgrid_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
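+/*
+ * Probe sequence: map the CCSR region, identify the IP revision, clear the
+ * private FQD memory, program the CCSR registers, hook the error interrupt,
+ * create the FQ/pool-channel/CGR allocators and seed them, then size the FQ
+ * lookup table.
+ */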
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+#ifdef __rtems__
+ struct resource res_storage;
+#endif /* __rtems__ */
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+#ifndef __rtems__
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+#else /* __rtems__ */
+ res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
+#endif /* __rtems__ */
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+ ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+ WARN_ON(ret);
+ if (ret)
+ return -ENODEV;
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
+#else /* __rtems__ */
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+
+SYSINIT_REFERENCE(bman);
+
+static void
+qman_sysinit(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct {
+ struct platform_device pdev;
+ struct device_node of_node;
+ } dev;
+ const char *name;
+ int node;
+ int ret;
+
+ name = "fsl,qman";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("qman: no qman in FDT");
+
+ memset(&dev, 0, sizeof(dev));
+ dev.pdev.dev.of_node = &dev.of_node;
+ dev.pdev.dev.base = (uintptr_t)&qoriq;
+ dev.of_node.offset = node;
+ dev.of_node.full_name = name;
+
+ ret = fsl_qman_probe(&dev.pdev);
+ if (ret != 0)
+ panic("qman: init failed");
+
+ qman_sysinit_portals();
+}
+SYSINIT(qman, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
+#endif /* __rtems__ */
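
The CCSR-side helpers added above (qman_liodn_fixup() and qman_set_sdest()) are intended to be called once per software portal while that portal is brought up; the authoritative call sequence is init_pcfg()/portal_set_cpu() in qman_portal.c later in this patch. A minimal sketch of that order, assuming a hypothetical bring_up_portal() helper and an already-populated struct qm_portal_config:

	/* Sketch only: mirrors the order used by init_pcfg() below. */
	static struct qman_portal *bring_up_portal(struct qm_portal_config *pcfg)
	{
		struct qman_portal *p;

		qman_liodn_fixup(pcfg->channel);	  /* same LIODN offset for every portal */
		qman_set_sdest(pcfg->channel, pcfg->cpu); /* stash to the affine cpu's cache */

		p = qman_create_affine_portal(pcfg, NULL);
		if (!p)
			return NULL;

		/* interrupt-driven operation for both slow and fast paths */
		qman_p_irqsource_add(p, QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
					QM_PIRQ_CSCI | QM_PIRQ_DQRI);
		return p;
	}
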
diff --git a/linux/drivers/soc/fsl/qbman/qman_driver.c b/linux/drivers/soc/fsl/qbman/qman_driver.c
deleted file mode 100644
index 6923504e..00000000
--- a/linux/drivers/soc/fsl/qbman/qman_driver.c
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "qman_priv.h"
-
-#include <linux/time.h>
-
-static int __init early_qman_init(void)
-{
- struct device_node *dn;
- u32 is_portal_available;
-
- qman_init();
-
- is_portal_available = 0;
- for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
- if (of_device_is_available(dn)) {
- is_portal_available = 1;
- break;
- }
- }
-
- if (!qman_have_ccsr() && is_portal_available) {
- struct qman_fq fq = {.fqid = 1};
- struct qm_mcr_queryfq_np np;
- int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
- struct timespec nowts, diffts, startts = current_kernel_time();
-
- /* Loop while querying given fqid succeeds or time out */
- while (1) {
- err = qman_query_fq_np(&fq, &np);
- if (!err) {
- /* success, control-plane has configured QMan */
- break;
- } else if (err != -ERANGE) {
- pr_err("I/O error, continuing anyway\n");
- break;
- }
- nowts = current_kernel_time();
- diffts = timespec_sub(nowts, startts);
- if (diffts.tv_sec > 0) {
- if (!retry--) {
- pr_err("Time out, control-plane dead?\n");
- break;
- }
- pr_warn("Polling for the control-plane (%d)\n",
- retry);
- }
- }
- }
-
- qman_resource_init();
-
- return 0;
-}
-subsys_initcall(early_qman_init);
diff --git a/linux/drivers/soc/fsl/qbman/qman_portal.c b/linux/drivers/soc/fsl/qbman/qman_portal.c
index c74059b1..640343ac 100644
--- a/linux/drivers/soc/fsl/qbman/qman_portal.c
+++ b/linux/drivers/soc/fsl/qbman/qman_portal.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,304 +35,40 @@
#include "qman_priv.h"
#ifdef __rtems__
#include <bsp/qoriq.h>
+#undef dev_crit
+#undef dev_info
+#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
+#define dev_info dev_crit
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
#endif /* __rtems__ */
/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
-/* Global variable containing revision id (even on non-control plane systems
- * where CCSR isn't available) */
-u16 qman_ip_rev;
-EXPORT_SYMBOL(qman_ip_rev);
-u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
-EXPORT_SYMBOL(qm_channel_pool1);
-u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
-EXPORT_SYMBOL(qm_channel_caam);
-u16 qm_channel_pme = QMAN_CHANNEL_PME;
-EXPORT_SYMBOL(qm_channel_pme);
-u16 qm_channel_dce = QMAN_CHANNEL_DCE;
-EXPORT_SYMBOL(qm_channel_dce);
-u16 qman_portal_max;
-EXPORT_SYMBOL(qman_portal_max);
-
#ifndef __rtems__
-/* For these variables, and the portal-initialisation logic, the
- * comments in bman_driver.c apply here so won't be repeated. */
-static struct qman_portal *shared_portals[NR_CPUS];
-static int num_shared_portals;
-static int shared_portals_idx;
-static LIST_HEAD(unused_pcfgs);
-#endif /* __rtems__ */
-
-/* A SDQCR mask comprising all the available/visible pool channels */
-static u32 pools_sdqcr;
-
-#define STR_ERR_NOPROP "No '%s' property in node %s\n"
-#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
-#define STR_FQID_RANGE "fsl,fqid-range"
-#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
-#define STR_CGRID_RANGE "fsl,cgrid-range"
-
-/* A "fsl,fqid-range" node; release the given range to the allocator */
-static __init int fsl_fqid_range_init(struct device_node *node)
-{
- int ret;
- const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
-
- if (!range) {
- pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
- return -EINVAL;
- }
- qman_seed_fqid_range(range[0], range[1]);
- pr_info("FQID allocator includes range %d:%d\n",
- range[0], range[1]);
- return 0;
-}
-
-/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
-static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
-{
- int ret;
- const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
- if (!chanid) {
- pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
- return -EINVAL;
- }
- for (ret = 0; ret < chanid[1]; ret++)
- pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
- return 0;
-}
-
-/* A "fsl,pool-channel-range" node; release the given range to the allocator */
-static __init int fsl_pool_channel_range_init(struct device_node *node)
-{
- int ret;
- const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-
- if (!chanid) {
- pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
- return -EINVAL;
- }
- qman_seed_pool_range(chanid[0], chanid[1]);
- pr_info("Pool channel allocator includes range %d:%d\n",
- chanid[0], chanid[1]);
- return 0;
-}
-
-/* A "fsl,cgrid-range" node; release the given range to the allocator */
-static __init int fsl_cgrid_range_init(struct device_node *node)
-{
- struct qman_cgr cgr;
- int ret, errors = 0;
- const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
-
- if (!range) {
- pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
- return -EINVAL;
- }
- if (ret != 8) {
- pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
- return -EINVAL;
- }
- qman_seed_cgrid_range(range[0], range[1]);
- pr_info("CGRID allocator includes range %d:%d\n",
- range[0], range[1]);
- for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
- ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
- if (ret)
- errors++;
- }
- if (errors)
- pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
- errors, (errors > 1) ? "s" : "", range[0], range[1]);
- return 0;
-}
-
-static void qman_get_ip_revision(struct device_node *dn)
-{
-#ifdef __rtems__
- struct device_node of_dns;
-#endif /* __rtems__ */
- u16 ip_rev = 0;
-
- for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
- if (!of_device_is_available(dn))
- continue;
- if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
- pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
- BUG_ON(1);
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
- ip_rev = QMAN_REV11;
- qman_portal_max = 10;
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
- of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
- ip_rev = QMAN_REV12;
- qman_portal_max = 10;
- } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
- of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
- ip_rev = QMAN_REV20;
- qman_portal_max = 3;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.0.0")) {
- ip_rev = QMAN_REV30;
- qman_portal_max = 50;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.0.1")) {
- ip_rev = QMAN_REV30;
- qman_portal_max = 25;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.0")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 50;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.1")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 25;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.2")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 18;
- } else if (of_device_is_compatible(dn,
- "fsl,qman-portal-3.1.3")) {
- ip_rev = QMAN_REV31;
- qman_portal_max = 10;
- } else {
- pr_warn("Unknown version in portal node, default to rev1.1\n");
- ip_rev = QMAN_REV11;
- qman_portal_max = 10;
- }
-
- if (!qman_ip_rev) {
- if (ip_rev) {
- qman_ip_rev = ip_rev;
- } else {
- pr_warn("Unknown version, default to rev1.1\n");
- qman_ip_rev = QMAN_REV11;
- }
- } else if (ip_rev && (qman_ip_rev != ip_rev))
- pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
- qman_ip_rev, dn->full_name, ip_rev);
- if (qman_ip_rev == ip_rev)
- break;
- }
-}
-
-#ifndef __rtems__
-/* Parse a portal node, perform generic mapping duties and return the config. It
- * is not known at this stage for what purpose (or even if) the portal will be
- * used. */
-static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
-{
- struct qm_portal_config *pcfg;
- const u32 *channel;
- int irq, ret;
- struct resource res;
-
- pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
- return NULL;
-
- /*
- * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
- * 'struct device' in order to get the PAMU stashing setup and the QMan
- * portal [driver] won't function at all without ring stashing
- *
- * Making the QMan portal driver nice and proper is part of the
- * upstreaming effort
- */
- pcfg->dev.bus = &platform_bus_type;
- pcfg->dev.of_node = node;
-#ifdef CONFIG_IOMMU_API
- pcfg->dev.archdata.iommu_domain = NULL;
-#endif
-
- ret = of_address_to_resource(node, DPA_PORTAL_CE,
- &pcfg->addr_phys[DPA_PORTAL_CE]);
- if (ret) {
- pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
- goto err;
- }
- ret = of_address_to_resource(node, DPA_PORTAL_CI,
- &pcfg->addr_phys[DPA_PORTAL_CI]);
- if (ret) {
- pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
- goto err;
- }
-
- channel = of_get_property(node, "fsl,qman-channel-id", &ret);
- if (!channel || (ret != 4)) {
- pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
- node->full_name);
- goto err;
- }
- pcfg->public_cfg.channel = *channel;
- pcfg->public_cfg.cpu = -1;
- irq = irq_of_parse_and_map(node, 0);
- if (irq == NO_IRQ) {
- pr_err("Can't get %s property 'interrupts'\n", node->full_name);
- goto err;
- }
- pcfg->public_cfg.irq = irq;
-#ifdef CONFIG_FSL_QMAN_CONFIG
- /* We need the same LIODN offset for all portals */
- qman_liodn_fixup(pcfg->public_cfg.channel);
-#endif
-
- pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
- pcfg->addr_phys[DPA_PORTAL_CE].start,
- resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
- 0);
- pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
- pcfg->addr_phys[DPA_PORTAL_CI].start,
- resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
-
- return pcfg;
-err:
- kfree(pcfg);
- return NULL;
-}
-
-static struct qm_portal_config *get_pcfg(struct list_head *list)
-{
- struct qm_portal_config *pcfg;
-
- if (list_empty(list))
- return NULL;
- pcfg = list_entry(list->prev, struct qm_portal_config, list);
- list_del(&pcfg->list);
- return pcfg;
-}
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
#endif /* __rtems__ */
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
- int ret;
+ struct device *dev = pcfg->dev;
int window_count = 1;
struct iommu_domain_geometry geom_attr;
struct pamu_stash_attribute stash_attr;
+ int ret;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
if (!pcfg->iommu_domain) {
- pr_err("%s(): iommu_domain_alloc() failed", __func__);
- goto _no_iommu;
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
}
geom_attr.aperture_start = 0;
geom_attr.aperture_end =
@@ -341,14 +77,16 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
&geom_attr);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
&window_count);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
stash_attr.cpu = cpu;
stash_attr.cache = PAMU_ATTR_CACHE_L1;
@@ -356,45 +94,42 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
DOMAIN_ATTR_FSL_PAMU_STASH,
&stash_attr);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
}
ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
- pr_err("%s(): iommu_domain_window_enable() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
}
- ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
if (ret < 0) {
- pr_err("%s(): iommu_device_attach() = %d",
- __func__, ret);
- goto _iommu_domain_free;
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
&window_count);
if (ret < 0) {
- pr_err("%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto _iommu_detach_device;
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
}
-_no_iommu:
-#endif
-#ifdef CONFIG_FSL_QMAN_CONFIG
- if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+no_iommu:
#endif
- pr_warn("Failed to set the stash request queue\n");
+ qman_set_sdest(pcfg->channel, cpu);
return;
#ifdef CONFIG_FSL_PAMU
-_iommu_detach_device:
+out_detach_device:
iommu_detach_device(pcfg->iommu_domain, NULL);
-_iommu_domain_free:
+out_domain_free:
iommu_domain_free(pcfg->iommu_domain);
pcfg->iommu_domain = NULL;
#endif
@@ -403,65 +138,54 @@ _iommu_domain_free:
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
#ifndef __rtems__
pcfg->iommu_domain = NULL;
#endif /* __rtems__ */
- portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+ portal_set_cpu(pcfg, pcfg->cpu);
p = qman_create_affine_portal(pcfg, NULL);
- if (p) {
- u32 irq_sources = 0;
- /* Determine what should be interrupt-vs-poll driven */
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
- irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
- QM_PIRQ_CSCI;
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
- irq_sources |= QM_PIRQ_DQRI;
+ irq_sources |= QM_PIRQ_DQRI;
#endif
- qman_p_irqsource_add(p, irq_sources);
- pr_info("Portal %sinitialised, cpu %d\n",
-#ifndef __rtems__
- pcfg->public_cfg.is_shared ? "(shared) " : "",
-#else /* __rtems__ */
- "",
-#endif /* __rtems__ */
- pcfg->public_cfg.cpu);
- } else
- pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
- return p;
-}
+ qman_p_irqsource_add(p, irq_sources);
#ifndef __rtems__
-static void init_slave(int cpu)
-{
- struct qman_portal *p;
- struct cpumask oldmask = *tsk_cpus_allowed(current);
-
- set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
- p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
- if (!p)
- pr_err("Slave portal failure on cpu %d\n", cpu);
- else
- pr_info("Portal (slave) initialised, cpu %d\n", cpu);
- set_cpus_allowed_ptr(current, &oldmask);
- if (shared_portals_idx >= num_shared_portals)
- shared_portals_idx = 0;
-}
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
-static struct cpumask want_unshared __initdata;
-static struct cpumask want_shared __initdata;
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
-static int __init parse_qportals(char *str)
-{
- return parse_portals_bootarg(str, &want_shared, &want_unshared,
- "qportals");
+ spin_unlock(&qman_lock);
+#endif /* __rtems__ */
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
}
-__setup("qportals=", parse_qportals);
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
+#ifdef CONFIG_FSL_PAMU /* TODO */
struct pamu_stash_attribute stash_attr;
int ret;
@@ -471,77 +195,196 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
if (ret < 0) {
- pr_err("Failed to update pamu stash setting\n");
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
return;
}
}
-#ifdef CONFIG_FSL_QMAN_CONFIG
- if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
#endif
- pr_warn("Failed to update portal's stash request queue\n");
+ qman_set_sdest(pcfg->channel, cpu);
}
-static void qman_offline_cpu(unsigned int cpu)
+#ifndef __rtems__
+static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
- p = (struct qman_portal *)affine_portals[cpu];
+ p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
qman_portal_update_sdest(pcfg, 0);
}
}
+ return 0;
}
-#endif /* __rtems__ */
-#ifdef CONFIG_HOTPLUG_CPU
-static void qman_online_cpu(unsigned int cpu)
+static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
- p = (struct qman_portal *)affine_portals[cpu];
+ p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
- irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
qman_portal_update_sdest(pcfg, cpu);
}
}
+ return 0;
}
-static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int qman_portal_probe(struct platform_device *pdev)
{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- qman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- qman_offline_cpu(cpu);
- default:
- break;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu, err;
+ u32 val;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %s property 'cell-index'\n",
+ node->full_name);
+ return err;
+ }
+ pcfg->channel = val;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
}
- return NOTIFY_OK;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ return -ENXIO;
}
-static struct notifier_block qman_hotplug_cpu_notifier = {
- .notifier_call = qman_hotplug_cpu_callback,
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
};
-#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef __rtems__
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
+#else /* __rtems__ */
#include <bsp/fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
static struct qm_portal_config qman_configs[NR_CPUS];
-static void
-qman_init_portals(void)
+
+void
+qman_sysinit_portals(void)
{
const char *fdt = bsp_fdt_get();
struct device_node dn;
@@ -575,7 +418,7 @@ qman_init_portals(void)
struct qm_portal_config *pcfg = &qman_configs[cpu];
struct qman_portal *portal;
struct resource res;
- const u32 *channel;
+ u32 val;
if (node < 0)
panic("qman: missing portal in FDT");
@@ -600,197 +443,29 @@ qman_init_portals(void)
BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
(uintptr_t)&qoriq_qman_portal[2][0]);
- pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
- if (pcfg->public_cfg.irq == NO_IRQ)
- panic("qman: no portal interrupt");
+ ret = of_property_read_u32(&dn, "cell-index", &val);
+ if (ret != 0)
+ panic("qman: no cell-index");
+ pcfg->channel = val;
- channel = of_get_property(&dn, "fsl,qman-channel-id", &ret);
- if (channel == NULL || ret != 4)
- panic("qman: no portal channel ID");
- pcfg->public_cfg.channel = *channel;
+ pcfg->irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->irq == NO_IRQ)
+ panic("qman: no portal interrupt");
- pcfg->public_cfg.cpu = cpu;
- pcfg->public_cfg.pools = pools_sdqcr;
+ pcfg->cpu = cpu;
+ pcfg->pools = qm_get_pools_sdqcr();
portal = init_pcfg(pcfg);
if (portal == NULL)
panic("qman: cannot create portal");
+ qman_portal_update_sdest(pcfg, cpu);
+
node = fdt_next_subnode(fdt, node);
dn.offset = node;
}
-}
-#endif /* __rtems__ */
-#ifndef __rtems__
-__init int qman_init(void)
-{
- struct cpumask slave_cpus;
- struct cpumask unshared_cpus = *cpu_none_mask;
- struct cpumask shared_cpus = *cpu_none_mask;
- LIST_HEAD(unshared_pcfgs);
- LIST_HEAD(shared_pcfgs);
- struct device_node *dn;
- struct qm_portal_config *pcfg;
- struct qman_portal *p;
- int cpu, ret;
- struct cpumask offline_cpus;
-
- /* Initialise the QMan (CCSR) device */
- for_each_compatible_node(dn, NULL, "fsl,qman") {
- if (!qman_init_ccsr(dn))
- pr_info("Err interrupt handler present\n");
- else
- pr_err("CCSR setup failed\n");
- }
-#else /* __rtems__ */
-int
-qman_init(struct device_node *dn)
-{
- struct device_node of_dns;
- int ret;
-#endif /* __rtems__ */
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- /* Setup lookup table for FQ demux */
- ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
- if (ret)
- return ret;
-#endif
- /* Get qman ip revision */
- qman_get_ip_revision(dn);
- if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
- qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
- qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
- qm_channel_pme = QMAN_CHANNEL_PME_REV3;
- }
-
- /* Parse pool channels into the SDQCR mask. (Must happen before portals
- * are initialised.) */
- for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
- ret = fsl_pool_channel_range_sdqcr(dn);
- if (ret)
- return ret;
- }
-
-#ifndef __rtems__
- memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
- /* Initialise portals. See bman_driver.c for comments */
- for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
- if (!of_device_is_available(dn))
- continue;
- pcfg = parse_pcfg(dn);
- if (pcfg) {
- pcfg->public_cfg.pools = pools_sdqcr;
- list_add_tail(&pcfg->list, &unused_pcfgs);
- }
- }
- for_each_possible_cpu(cpu) {
- if (cpumask_test_cpu(cpu, &want_shared)) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &shared_pcfgs);
- cpumask_set_cpu(cpu, &shared_cpus);
- }
- if (cpumask_test_cpu(cpu, &want_unshared)) {
- if (cpumask_test_cpu(cpu, &shared_cpus))
- continue;
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
- for_each_possible_cpu(cpu) {
- pcfg = get_pcfg(&unused_pcfgs);
- if (!pcfg)
- break;
- pcfg->public_cfg.cpu = cpu;
- list_add_tail(&pcfg->list, &unshared_pcfgs);
- cpumask_set_cpu(cpu, &unshared_cpus);
- }
- }
- cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
- cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
- if (cpumask_empty(&slave_cpus)) {
- if (!list_empty(&shared_pcfgs)) {
- cpumask_or(&unshared_cpus, &unshared_cpus,
- &shared_cpus);
- cpumask_clear(&shared_cpus);
- list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
- INIT_LIST_HEAD(&shared_pcfgs);
- }
- } else {
- if (list_empty(&shared_pcfgs)) {
- pcfg = get_pcfg(&unshared_pcfgs);
- if (!pcfg) {
- pr_crit("No portals available!\n");
- return 0;
- }
- cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
- cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
- list_add_tail(&pcfg->list, &shared_pcfgs);
- }
- }
- list_for_each_entry(pcfg, &unshared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 0;
- p = init_pcfg(pcfg);
- }
- list_for_each_entry(pcfg, &shared_pcfgs, list) {
- pcfg->public_cfg.is_shared = 1;
- p = init_pcfg(pcfg);
- if (p)
- shared_portals[num_shared_portals++] = p;
- }
- if (!cpumask_empty(&slave_cpus))
- for_each_cpu(cpu, &slave_cpus)
- init_slave(cpu);
-#else /* __rtems__ */
- qman_init_portals();
-#endif /* __rtems__ */
- pr_info("Portals initialised\n");
-#ifndef __rtems__
- cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
- for_each_cpu(cpu, &offline_cpus)
- qman_offline_cpu(cpu);
-#endif /* __rtems__ */
-#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
-#endif
- return 0;
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
}
-
-__init int qman_resource_init(void)
-{
-#ifdef __rtems__
- struct device_node of_dns;
#endif /* __rtems__ */
- struct device_node *dn;
- int ret;
-
- /* Initialise FQID allocation ranges */
- for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
- ret = fsl_fqid_range_init(dn);
- if (ret)
- return ret;
- }
- /* Initialise CGRID allocation ranges */
- for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
- ret = fsl_cgrid_range_init(dn);
- if (ret)
- return ret;
- }
- /* Parse pool channels into the allocator. (Must happen after portals
- * are initialised.) */
- for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
- ret = fsl_pool_channel_range_init(dn);
- if (ret)
- return ret;
- }
-
- return 0;
-}
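
Once the probe logic above has bound each portal to a CPU, consumers are not expected to reference portals directly; they walk the affine-CPU mask and use each CPU's dedicated channel, which is how the dpaa_eth driver spreads its receive work. A minimal sketch, assuming a hypothetical assign_rx_channels() caller (the pr_info() stands in for real FQ setup):

	static int assign_rx_channels(void)
	{
		const cpumask_t *cpus = qman_affine_cpus();
		int cpu;

		for_each_cpu(cpu, cpus) {
			u16 channel = qman_affine_channel(cpu);

			/* a real consumer would initialise its Rx FQs to
			 * target 'channel' so they dequeue on this portal */
			pr_info("portal on cpu %d serves channel 0x%x\n",
				cpu, channel);
		}
		return 0;
	}
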
diff --git a/linux/drivers/soc/fsl/qbman/qman_priv.h b/linux/drivers/soc/fsl/qbman/qman_priv.h
index f04bd476..c03f1b5b 100644
--- a/linux/drivers/soc/fsl/qbman/qman_priv.h
+++ b/linux/drivers/soc/fsl/qbman/qman_priv.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,93 +34,213 @@
#include <soc/fsl/qman.h>
#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
+
+/* "Query FQ Non-Programmable Fields" */
+
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24)-1,
+ qm_mcr_odp_seq_mask = BIT(14)-1,
+ qm_mcr_orp_nesn_mask = BIT(14)-1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+ qm_mcr_is_mask = BIT(1)-1,
+ qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
/* Congestion Groups */
-/* This wrapper represents a bit-array for the state of the 256 QMan congestion
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
* groups. Is also used as a *mask* for congestion groups, eg. so we ignore
* those that don't concern us. We harness the structure and accessor details
* already used in the management command to query congestion groups.
*/
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+
struct qman_cgrs {
struct __qm_mcr_querycongestion q;
};
+
static inline void qman_cgrs_init(struct qman_cgrs *c)
{
memset(c, 0, sizeof(*c));
}
+
static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
memset(c, 0xff, sizeof(*c));
}
-static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
-{
- return QM_MCR_QUERYCONGESTION(&c->q, num);
-}
-static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
-{
- c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
-}
-static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
-{
- c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
-}
-static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
- while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
- ;
- return num;
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}
+
static inline void qman_cgrs_cp(struct qman_cgrs *dest,
const struct qman_cgrs *src)
{
*dest = *src;
}
+
static inline void qman_cgrs_and(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
- u32 *_d = dest->q.__state;
- const u32 *_a = a->q.__state;
- const u32 *_b = b->q.__state;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
for (ret = 0; ret < 8; ret++)
- *(_d++) = *(_a++) & *(_b++);
+ *_d++ = *_a++ & *_b++;
}
+
static inline void qman_cgrs_xor(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
- u32 *_d = dest->q.__state;
- const u32 *_a = a->q.__state;
- const u32 *_b = b->q.__state;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
for (ret = 0; ret < 8; ret++)
- *(_d++) = *(_a++) ^ *(_b++);
+ *_d++ = *_a++ ^ *_b++;
}
-/* used by CCSR and portal interrupt code */
-enum qm_isr_reg {
- qm_isr_status = 0,
- qm_isr_enable = 1,
- qm_isr_disable = 2,
- qm_isr_inhibit = 3
-};
+void qman_init_cgr_all(void);
struct qm_portal_config {
- /* Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited. */
- __iomem void *addr_virt[2];
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
#ifndef __rtems__
- struct resource addr_phys[2];
- struct device dev;
+ struct device *dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
struct list_head list;
#endif /* __rtems__ */
/* User-visible portal configuration settings */
- struct qman_portal_config public_cfg;
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
};
/* Revision info (for errata and feature handling) */
@@ -131,57 +251,70 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-extern u16 qman_portal_max;
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
-#ifdef CONFIG_FSL_QMAN_CONFIG
-/* Hooks from qman_driver.c to qman_config.c */
-int qman_init_ccsr(struct device_node *node);
+int qman_wq_alloc(void);
void qman_liodn_fixup(u16 channel);
-int qman_set_sdest(u16 channel, unsigned int cpu_idx);
-size_t qman_fqd_size(void);
-#endif
-
-int qm_set_wpm(int wpm);
-int qm_get_wpm(int *wpm);
-
-/* Hooks from qman_driver.c in to qman_high.c */
-struct qman_portal *qman_create_portal(
- struct qman_portal *portal,
- const struct qm_portal_config *config,
- const struct qman_cgrs *cgrs);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
-struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
- int cpu);
const struct qm_portal_config *qman_destroy_affine_portal(void);
-void qman_destroy_portal(struct qman_portal *qm);
-
-/* This CGR feature is supported by h/w and required by unit-tests and the
- * debugfs hooks, so is implemented in the driver. However it allows an explicit
- * corruption of h/w fields by s/w that are usually incorruptible (because the
- * counters are usually maintained entirely within h/w). As such, we declare
- * this API internally. */
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
- struct qm_mcr_cgrtestwrite *result);
-
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-/* If the fq object pointer is greater than the size of context_b field,
- * than a lookup table is required. */
-int qman_setup_fq_lookup_table(size_t num_entries);
-#endif
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-/*************************************************/
-/* QMan s/w corenet portal, low-level i/face */
-/*************************************************/
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-/* Note: most functions are only used by the high-level interface, so are
- * inlined from qman.h. The stuff below is for use by other parts of the
- * driver. */
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
-/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
* dequeue TYPE. Choose TOKEN (8-bit).
* If SOURCE == CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
@@ -216,42 +349,8 @@ int qman_setup_fq_lookup_table(size_t num_entries);
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
-/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
- * If MODE==SCHEDULED
- * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
- * If CHANNELS,
- * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
- * You can choose DEDICATED_PRECEDENCE if the portal channel should have
- * priority.
- * If SPECIFICWQ,
- * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
- * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
- * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
- * same value.
- * If MODE==UNSCHEDULED
- * Choose FQID().
- */
-#define QM_PDQCR_MODE_SCHEDULED 0x0
-#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
-#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
-#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
-#define QM_PDQCR_COUNT_EXACT1 0x0
-#define QM_PDQCR_COUNT_UPTO3 0x20000000
-#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
-#define QM_PDQCR_TYPE_MASK 0x03000000
-#define QM_PDQCR_TYPE_NULL 0x0
-#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
-#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
-#define QM_PDQCR_TYPE_ACTIVE 0x03000000
-#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
-#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
-#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
-#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
-#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
-#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
-#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
-
-/* Used by all portal interrupt registers except 'inhibit'
+/*
+ * Used by all portal interrupt registers except 'inhibit'
* Channels with frame availability
*/
#define QM_PIRQ_DQAVAIL 0x0000ffff
@@ -263,31 +362,10 @@ int qman_setup_fq_lookup_table(size_t num_entries);
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
-/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
- * the disable register" rather than "disable the ability to write". */
-#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
-#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
-#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
-#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
-#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
-#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
-/* TODO: unfortunate name-clash here, reword? */
-#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
-#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
-
-#ifdef CONFIG_FSL_QMAN_CONFIG
-int qman_have_ccsr(void);
-#else
-#define qman_have_ccsr 0
-#endif
-
-#ifndef __rtems__
-__init int qman_init(void);
-#else /* __rtems__ */
-int qman_init(struct device_node *dn);
-#endif /* __rtems__ */
-__init int qman_resource_init(void);
-
-extern void *affine_portals[NR_CPUS];
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
+#ifdef __rtems__
+void qman_sysinit_portals(void);
+#endif /* __rtems__ */
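
The QM_VDQCR_* encoding and QMAN_VOLATILE_FLAG_* flags above combine as described in the qman_volatile_dequeue() comment, and the same pattern is used verbatim by qman_test_api.c further down. A minimal sketch, assuming a hypothetical drain_fq() helper operating on a parked or retired FQ:

	/* Pull at most 3 frames from 'fq' and sleep until the volatile dequeue
	 * command has fully completed (all resulting DQRR callbacks have run). */
	static int drain_fq(struct qman_fq *fq)
	{
		u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(3);

		return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
					     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
	}
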
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.c b/linux/drivers/soc/fsl/qbman/qman_test.c
deleted file mode 100644
index 18c04482..00000000
--- a/linux/drivers/soc/fsl/qbman/qman_test.c
+++ /dev/null
@@ -1,61 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "qman_test.h"
-
-MODULE_AUTHOR("Geoff Thorpe");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("QMan testing");
-
-static int test_init(void)
-{
- int loop = 1;
-
- while (loop--) {
-#ifdef CONFIG_FSL_QMAN_TEST_STASH
- qman_test_stash();
-#endif
-#ifdef CONFIG_FSL_QMAN_TEST_API
- qman_test_api();
-#endif
- }
- return 0;
-}
-
-static void test_exit(void)
-{
-}
-
-module_init(test_init);
-module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.h b/linux/drivers/soc/fsl/qbman/qman_test.h
index 0b34a670..d5f8cb22 100644
--- a/linux/drivers/soc/fsl/qbman/qman_test.h
+++ b/linux/drivers/soc/fsl/qbman/qman_test.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,17 +28,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
+#include "qman_priv.h"
-#include <soc/fsl/qman.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-void qman_test_stash(void);
-void qman_test_api(void);
+int qman_test_stash(void);
+int qman_test_api(void);
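
With qman_test_stash() and qman_test_api() now returning an error code instead of panicking, callers can report failures and keep going. A minimal sketch of a hypothetical bring-up hook:

	static int qman_run_selftests(void)
	{
		int err = qman_test_api();

		if (err)
			pr_err("QMan API self-test failed: %d\n", err);
		return err;
	}
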
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_api.c b/linux/drivers/soc/fsl/qbman/qman_test_api.c
index 63a6d11d..d5de4c9a 100644
--- a/linux/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/linux/drivers/soc/fsl/qbman/qman_test_api.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,10 +34,6 @@
#include "qman_test.h"
-/*************/
-/* constants */
-/*************/
-
#define CGR_ID 27
#define POOL_ID 2
#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
@@ -51,21 +47,13 @@
#define PORTAL_OPAQUE ((void *)0xf00dbeef)
#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
-/*************************************/
-/* Predeclarations (eg. for fq_base) */
-/*************************************/
-
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
struct qman_fq *,
const struct qm_dqrr_entry *);
static void cb_ern(struct qman_portal *, struct qman_fq *,
- const struct qm_mr_entry *);
+ const union qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
- const struct qm_mr_entry *);
-
-/***************/
-/* global vars */
-/***************/
+ const union qm_mr_entry *);
static struct qm_fd fd, fd_dq;
static struct qman_fq fq_base = {
@@ -76,67 +64,68 @@ static struct qman_fq fq_base = {
static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
static int retire_complete, sdqcr_complete;
-/**********************/
-/* internal functions */
-/**********************/
-
/* Helpers for initialising and "incrementing" a frame descriptor */
-static void fd_init(struct qm_fd *__fd)
+static void fd_init(struct qm_fd *fd)
{
- qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
- __fd->format = qm_fd_contig_big;
- __fd->length29 = 0x0000ffff;
- __fd->cmd = 0xfeedf00d;
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = cpu_to_be32(0xfeedf00d);
}
-static void fd_inc(struct qm_fd *__fd)
+static void fd_inc(struct qm_fd *fd)
{
- u64 t = qm_fd_addr_get64(__fd);
+ u64 t = qm_fd_addr_get64(fd);
int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
t <<= 1;
if (z)
t |= 1;
- qm_fd_addr_set64(__fd, t);
- __fd->length29--;
- __fd->cmd++;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
- int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
-
- if (!r)
- r = a->format - b->format;
- if (!r)
- r = a->opaque - b->opaque;
- if (!r)
- r = a->cmd - b->cmd;
- return r;
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
+
+ return neq;
}
-/********/
/* test */
-/********/
-
-static void do_enqueues(struct qman_fq *fq)
+static int do_enqueues(struct qman_fq *fq)
{
unsigned int loop;
+ int err = 0;
for (loop = 0; loop < NUM_ENQUEUES; loop++) {
- if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
- (((loop + 1) == NUM_ENQUEUES) ?
- QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
- panic("qman_enqueue() failed\n");
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
fd_inc(&fd);
}
+
+ return err;
}
-void qman_test_api(void)
+int qman_test_api(void)
{
- u32 flags;
- int res;
+ u32 flags, frmcnt;
+ int err;
struct qman_fq *fq = &fq_base;
pr_info("%s(): Starting\n", __func__);
@@ -144,57 +133,93 @@ void qman_test_api(void)
fd_init(&fd_dq);
/* Initialise (parked) FQ */
- if (qman_create_fq(0, FQ_FLAGS, fq))
- panic("qman_create_fq() failed\n");
- if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
- panic("qman_init_fq() failed\n");
-
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
/* Do enqueues + VDQCR, twice. (Parked FQ) */
- do_enqueues(fq);
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
pr_info("VDQCR (till-empty);\n");
- if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
- QM_VDQCR_NUMFRAMES_TILLEMPTY))
- panic("qman_volatile_dequeue() failed\n");
- do_enqueues(fq);
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
- if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
- QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
- panic("qman_volatile_dequeue() failed\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
- NUM_ENQUEUES);
- if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
- QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
- panic("qman_volatile_dequeue() failed\n");
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
- do_enqueues(fq);
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
pr_info("scheduled dequeue (till-empty)\n");
- if (qman_schedule_fq(fq))
- panic("qman_schedule_fq() failed\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
wait_event(waitqueue, sdqcr_complete);
/* Retire and OOS the FQ */
- res = qman_retire_fq(fq, &flags);
- if (res < 0)
- panic("qman_retire_fq() failed\n");
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
wait_event(waitqueue, retire_complete);
- if (flags & QMAN_FQ_STATE_BLOCKOOS)
- panic("leaking frames\n");
- if (qman_oos_fq(fq))
- panic("qman_oos_fq() failed\n");
- qman_destroy_fq(fq, 0);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
}
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
- if (fd_cmp(&fd_dq, &dq->fd)) {
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
- BUG();
+ return qman_cb_dqrr_consume;
}
fd_inc(&fd_dq);
- if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
@@ -202,18 +227,22 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
}
static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_mr_entry *msg)
+ const union qm_mr_entry *msg)
{
- panic("cb_ern() unimplemented");
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
}
static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_mr_entry *msg)
+ const union qm_mr_entry *msg)
{
u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
- if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
- panic("unexpected FQS message");
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
#ifndef __rtems__
pr_info("Retirement message received\n");
#endif /* __rtems__ */
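The hunks above convert qman_test_api() from panic()-on-failure into a function that reports errors via pr_crit()/WARN_ON() and a return code. As a reading aid only (not part of the commit), the following sketch condenses the frame-queue lifecycle the test exercises into one hypothetical helper, using the reworked calls shown above, including the single-argument qman_destroy_fq():

/* Hypothetical summary of the FQ lifecycle used by qman_test_api(). */
static int fq_lifecycle_sketch(struct qman_fq *fq)
{
	u32 flags;

	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
		return -EIO;
	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
		return -EIO;	/* FQ is now initialised but parked */
	if (qman_schedule_fq(fq))
		return -EIO;	/* hand it to the scheduler */
	if (qman_retire_fq(fq, &flags) < 0)
		return -EIO;	/* flags reports e.g. QMAN_FQ_STATE_BLOCKOOS */
	if (qman_oos_fq(fq))
		return -EIO;	/* take it out of service */
	qman_destroy_fq(fq);	/* note: no flags argument any more */
	return 0;
}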
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_stash.c b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
index a3ca6603..9c3a135f 100644
--- a/linux/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,14 +35,15 @@
#include "qman_test.h"
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#ifdef __rtems__
#include <rtems/malloc.h>
#undef msleep
#define msleep(x) usleep((x) * 1000)
-#define L1_CACHE_BYTES 64
#endif /* __rtems__ */
-/* Algorithm:
+/*
+ * Algorithm:
*
* Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
* an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
@@ -86,23 +87,28 @@
* initialisation targets the correct cpu.
*/
-/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
- * the fn from irq context, which is too restrictive). */
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
struct bstrap {
- void (*fn)(void);
+ int (*fn)(void);
atomic_t started;
};
-static int bstrap_fn(void *__bstrap)
+static int bstrap_fn(void *bs)
{
- struct bstrap *bstrap = __bstrap;
+ struct bstrap *bstrap = bs;
+ int err;
atomic_inc(&bstrap->started);
- bstrap->fn();
+ err = bstrap->fn();
+ if (err)
+ return err;
while (!kthread_should_stop())
- msleep(1);
+ msleep(20);
return 0;
}
-static int on_all_cpus(void (*fn)(void))
+static int on_all_cpus(int (*fn)(void))
{
int cpu;
@@ -127,12 +133,14 @@ static int on_all_cpus(void (*fn)(void))
return -ENOMEM;
kthread_bind(k, cpu);
wake_up_process(k);
- /* If we call kthread_stop() before the "wake up" has had an
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
* running the function. So poll until it's started before
- * requesting it to stop. */
+ * requesting it to stop.
+ */
while (!atomic_read(&bstrap.started))
- msleep(10);
+ msleep(20);
ret = kthread_stop(k);
if (ret)
return ret;
@@ -172,8 +180,10 @@ struct hp_cpu {
struct list_head handlers;
/* list node for linking us into 'hp_cpu_list' */
struct list_head node;
- /* when repeatedly scanning 'hp_list', each time linking the n'th
- * handlers together, this is used as per-cpu iterator state */
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
struct hp_handler *iterator;
};
@@ -182,7 +192,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -202,6 +212,9 @@ static u32 *frame_ptr;
static dma_addr_t frame_dma;
#endif /* __rtems__ */
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -217,22 +230,28 @@ static inline u32 do_lfsr(u32 prev)
return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
-static void allocate_frame_data(void)
+static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
+
#ifndef __rtems__
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
+ return -EIO;
+ }
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+#else /* __rtems__ */
+ pcfg = qman_get_qm_portal_config(qman_get_affine_portal(0));
+#endif /* __rtems__ */
- if (!pdev)
- panic("platform_device_alloc() failed");
- if (platform_device_add(pdev))
- panic("platform_device_add() failed");
+#ifndef __rtems__
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
- panic("kmalloc() failed");
- frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
- ~(unsigned long)63);
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
#else /* __rtems__ */
frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0);
if (frame_ptr == NULL)
@@ -242,73 +261,96 @@ static void allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
+
#ifndef __rtems__
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
- DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
#endif /* __rtems__ */
+ return 0;
}
static void deallocate_frame_data(void)
{
#ifndef __rtems__
- kfree(__frame_ptr);
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
#endif /* __rtems__ */
}
-static inline void process_frame_data(struct hp_handler *handler,
- const struct qm_fd *fd)
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
{
u32 *p = handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
int loop;
- if (qm_fd_addr_get64(fd) != handler->addr)
- panic("bad frame address");
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ return -EIO;
+ }
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
*p ^= handler->rx_mixer;
- if (*p != lfsr)
- panic("corrupt frame data");
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
*p ^= handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
+ return 0;
}
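/*
 * Editorial sketch, not part of the commit: how the do_lfsr() stream is
 * produced and checked by the handlers above.  A handler's rx_mixer equals
 * the previous handler's tx_mixer (see init_phase2() below), so XORing a
 * received word with rx_mixer recovers the raw LFSR value that
 * process_frame_data() compares against.  HP_FIRST_WORD and HP_NUM_WORDS
 * are defined earlier in this file; the function name is hypothetical.
 */
static int lfsr_mix_sketch(u32 *buf, u32 mixer)
{
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	/* sender: fill with the LFSR stream, masked by its tx mixer */
	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
		buf[loop] = lfsr ^ mixer;
		lfsr = do_lfsr(lfsr);
	}

	/* receiver: the matching rx mixer undoes the mask, word by word */
	lfsr = HP_FIRST_WORD;
	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
		if ((buf[loop] ^ mixer) != lfsr)
			return -EIO;	/* corrupt frame data */
		lfsr = do_lfsr(lfsr);
	}
	return 0;
}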
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
- process_frame_data(handler, &dqrr->fd);
- if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
- panic("qman_enqueue() failed");
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
{
struct hp_handler *handler = (struct hp_handler *)fq;
process_frame_data(handler, &dqrr->fd);
if (++loop_counter < HP_LOOPS) {
- if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
- panic("qman_enqueue() failed");
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
} else {
pr_info("Received final (%dth) frame\n", loop_counter);
wake_up(&queue);
}
+skip:
return qman_cb_dqrr_consume;
}
-static void create_per_cpu_handlers(void)
+static int create_per_cpu_handlers(void)
{
struct hp_handler *handler;
int loop;
- struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+ struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);
hp_cpu->processor_id = smp_processor_id();
spin_lock(&hp_lock);
@@ -318,8 +360,11 @@ static void create_per_cpu_handlers(void)
INIT_LIST_HEAD(&hp_cpu->handlers);
for (loop = 0; loop < HP_PER_CPU; loop++) {
handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
- if (!handler)
- panic("kmem_cache_alloc() failed");
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
handler->processor_id = hp_cpu->processor_id;
#ifndef __rtems__
handler->addr = frame_dma;
@@ -329,31 +374,39 @@ static void create_per_cpu_handlers(void)
handler->frame_ptr = frame_ptr;
list_add_tail(&handler->node, &hp_cpu->handlers);
}
+ return 0;
}
-static void destroy_per_cpu_handlers(void)
+static int destroy_per_cpu_handlers(void)
{
struct list_head *loop, *tmp;
- struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+ struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);
spin_lock(&hp_lock);
list_del(&hp_cpu->node);
spin_unlock(&hp_lock);
list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
- u32 flags;
+ u32 flags = 0;
struct hp_handler *handler = list_entry(loop, struct hp_handler,
node);
- if (qman_retire_fq(&handler->rx, &flags))
- panic("qman_retire_fq(rx) failed");
- BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
- if (qman_oos_fq(&handler->rx))
- panic("qman_oos_fq(rx) failed");
- qman_destroy_fq(&handler->rx, 0);
- qman_destroy_fq(&handler->tx, 0);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
qman_release_fqid(handler->fqid_rx);
list_del(&handler->node);
kmem_cache_free(hp_handler_slab, handler);
}
+ return 0;
}
static inline u8 num_cachelines(u32 offset)
@@ -369,36 +422,59 @@ static inline u8 num_cachelines(u32 offset)
#define STASH_CTX_CL \
num_cachelines(offsetof(struct hp_handler, fqid_rx))
-static void init_handler(void *__handler)
+static int init_handler(void *h)
{
struct qm_mcc_initfq opts;
- struct hp_handler *handler = __handler;
+ struct hp_handler *handler = h;
+ int err;
- BUG_ON(handler->processor_id != smp_processor_id());
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
/* Set up rx */
memset(&handler->rx, 0, sizeof(handler->rx));
if (handler == special_handler)
handler->rx.cb.dqrr = special_dqrr;
else
handler->rx.cb.dqrr = normal_dqrr;
- if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
- panic("qman_create_fq(rx) failed");
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
- opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
- opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
- if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
- QMAN_INITFQ_FLAG_LOCAL, &opts))
- panic("qman_init_fq(rx) failed");
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
/* Set up tx */
memset(&handler->tx, 0, sizeof(handler->tx));
- if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
- &handler->tx))
- panic("qman_create_fq(tx) failed");
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
}
-static void init_phase2(void)
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
{
int loop;
u32 fqid = 0;
@@ -408,7 +484,7 @@ static void init_phase2(void)
for (loop = 0; loop < HP_PER_CPU; loop++) {
list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
- int ret;
+ int err;
if (!loop)
hp_cpu->iterator = list_first_entry(
@@ -421,9 +497,11 @@ static void init_phase2(void)
/* Rx FQID is the previous handler's Tx FQID */
hp_cpu->iterator->fqid_rx = fqid;
/* Allocate new FQID for Tx */
- ret = qman_alloc_fqid(&fqid);
- if (ret)
- panic("qman_alloc_fqid() failed");
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
hp_cpu->iterator->fqid_tx = fqid;
/* Rx mixer is the previous handler's Tx mixer */
hp_cpu->iterator->rx_mixer = lfsr;
@@ -435,16 +513,18 @@ static void init_phase2(void)
/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
- BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
handler->fqid_rx = fqid;
handler->rx_mixer = lfsr;
/* and tag it as our "special" handler */
special_handler = handler;
+ return 0;
}
-static void init_phase3(void)
+static int init_phase3(void)
{
- int loop;
+ int loop, err;
struct hp_cpu *hp_cpu;
for (loop = 0; loop < HP_PER_CPU; loop++) {
@@ -458,45 +538,69 @@ static void init_phase3(void)
hp_cpu->iterator->node.next,
struct hp_handler, node);
preempt_disable();
- if (hp_cpu->processor_id == smp_processor_id())
- init_handler(hp_cpu->iterator);
- else
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
smp_call_function_single(hp_cpu->processor_id,
- init_handler, hp_cpu->iterator, 1);
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
preempt_enable();
}
}
+ return 0;
}
-static void send_first_frame(void *ignore)
+static int send_first_frame(void *ignore)
{
u32 *p = special_handler->frame_ptr;
u32 lfsr = HP_FIRST_WORD;
- int loop;
+ int loop, err;
struct qm_fd fd;
- BUG_ON(special_handler->processor_id != smp_processor_id());
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
memset(&fd, 0, sizeof(fd));
qm_fd_addr_set64(&fd, special_handler->addr);
- fd.format = qm_fd_contig_big;
- fd.length29 = HP_NUM_WORDS * 4;
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
- if (*p != lfsr)
- panic("corrupt frame data");
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
*p ^= special_handler->tx_mixer;
lfsr = do_lfsr(lfsr);
}
pr_info("Sending first frame\n");
- if (qman_enqueue(&special_handler->tx, &fd, 0))
- panic("qman_enqueue() failed");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
}
-void qman_test_stash(void)
+static void send_first_frame_cb(void *ignore)
{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
#ifndef __rtems__
if (cpumask_weight(cpu_online_mask) < 2) {
pr_info("%s(): skip - only 1 CPU\n", __func__);
- return;
+ return 0;
}
#endif /* __rtems__ */
@@ -507,34 +611,57 @@ void qman_test_stash(void)
hp_handler_slab = kmem_cache_create("hp_handler_slab",
sizeof(struct hp_handler), L1_CACHE_BYTES,
SLAB_HWCACHE_ALIGN, NULL);
- if (!hp_handler_slab)
- panic("kmem_cache_create() failed");
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
- allocate_frame_data();
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
/* Init phase 1 */
pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
- if (on_all_cpus(create_per_cpu_handlers))
- panic("on_each_cpu() failed");
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
pr_info("Number of cpus: %d, total of %d handlers\n",
hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
- init_phase2();
+ err = init_phase2();
+ if (err)
+ goto failed;
- init_phase3();
+ err = init_phase3();
+ if (err)
+ goto failed;
preempt_disable();
- if (special_handler->processor_id == smp_processor_id())
- send_first_frame(NULL);
- else
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
smp_call_function_single(special_handler->processor_id,
- send_first_frame, NULL, 1);
+ send_first_frame_cb, NULL, 1);
+ }
preempt_enable();
wait_event(queue, loop_counter == HP_LOOPS);
deallocate_frame_data();
- if (on_all_cpus(destroy_per_cpu_handlers))
- panic("on_each_cpu() failed");
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
kmem_cache_destroy(hp_handler_slab);
pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
}
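The on_all_cpus() changes above keep the original bootstrap protocol: each worker thread bumps an atomic counter as soon as it runs, and the caller polls that counter before calling kthread_stop(), so the stop request cannot arrive before the function has executed. Below is a self-contained sketch of that hand-off for a single CPU; it is not part of the commit, the names are hypothetical, and kthread_create()/kthread_bind() are the standard kernel primitives assumed here:

struct bootstrap_sketch {
	int (*fn)(void);
	atomic_t started;
};

static int bootstrap_thread(void *arg)
{
	struct bootstrap_sketch *bs = arg;

	atomic_inc(&bs->started);	/* tell the parent we are running */
	bs->fn();
	while (!kthread_should_stop())
		msleep(20);
	return 0;
}

static int run_on_cpu_sketch(int cpu, int (*fn)(void))
{
	struct bootstrap_sketch bs = { .fn = fn, .started = ATOMIC_INIT(0) };
	struct task_struct *k = kthread_create(bootstrap_thread, &bs, "sketch");

	if (IS_ERR(k))
		return -ENOMEM;
	kthread_bind(k, cpu);
	wake_up_process(k);
	/* poll until the worker has started; only then is stopping safe */
	while (!atomic_read(&bs.started))
		msleep(20);
	return kthread_stop(k);
}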
diff --git a/linux/drivers/soc/fsl/qbman/qman_utils.c b/linux/drivers/soc/fsl/qbman/qman_utils.c
deleted file mode 100644
index 5b85f037..00000000
--- a/linux/drivers/soc/fsl/qbman/qman_utils.c
+++ /dev/null
@@ -1,309 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "qman_priv.h"
-
-/* --- FQID Pool --- */
-
-struct qman_fqid_pool {
- /* Base and size of the FQID range */
- u32 fqid_base;
- u32 total;
- /* Number of FQIDs currently "allocated" */
- u32 used;
- /* Allocation optimisation. When 'used<total', it is the index of an
- * available FQID. Otherwise there are no available FQIDs, and this
- * will be set when the next deallocation occurs. */
- u32 next;
- /* A bit-field representation of the FQID range. */
- unsigned long *bits;
-};
-
-#define QLONG_BYTES sizeof(unsigned long)
-#define QLONG_BITS (QLONG_BYTES * 8)
-/* Number of 'longs' required for the given number of bits */
-#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
-/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
-#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
-/* And in bits */
-#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
-
-struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
-{
- struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
- unsigned int i;
-
- BUG_ON(!num);
- if (!pool)
- return NULL;
- pool->fqid_base = fqid_start;
- pool->total = num;
- pool->used = 0;
- pool->next = 0;
- pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
- if (!pool->bits) {
- kfree(pool);
- return NULL;
- }
- /* If num is not an even multiple of QLONG_BITS (or even 8, for
- * byte-oriented searching) then we fill the trailing bits with 1, to
- * make them look allocated (permanently). */
- for (i = num + 1; i < QNUM_BITS(num); i++)
- set_bit(i, pool->bits);
- return pool;
-}
-EXPORT_SYMBOL(qman_fqid_pool_create);
-
-int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
-{
- int ret = pool->used;
-
- kfree(pool->bits);
- kfree(pool);
- return ret;
-}
-EXPORT_SYMBOL(qman_fqid_pool_destroy);
-
-int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
-{
- int ret;
-
- if (pool->used == pool->total)
- return -ENOMEM;
- *fqid = pool->fqid_base + pool->next;
- ret = test_and_set_bit(pool->next, pool->bits);
- BUG_ON(ret);
- if (++pool->used == pool->total)
- return 0;
- pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
- if (pool->next >= pool->total)
- pool->next = find_first_zero_bit(pool->bits, pool->total);
- BUG_ON(pool->next >= pool->total);
- return 0;
-}
-EXPORT_SYMBOL(qman_fqid_pool_alloc);
-
-void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
-{
- int ret;
-
- fqid -= pool->fqid_base;
- ret = test_and_clear_bit(fqid, pool->bits);
- BUG_ON(!ret);
- if (pool->used-- == pool->total)
- pool->next = fqid;
-}
-EXPORT_SYMBOL(qman_fqid_pool_free);
-
-u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
-{
- return pool->used;
-}
-EXPORT_SYMBOL(qman_fqid_pool_used);
-
-static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */
-static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */
-static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */
-
-/* FQID allocator front-end */
-
-int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
-{
- return dpaa_resource_new(&fqalloc, result, count, align, partial);
-}
-EXPORT_SYMBOL(qman_alloc_fqid_range);
-
-static int fq_cleanup(u32 fqid)
-{
- return qman_shutdown_fq(fqid) == 0;
-}
-
-void qman_release_fqid_range(u32 fqid, u32 count)
-{
- u32 total_invalid = dpaa_resource_release(&fqalloc,
- fqid, count, fq_cleanup);
-
- if (total_invalid)
- pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
- fqid, fqid + count - 1, count, total_invalid);
-}
-EXPORT_SYMBOL(qman_release_fqid_range);
-
-int qman_reserve_fqid_range(u32 fqid, u32 count)
-{
- return dpaa_resource_reserve(&fqalloc, fqid, count);
-}
-EXPORT_SYMBOL(qman_reserve_fqid_range);
-
-void qman_seed_fqid_range(u32 fqid, u32 count)
-{
- dpaa_resource_seed(&fqalloc, fqid, count);
-}
-EXPORT_SYMBOL(qman_seed_fqid_range);
-
-/* Pool-channel allocator front-end */
-
-int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
-{
- return dpaa_resource_new(&qpalloc, result, count, align, partial);
-}
-EXPORT_SYMBOL(qman_alloc_pool_range);
-
-static int qpool_cleanup(u32 qp)
-{
- /* We query all FQDs starting from
- * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
- * whose destination channel is the pool-channel being released.
- * When a non-OOS FQD is found we attempt to clean it up */
- struct qman_fq fq = {
- .fqid = 1
- };
- int err;
-
- do {
- struct qm_mcr_queryfq_np np;
-
- err = qman_query_fq_np(&fq, &np);
- if (err)
- /* FQID range exceeded, found no problems */
- return 1;
- if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
- struct qm_fqd fqd;
-
- err = qman_query_fq(&fq, &fqd);
- BUG_ON(err);
- if (fqd.dest.channel == qp) {
- /* The channel is the FQ's target, clean it */
- if (qman_shutdown_fq(fq.fqid) != 0)
- /* Couldn't shut down the FQ
- so the pool must be leaked */
- return 0;
- }
- }
- /* Move to the next FQID */
- fq.fqid++;
- } while (1);
-}
-
-void qman_release_pool_range(u32 qp, u32 count)
-{
- u32 total_invalid = dpaa_resource_release(&qpalloc,
- qp, count, qpool_cleanup);
-
- if (total_invalid) {
- /* Pool channels are almost always used individually */
- if (count == 1)
- pr_err("Pool channel 0x%x had %d leaks\n",
- qp, total_invalid);
- else
- pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
- qp, qp + count - 1, count, total_invalid);
- }
-}
-EXPORT_SYMBOL(qman_release_pool_range);
-
-void qman_seed_pool_range(u32 poolid, u32 count)
-{
- dpaa_resource_seed(&qpalloc, poolid, count);
-
-}
-EXPORT_SYMBOL(qman_seed_pool_range);
-
-int qman_reserve_pool_range(u32 poolid, u32 count)
-{
- return dpaa_resource_reserve(&qpalloc, poolid, count);
-}
-EXPORT_SYMBOL(qman_reserve_pool_range);
-
-
-/* CGR ID allocator front-end */
-
-int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
-{
- return dpaa_resource_new(&cgralloc, result, count, align, partial);
-}
-EXPORT_SYMBOL(qman_alloc_cgrid_range);
-
-static int cqr_cleanup(u32 cgrid)
-{
- /* We query all FQDs starting from
- * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
- * whose CGR is the CGR being released.
- */
- struct qman_fq fq = {
- .fqid = 1
- };
- int err;
-
- do {
- struct qm_mcr_queryfq_np np;
-
- err = qman_query_fq_np(&fq, &np);
- if (err)
- /* FQID range exceeded, found no problems */
- return 1;
- if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
- struct qm_fqd fqd;
-
- err = qman_query_fq(&fq, &fqd);
- BUG_ON(err);
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
- (fqd.cgid == cgrid)) {
- pr_err("CRGID 0x%x is being used by FQID 0x%x,"
- " CGR will be leaked\n",
- cgrid, fq.fqid);
- return 1;
- }
- }
- /* Move to the next FQID */
- fq.fqid++;
- } while (1);
-}
-
-void qman_release_cgrid_range(u32 cgrid, u32 count)
-{
- u32 total_invalid = dpaa_resource_release(&cgralloc,
- cgrid, count, cqr_cleanup);
- if (total_invalid)
- pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
- cgrid, cgrid + count - 1, count, total_invalid);
-}
-EXPORT_SYMBOL(qman_release_cgrid_range);
-
-void qman_seed_cgrid_range(u32 cgrid, u32 count)
-{
- dpaa_resource_seed(&cgralloc, cgrid, count);
-
-}
-EXPORT_SYMBOL(qman_seed_cgrid_range);
diff --git a/linux/include/soc/fsl/bman.h b/linux/include/soc/fsl/bman.h
index 16f4efaa..eaaf56df 100644
--- a/linux/include/soc/fsl/bman.h
+++ b/linux/include/soc/fsl/bman.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,436 +31,87 @@
#ifndef __FSL_BMAN_H
#define __FSL_BMAN_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Enable blocking waits */
-#define FSL_DPA_CAN_WAIT 1
-#define FSL_DPA_CAN_WAIT_SYNC 1
-
-/* Last updated for v00.79 of the BG */
-
-/* Portal processing (interrupt) sources */
-#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
-#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
-
-/* This wrapper represents a bit-array for the depletion state of the 64 BMan
- * buffer pools. */
-struct bman_depletion {
- u32 __state[2];
-};
-#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
-#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
-#define __bmdep_word(x) ((x) >> 5)
-#define __bmdep_shift(x) ((x) & 0x1f)
-#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
-static inline void bman_depletion_init(struct bman_depletion *c)
-{
- c->__state[0] = c->__state[1] = 0;
-}
-static inline void bman_depletion_fill(struct bman_depletion *c)
-{
- c->__state[0] = c->__state[1] = ~0;
-}
-static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
-{
- return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
-}
-static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
-{
- c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
-}
-static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
-{
- c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
-}
-
-/* --- BMan data structures (and associated constants) --- */
-
-/* Represents s/w corenet portal mapped data structures */
-struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
-struct bm_mc_command; /* MC (Management Command) command */
-struct bm_mc_result; /* MC result */
-
-/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
- * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
- * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
+/* wrapper for 48-bit buffers */
struct bm_buffer {
union {
struct {
- u8 __reserved1;
- u8 bpid;
- u16 hi; /* High 16-bits of 48-bit address */
- u32 lo; /* Low 32-bits of 48-bit address */
- };
- struct {
- u64 __notaddress:16;
- u64 addr:48;
+ __be16 bpid; /* hi 8-bits reserved */
+ __be16 hi; /* High 16-bits of 48-bit address */
+ __be32 lo; /* Low 32-bits of 48-bit address */
};
+ __be64 data;
};
} __aligned(8);
-static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
-{
- return buf->addr;
-}
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
{
- return (dma_addr_t)buf->addr;
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}
-/* Macro, so we compile better if 'v' isn't always 64-bit */
-#define bm_buffer_set64(buf, v) \
- do { \
- struct bm_buffer *__buf931 = (buf); \
- __buf931->hi = upper_32_bits(v); \
- __buf931->lo = lower_32_bits(v); \
- } while (0)
-
-/* See 1.5.3.5.4: "Release Command" */
-struct bm_rcr_entry {
- union {
- struct {
- u8 __dont_write_directly__verb;
- u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
- u8 __reserved1[62];
- };
- struct bm_buffer bufs[8];
- };
-} __packed;
-#define BM_RCR_VERB_VBIT 0x80
-#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
-#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
-#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
-#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
-/* See 1.5.3.1: "Acquire Command" */
-/* See 1.5.3.2: "Query Command" */
-struct bm_mcc_acquire {
- u8 bpid;
- u8 __reserved1[62];
-} __packed;
-struct bm_mcc_query {
- u8 __reserved2[63];
-} __packed;
-struct bm_mc_command {
- u8 __dont_write_directly__verb;
- union {
- struct bm_mcc_acquire acquire;
- struct bm_mcc_query query;
- };
-} __packed;
-#define BM_MCC_VERB_VBIT 0x80
-#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
-#define BM_MCC_VERB_CMD_ACQUIRE 0x10
-#define BM_MCC_VERB_CMD_QUERY 0x40
-#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
-
-/* See 1.5.3.3: "Acquire Response" */
-/* See 1.5.3.4: "Query Response" */
-struct bm_pool_state {
- u8 __reserved1[32];
- /* "availability state" and "depletion state" */
- struct {
- u8 __reserved1[8];
- /* Access using bman_depletion_***() */
- struct bman_depletion state;
- } as, ds;
-};
-struct bm_mc_result {
- union {
- struct {
- u8 verb;
- u8 __reserved1[63];
- };
- union {
- struct {
- u8 __reserved1;
- u8 bpid;
- u8 __reserved2[62];
- };
- struct bm_buffer bufs[8];
- } acquire;
- struct bm_pool_state query;
- };
-} __packed;
-#define BM_MCR_VERB_VBIT 0x80
-#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
-#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
-#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
-#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
-#define BM_MCR_VERB_CMD_ERR_ECC 0x70
-#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
-/* Determine the "availability state" of pool 'p' from a query result 'r' */
-#define BM_MCR_QUERY_AVAILABILITY(r, p) \
- bman_depletion_get(&r->query.as.state, p)
-/* Determine the "depletion state" of pool 'p' from a query result 'r' */
-#define BM_MCR_QUERY_DEPLETION(r, p) \
- bman_depletion_get(&r->query.ds.state, p)
-
-/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
-
-/* Portal and Buffer Pools */
-
-/* Represents a managed portal */
-struct bman_portal;
-
-/* This object type represents BMan buffer pools. */
-struct bman_pool;
-
-struct bman_portal_config {
- /* This is used for any "core-affine" portals, ie. default portals
- * associated to the corresponding cpu. -1 implies that there is no core
- * affinity configured. */
- int cpu;
- /* portal interrupt line */
- int irq;
-#ifndef __rtems__
- /* Is this portal shared? (If so, it has coarser locking and demuxes
- * processing on behalf of other CPUs.) */
- int is_shared;
-#endif /* __rtems__ */
- /* These are the buffer pool IDs that may be used via this portal. */
- struct bman_depletion mask;
-};
-
-/* This callback type is used when handling pool depletion entry/exit. The
- * 'cb_ctx' value is the opaque value associated with the pool object in
- * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
- * depletion-exit. */
-typedef void (*bman_cb_depletion)(struct bman_portal *bm,
- struct bman_pool *pool, void *cb_ctx, int depleted);
-
-/* This struct specifies parameters for a bman_pool object. */
-struct bman_pool_params {
- /* index of the buffer pool to encapsulate (0-63), ignored if
- * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
- u32 bpid;
- /* bit-mask of BMAN_POOL_FLAG_*** options */
- u32 flags;
- /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
- bman_cb_depletion cb;
- /* opaque user value passed as a parameter to 'cb' */
- void *cb_ctx;
- /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
- * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
- * when run in the control plane (which controls BMan CCSR). This array
- * matches the definition of bm_pool_set(). */
- u32 thresholds[4];
-};
-
-/* Flags to bman_new_pool() */
-#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
-#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
-#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
-#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
-#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
-#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
-
-/* Flags to bman_release() */
-#ifdef FSL_DPA_CAN_WAIT
-#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
-#ifndef __rtems__
-#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
-#endif /* __rtems__ */
-#ifdef FSL_DPA_CAN_WAIT_SYNC
-#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
-#endif
-#endif
-#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
-
-/* Flags to bman_acquire() */
-#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
-
-/* Portal Management */
-
-/**
- * bman_get_portal_config - get portal configuration settings
- *
- * This returns a read-only view of the current cpu's affine portal settings.
- */
-const struct bman_portal_config *bman_get_portal_config(void);
-
-/**
- * bman_irqsource_get - return the portal work that is interrupt-driven
- *
- * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
- * enabled for interrupt handling on the current cpu's affine portal. These
- * sources will trigger the portal interrupt and the interrupt handler (or a
- * tasklet/bottom-half it defers to) will perform the corresponding processing
- * work. The bman_poll_***() functions will only process sources that are not in
- * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
- * this always returns zero.
- */
-u32 bman_irqsource_get(void);
-
-/**
- * bman_irqsource_add - add processing sources to be interrupt-driven
- * @bits: bitmask of BM_PIRQ_**I processing sources
- *
- * Adds processing sources that should be interrupt-driven (rather than
- * processed via bman_poll_***() functions). Returns zero for success, or
- * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
-int bman_irqsource_add(u32 bits);
-
-/**
- * bman_irqsource_remove - remove processing sources from being interrupt-driven
- * @bits: bitmask of BM_PIRQ_**I processing sources
- *
- * Removes processing sources from being interrupt-driven, so that they will
- * instead be processed via bman_poll_***() functions. Returns zero for success,
- * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
-int bman_irqsource_remove(u32 bits);
-
-#ifndef __rtems__
-/**
- * bman_affine_cpus - return a mask of cpus that have affine portals
- */
-const cpumask_t *bman_affine_cpus(void);
-#endif /* __rtems__ */
-
-/**
- * bman_poll_slow - process anything that isn't interrupt-driven.
- *
- * This function does any portal processing that isn't interrupt-driven. If the
- * current CPU is sharing a portal hosted on another CPU, this function will
- * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
- * indicating what interrupt sources were actually processed by the call.
- *
- * NB, unlike the legacy wrapper bman_poll(), this function will
- * deterministically check for the presence of portal processing work and do it,
- * which implies some latency even if there's nothing to do. The bman_poll()
- * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
- * checking for (and doing) portal processing infrequently. Ie. such that
- * qman_poll() and bman_poll() can be called from core-processing loops. Use
- * bman_poll_slow() when you yourself are deciding when to incur the overhead of
- * processing.
- */
-u32 bman_poll_slow(void);
-
-/**
- * bman_poll - process anything that isn't interrupt-driven.
- *
- * Dispatcher logic on a cpu can use this to trigger any maintenance of the
- * affine portal. This function does whatever processing is not triggered by
- * interrupts. This is a legacy wrapper that can be used in core-processing
- * loops but mitigates the performance overhead of portal processing by
- * adaptively bypassing true portal processing most of the time. (Processing is
- * done once every 10 calls if the previous processing revealed that work needed
- * to be done, or once very 1000 calls if the previous processing revealed no
- * work needed doing.) If you wish to control this yourself, call
- * bman_poll_slow() instead, which always checks for portal processing work.
- */
-void bman_poll(void);
-
-/**
- * bman_rcr_is_empty - Determine if portal's RCR is empty
- *
- * For use in situations where a cpu-affine caller needs to determine when all
- * releases for the local portal have been processed by BMan but can't use the
- * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
- * The function forces tracking of RCR consumption (which normally doesn't
- * happen until release processing needs to find space to put new release
- * commands), and returns zero if the ring still has unprocessed entries,
- * non-zero if it is empty.
- */
-int bman_rcr_is_empty(void);
-
-/**
- * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
- * @result: is set by the API to the base BPID of the allocated range
- * @count: the number of BPIDs required
- * @align: required alignment of the allocated range
- * @partial: non-zero if the API can return fewer than @count BPIDs
- *
- * Returns the number of buffer pools allocated, or a negative error code. If
- * @partial is non zero, the allocation request may return a smaller range of
- * BPs than requested (though alignment will be as requested). If @partial is
- * zero, the return value will either be 'count' or negative.
- */
-int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
-static inline int bman_alloc_bpid(u32 *result)
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
- int ret = bman_alloc_bpid_range(result, 1, 0, 0);
-
- return (ret > 0) ? 0 : ret;
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}
-/**
- * bman_release_bpid_range - Release the specified range of buffer pool IDs
- * @bpid: the base BPID of the range to deallocate
- * @count: the number of BPIDs in the range
- *
- * This function can also be used to seed the allocator with ranges of BPIDs
- * that it can subsequently allocate from.
- */
-void bman_release_bpid_range(u32 bpid, u32 count);
-static inline void bman_release_bpid(u32 bpid)
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
{
- bman_release_bpid_range(bpid, 1);
+ buf->hi = cpu_to_be16(upper_32_bits(addr));
+ buf->lo = cpu_to_be32(lower_32_bits(addr));
}
-int bman_reserve_bpid_range(u32 bpid, u32 count);
-static inline int bman_reserve_bpid(u32 bpid)
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
{
- return bman_reserve_bpid_range(bpid, 1);
+ return be16_to_cpu(buf->bpid) & 0xff;
}
-void bman_seed_bpid_range(u32 bpid, u32 count);
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+ buf->bpid = cpu_to_be16(bpid & 0xff);
+}
+/* Managed portal, high-level i/face */
-int bman_shutdown_pool(u32 bpid);
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
-/* Pool management */
+#define BM_POOL_MAX 64 /* max # of buffer pools */
/**
* bman_new_pool - Allocates a Buffer Pool object
- * @params: parameters specifying the buffer pool ID and behaviour
*
- * Creates a pool object for the given @params. A portal and the depletion
- * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
- * is set. NB, the fields from @params are copied into the new pool object, so
- * the structure provided by the caller can be released or reused after the
- * function returns.
+ * Creates a pool object, and returns a reference to it or NULL on error.
*/
-struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+struct bman_pool *bman_new_pool(void);
/**
* bman_free_pool - Deallocates a Buffer Pool object
* @pool: the pool object to release
- *
*/
void bman_free_pool(struct bman_pool *pool);
/**
- * bman_get_params - Returns a pool object's parameters.
+ * bman_get_bpid - Returns a pool object's BPID.
* @pool: the pool object
*
- * The returned pointer refers to state within the pool object so must not be
- * modified and can no longer be read once the pool object is destroyed.
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
*/
-const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+int bman_get_bpid(const struct bman_pool *pool);
/**
* bman_release - Release buffer(s) to the buffer pool
* @pool: the buffer pool object to release to
* @bufs: an array of buffers to release
* @num: the number of buffers in @bufs (1-8)
- * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
*
- * Adds the given buffers to RCR entries. If the portal @p was created with the
- * "COMPACT" flag, then it will be using a compaction algorithm to improve
- * utilisation of RCR. As such, these buffers may join an existing ring entry
- * and/or it may not be issued right away so as to allow future releases to join
- * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
- * behaviour by committing the RCR entry (or entries) right away. If the RCR
- * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
- * is selected, in which case it will sleep waiting for space to become
- * available in RCR. If the function receives a signal before such time (and
- * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
- * it returns zero.
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
*/
-int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
- u32 flags);
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
/**
* bman_acquire - Acquire buffer(s) from a buffer pool
@@ -473,52 +124,6 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
* negative error code if a h/w error or pool starvation was encountered. In
* the latter case, the content of @bufs is undefined.
*/
-int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
- u32 flags);
-
-/**
- * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
- * @pool: the buffer pool object the stockpile belongs
- * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
- *
- * Adds stockpile buffers to RCR entries until the stockpile is empty.
- * The return value will be a negative error code if a h/w error occurred.
- * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
- * -EAGAIN will be returned.
- */
-int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
-
-/**
- * bman_query_pools - Query all buffer pool states
- * @state: storage for the queried availability and depletion states
- */
-int bman_query_pools(struct bm_pool_state *state);
-
-#ifdef CONFIG_FSL_BMAN
-/**
- * bman_query_free_buffers - Query how many free buffers are in buffer pool
- * @pool: the buffer pool object to query
- *
- * Return the number of the free buffers
- */
-u32 bman_query_free_buffers(struct bman_pool *pool);
-
-/**
- * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
- * @pool: the buffer pool object to which the thresholds will be set
- * @thresholds: the new thresholds
- */
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
-#endif
-
-/**
- * The below bman_p_***() variant might be called in a situation that the cpu
- * which the portal affine to is not online yet.
- * @bman_portal specifies which portal the API will use.
-*/
-int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
-#ifdef __cplusplus
-}
-#endif
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
#endif /* __FSL_BMAN_H */
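With the simplified interface above, a pool is created without parameters and a buffer carries its 48-bit address in a single big-endian word. The following round trip through the remaining calls is illustrative only; the function name and the -ENODEV/-EIO codes are not taken from the header:

static int bman_roundtrip_sketch(dma_addr_t addr)
{
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool();			/* NULL on error */
	if (!pool)
		return -ENODEV;

	bm_buffer_set64(&buf, addr);
	bm_buffer_set_bpid(&buf, bman_get_bpid(pool));

	ret = bman_release(pool, &buf, 1);	/* hand the buffer to BMan */
	if (ret)
		goto out;
	ret = bman_acquire(pool, &buf, 1);	/* returns the number acquired */
	if (ret == 1 && bm_buf_addr(&buf) == addr)
		ret = 0;
	else if (ret >= 0)
		ret = -EIO;
out:
	bman_free_pool(pool);
	return ret;
}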
diff --git a/linux/include/soc/fsl/qman.h b/linux/include/soc/fsl/qman.h
index f63feb89..92909dc4 100644
--- a/linux/include/soc/fsl/qman.h
+++ b/linux/include/soc/fsl/qman.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,43 +31,14 @@
#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
#include <linux/bitops.h>
#include <linux/rbtree.h>
-/* Extra lookup is needed on 64 bit machines */
-#if (BITS_PER_LONG == 64)
-#define CONFIG_FSL_QMAN_FQ_LOOKUP 1
-#endif
-
-/* Enable blocking waits */
-#define FSL_DPA_CAN_WAIT 1
-#define FSL_DPA_CAN_WAIT_SYNC 1
-
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
-#define QMAN_CHANNEL_CAAM 0x80
-#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
-#define QMAN_CHANNEL_CAAM_REV3 0x840
-#define QMAN_CHANNEL_PME_REV3 0x860
-#define QMAN_CHANNEL_DCE 0x8a0
extern u16 qm_channel_pool1;
-extern u16 qm_channel_caam;
-extern u16 qm_channel_pme;
-extern u16 qm_channel_dce;
-enum qm_dc_portal {
- qm_dc_portal_fman0 = 0,
- qm_dc_portal_fman1 = 1,
- qm_dc_portal_caam = 2,
- qm_dc_portal_pme = 3,
- qm_dc_portal_rman = 4,
- qm_dc_portal_dce = 5
-};
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
@@ -75,26 +46,13 @@ enum qm_dc_portal {
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
-/* This mask contains all the interrupt sources that need handling except DQRI,
- * ie. that if present should trigger slow-path processing. */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
QM_PIRQ_MRI)
-/* --- Clock speed --- */
-/* A qman driver instance may or may not know the current qman clock speed.
- * However, certain CEETM calculations may not be possible if this is not known.
- * The 'set' function will only succeed (return zero) if the driver did not
- * already know the clock speed. Likewise, the 'get' function will only succeed
- * if the driver does know the clock speed (either because it knew when booting,
- * or was told via 'set'). In cases where software is running on a driver
- * instance that does not know the clock speed (eg. on a hypervised data-plane),
- * and the user can obtain the current qman clock speed by other means (eg. from
- * a message sent from the control-plane), then the 'set' function can be used
- * to enable rate-calculations in a driver where it would otherwise not be
- * possible. */
-int qm_get_clock(u64 *clock_hz);
-int qm_set_clock(u64 clock_hz);
-
/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
/* for n in [1,15] */
@@ -105,40 +63,46 @@ static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
-/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
- * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
- * FQID(n) to fill in the frame queue ID. */
-#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
-#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
-#define QM_VDQCR_EXACT 0x40000000
-#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
-#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
-#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
-#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+/* --- QMan data structures (and associated constants) --- */
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+ u8 cfg8b_w1;
+ u8 bpid; /* Buffer Pool ID */
+ u8 cfg8b_w3;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
+ } __packed;
+ __be64 data;
+ };
+ __be32 cfg; /* format, offset, length / congestion */
+ union {
+ __be32 cmd;
+ __be32 status;
+ };
+} __aligned(8);
-/* ------------------------------------------------------- */
-/* --- QMan data structures (and associated constants) --- */
+#define QM_FD_FORMAT_SG BIT(31)
+#define QM_FD_FORMAT_LONG BIT(30)
+#define QM_FD_FORMAT_COMPOUND BIT(29)
+#define QM_FD_FORMAT_MASK GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT 20
+#define QM_FD_OFF_MASK GENMASK(28, 20)
+#define QM_FD_LEN_MASK GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK GENMASK(28, 0)
-/* Represents s/w corenet portal mapped data structures */
-struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
-struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
-struct qm_mr_entry; /* MR (Message Ring) entries */
-struct qm_mc_command; /* MC (Management Command) command */
-struct qm_mc_result; /* MC result */
-
-/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
-#define QM_FD_FORMAT_SG 0x4
-#define QM_FD_FORMAT_LONG 0x2
-#define QM_FD_FORMAT_COMPOUND 0x1
enum qm_fd_format {
- /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
* scatter-gather table. 'big' implies a 29-bit length with no offset
* field, otherwise length is 20-bit and offset is 9-bit. 'compound'
* implies a s/g-like table, where each entry itself represents a frame
* (contiguous or scatter-gather) and the 29-bit "length" is
* interpreted purely for congestion calculations, ie. a "congestion
- * weight". */
+ * weight".
+ */
qm_fd_contig = 0,
qm_fd_contig_big = QM_FD_FORMAT_LONG,
qm_fd_sg = QM_FD_FORMAT_SG,
@@ -146,173 +110,149 @@ enum qm_fd_format {
qm_fd_compound = QM_FD_FORMAT_COMPOUND
};
-/* Capitalised versions are un-typed but can be used in static expressions */
-#define QM_FD_CONTIG 0
-#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
-#define QM_FD_SG QM_FD_FORMAT_SG
-#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
-#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
-/* See 1.5.1.1: "Frame Descriptor (FD)" */
-struct qm_fd {
- union {
- struct {
- u8 dd:2; /* dynamic debug */
- u8 liodn_offset:6;
- u8 bpid:8; /* Buffer Pool ID */
- u8 eliodn_offset:4;
- u8 __reserved:4;
- u8 addr_hi; /* high 8-bits of 40-bit address */
- u32 addr_lo; /* low 32-bits of 40-bit address */
- };
- struct {
- u64 __notaddress:24;
- /* More efficient address accessor */
- u64 addr:40;
- };
- u64 opaque_addr;
- };
- /* The 'format' field indicates the interpretation of the remaining 29
- * bits of the 32-bit word. For packing reasons, it is duplicated in the
- * other union elements. Note, union'd structs are difficult to use with
- * static initialisation under gcc, in which case use the "opaque" form
- * with one of the macros. */
- union {
- /* For easier/faster copying of this part of the fd (eg. from a
- * DQRR entry to an EQCR entry) copy 'opaque' */
- u32 opaque;
- /* If 'format' is _contig or _sg, 20b length and 9b offset */
- struct {
- enum qm_fd_format format:3;
- u16 offset:9;
- u32 length20:20;
- };
- /* If 'format' is _contig_big or _sg_big, 29b length */
- struct {
- enum qm_fd_format _format1:3;
- u32 length29:29;
- };
- /* If 'format' is _compound, 29b "congestion weight" */
- struct {
- enum qm_fd_format _format2:3;
- u32 cong_weight:29;
- };
- };
- union {
- u32 cmd;
- u32 status;
- };
-} __aligned(8);
-#define QM_FD_DD_NULL 0x00
-#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
- return fd->addr;
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}
-static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
- return (dma_addr_t)fd->addr;
+ fd->addr_hi = upper_32_bits(addr);
+ fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}
-/* Macro, so we compile better if 'v' isn't always 64-bit */
-#define qm_fd_addr_set64(fd, v) \
- do { \
- struct qm_fd *__fd931 = (fd); \
- __fd931->addr = v; \
- } while (0)
-
-/* For static initialisation of FDs (which is complicated by the use of unions
- * in "struct qm_fd"), use the following macros. Note that;
- * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation
- * use-case),
- * - use capitalised QM_FD_*** formats for static initialisation.
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
*/
-#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
- { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
- { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
- { cmd } }
-#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
- { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
- { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
- { cmd } }
-
-/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+ return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+ int off, int len)
+{
+ fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+ ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+ qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+ qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+ fd->data = 0;
+ fd->cfg = 0;
+ fd->cmd = 0;
+}
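
Illustrative sketch (not part of the patch): how a caller might fill a contiguous frame descriptor using only the accessors introduced above; the buffer address, offset and length are hypothetical values supplied by the caller.

static void example_build_contig_fd(struct qm_fd *fd, dma_addr_t buf,
				    int off, int len)
{
	qm_fd_clear_fd(fd);		/* zero data, cfg and cmd */
	qm_fd_addr_set64(fd, buf);	/* 40-bit buffer address */
	qm_fd_set_contig(fd, off, len);	/* format + 9-bit offset + 20-bit length */
}
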
+
+/* Scatter/Gather table entry */
struct qm_sg_entry {
union {
struct {
u8 __reserved1[3];
u8 addr_hi; /* high 8-bits of 40-bit address */
- u32 addr_lo; /* low 32-bits of 40-bit address */
- };
- struct {
- u64 __notaddress:24;
- u64 addr:40;
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
};
+ __be64 data;
};
- u32 extension:1; /* Extension bit */
- u32 final:1; /* Final bit */
- u32 length:30;
+ __be32 cfg; /* E bit, F bit, length */
u8 __reserved2;
u8 bpid;
- u16 __reserved3:3;
- u16 offset:13;
+ __be16 offset; /* 13-bit, _res[13-15] */
} __packed;
+
+#define QM_SG_LEN_MASK GENMASK(29, 0)
+#define QM_SG_OFF_MASK GENMASK(12, 0)
+#define QM_SG_FIN BIT(30)
+#define QM_SG_EXT BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
- return sg->addr;
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}
-static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
- return (dma_addr_t)sg->addr;
+ sg->addr_hi = upper_32_bits(addr);
+ sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}
-/* Macro, so we compile better if 'v' isn't always 64-bit */
-#define qm_sg_entry_set64(sg, v) \
- do { \
- struct qm_sg_entry *__sg931 = (sg); \
- __sg931->addr = v; \
- } while (0)
-
-/* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
- u8 __dont_write_directly__verb;
- u8 dca;
- u16 seqnum;
- u32 orp; /* 24-bit */
- u32 fqid; /* 24-bit */
- u32 tag;
- struct qm_fd fd;
- u8 __reserved3[32];
-} __packed;
-#define QM_EQCR_VERB_VBIT 0x80
-#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
-#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
-#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
-#define QM_EQCR_VERB_COLOUR_GREEN 0x00
-#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
-#define QM_EQCR_VERB_COLOUR_RED 0x10
-#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
-#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
-#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
-#define QM_EQCR_DCA_ENABLE 0x80
-#define QM_EQCR_DCA_PARK 0x40
-#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
-#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
-#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
-#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
-#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
-
-/* See 1.5.8.2: "Frame Dequeue Response" */
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
+
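Illustrative sketch (not part of the patch): building a two-entry scatter/gather table with the helpers above. The memset() is only there to clear the reserved, bpid and offset fields of this hypothetical on-stack table.

static void example_fill_sg_pair(struct qm_sg_entry sgt[2],
				 dma_addr_t addr0, int len0,
				 dma_addr_t addr1, int len1)
{
	memset(sgt, 0, 2 * sizeof(*sgt));	/* clear reserved/bpid/offset */
	qm_sg_entry_set64(&sgt[0], addr0);
	qm_sg_entry_set_len(&sgt[0], len0);	/* E=0, F=0 */
	qm_sg_entry_set64(&sgt[1], addr1);
	qm_sg_entry_set_f(&sgt[1], len1);	/* final entry: F bit set */
}
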
+/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
u8 verb;
u8 stat;
- u16 seqnum; /* 15-bit */
+ __be16 seqnum; /* 15-bit */
u8 tok;
u8 __reserved2[3];
- u32 fqid; /* 24-bit */
- u32 contextB;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
-};
+} __packed;
#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
@@ -323,46 +263,44 @@ struct qm_dqrr_entry {
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
-/* See 1.5.8.3: "ERN Message Response" */
-/* See 1.5.8.4: "FQ State Change Notification" */
-struct qm_mr_entry {
- u8 verb;
- union {
- struct {
- u8 dca;
- u16 seqnum;
- u8 rc; /* Rejection Code */
- u32 orp:24;
- u32 fqid; /* 24-bit */
- u32 tag;
- struct qm_fd fd;
- } __packed ern;
- struct {
- u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
- u8 __reserved1:3;
- enum qm_dc_portal portal:3;
- u16 __reserved2;
- u8 rc; /* Rejection Code */
- u32 __reserved3:24;
- u32 fqid; /* 24-bit */
- u32 tag;
- struct qm_fd fd;
- } __packed dcern;
- struct {
- u8 fqs; /* Frame Queue Status */
- u8 __reserved1[6];
- u32 fqid; /* 24-bit */
- u32 contextB;
- u8 __reserved2[16];
- } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+/* 'fqid' is a 24-bit field in every h/w descriptor */
+#define QM_FQID_MASK GENMASK(23, 0)
+#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+ struct {
+ u8 verb;
+ u8 __reserved[63];
};
- u8 __reserved2[32];
-} __packed;
+ struct {
+ u8 verb;
+ u8 dca;
+ __be16 seqnum;
+ u8 rc; /* Rej Code: 8-bit */
+ u8 __reserved[3];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
+ struct qm_fd fd;
+ u8 __reserved1[32];
+ } __packed ern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
+ u8 __reserved2[48];
+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+};
#define QM_MR_VERB_VBIT 0x80
-/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
- * originating from direct-connect portals ("dcern") use 0x20 as a verb which
- * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
- * the other MR types by noting if the 0x20 bit is unset. */
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
@@ -380,146 +318,185 @@ struct qm_mr_entry {
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
-#define QM_MR_DCERN_COLOUR_GREEN 0x00
-#define QM_MR_DCERN_COLOUR_YELLOW 0x01
-#define QM_MR_DCERN_COLOUR_RED 0x02
-#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
-/* An identical structure of FQD fields is present in the "Init FQ" command and
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
* the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
* Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
* latter has two inlines to assist with converting to/from the mant+exp
- * representation. */
+ * representation.
+ */
struct qm_fqd_stashing {
/* See QM_STASHING_EXCL_<...> */
u8 exclusive;
- u8 __reserved1:2;
/* Numbers of cachelines */
- u8 annotation_cl:2;
- u8 data_cl:2;
- u8 context_cl:2;
-} __packed;
-struct qm_fqd_taildrop {
- u16 __reserved1:3;
- u16 mant:8;
- u16 exp:5;
-} __packed;
+ u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
struct qm_fqd_oac {
- /* See QM_OAC_<...> */
- u8 oac:2; /* "Overhead Accounting Control" */
- u8 __reserved1:6;
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+ u8 oac; /* oac[6-7], _res[0-5] */
/* Two's-complement value (-128 to +127) */
- signed char oal; /* "Overhead Accounting Length" */
-} __packed;
+ s8 oal; /* "Overhead Accounting Length" */
+};
+
struct qm_fqd {
- union {
- u8 orpc;
- struct {
- u8 __reserved1:2;
- u8 orprws:3;
- u8 oa:1;
- u8 olws:2;
- } __packed;
- };
+ /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+ u8 orpc;
u8 cgid;
- u16 fq_ctrl; /* See QM_FQCTRL_<...> */
- union {
- u16 dest_wq;
- struct {
- u16 channel:13; /* qm_channel */
- u16 wq:3;
- } __packed dest;
- };
- u16 __reserved2:1;
- u16 ics_cred:15;
- /* For "Initialize Frame Queue" commands, the write-enable mask
+ __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ __be16 dest_wq; /* channel[3-15], wq[0-2] */
+ __be16 ics_cred; /* 15-bit */
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
* determines whether 'td' or 'oac_init' is observed. For query
* commands, this field is always 'td', and 'oac_query' (below) reflects
- * the Overhead ACcounting values. */
+ * the Overhead ACcounting values.
+ */
union {
- struct qm_fqd_taildrop td;
+ __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
struct qm_fqd_oac oac_init;
};
- u32 context_b;
+ __be32 context_b;
union {
/* Treat it as 64-bit opaque */
- u64 opaque;
+ __be64 opaque;
struct {
- u32 hi;
- u32 lo;
+ __be32 hi;
+ __be32 lo;
};
/* Treat it as s/w portal stashing config */
- /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+ /* see "FQD Context_A field used for [...]" */
struct {
struct qm_fqd_stashing stashing;
- /* 48-bit address of FQ context to
- * stash, must be cacheline-aligned */
- u16 context_hi;
- u32 context_lo;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ __be16 context_hi;
+ __be32 context_lo;
} __packed;
} context_a;
struct qm_fqd_oac oac_query;
} __packed;
+
+#define QM_FQD_CHAN_OFF 3
+#define QM_FQD_WQ_MASK GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF 5
+#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
+#define QM_FQD_TD_MAX 0xe0000000
+#define QM_FQD_TD_MANT_MAX 0xff
+#define QM_FQD_OAC_OFF 6
+#define QM_FQD_AS_OFF 4
+#define QM_FQD_DS_OFF 2
+#define QM_FQD_XS_MASK 0x3
+
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
- return ((u64)fqd->context_a.context_hi << 32) |
- (u64)fqd->context_a.context_lo;
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}
+
static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
- return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}
+
static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
- return ((u64)fqd->context_a.hi << 32) |
- (u64)fqd->context_a.lo;
+ return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}
-/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
-#define qm_fqd_stashing_set64(fqd, v) \
- do { \
- struct qm_fqd *__fqd931 = (fqd); \
- __fqd931->context_a.context_hi = upper_32_bits(v); \
- __fqd931->context_a.context_lo = lower_32_bits(v); \
- } while (0)
-#define qm_fqd_context_a_set64(fqd, v) \
- do { \
- struct qm_fqd *__fqd931 = (fqd); \
- __fqd931->context_a.hi = upper_32_bits(v); \
- __fqd931->context_a.lo = lower_32_bits(v); \
- } while (0)
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
+ fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
/* convert a threshold value into mant+exp representation */
-static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
- int roundup)
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+ int roundup)
{
u32 e = 0;
- int oddbit = 0;
+ int td, oddbit = 0;
- if (val > 0xe0000000)
+ if (val > QM_FQD_TD_MAX)
return -ERANGE;
- while (val > 0xff) {
+
+ while (val > QM_FQD_TD_MANT_MAX) {
oddbit = val & 1;
val >>= 1;
e++;
if (roundup && oddbit)
val++;
}
- td->exp = e;
- td->mant = val;
+
+ td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+ td |= (e & QM_FQD_TD_EXP_MASK);
+ fqd->td = cpu_to_be16(td);
return 0;
}
/* and the other direction */
-static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+ int td = be16_to_cpu(fqd->td);
+
+ return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+ << (td & QM_FQD_TD_EXP_MASK);
+}
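
Illustrative sketch (not part of the patch): a round trip through the mant+exp taildrop converters. The value 196608 equals 192 << 10, so it survives the conversion exactly; values with low-order bits set would be rounded per the 'roundup' argument.

static int example_fqd_taildrop(struct qm_fqd *fqd)
{
	int err = qm_fqd_set_taildrop(fqd, 196608, 1);

	if (err)
		return err;
	/* 196608 == 192 << 10, so mant=192/exp=10 reproduces it exactly */
	return qm_fqd_get_taildrop(fqd) == 196608 ? 0 : -ERANGE;
}
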
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+ struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+ st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+ ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+ (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+ return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+ fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+ fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+ fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+ (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+ return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
- return (u32)td->mant << td->exp;
+ return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}
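
Illustrative sketch (not part of the patch): configuring the destination channel/work queue and context-A stashing of an FQD with the setters above; the channel and context address are hypothetical caller-supplied values.

static void example_fqd_dest_and_stash(struct qm_fqd *fqd, u16 channel,
				       dma_addr_t ctx)
{
	qm_fqd_set_destwq(fqd, channel, 3);	/* target channel, work queue 3 */
	qm_fqd_set_stashing(fqd, 1, 1, 1);	/* 1 cacheline each of a/d/c */
	qm_fqd_stashing_set64(fqd, ctx);	/* 48-bit, cacheline-aligned */
}
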
-/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
+/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
-#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
@@ -528,19 +505,19 @@ static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
-/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01
-/* See 1.5.5.3: "Intra Class Scheduling" */
+/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
-/* See 1.5.8.4: "FQ State Change Notification" */
-/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
* and associated commands/responses. The WRED parameters are calculated from
* these fields as follows;
* MaxTH = MA * (2 ^ Mn)
@@ -548,31 +525,25 @@ static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
* MaxP = 4 * (Pn + 1)
*/
struct qm_cgr_wr_parm {
- union {
- u32 word;
- struct {
- u32 MA:8;
- u32 Mn:5;
- u32 SA:7; /* must be between 64-127 */
- u32 Sn:6;
- u32 Pn:6;
- } __packed;
- };
-} __packed;
-/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+ __be32 word;
+};
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
* management commands, this is padded to a 16-bit structure field, so that's
* how we represent it here. The congestion state threshold is calculated from
* these fields as follows;
* CS threshold = TA * (2 ^ Tn)
*/
struct qm_cgr_cs_thres {
- u16 __reserved:3;
- u16 TA:8;
- u16 Tn:5;
-} __packed;
-/* This identical structure of CGR fields is present in the "Init/Modify CGR"
+ /* _res[13-15], TA[5-12], Tn[0-4] */
+ __be16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
* commands and the "Query CGR" result. It's suctioned out here into its own
- * struct. */
+ * struct.
+ */
struct __qm_mc_cgr {
struct qm_cgr_wr_parm wr_parm_g;
struct qm_cgr_wr_parm wr_parm_y;
@@ -583,10 +554,10 @@ struct __qm_mc_cgr {
u8 cscn_en; /* boolean, use QM_CGR_EN */
union {
struct {
- u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
- u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+ __be16 cscn_targ_dcp_low;
};
- u32 cscn_targ; /* use QM_CGR_TARG_* */
+ __be32 cscn_targ; /* use QM_CGR_TARG_* */
};
u8 cstd_en; /* boolean, use QM_CGR_EN */
u8 cs; /* boolean, only used in query response */
@@ -602,8 +573,11 @@ struct __qm_mc_cgr {
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
- return (u64)th->TA << th->Tn;
+ int thres = be16_to_cpu(th->word);
+
+ return ((thres >> 5) & 0xff) << (thres & 0x1f);
}
+
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
int roundup)
{
@@ -617,116 +591,29 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
if (roundup && oddbit)
val++;
}
- th->Tn = e;
- th->TA = val;
+ th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
return 0;
}
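
Illustrative sketch (not part of the patch): a round trip through the CS-threshold converters on a standalone qm_cgr_cs_thres. 65536 equals 128 << 9, so the TA/Tn encoding reproduces it exactly.

static int example_cgr_cs_thres(struct qm_cgr_cs_thres *th)
{
	int err = qm_cgr_cs_thres_set64(th, 65536, 1);

	if (err)
		return err;
	/* 65536 == 128 << 9, so TA=128/Tn=9 reproduces it exactly */
	return qm_cgr_cs_thres_get64(th) == 65536 ? 0 : -ERANGE;
}
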
-/* See 1.5.8.5.1: "Initialize FQ" */
-/* See 1.5.8.5.2: "Query FQ" */
-/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
-/* See 1.5.8.5.4: "Alter FQ State Commands " */
-/* See 1.5.8.6.1: "Initialize/Modify CGR" */
-/* See 1.5.8.6.2: "CGR Test Write" */
-/* See 1.5.8.6.3: "Query CGR" */
-/* See 1.5.8.6.4: "Query Congestion Group State" */
+/* "Initialize FQ" */
struct qm_mcc_initfq {
- u8 __reserved1;
- u16 we_mask; /* Write Enable Mask */
- u32 fqid; /* 24-bit */
- u16 count; /* Initialises 'count+1' FQDs */
+ u8 __reserved1[2];
+ __be16 we_mask; /* Write Enable Mask */
+ __be32 fqid; /* 24-bit */
+ __be16 count; /* Initialises 'count+1' FQDs */
struct qm_fqd fqd; /* the FQD fields go here */
- u8 __reserved3[30];
-} __packed;
-struct qm_mcc_queryfq {
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
-struct qm_mcc_queryfq_np {
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
-struct qm_mcc_alterfq {
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2;
- u8 count; /* number of consecutive FQID */
- u8 __reserved3[10];
- u32 context_b; /* frame queue context b */
- u8 __reserved4[40];
+ u8 __reserved2[30];
} __packed;
+/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
- u8 __reserved1;
- u16 we_mask; /* Write Enable Mask */
+ u8 __reserve1[2];
+ __be16 we_mask; /* Write Enable Mask */
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[2];
u8 cgid;
- u8 __reserved4[32];
-} __packed;
-struct qm_mcc_cgrtestwrite {
- u8 __reserved1[2];
- u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
- u8 __reserved2[23];
- u8 cgid;
u8 __reserved3[32];
} __packed;
-struct qm_mcc_querycgr {
- u8 __reserved1[30];
- u8 cgid;
- u8 __reserved2[32];
-} __packed;
-struct qm_mcc_querycongestion {
- u8 __reserved[63];
-} __packed;
-struct qm_mcc_querywq {
- u8 __reserved;
- /* select channel if verb != QUERYWQ_DEDICATED */
- union {
- u16 channel_wq; /* ignores wq (3 lsbits) */
- struct {
- u16 id:13; /* qm_channel */
- u16 __reserved1:3;
- } __packed channel;
- };
- u8 __reserved2[60];
-} __packed;
-struct qm_mc_command {
- u8 __dont_write_directly__verb;
- union {
- struct qm_mcc_initfq initfq;
- struct qm_mcc_queryfq queryfq;
- struct qm_mcc_queryfq_np queryfq_np;
- struct qm_mcc_alterfq alterfq;
- struct qm_mcc_initcgr initcgr;
- struct qm_mcc_cgrtestwrite cgrtestwrite;
- struct qm_mcc_querycgr querycgr;
- struct qm_mcc_querycongestion querycongestion;
- struct qm_mcc_querywq querywq;
- };
-} __packed;
-#define QM_MCC_VERB_VBIT 0x80
-#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
-#define QM_MCC_VERB_INITFQ_PARKED 0x40
-#define QM_MCC_VERB_INITFQ_SCHED 0x41
-#define QM_MCC_VERB_QUERYFQ 0x44
-#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
-#define QM_MCC_VERB_QUERYWQ 0x46
-#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
-#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
-#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
-#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
-#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
-#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
-#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
-#define QM_MCC_VERB_INITCGR 0x50
-#define QM_MCC_VERB_MODIFYCGR 0x51
-#define QM_MCC_VERB_CGRTESTWRITE 0x52
-#define QM_MCC_VERB_QUERYCGR 0x58
-#define QM_MCC_VERB_QUERYCONGESTION 0x59
/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
@@ -752,293 +639,45 @@ struct qm_mc_command {
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001
-/* See 1.5.8.5.1: "Initialize FQ" */
-/* See 1.5.8.5.2: "Query FQ" */
-/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
-/* See 1.5.8.5.4: "Alter FQ State Commands " */
-/* See 1.5.8.6.1: "Initialize/Modify CGR" */
-/* See 1.5.8.6.2: "CGR Test Write" */
-/* See 1.5.8.6.3: "Query CGR" */
-/* See 1.5.8.6.4: "Query Congestion Group State" */
-struct qm_mcr_initfq {
- u8 __reserved1[62];
-} __packed;
-struct qm_mcr_queryfq {
- u8 __reserved1[8];
- struct qm_fqd fqd; /* the FQD fields are here */
- u8 __reserved2[30];
-} __packed;
-struct qm_mcr_queryfq_np {
- u8 __reserved1;
- u8 state; /* QM_MCR_NP_STATE_*** */
- u8 __reserved2;
- u32 fqd_link:24;
- u16 __reserved3:2;
- u16 odp_seq:14;
- u16 __reserved4:2;
- u16 orp_nesn:14;
- u16 __reserved5:1;
- u16 orp_ea_hseq:15;
- u16 __reserved6:1;
- u16 orp_ea_tseq:15;
- u8 __reserved7;
- u32 orp_ea_hptr:24;
- u8 __reserved8;
- u32 orp_ea_tptr:24;
- u8 __reserved9;
- u32 pfdr_hptr:24;
- u8 __reserved10;
- u32 pfdr_tptr:24;
- u8 __reserved11[5];
- u8 __reserved12:7;
- u8 is:1;
- u16 ics_surp;
- u32 byte_cnt;
- u8 __reserved13;
- u32 frm_cnt:24;
- u32 __reserved14;
- u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
- u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
- u16 __reserved15;
- u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
- u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
- u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
-} __packed;
-struct qm_mcr_alterfq {
- u8 fqs; /* Frame Queue Status */
- u8 __reserved1[61];
-} __packed;
-struct qm_mcr_initcgr {
- u8 __reserved1[62];
-} __packed;
-struct qm_mcr_cgrtestwrite {
- u16 __reserved1;
- struct __qm_mc_cgr cgr; /* CGR fields */
- u8 __reserved2[3];
- u32 __reserved3:24;
- u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
- u32 __reserved4:24;
- u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- u16 lgt; /* Last Group Tick */
- u16 wr_prob_g;
- u16 wr_prob_y;
- u16 wr_prob_r;
- u8 __reserved5[8];
-} __packed;
-struct qm_mcr_querycgr {
- u16 __reserved1;
- struct __qm_mc_cgr cgr; /* CGR fields */
- u8 __reserved2[3];
- u32 __reserved3:24;
- u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
- u32 __reserved4:24;
- u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- union {
- u32 cscn_targ_swp[4];
- u8 __reserved5[16];
- };
-} __packed;
-static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
-{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
-}
-static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
-{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
-}
-static inline u64 qm_mcr_cgrtestwrite_i_get64(
- const struct qm_mcr_cgrtestwrite *q)
-{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
-}
-static inline u64 qm_mcr_cgrtestwrite_a_get64(
- const struct qm_mcr_cgrtestwrite *q)
-{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
-}
-/* Macro, so we compile better if 'v' isn't always 64-bit */
-#define qm_mcr_querycgr_i_set64(q, v) \
- do { \
- struct qm_mcr_querycgr *__q931 = (fd); \
- __q931->i_bcnt_hi = upper_32_bits(v); \
- __q931->i_bcnt_lo = lower_32_bits(v); \
- } while (0)
-#define qm_mcr_querycgr_a_set64(q, v) \
- do { \
- struct qm_mcr_querycgr *__q931 = (fd); \
- __q931->a_bcnt_hi = upper_32_bits(v); \
- __q931->a_bcnt_lo = lower_32_bits(v); \
- } while (0)
-struct __qm_mcr_querycongestion {
- u32 __state[8];
-};
-struct qm_mcr_querycongestion {
- u8 __reserved[30];
- /* Access this struct using QM_MCR_QUERYCONGESTION() */
- struct __qm_mcr_querycongestion state;
-} __packed;
-struct qm_mcr_querywq {
- union {
- u16 channel_wq; /* ignores wq (3 lsbits) */
- struct {
- u16 id:13; /* qm_channel */
- u16 __reserved:3;
- } __packed channel;
- };
- u8 __reserved[28];
- u32 wq_len[8];
-} __packed;
-
-struct qm_mc_result {
- u8 verb;
- u8 result;
- union {
- struct qm_mcr_initfq initfq;
- struct qm_mcr_queryfq queryfq;
- struct qm_mcr_queryfq_np queryfq_np;
- struct qm_mcr_alterfq alterfq;
- struct qm_mcr_initcgr initcgr;
- struct qm_mcr_cgrtestwrite cgrtestwrite;
- struct qm_mcr_querycgr querycgr;
- struct qm_mcr_querycongestion querycongestion;
- struct qm_mcr_querywq querywq;
- };
-} __packed;
-
-#define QM_MCR_VERB_RRID 0x80
-#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
-#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
-#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
-#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
-#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
-#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
-#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
-#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
-#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
-#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
-#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
-#define QM_MCR_RESULT_NULL 0x00
-#define QM_MCR_RESULT_OK 0xf0
-#define QM_MCR_RESULT_ERR_FQID 0xf1
-#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
-#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
-#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
-#define QM_MCR_RESULT_PENDING 0xf8
-#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
-#define QM_MCR_NP_STATE_FE 0x10
-#define QM_MCR_NP_STATE_R 0x08
-#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
-#define QM_MCR_NP_STATE_OOS 0x00
-#define QM_MCR_NP_STATE_RETIRED 0x01
-#define QM_MCR_NP_STATE_TEN_SCHED 0x02
-#define QM_MCR_NP_STATE_TRU_SCHED 0x03
-#define QM_MCR_NP_STATE_PARKED 0x04
-#define QM_MCR_NP_STATE_ACTIVE 0x05
-#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
-#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
-#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
-#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
-#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
-#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
-#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
-/* This extracts the state for congestion group 'n' from a query response.
- * Eg.
- * u8 cgr = [...];
- * struct qm_mc_result *res = [...];
- * printf("congestion group %d congestion state: %d\n", cgr,
- * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
- */
-#define __CGR_WORD(num) (num >> 5)
-#define __CGR_SHIFT(num) (num & 0x1f)
-#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
-static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
- u8 cgr)
-{
- return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
-}
-
-
-/*********************/
-/* Utility interface */
-/*********************/
-
-/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
- * spinlock them yourself if needed. */
-struct qman_fqid_pool;
-
-/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
- * always succeeds, but returns non-zero if there were "leaked" FQID
- * allocations. */
-struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
-int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
-/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
-int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
-void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
-u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
-
-/*******************************************************************/
-/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
-/*******************************************************************/
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
/* Portal and Frame Queues */
- /* ----------------------- */
/* Represents a managed portal */
struct qman_portal;
-/* This object type represents QMan frame queue descriptors (FQD), it is
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
* cacheline-aligned, and initialised by qman_create_fq(). The structure is
- * defined further down. */
+ * defined further down.
+ */
struct qman_fq;
-/* This object type represents a QMan congestion group, it is defined further
- * down. */
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
struct qman_cgr;
-struct qman_portal_config {
- /* If the caller enables DQRR stashing (and thus wishes to operate the
- * portal from only one cpu), this is the logical CPU that the portal
- * will stash to. Whether stashing is enabled or not, this setting is
- * also used for any "core-affine" portals, ie. default portals
- * associated to the corresponding cpu. -1 implies that there is no core
- * affinity configured. */
- int cpu;
- /* portal interrupt line */
- int irq;
-#ifndef __rtems__
- /* Is this portal shared? (If so, it has coarser locking and demuxes
- * processing on behalf of other CPUs.) */
- int is_shared;
-#endif /* __rtems__ */
- /* The portal's dedicated channel id, use this value for initialising
- * frame queues to target this portal when scheduled. */
- u16 channel;
- /* A mask of which pool channels this portal has dequeue access to
- * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
- u32 pools;
-};
-
-/* This enum, and the callback type that returns it, are used when handling
+/*
+ * This enum, and the callback type that returns it, are used when handling
* dequeued frames via DQRR. Note that for "null" callbacks registered with the
- * portal object (for handling dequeues that do not demux because contextB is
- * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
+ * portal object (for handling dequeues that do not demux because context_b is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
enum qman_cb_dqrr_result {
/* DQRR entry can be consumed */
qman_cb_dqrr_consume,
/* Like _consume, but requests parking - FQ must be held-active */
qman_cb_dqrr_park,
- /* Does not consume, for DCA mode only. This allows out-of-order
- * consumes by explicit calls to qman_dca() and/or the use of implicit
- * DCA via EQCR entries. */
+ /* Does not consume, for DCA mode only. */
qman_cb_dqrr_defer,
- /* Stop processing without consuming this ring entry. Exits the current
- * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
- * interrupt handler, the callback would typically call
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
* qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
- * otherwise the interrupt will reassert immediately. */
+ * otherwise the interrupt will reassert immediately.
+ */
qman_cb_dqrr_stop,
/* Like qman_cb_dqrr_stop, but consumes the current entry. */
qman_cb_dqrr_consume_stop
@@ -1047,16 +686,15 @@ typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr);
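
Illustrative sketch (not part of the patch): the shape of a DQRR callback matching the typedef above. It pulls the frame descriptor out of the ring entry and returns qman_cb_dqrr_consume so the entry can be recycled; what is done with the frame is left to the hypothetical consumer.

static enum qman_cb_dqrr_result example_rx_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	const struct qm_fd *fd = &dq->fd;

	/* hand qm_fd_addr(fd) / qm_fd_get_length(fd) to the consumer here */
	(void)fd;
	return qman_cb_dqrr_consume;	/* the DQRR entry may now be recycled */
}
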
-/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
- * are always consumed after the callback returns. */
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
- const struct qm_mr_entry *msg);
-
-/* This callback type is used when handling DCP ERNs */
-typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
- const struct qm_mr_entry *msg);
+ const union qm_mr_entry *msg);
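
Illustrative sketch (not part of the patch): an ERN handler matching the qman_cb_mr typedef, reading the rejected frame and rejection code from the 'ern' view of the message-ring entry defined earlier in this header.

static void example_ern_cb(struct qman_portal *portal, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	u32 fqid = qm_fqid_get(&msg->ern);	/* 24-bit FQID of the reject */
	u8 rc = msg->ern.rc;			/* rejection code */

	/* return msg->ern.fd's buffer to its pool, log fqid/rc, etc. */
	(void)fqid;
	(void)rc;
}
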
-/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
* held-active + held-suspended are just "sched". Things like "retired" will not
* be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
* then, to indicate it's completing and to gate attempts to retry the retire
@@ -1065,7 +703,8 @@ typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
* index rather than the FQ that ring entry corresponds to), so repeated park
* commands are allowed (if you're silly enough to try) but won't change FQ
* state, and the resulting park notifications move FQs from "sched" to
- * "parked". */
+ * "parked".
+ */
enum qman_fq_state {
qman_fq_state_oos,
qman_fq_state_parked,
@@ -1073,7 +712,15 @@ enum qman_fq_state {
qman_fq_state_retired
};
-/* Frame queue objects (struct qman_fq) are stored within memory passed to
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
* qman_create_fq(), as this allows stashing of caller-provided demux callback
* pointers at no extra cost to stashing of (driver-internal) FQ state. If the
* caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
@@ -1108,24 +755,23 @@ struct qman_fq_cb {
struct qman_fq {
/* Caller of qman_create_fq() provides these demux callbacks */
struct qman_fq_cb cb;
- /* These are internal to the driver, don't touch. In particular, they
+ /*
+ * These are internal to the driver, don't touch. In particular, they
* may change, be removed, or extended (so you shouldn't rely on
- * sizeof(qman_fq) being a constant). */
- spinlock_t fqlock;
- u32 fqid;
- volatile unsigned long flags;
+ * sizeof(qman_fq) being a constant).
+ */
+ u32 fqid, idx;
+ unsigned long flags;
enum qman_fq_state state;
int cgr_groupid;
- struct rb_node node;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- u32 key;
-#endif
};
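
Illustrative sketch (not part of the patch): the embedding pattern the comment above describes. A caller wraps struct qman_fq in its own per-FQ structure so that the extra state is stashed along with the driver-internal fields, and recovers it in the demux callback with container_of(); the structure and field names here are hypothetical.

struct example_rx_fq {
	struct qman_fq fq;	/* driver-owned part, stashed with the rest */
	void *priv;		/* caller's per-FQ state */
};

static enum qman_cb_dqrr_result example_demux(struct qman_portal *p,
					      struct qman_fq *fq,
					      const struct qm_dqrr_entry *dq)
{
	struct example_rx_fq *rxfq = container_of(fq, struct example_rx_fq, fq);

	/* rxfq->priv is reachable without any extra lookup */
	(void)rxfq;
	return qman_cb_dqrr_consume;
}
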
-/* This callback type is used when handling congestion group entry/exit.
- * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
- struct qman_cgr *cgr, int congested);
+ struct qman_cgr *cgr, int congested);
struct qman_cgr {
/* Set these prior to qman_create_cgr() */
@@ -1140,111 +786,30 @@ struct qman_cgr {
#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
-#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
-#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
-/* Flags to qman_destroy_fq() */
-#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
-
-/* Flags from qman_fq_state() */
-#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
-#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
-#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
-#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
-#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
-#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
-
/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
-/* Flags to qman_volatile_dequeue() */
-#ifdef FSL_DPA_CAN_WAIT
-#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
-#ifndef __rtems__
-#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
-#endif /* __rtems__ */
-#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
-#endif
-
-/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
- * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
- * any change here should be audited in PME.) */
-#ifdef FSL_DPA_CAN_WAIT
-#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
-#ifndef __rtems__
-#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
-#endif /* __rtems__ */
-#ifdef FSL_DPA_CAN_WAIT_SYNC
-#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
-#endif
-#endif
-#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
-#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
-#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
-#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
- (((u32)(p) << 2) & 0x00000f00)
-#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
-#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
-#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
-#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
-/* For the ORP-specific qman_enqueue_orp() variant;
- * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
- * of a frame. */
-#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
-/* - this flag performs no enqueue but fills in an ORP sequence number that
- * would otherwise block it (eg. if a frame has been dropped). */
-#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
-/* - this flag performs no enqueue but advances NESN to the given sequence
- * number. */
-#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
-
-/* Flags to qman_modify_cgr() */
-#define QMAN_CGR_FLAG_USE_INIT 0x00000001
-#define QMAN_CGR_MODE_FRAME 0x00000001
-
/* Portal Management */
- /* ----------------- */
/**
- * qman_get_portal_config - get portal configuration settings
- *
- * This returns a read-only view of the current cpu's affine portal settings.
- */
-const struct qman_portal_config *qman_get_portal_config(void);
-
-/**
- * qman_irqsource_get - return the portal work that is interrupt-driven
- *
- * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
- * enabled for interrupt handling on the current cpu's affine portal. These
- * sources will trigger the portal interrupt and the interrupt handler (or a
- * tasklet/bottom-half it defers to) will perform the corresponding processing
- * work. The qman_poll_***() functions will only process sources that are not in
- * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
- * this always returns zero.
- */
-u32 qman_irqsource_get(void);
-
-/**
- * qman_irqsource_add - add processing sources to be interrupt-driven
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
* @bits: bitmask of QM_PIRQ_**I processing sources
*
* Adds processing sources that should be interrupt-driven (rather than
- * processed via qman_poll_***() functions). Returns zero for success, or
- * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ * processed via qman_poll_***() functions).
*/
-int qman_irqsource_add(u32 bits);
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
/**
- * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
* @bits: bitmask of QM_PIRQ_**I processing sources
*
* Removes processing sources from being interrupt-driven, so that they will
- * instead be processed via qman_poll_***() functions. Returns zero for success,
- * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ * instead be processed via qman_poll_***() functions.
*/
-int qman_irqsource_remove(u32 bits);
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
#ifndef __rtems__
/**
@@ -1266,145 +831,29 @@ u16 qman_affine_channel(int cpu);
/**
* qman_get_affine_portal - return the portal pointer affine to cpu
* @cpu: the cpu whose affine portal is the subject of the query
- *
*/
-void *qman_get_affine_portal(int cpu);
+struct qman_portal *qman_get_affine_portal(int cpu);
/**
- * qman_poll_dqrr - process DQRR (fast-path) entries
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
* @limit: the maximum number of DQRR entries to process
*
* Use of this function requires that DQRR processing not be interrupt-driven.
- * Ie. the value returned by qman_irqsource_get() should not include
- * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
- * this function will return -EINVAL, otherwise the return value is >=0 and
- * represents the number of DQRR entries processed.
- */
-int qman_poll_dqrr(unsigned int limit);
-
-/**
- * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
- *
- * This function does any portal processing that isn't interrupt-driven. If the
- * current CPU is sharing a portal hosted on another CPU, this function will
- * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
- * indicating what interrupt sources were actually processed by the call.
- */
-u32 qman_poll_slow(void);
-
-/**
- * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
- *
- * Dispatcher logic on a cpu can use this to trigger any maintenance of the
- * affine portal. There are two classes of portal processing in question;
- * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
- * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
- * thresholds, congestion state changes, etc). This function does whatever
- * processing is not triggered by interrupts.
- *
- * Note, if DQRR and some slow-path processing are poll-driven (rather than
- * interrupt-driven) then this function uses a heuristic to determine how often
- * to run slow-path processing - as slow-path processing introduces at least a
- * minimum latency each time it is run, whereas fast-path (DQRR) processing is
- * close to zero-cost if there is no work to be done. Applications can tune this
- * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
- * rather than going via this wrapper.
- */
-void qman_poll(void);
-
-/**
- * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
- *
- * Disables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
- */
-void qman_stop_dequeues(void);
-
-/**
- * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
- *
- * Enables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
+ * The return value represents the number of DQRR entries processed.
*/
-void qman_start_dequeues(void);
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
/**
- * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
* @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
*
* Adds a set of pool channels to the portal's static dequeue command register
* (SDQCR). The requested pools are limited to those the portal has dequeue
* access to.
*/
-void qman_static_dequeue_add(u32 pools);
-
-/**
- * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
- * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
- *
- * Removes a set of pool channels from the portal's static dequeue command
- * register (SDQCR). The requested pools are limited to those the portal has
- * dequeue access to.
- */
-void qman_static_dequeue_del(u32 pools);
-
-/**
- * qman_static_dequeue_get - return the portal's current SDQCR
- *
- * Returns the portal's current static dequeue command register (SDQCR). The
- * entire register is returned, so if only the currently-enabled pool channels
- * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
- */
-u32 qman_static_dequeue_get(void);
-
-/**
- * qman_dca - Perform a Discrete Consumption Acknowledgment
- * @dq: the DQRR entry to be consumed
- * @park_request: indicates whether the held-active @fq should be parked
- *
- * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
- * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
- * does not take a 'portal' argument but implies the core affine portal from the
- * cpu that is currently executing the function. For reasons of locking, this
- * function must be called from the same CPU as that which processed the DQRR
- * entry in the first place.
- */
-void qman_dca(struct qm_dqrr_entry *dq, int park_request);
-
-/**
- * qman_eqcr_is_empty - Determine if portal's EQCR is empty
- *
- * For use in situations where a cpu-affine caller needs to determine when all
- * enqueues for the local portal have been processed by QMan but can't use the
- * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
- * The function forces tracking of EQCR consumption (which normally doesn't
- * happen until enqueue processing needs to find space to put new enqueue
- * commands), and returns zero if the ring still has unprocessed entries,
- * non-zero if it is empty.
- */
-int qman_eqcr_is_empty(void);
-
-/**
- * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
- * @handler: callback for processing DCP ERNs
- * @affine: whether this handler is specific to the locally affine portal
- *
- * If a hardware block's interface to QMan (ie. its direct-connect portal, or
- * DCP) is configured not to receive enqueue rejections, then any enqueues
- * through that DCP that are rejected will be sent to a given software portal.
- * If @affine is non-zero, then this handler will only be used for DCP ERNs
- * received on the portal affine to the current CPU. If multiple CPUs share a
- * portal and they all call this function, they will be setting the handler for
- * the same portal! If @affine is zero, then this handler will be global to all
- * portals handled by this instance of the driver. Only those portals that do
- * not have their own affine handler will use the global handler.
- */
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
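
Illustrative sketch (not part of the patch): typical poll-mode portal setup with the qman_p_*() calls above. QM_SDQCR_CHANNELS_POOL() and QM_PIRQ_DQRI are assumed from elsewhere in this header (they are referenced in the comments but not defined in this hunk), and pool channel 4 is an arbitrary example.

static int example_portal_setup(int cpu)
{
	struct qman_portal *p = qman_get_affine_portal(cpu);

	/* dequeue from pool channel 4 in addition to the dedicated channel */
	qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(4));
	/* run DQRR by polling rather than from the portal interrupt */
	qman_p_irqsource_remove(p, QM_PIRQ_DQRI);

	return qman_p_poll_dqrr(p, 16);	/* number of DQRR entries handled */
}
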
/* FQ management */
- /* ------------- */
/**
* qman_create_fq - Allocates a FQ
* @fqid: the index of the FQD to encapsulate, must be "Out of Service"
@@ -1419,28 +868,23 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
* qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
* pre-existing frame-queues that aren't to be otherwise interfered with, it
* prevents all other modifications to the frame queue. The TO_DCPORTAL flag
- * causes the driver to honour any contextB modifications requested in the
+ * causes the driver to honour any context_b modifications requested in the
* qm_init_fq() API, as this indicates the frame queue will be consumed by a
* direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
- * software portals, the contextB field is controlled by the driver and can't be
- * modified by the caller. If the AS_IS flag is specified, management commands
- * will be used on portal @p to query state for frame queue @fqid and construct
- * a frame queue object based on that, rather than assuming/requiring that it be
- * Out of Service.
+ * software portals, the context_b field is controlled by the driver and can't
+ * be modified by the caller.
*/
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
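
Illustrative sketch (not part of the patch): allocating a dynamically-numbered FQ and scheduling it towards a channel. The qman_init_fq() prototype and the QM_INITFQ_WE_DESTWQ write-enable flag are assumed from the full header; only qman_create_fq(), the initfq structure and the FQD setters appear in this hunk.

static int example_setup_tx_fq(struct qman_fq *fq, u16 channel)
{
	struct qm_mcc_initfq opts;
	int err;

	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (err)
		return err;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ);	/* assumed WE flag */
	qm_fqd_set_destwq(&opts.fqd, channel, 3);

	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
}
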
/**
* qman_destroy_fq - Deallocates a FQ
* @fq: the frame queue object to release
- * @flags: bit-mask of QMAN_FQ_FREE_*** options
*
* The memory for this frame queue object ('fq' provided in qman_create_fq()) is
* not deallocated but the caller regains ownership, to do with as desired. The
- * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
- * is specified, in which case it may also be in the 'parked' state.
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
*/
-void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+void qman_destroy_fq(struct qman_fq *fq);
/**
* qman_fq_fqid - Queries the frame queue ID of a FQ object
@@ -1449,18 +893,6 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags);
u32 qman_fq_fqid(struct qman_fq *fq);
/**
- * qman_fq_state - Queries the state of a FQ object
- * @fq: the frame queue object to query
- * @state: pointer to state enum to return the FQ scheduling state
- * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
- *
- * Queries the state of the FQ object, without performing any h/w commands.
- * This captures the state, as seen by the driver, at the time the function
- * executes.
- */
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
-
-/**
* qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
* @fq: the frame queue object to modify, must be 'parked' or new.
* @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
@@ -1508,7 +940,7 @@ int qman_schedule_fq(struct qman_fq *fq);
/**
* qman_retire_fq - Retires a FQ
* @fq: the frame queue object to retire
- * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
*
* Retires the frame queue. This returns zero if it succeeds immediately, +1 if
* the retirement was started asynchronously, otherwise it returns negative for
@@ -1535,249 +967,58 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
int qman_oos_fq(struct qman_fq *fq);
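
Illustrative sketch (not part of the patch): the usual retire / out-of-service / destroy sequence built from the three calls documented above. A real driver would wait for the asynchronous retirement notification instead of the placeholder comment.

static int example_teardown_fq(struct qman_fq *fq)
{
	u32 flags;
	int err = qman_retire_fq(fq, &flags);

	if (err < 0)
		return err;
	/*
	 * err == +1: retirement completes later via an FQRN message-ring
	 * notification; a real driver would wait for it before continuing.
	 */

	err = qman_oos_fq(fq);	/* fails while the FQ is non-empty or has ORL */
	if (err)
		return err;

	qman_destroy_fq(fq);	/* caller regains ownership of 'fq' memory */
	return 0;
}
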
/**
- * qman_fq_flow_control - Set the XON/XOFF state of a FQ
- * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
- * or 'retired' or 'parked' state
- * @xon: boolean to set fq in XON or XOFF state
- *
- * The frame should be in Tentatively Scheduled state or Truly Schedule sate,
- * otherwise the IFSI interrupt will be asserted.
- */
-int qman_fq_flow_control(struct qman_fq *fq, int xon);
-
-/**
- * qman_query_fq - Queries FQD fields (via h/w query command)
- * @fq: the frame queue object to be queried
- * @fqd: storage for the queried FQD fields
- */
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-
-/**
- * qman_query_fq_np - Queries non-programmable FQD fields
- * @fq: the frame queue object to be queried
- * @np: storage for the queried FQD fields
- */
-int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
-
-/**
- * qman_query_wq - Queries work queue lengths
- * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
- * to this software portal. Otherwise, query length of WQs in a
- * channel specified in wq.
- * @wq: storage for the queried WQs lengths. Also specified the channel to
- * to query if query_dedicated is zero.
- */
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
-
-/**
- * qman_volatile_dequeue - Issue a volatile dequeue command
- * @fq: the frame queue object to dequeue from
- * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
- * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
- *
- * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
- * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
- * the VDQCR is already in use, otherwise returns non-zero for failure. If
- * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
- * the VDQCR command has finished executing (ie. once the callback for the last
- * DQRR entry resulting from the VDQCR command has been called). If not using
- * the FINISH flag, completion can be determined either by detecting the
- * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
- * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
- * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
- * "flags" retrieved from qman_fq_state().
- */
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-
-/**
* qman_enqueue - Enqueue a frame to a frame queue
* @fq: the frame queue object to enqueue to
* @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
*
* Fills an entry in the EQCR of portal @qm to enqueue the frame described by
* @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
- * field is ignored. The return value is non-zero on error, such as ring full
- * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
- * specified), etc. If the ring is full and FLAG_WAIT is specified, this
- * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
- * interrupt will assert when QMan consumes the EQCR entry (subject to "status
- * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, QMan will
- * perform an implied "discrete consumption acknowledgment" on the dequeue
- * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
- * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
- * this implicit DCA can delay the release of a "held active" frame queue
- * corresponding to a DQRR entry until QMan consumes the EQCR entry - providing
- * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
- * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
- * acknowledgment should "park request" the "held active" frame queue. Ie.
- * when the portal eventually releases that frame queue, it will be left in the
- * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
- * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
- * is requested, and the FQ is a member of a congestion group, then this
- * function returns -EAGAIN if the congestion group is currently congested.
- * Note, this does not eliminate ERNs, as the async interface means we can be
- * sending enqueue commands to an un-congested FQ that becomes congested before
- * the enqueue commands are processed, but it does minimise needless thrashing
- * of an already busy hardware resource by throttling many of the to-be-dropped
- * enqueues "at the source".
- */
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
-
-typedef int (*qman_cb_precommit) (void *arg);
-/**
- * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- * @cb: user supplied callback function to invoke before writing commit verb.
- * @cb_arg: callback function argument
- *
- * This is similar to qman_enqueue except that it will invoke a user supplied
- * callback function just before writing the commit verb. This is useful
- * when the user wants to do something *just before* enqueuing the request and
- * the enqueue can't fail.
+ * field is ignored. The return value is non-zero on error, such as ring full.
*/
-int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
- u32 flags, qman_cb_precommit cb, void *cb_arg);
-
-/**
- * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- * @orp: the frame queue object used as an order restoration point.
- * @orp_seqnum: the sequence number of this frame in the order restoration path
- *
- * Similar to qman_enqueue(), but with the addition of an Order Restoration
- * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
- * enqueue operation to employ order restoration. Each frame queue object acts
- * as an Order Definition Point (ODP) by providing each frame dequeued from it
- * with an incrementing sequence number; this value is generally ignored unless
- * that sequence of dequeued frames will need order restoration later. Each
- * frame queue object also encapsulates an Order Restoration Point (ORP), which
- * is a re-assembly context for re-ordering frames relative to their sequence
- * numbers as they are enqueued. The ORP does not have to be within the frame
- * queue that receives the enqueued frame, in fact it is usually the frame
- * queue from which the frames were originally dequeued. For the purposes of
- * order restoration, multiple frames (or "fragments") can be enqueued for a
- * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
- * enqueues except the final fragment of a given sequence number. Ordering
- * between sequence numbers is guaranteed, even if fragments of different
- * sequence numbers are interlaced with one another. Fragments of the same
- * sequence number will retain the order in which they are enqueued. If no
- * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
- * sequence number is to be "skipped" by the ORP logic (e.g. if a frame has been
- * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
- * sequence number should become the ORP's "Next Expected Sequence Number".
- *
- * Side note: a frame queue object can be used purely as an ORP, without
- * carrying any frames at all. Care should be taken not to deallocate a frame
- * queue object that is being actively used as an ORP, as a future allocation
- * of the frame queue object may start using the internal ORP before the
- * previous use has finished.
- */
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
- struct qman_fq *orp, u16 orp_seqnum);
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
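A minimal usage sketch of the simplified enqueue call, assuming @fq was initialised elsewhere and the frame descriptor has already been filled in; the helper name and retry bound are illustrative only.

#include <soc/fsl/qman.h>

/* Illustrative helper: retry a bounded number of times while the EQCR
 * is full, then give up and report the last error to the caller.
 */
static int example_tx(struct qman_fq *fq, const struct qm_fd *fd)
{
	int retries = 1000;
	int err;

	do {
		err = qman_enqueue(fq, fd);
		if (!err)
			return 0;	/* EQCR entry written */
	} while (--retries);

	return err;
}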
/**
* qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
* @result: is set by the API to the base FQID of the allocated range
* @count: the number of FQIDs required
- * @align: required alignment of the allocated range
- * @partial: non-zero if the API can return fewer than @count FQIDs
*
- * Returns the number of frame queues allocated, or a negative error code. If
- * @partial is non zero, the allocation request may return a smaller range of
- * FQs than requested (though alignment will be as requested). If @partial is
- * zero, the return value will either be 'count' or negative.
+ * Returns 0 on success, or a negative error code.
*/
-int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
-static inline int qman_alloc_fqid(u32 *result)
-{
- int ret = qman_alloc_fqid_range(result, 1, 0, 0);
-
- return (ret > 0) ? 0 : ret;
-}
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
/**
- * qman_release_fqid_range - Release the specified range of frame queue IDs
- * @fqid: the base FQID of the range to deallocate
- * @count: the number of FQIDs in the range
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
*
- * This function can also be used to seed the allocator with ranges of FQIDs
- * that it can subsequently allocate from.
- */
-void qman_release_fqid_range(u32 fqid, u32 count);
-static inline void qman_release_fqid(u32 fqid)
-{
- qman_release_fqid_range(fqid, 1);
-}
-
-void qman_seed_fqid_range(u32 fqid, u32 count);
-
-
-int qman_shutdown_fq(u32 fqid);
-
-/**
- * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
- * @fqid: the base FQID of the range to reserve
- * @count: the number of FQIDs in the range
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
*/
-int qman_reserve_fqid_range(u32 fqid, u32 count);
-static inline int qman_reserve_fqid(u32 fqid)
-{
- return qman_reserve_fqid_range(fqid, 1);
-}
+int qman_release_fqid(u32 fqid);
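A rough sketch of FQID lifetime with the simplified allocator, assuming the qman_create_fq() declaration earlier in this header; the helper name is hypothetical and flags and error handling are abbreviated.

#include <soc/fsl/qman.h>

/* Allocate one FQID, bind a frame queue object to it, and return the ID
 * to the allocator if FQ creation fails. Sketch only.
 */
static int example_fqid_lifetime(struct qman_fq *fq)
{
	u32 fqid;
	int err;

	err = qman_alloc_fqid(&fqid);		/* same as ..._range(&fqid, 1) */
	if (err)
		return err;

	err = qman_create_fq(fqid, 0, fq);	/* flags omitted for brevity */
	if (err)
		qman_release_fqid(fqid);
	return err;
}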
/* Pool-channel management */
- /* ----------------------- */
/**
* qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
* @result: is set by the API to the base pool-channel ID of the allocated range
* @count: the number of pool-channel IDs required
- * @align: required alignment of the allocated range
- * @partial: non-zero if the API can return fewer than @count
*
- * Returns the number of pool-channel IDs allocated, or a negative error code.
- * If @partial is non-zero, the allocation request may return a smaller range
- * than requested (though alignment will be as requested). If @partial is zero,
- * the return value will either be 'count' or negative.
- */
-int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
-static inline int qman_alloc_pool(u32 *result)
-{
- int ret = qman_alloc_pool_range(result, 1, 0, 0);
-
- return (ret > 0) ? 0 : ret;
-}
-
-/**
- * qman_release_pool_range - Release the specified range of pool-channel IDs
- * @id: the base pool-channel ID of the range to deallocate
- * @count: the number of pool-channel IDs in the range
+ * Returns 0 on success, or a negative error code.
*/
-void qman_release_pool_range(u32 id, u32 count);
-static inline void qman_release_pool(u32 id)
-{
- qman_release_pool_range(id, 1);
-}
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
/**
- * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
- * @id: the base pool-channel ID of the range to reserve
- * @count: the number of pool-channel IDs in the range
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-channel ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
*/
-int qman_reserve_pool_range(u32 id, u32 count);
-static inline int qman_reserve_pool(u32 id)
-{
- return qman_reserve_pool_range(id, 1);
-}
-
-void qman_seed_pool_range(u32 id, u32 count);
+int qman_release_pool(u32 id);
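Since the release call doubles as a seeding primitive, the hypothetical sketch below hands a platform-provided range of pool-channel IDs to the allocator at init time; first_pool and num_pools are placeholder values, not part of this header.

#include <soc/fsl/qman.h>

/* Seed the pool-channel allocator with [first_pool, first_pool + num_pools)
 * so that later qman_alloc_pool() calls can draw from that range.
 */
static int example_seed_pools(u32 first_pool, u32 num_pools)
{
	u32 i;
	int err;

	for (i = 0; i < num_pools; i++) {
		err = qman_release_pool(first_pool + i);
		if (err)
			return err;
	}
	return 0;
}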
/* CGR management */
- /* -------------- */
/**
* qman_create_cgr - Register a congestion group object
* @cgr: the 'cgr' object, with fields filled in
@@ -1792,18 +1033,7 @@ void qman_seed_pool_range(u32 id, u32 count);
* (which only modifies the specified parameters).
*/
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
- struct qm_mcc_initcgr *opts);
-
-/**
- * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
- * @cgr: the 'cgr' object, with fields filled in
- * @flags: QMAN_CGR_FLAG_* values
- * @dcp_portal: the DCP portal to which the cgr object is registered.
- * @opts: optional state of CGR settings
- *
- */
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
- struct qm_mcc_initcgr *opts);
+ struct qm_mcc_initcgr *opts);
/**
* qman_delete_cgr - Deregisters a congestion group object
@@ -1824,163 +1054,30 @@ int qman_delete_cgr(struct qman_cgr *cgr);
void qman_delete_cgr_safe(struct qman_cgr *cgr);
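A hedged sketch of CGR registration: it assumes the qman_cb_cgr callback typedef and the cgrid/cb fields of struct qman_cgr declared earlier in this header, takes the CGR ID from the allocator documented further down, and programs no initial parameters (opts is NULL, which the API is assumed to accept here). Function names are illustrative.

#include <soc/fsl/qman.h>

/* Called by QMan on congestion state changes for this group. */
static void example_cscn(struct qman_portal *qm, struct qman_cgr *cgr,
			 int congested)
{
	/* react to entering/leaving congestion here */
}

/* Allocate a CGR ID, register the object, and give the ID back on error. */
static int example_register_cgr(struct qman_cgr *cgr)
{
	int err;

	err = qman_alloc_cgrid(&cgr->cgrid);
	if (err)
		return err;

	cgr->cb = example_cscn;
	err = qman_create_cgr(cgr, 0, NULL);	/* keep current CGR settings */
	if (err)
		qman_release_cgrid(cgr->cgrid);
	return err;
}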
/**
- * qman_modify_cgr - Modify CGR fields
- * @cgr: the 'cgr' object to modify
- * @flags: QMAN_CGR_FLAG_* values
- * @opts: the CGR-modification settings
- *
- * The @opts parameter comes from the low-level portal API, and can be NULL.
- * Note that some fields and options within @opts may be ignored or overwritten
- * by the driver, in particular the 'cgrid' field is ignored (this operation
- * only affects the given CGR object). If @flags contains
- * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
- * unspecified parameters) will be used rather than a modify hw command (which
- * only modifies the specified parameters).
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns 'cgr's congestion status, 1 (true) if congested
*/
-int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
- struct qm_mcc_initcgr *opts);
-
-/**
-* qman_query_cgr - Queries CGR fields
-* @cgr: the 'cgr' object to query
-* @result: storage for the queried congestion group record
-*/
-int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
-
-/**
- * qman_query_congestion - Queries the state of all congestion groups
- * @congestion: storage for the queried state of all congestion groups
- */
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
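As a usage illustration only, the sketch below polls the congestion state before attempting a transmit; it assumes @cgr was registered with qman_create_cgr() and that @fq and @fd are prepared by the caller.

#include <linux/errno.h>
#include <linux/types.h>
#include <soc/fsl/qman.h>

/* Skip the enqueue attempt while the congestion group is congested. */
static int example_tx_unless_congested(struct qman_cgr *cgr,
				       struct qman_fq *fq,
				       const struct qm_fd *fd)
{
	bool congested = false;
	int err;

	err = qman_query_cgr_congested(cgr, &congested);
	if (err)
		return err;
	if (congested)
		return -EAGAIN;		/* caller backs off and retries */

	return qman_enqueue(fq, fd);
}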
/**
* qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
* @result: is set by the API to the base CGR ID of the allocated range
* @count: the number of CGR IDs required
- * @align: required alignment of the allocated range
- * @partial: non-zero if the API can return fewer than @count
- *
- * Returns the number of CGR IDs allocated, or a negative error code.
- * If @partial is non-zero, the allocation request may return a smaller range
- * than requested (though alignment will be as requested). If @partial is zero,
- * the return value will either be 'count' or negative.
- */
-int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
-static inline int qman_alloc_cgrid(u32 *result)
-{
- int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
-
- return (ret > 0) ? 0 : ret;
-}
-
-/**
- * qman_release_cgrid_range - Release the specified range of CGR IDs
- * @id: the base CGR ID of the range to deallocate
- * @count: the number of CGR IDs in the range
- */
-void qman_release_cgrid_range(u32 id, u32 count);
-static inline void qman_release_cgrid(u32 id)
-{
- qman_release_cgrid_range(id, 1);
-}
-
-/**
- * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
- * @id: the base CGR ID of the range to reserve
- * @count: the number of CGR IDs in the range
- */
-int qman_reserve_cgrid_range(u32 id, u32 count);
-static inline int qman_reserve_cgrid(u32 id)
-{
- return qman_reserve_cgrid_range(id, 1);
-}
-
-void qman_seed_cgrid_range(u32 id, u32 count);
-
-
- /* Helpers */
- /* ------- */
-/**
- * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
- * @fqid: the FQID that will be initialised by other s/w
*
- * In many situations, an FQID is provided for communication between s/w
- * entities, and whilst the consumer is responsible for initialising and
- * scheduling the FQ, the producer(s) generally create a wrapper FQ object
- * and only call qman_enqueue() (no FQ initialisation, scheduling, etc), i.e.:
- * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
- * However, data cannot be enqueued to the FQ until it is initialised out of
- * the OOS state - this function polls for that condition. It is particularly
- * useful for users of IPC functions - each endpoint's Rx FQ is the other
- * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
- * and then use this API on the (NO_MODIFY) Tx FQ object in order to
- * synchronise. The function returns zero for success, +1 if the FQ is still in
- * the OOS state, or negative if there was an error.
+ * Returns 0 on success, or a negative error code.
*/
-static inline int qman_poll_fq_for_init(struct qman_fq *fq)
-{
- struct qm_mcr_queryfq_np np;
- int err;
-
- err = qman_query_fq_np(fq, &np);
- if (err)
- return err;
- if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
- return 1;
- return 0;
-}
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
/**
- * qman_set_wpm - Set waterfall power management
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
*
- * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
- *
- * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
- * accessible.
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
*/
-int qman_set_wpm(int wpm_enable);
-
-/**
- * qman_get_wpm - Query the waterfall power management setting
- *
- * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
- *
- * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
- * accessible.
- */
-int qman_get_wpm(int *wpm_enable);
-
-/* The below qman_p_***() variants might be called in a migration situation
- * (e.g. cpu hotplug). They are used to continue accessing the portal that
- * execution was affine to prior to migration.
- * @qman_portal specifies which portal the APIs will use.
- */
-const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
- *p);
-int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
-int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
-int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
-u32 qman_p_poll_slow(struct qman_portal *p);
-void qman_p_poll(struct qman_portal *p);
-void qman_p_stop_dequeues(struct qman_portal *p);
-void qman_p_start_dequeues(struct qman_portal *p);
-void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
-void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
-u32 qman_p_static_dequeue_get(struct qman_portal *p);
-void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
- int park_request);
-int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
- u32 flags __maybe_unused, u32 vdqcr);
-int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags);
-int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags,
- struct qman_fq *orp, u16 orp_seqnum);
-int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
- const struct qm_fd *fd, u32 flags,
- qman_cb_precommit cb, void *cb_arg);
-#ifdef __cplusplus
-}
-#endif
+int qman_release_cgrid(u32 id);
#endif /* __FSL_QMAN_H */