author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-04-27 11:58:19 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-10-23 08:46:43 +0200
commit     28ee86a9b0f2e980beeb637da4f787065c74a39e (patch)
tree       e2daa4e96e0488de2066fcb5f38e087c9a58f319 /linux
parent     b2b2e1a6a5ed752659e5665822c014e45787f350 (diff)
Import DPAA driver snapshot
Imported from the Freescale Linux repository (git://git.freescale.com/ppc/upstream/linux.git), commit 2774c204cd8bfc56a200ff4dcdfc9cdf5b6fc161. The Linux compatibility layer is partly from FreeBSD.
Diffstat (limited to 'linux')
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 988
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 519
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c | 1491
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h | 113
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c | 710
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 171
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 143
-rw-r--r--  linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 417
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h | 314
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman.c | 2957
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman.h | 500
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c | 1786
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h | 59
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_mac.h | 276
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_memac.c | 1382
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_memac.h | 62
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_muram.c | 124
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_muram.h | 90
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_port.c | 1827
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_port.h | 240
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_sp.c | 171
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_sp.h | 103
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_tgec.c | 853
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman_tgec.h | 55
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/mac.c | 1180
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/mac.h | 147
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman-debugfs.c | 121
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman.c | 692
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman.h | 542
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_api.c | 1123
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_portal.c | 399
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_priv.h | 136
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test.c | 60
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test.h | 34
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test_api.c | 188
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_test_thresh.c | 216
-rw-r--r--  linux/drivers/soc/fsl/qbman/bman_utils.c | 76
-rw-r--r--  linux/drivers/soc/fsl/qbman/dpaa_resource.c | 363
-rw-r--r--  linux/drivers/soc/fsl/qbman/dpaa_sys.h | 292
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman-debugfs.c | 1317
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman.c | 1106
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman.h | 1133
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_api.c | 3026
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_driver.c | 87
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_portal.c | 796
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_priv.h | 293
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test.c | 61
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test.h | 44
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test_api.c | 222
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_test_stash.c | 540
-rw-r--r--  linux/drivers/soc/fsl/qbman/qman_utils.c | 309
-rw-r--r--  linux/include/soc/fsl/bman.h | 524
-rw-r--r--  linux/include/soc/fsl/qman.h | 1986
53 files changed, 32364 insertions(+), 0 deletions(-)
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
new file mode 100644
index 00000000..73173b89
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -0,0 +1,988 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#ifndef __rtems__
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/net.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#endif /* __rtems__ */
+#include <soc/fsl/bman.h>
+
+#include "fman.h"
+#include "fman_port.h"
+
+#include "mac.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+ * using trace events only need to #include <trace/events/sched.h>
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa_eth_trace.h"
+
+#define DPA_NAPI_WEIGHT 64
+
+/* Valid checksum indication */
+#define DPA_CSUM_VALID 0xFFFF
+
+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+
+#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
+/* Ingress congestion threshold on FMan ports
+ * The size in bytes of the ingress tail-drop threshold on FMan ports.
+ * Traffic piling up above this value will be rejected by QMan and discarded
+ * by FMan.
+ */
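+
+/* For scale: 0x10000000 bytes is 256 MiB of in-flight ingress data per
+ * interface before QMan starts rejecting enqueues.
+ */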
+
+#ifndef __rtems__
+static u8 debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+#endif /* __rtems__ */
+
+/* BM */
+
+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+
+static u8 dpa_priv_common_bpid;
+
+static void _dpa_rx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ /* limit common, possibly innocuous Rx FIFO Overflow errors'
+ * interference with zero-loss convergence benchmark results.
+ */
+ if (likely(fd->status & FM_FD_ERR_PHYSICAL))
+ pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");
+ else
+#ifndef __rtems__
+ if (net_ratelimit())
+ netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_RX_ERRORS);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+ percpu_priv->stats.rx_errors++;
+#endif /* __rtems__ */
+
+ if (fd->status & FM_FD_ERR_DMA)
+ percpu_priv->rx_errors.dme++;
+ if (fd->status & FM_FD_ERR_PHYSICAL)
+ percpu_priv->rx_errors.fpe++;
+ if (fd->status & FM_FD_ERR_SIZE)
+ percpu_priv->rx_errors.fse++;
+ if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ percpu_priv->rx_errors.phe++;
+
+ dpa_fd_release(net_dev, fd);
+}
+
+static void _dpa_tx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+#ifndef __rtems__
+ struct sk_buff *skb;
+
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+#else /* __rtems__ */
+ struct ifnet *ifp = net_dev->ifp;
+
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+#endif /* __rtems__ */
+
+ /* If we intended the buffers from this frame to go into the bpools
+ * when the FMan transmit was done, we need to put it in manually.
+ */
+ if (fd->bpid != 0xff) {
+ dpa_fd_release(net_dev, fd);
+ return;
+ }
+
+#ifndef __rtems__
+ skb = _dpa_cleanup_tx_fd(priv, fd);
+ dev_kfree_skb(skb);
+#else /* __rtems__ */
+ _dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpa_napi_portal *np =
+ container_of(napi, struct dpa_napi_portal, napi);
+
+ int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (cleaned < budget) {
+ int tmp;
+
+ napi_complete(napi);
+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ DPA_ERR_ON(tmp);
+ }
+
+ return cleaned;
+}
+#endif /* __rtems__ */
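+
+/* The DQRR callbacks below pair with this poll function: they disable the
+ * portal's dequeue-ready IRQ source via dpaa_eth_napi_schedule() and hand
+ * processing to NAPI; once a poll run consumes less than its budget, the
+ * IRQ source is re-added above and interrupt-driven operation resumes.
+ */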
+
+static void _dpa_tx_conf(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+#ifndef __rtems__
+ struct sk_buff *skb;
+
+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ percpu_priv->tx_confirm++;
+
+ skb = _dpa_cleanup_tx_fd(priv, fd);
+
+ dev_kfree_skb(skb);
+#else /* __rtems__ */
+ struct ifnet *ifp = net_dev->ifp;
+
+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ }
+
+ _dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+static enum qman_cb_dqrr_result
+priv_rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int *count_ptr;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(net_dev, &dq->fd);
+ else
+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int *count_ptr;
+ struct dpa_bp *dpa_bp;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ dpa_bp = priv->dpa_bp;
+
+#ifndef __rtems__
+ /* Trace the Rx fd */
+ trace_dpa_rx_fd(net_dev, fq, &dq->fd);
+#endif /* __rtems__ */
+
+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ /* Vale of plenty: make sure we didn't run out of buffers */
+
+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
+#ifdef __rtems__
+ {
+ struct ifnet *ifp = net_dev->ifp;
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+#endif /* __rtems__ */
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpa_fd_release(net_dev, &dq->fd);
+#ifdef __rtems__
+ }
+#endif /* __rtems__ */
+ else
+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
+ count_ptr);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_tx_conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_tx_conf_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+#ifndef __rtems__
+ /* Trace the fd */
+ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
+#endif /* __rtems__ */
+
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static void priv_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+#ifndef __rtems__
+ struct sk_buff *skb;
+#else /* __rtems__ */
+ struct ifnet *ifp;
+#endif /* __rtems__ */
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &msg->ern.fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+#ifndef __rtems__
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+#else /* __rtems__ */
+ ifp = net_dev->ifp;
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+#endif /* __rtems__ */
+ count_ern(percpu_priv, msg);
+
+ /* If we intended this buffer to go into the pool
+ * when the FM was done, we need to put it in
+ * manually.
+ */
+ if (msg->ern.fd.bpid != 0xff) {
+ dpa_fd_release(net_dev, fd);
+ return;
+ }
+
+#ifndef __rtems__
+ skb = _dpa_cleanup_tx_fd(priv, fd);
+ dev_kfree_skb_any(skb);
+#else /* __rtems__ */
+ _dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+static const struct dpa_fq_cbs_t private_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = priv_ern } }
+};
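+
+/* These FQs only serve as callback templates; dpa_fq_setup() (called from
+ * dpaa_eth_priv_probe() below) is expected to copy the matching qman_fq
+ * template into each dpa_fq according to its fq_type before initialization.
+ */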
+
+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+{
+#ifndef __rtems__
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_enable(&percpu_priv->np[j].napi);
+ }
+#endif /* __rtems__ */
+}
+
+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+{
+#ifndef __rtems__
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ for (j = 0; j < qman_portal_max; j++)
+ napi_disable(&percpu_priv->np[j].napi);
+ }
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static int dpa_eth_priv_start(struct net_device *net_dev)
+#else /* __rtems__ */
+int dpa_eth_priv_start(struct net_device *net_dev)
+#endif /* __rtems__ */
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_napi_enable(priv);
+
+ err = dpa_start(net_dev);
+ if (err < 0)
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+#ifndef __rtems__
+static int dpa_eth_priv_stop(struct net_device *net_dev)
+#else /* __rtems__ */
+int dpa_eth_priv_stop(struct net_device *net_dev)
+#endif /* __rtems__ */
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ err = dpa_stop(net_dev);
+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm
+ * ingress queues. This is to avoid a race between the current
+ * context and ksoftirqd which could leave NAPI disabled while
+ * in fact there's still Rx traffic to be processed.
+ */
+ usleep_range(5000, 10000);
+
+ priv = netdev_priv(net_dev);
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+#ifndef __rtems__
+static const struct net_device_ops dpa_private_ops = {
+ .ndo_open = dpa_eth_priv_start,
+ .ndo_start_xmit = dpa_tx,
+ .ndo_stop = dpa_eth_priv_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+};
+#endif /* __rtems__ */
+
+static int dpa_private_napi_add(struct net_device *net_dev)
+{
+#ifndef __rtems__
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
+ qman_portal_max * sizeof(struct dpa_napi_portal),
+ GFP_KERNEL);
+
+ if (!percpu_priv->np)
+ return -ENOMEM;
+
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
+ dpaa_eth_poll, DPA_NAPI_WEIGHT);
+ }
+#endif /* __rtems__ */
+
+ return 0;
+}
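+
+/* One NAPI instance is allocated per (CPU, portal) pair, so each affine QMan
+ * portal can be polled independently; dpaa_eth_napi_schedule() selects the
+ * instance by the portal's channel number.
+ */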
+
+void dpa_private_napi_del(struct net_device *net_dev)
+{
+#ifndef __rtems__
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ if (percpu_priv->np) {
+ for (i = 0; i < qman_portal_max; i++)
+ netif_napi_del(&percpu_priv->np[i].napi);
+
+ devm_kfree(net_dev->dev.parent, percpu_priv->np);
+ }
+ }
+#endif /* __rtems__ */
+}
+
+static int dpa_private_netdev_init(struct net_device *net_dev)
+{
+ int i;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+#ifndef __rtems__
+ const u8 *mac_addr;
+#endif /* __rtems__ */
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+#ifndef __rtems__
+ for_each_possible_cpu(i) {
+#else /* __rtems__ */
+ for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+#ifndef __rtems__
+ net_dev->netdev_ops = &dpa_private_ops;
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ /* Advertise S/G and HIGHDMA support for private interfaces */
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* Recent kernels enable GSO automatically, if
+ * we declare NETIF_F_SG. For conformity, we'll
+ * still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+
+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
+#else /* __rtems__ */
+ return 0;
+#endif /* __rtems__ */
+}
+
+static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
+{
+ struct dpa_bp *dpa_bp;
+
+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
+ if (!dpa_bp)
+ return ERR_PTR(-ENOMEM);
+
+ dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
+ dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ dpa_bp->seed_cb = dpa_bp_priv_seed;
+ dpa_bp->free_buf_cb = _dpa_bp_free_pf;
+
+ return dpa_bp;
+}
+
+/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
+ * We won't be sending congestion notifications to FMan; for now, we just use
+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
+ * before they reach our ingress queues and eat up memory.
+ */
+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
+ if (err < 0) {
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+
+ /* Enable CS TD, but disable Congestion State Change Notifications. */
+ initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+ cs_th = DPAA_INGRESS_CS_THRESHOLD;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ /* This is actually a hack, because this CGR will be associated with
+ * our affine SWP. However, we'll place our ingress FQs in it.
+ */
+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ pr_err("Error %d creating ingress CGR with ID %d\n", err,
+ priv->ingress_cgr.cgrid);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ goto out_error;
+ }
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+
+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
+ * range), but we have no common initialization path between the
+ * different variants of the DPAA Eth driver, so we do it here rather
+ * than modifying every other variant than "private Eth".
+ */
+ priv->use_ingress_cgr = true;
+
+out_error:
+ return err;
+}
+
+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
+
+ netif_dbg(priv, probe, net_dev,
+ "Using private BM buffer pools\n");
+
+ priv->bp_count = count;
+
+ for (i = 0; i < count; i++) {
+ int err;
+
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv);
+ priv->dpa_bp = NULL;
+ return err;
+ }
+
+ priv->dpa_bp = &dpa_bp[i];
+ }
+
+ dpa_priv_common_bpid = priv->dpa_bp->bpid;
+ return 0;
+}
+
+#ifndef __rtems__
+static const struct of_device_id dpa_match[];
+
+static int
+dpaa_eth_priv_probe(struct platform_device *pdev)
+#else /* __rtems__ */
+int
+dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
+#endif /* __rtems__ */
+{
+ int err = 0, i, channel;
+ struct device *dev;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count = 1;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+#ifndef __rtems__
+ struct mac_device *mac_dev;
+ struct task_struct *kth;
+#endif /* __rtems__ */
+
+ dev = &pdev->dev;
+
+ /* Get the buffer pool assigned to this interface;
+ * run only once the default pool probing code
+ */
+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
+ dpa_priv_bp_probe(dev);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+#ifndef __rtems__
+ /* Allocate this early, so we can store relevant information in
+ * the private area
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ goto alloc_etherdev_mq_failed;
+ }
+#else /* __rtems__ */
+ net_dev = &mac_dev->net_dev;
+ net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
+#endif /* __rtems__ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME
+ snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",
+ dpa_mac_fman_index_get(pdev),
+ dpa_mac_hw_index_get(pdev));
+#endif
+
+ /* Do this here, so we can be verbose early */
+#ifndef __rtems__
+ SET_NETDEV_DEV(net_dev, dev);
+#endif /* __rtems__ */
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+#ifndef __rtems__
+ priv->msg_enable = netif_msg_init(debug, -1);
+
+ mac_dev = dpa_mac_dev_get(pdev);
+ if (IS_ERR(mac_dev) || !mac_dev) {
+ err = PTR_ERR(mac_dev);
+ goto mac_probe_failed;
+ }
+#endif /* __rtems__ */
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout)
+ goto alloc_failed;
+
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ /* For private ports, need to compute the size of the default
+ * buffer pool, based on FMan port buffer layout; also update
+ * the maximum buffer size for private ports if necessary
+ */
+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, true, TX);
+
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+
+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ channel = dpa_get_channel();
+
+ if (channel < 0) {
+ err = channel;
+ goto get_channel_failed;
+ }
+
+ priv->channel = (u16)channel;
+
+#ifndef __rtems__
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+ if (IS_ERR(kth)) {
+ err = -ENOMEM;
+ goto add_channel_failed;
+ }
+#else /* __rtems__ */
+ dpaa_eth_add_channel((void *)(unsigned long)priv->channel);
+#endif /* __rtems__ */
+
+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto tx_cgr_init_failed;
+ }
+ err = dpaa_eth_priv_ingress_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing ingress CGR\n");
+ goto rx_cgr_init_failed;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+
+ if (!priv->percpu_priv) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+#ifndef __rtems__
+ for_each_possible_cpu(i) {
+#else /* __rtems__ */
+ for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ /* Initialize NAPI */
+ err = dpa_private_napi_add(net_dev);
+
+ if (err < 0)
+ goto napi_add_failed;
+
+ err = dpa_private_netdev_init(net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+#ifndef __rtems__
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ pr_info("Probed interface %s\n", net_dev->name);
+#endif /* __rtems__ */
+
+ return 0;
+
+netdev_init_failed:
+napi_add_failed:
+ dpa_private_napi_del(net_dev);
+alloc_percpu_failed:
+#ifndef __rtems__
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+#endif /* __rtems__ */
+fq_alloc_failed:
+#ifndef __rtems__
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+#endif /* __rtems__ */
+rx_cgr_init_failed:
+#ifndef __rtems__
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+#endif /* __rtems__ */
+tx_cgr_init_failed:
+#ifndef __rtems__
+add_channel_failed:
+#endif /* __rtems__ */
+get_channel_failed:
+ dpa_bp_free(priv);
+bp_create_failed:
+fq_probe_failed:
+alloc_failed:
+#ifndef __rtems__
+mac_probe_failed:
+#endif /* __rtems__ */
+ dev_set_drvdata(dev, NULL);
+#ifndef __rtems__
+ free_netdev(net_dev);
+alloc_etherdev_mq_failed:
+ if (atomic_read(&dpa_bp->refs) == 0)
+ devm_kfree(dev, dpa_bp);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+
+ return err;
+}
+
+#ifndef __rtems__
+static struct platform_device_id dpa_devtype[] = {
+ {
+ .name = "dpaa-ethernet",
+ .driver_data = 0,
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(platform, dpa_devtype);
+
+static struct platform_driver dpa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .id_table = dpa_devtype,
+ .probe = dpaa_eth_priv_probe,
+ .remove = dpa_remove
+};
+
+static int __init dpa_load(void)
+{
+ int err;
+
+ pr_info(DPA_DESCRIPTION "\n");
+
+ /* initialise dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fman_get_rx_extra_headroom();
+ dpa_max_frm = fman_get_max_frm();
+
+ err = platform_driver_register(&dpa_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(dpa_load);
+
+static void __exit dpa_unload(void)
+{
+ platform_driver_unregister(&dpa_driver);
+
+ /* Only one channel is used and needs to be released after all
+ * interfaces are removed
+ */
+ dpa_release_channel();
+}
+module_exit(dpa_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
new file mode 100644
index 00000000..11b11e65
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -0,0 +1,519 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_H
+#define __DPA_H
+
+#include <linux/netdevice.h>
+#include <soc/fsl/qman.h>
+
+#include "fman.h"
+#include "mac.h"
+#include "dpaa_eth_trace.h"
+
+#ifndef __rtems__
+extern int dpa_rx_extra_headroom;
+extern int dpa_max_frm;
+
+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
+#define dpa_get_max_frm() dpa_max_frm
+#else /* __rtems__ */
+#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom
+#define dpa_get_max_frm fman_get_max_frm
+#endif /* __rtems__ */
+
+#define dpa_get_max_mtu() \
+ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
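+
+/* Worked example: with the common MAXFRM of 1522 bytes,
+ * dpa_get_max_mtu() = 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN)
+ *                   = 1522 - (18 + 4) = 1500, the standard Ethernet MTU.
+ */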
+
+/* Simple enum of FQ types - used for array indexing */
+enum port_type {RX, TX};
+
+struct dpa_buffer_layout_s {
+ u16 priv_data_size;
+ bool parse_results;
+ bool time_stamp;
+ bool hash_results;
+ u16 data_align;
+};
+
+#define DPA_ERR_ON(cond)
+
+#define DPA_TX_PRIV_DATA_SIZE 16
+#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPA_TIME_STAMP_SIZE 8
+#define DPA_HASH_RESULTS_SIZE 8
+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
+ dpa_get_rx_extra_headroom())
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
+ FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+ FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
+ FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+ FM_FD_ERR_PRS_HDR_ERR)
+
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_FD_ERR_UNSUPPORTED_FORMAT | \
+ FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
+
+/* The raw buffer size must be cacheline aligned.
+ * Normally we use 2K buffers.
+ */
+#define DPA_BP_RAW_SIZE 2048
+
+/* This is what FMan is ever allowed to use.
+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers (can we?), so we reserve some more space
+ * for start-of-buffer alignment.
+ */
+#ifndef __rtems__
+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
+ SMP_CACHE_BYTES)
+#else /* __rtems__ */
+/*
+ * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
+ * frames overwrite this area if < 64 bytes.
+ */
+#define DPA_OUT_OF_BAND_SIZE 64
+#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)
+#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET
+#endif /* __rtems__ */
+/* We must ensure that skb_shinfo is always cacheline-aligned. */
+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
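+
+/* Rough numbers, assuming 64-byte cachelines: dpa_bp_size() then evaluates to
+ * 2048 minus the skb_shared_info overhead minus the 64-byte alignment
+ * reserve, i.e. roughly 1.6 KiB of usable frame data per buffer.
+ */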
+
+/* Largest value that the FQD's OAL field can hold.
+ * This is DPAA-1.x specific.
+ */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Default alignment for start of data in an Rx FD */
+#define DPA_FD_DATA_ALIGNMENT 16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+
+/* Values for the L4R field of the FM Parse Results
+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+/* number of Tx queues to FMan */
+#define DPAA_ETH_TX_QUEUES NR_CPUS
+
+#define DPAA_ETH_RX_QUEUES 128
+
+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
+
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpa_fq_type {
+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_RX_PCD, /* User-defined PCDs */
+ FQ_TYPE_TX, /* "Real" Tx FQs */
+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+};
+
+struct dpa_fq {
+ struct qman_fq fq_base;
+ struct list_head list;
+ struct net_device *net_dev;
+ bool init;
+ u32 fqid;
+ u32 flags;
+ u16 channel;
+ u8 wq;
+ enum dpa_fq_type fq_type;
+};
+
+struct dpa_fq_cbs_t {
+ struct qman_fq rx_defq;
+ struct qman_fq tx_defq;
+ struct qman_fq rx_errq;
+ struct qman_fq tx_errq;
+ struct qman_fq egress_ern;
+};
+
+struct fqid_cell {
+ u32 start;
+ u32 count;
+};
+
+struct dpa_bp {
+ struct bman_pool *pool;
+ u8 bpid;
+#ifndef __rtems__
+ struct device *dev;
+#endif /* __rtems__ */
+ /* the buffer pools used for the private ports are initialized
+ * with config_count buffers for each CPU; at runtime the
+ * number of buffers per CPU is constantly brought back to this
+ * level
+ */
+ int config_count;
+ size_t size;
+ bool seed_pool;
+ /* physical address of the contiguous memory used by the pool to store
+ * the buffers
+ */
+ dma_addr_t paddr;
+ /* virtual address of the contiguous memory used by the pool to store
+ * the buffers
+ */
+ void __iomem *vaddr;
+ /* current number of buffers in the bpool allotted to this CPU */
+ int __percpu *percpu_count;
+ atomic_t refs;
+ /* some bpools need to be seeded before use by this cb */
+ int (*seed_cb)(struct dpa_bp *);
+ /* some bpools need to be emptied before freeing; this cb is used
+ * for freeing of individual buffers taken from the pool
+ */
+ void (*free_buf_cb)(void *addr);
+};
+
+struct dpa_rx_errors {
+ u64 dme; /* DMA Error */
+ u64 fpe; /* Frame Physical Error */
+ u64 fse; /* Frame Size Error */
+ u64 phe; /* Header Error */
+};
+
+/* Counters for QMan ERN frames - one counter per rejection code */
+struct dpa_ern_cnt {
+ u64 cg_tdrop; /* Congestion group taildrop */
+ u64 wred; /* WRED congestion */
+ u64 err_cond; /* Error condition */
+ u64 early_window; /* Order restoration, frame too early */
+ u64 late_window; /* Order restoration, frame too late */
+ u64 fq_tdrop; /* FQ taildrop */
+ u64 fq_retired; /* FQ is retired */
+ u64 orp_zero; /* ORP disabled */
+};
+
+struct dpa_napi_portal {
+#ifndef __rtems__
+ struct napi_struct napi;
+#endif /* __rtems__ */
+ struct qman_portal *p;
+};
+
+struct dpa_percpu_priv_s {
+ struct net_device *net_dev;
+ struct dpa_napi_portal *np;
+ u64 in_interrupt;
+ u64 tx_confirm;
+ /* fragmented (non-linear) skbuffs received from the stack */
+ u64 tx_frag_skbuffs;
+#ifndef __rtems__
+ struct rtnl_link_stats64 stats;
+#endif /* __rtems__ */
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+};
+
+struct dpa_priv_s {
+ struct dpa_percpu_priv_s __percpu *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ /* Store here the needed Tx headroom for convenience and speed
+ * (even though it can be computed based on the fields of buf_layout)
+ */
+ u16 tx_headroom;
+ struct net_device *net_dev;
+ struct mac_device *mac_dev;
+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
+
+ size_t bp_count;
+
+ u16 channel; /* "fsl,qman-channel-id" */
+ struct list_head dpa_fq_list;
+
+#ifndef __rtems__
+ u32 msg_enable; /* net_device message level */
+#endif /* __rtems__ */
+
+ struct {
+ /* All egress queues to a given net device belong to one
+ * (and the same) congestion group.
+ */
+ struct qman_cgr cgr;
+ /* If congested, when it began. Used for performance stats. */
+ u32 congestion_start_jiffies;
+ /* Number of jiffies the Tx port was congested. */
+ u32 congested_jiffies;
+ /* Counter for the number of times the CGR
+ * entered congestion state
+ */
+ u32 cgr_congested_count;
+ } cgr_data;
+ /* Use a per-port CGR for ingress traffic. */
+ bool use_ingress_cgr;
+ struct qman_cgr ingress_cgr;
+
+ struct dpa_buffer_layout_s *buf_layout;
+ u16 rx_headroom;
+};
+
+struct fm_port_fqs {
+ struct dpa_fq *tx_defq;
+ struct dpa_fq *tx_errq;
+ struct dpa_fq *rx_defq;
+ struct dpa_fq *rx_errq;
+};
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
+void _dpa_rx(struct net_device *net_dev,
+ struct qman_portal *portal,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid,
+ int *count_ptr);
+#ifndef __rtems__
+int dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd);
+
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,
+ struct qm_fd *fd, char *parse_results);
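+
+/* A sketch of the expected call site (illustrative only, names hypothetical):
+ * the Tx path calls this after laying out the FD and before enqueueing, e.g.
+ *
+ *	err = dpa_enable_tx_csum(priv, skb, &fd, buf_start + priv_data_size);
+ *
+ * where the last argument points at the Parse Results area reserved in the
+ * frame headroom.
+ */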
+#else /* __rtems__ */
+void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
+#endif /* __rtems__ */
+
+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
+ struct qman_portal *portal)
+{
+#ifndef __rtems__
+ /* On an RT-enabled kernel with threaded ISRs, in_irq() does not
+ * return an appropriate value, so use in_serving_softirq() to
+ * distinguish softirq from IRQ context.
+ */
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+ if (likely(!ret)) {
+ const struct qman_portal_config *pc =
+ qman_p_get_portal_config(portal);
+ struct dpa_napi_portal *np =
+ &percpu_priv->np[pc->channel];
+
+ np->p = portal;
+ napi_schedule(&np->napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+ }
+#else /* __rtems__ */
+ /* FIXME */
+#endif /* __rtems__ */
+ return 0;
+}
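+
+/* Every DQRR callback in dpaa_eth.c calls this first; a non-zero return means
+ * the portal IRQ source was removed and NAPI scheduled, so the callback must
+ * return qman_cb_dqrr_stop instead of consuming the entry in IRQ context.
+ */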
+
+static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd)
+{
+ return fd->length20;
+}
+
+static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd)
+{
+ return fd->offset;
+}
+
+#ifndef __rtems__
+/* Verifies that the skb length does not exceed the interface MTU */
+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
+{
+ if (unlikely(skb->len > mtu))
+ if ((skb->protocol != htons(ETH_P_8021Q)) ||
+ (skb->len > mtu + 4))
+ return -1;
+
+ return 0;
+}
+#endif /* __rtems__ */
+
+static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)
+{
+ u16 headroom;
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+ * If either hash results or time stamp are selected, both will
+ * be copied to/from the frame headroom, as TS is located between PR and
+ * HR in the IC and the IC copy size has a granularity of 16 bytes
+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
+ *
+ * Also make sure the headroom is a multiple of data_align bytes
+ */
+ headroom = (u16)(bl->priv_data_size +
+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
+ (bl->hash_results || bl->time_stamp ?
+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));
+
+ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
+}
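+
+/* Worked example for the private-port Rx layout set in
+ * dpa_set_buffers_layout() (parse and hash results on, timestamp off,
+ * 16-byte data alignment):
+ *
+ *	headroom = ALIGN(DPA_RX_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE +
+ *			 DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE, 16);
+ *
+ * DPA_TIME_STAMP_SIZE is counted even though the timestamp itself is unused,
+ * because TS sits between PR and HR in the internal context.
+ */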
+
+#ifndef __rtems__
+void dpaa_eth_sysfs_remove(struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+
+void dpa_private_napi_del(struct net_device *net_dev);
+#endif /* __rtems__ */
+
+static inline void clear_fd(struct qm_fd *fd)
+{
+ fd->opaque_addr = 0;
+ fd->opaque = 0;
+ fd->cmd = 0;
+}
+
+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+#ifndef __rtems__
+static inline int dpa_xmit(struct dpa_priv_s *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ int queue,
+ struct qm_fd *fd)
+{
+ int err, i;
+ struct qman_fq *egress_fq;
+
+ egress_fq = priv->egress_fqs[queue];
+ if (fd->bpid == 0xff)
+ fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+
+ /* Trace this Tx fd */
+ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < 100000; i++) {
+ err = qman_enqueue(egress_fq, fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpa_fd_length(fd);
+
+ return 0;
+}
+#endif /* __rtems__ */
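+
+/* qman_enqueue() returns -EBUSY while the portal's enqueue ring is full, so
+ * dpa_xmit() busy-polls the enqueue up to 100000 times instead of dropping
+ * the frame immediately; only a persistent error is charged to
+ * tx_errors/tx_fifo_errors.
+ */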
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Default and Tx queues go to WQ3 (no differentiation between
+ * Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ3).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void _dpa_assign_wq(struct dpa_fq *fq)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_TX:
+ fq->wq = 3;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ fq->wq = 2;
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+/* Use in lieu of skb_get_queue_mapping() */
+#define dpa_get_queue_mapping(skb) \
+ raw_smp_processor_id()
+#else
+/* Use the queue selected by XPS */
+#define dpa_get_queue_mapping(skb) \
+ skb_get_queue_mapping(skb)
+#endif
+
+static inline void _dpa_bp_free_pf(void *addr)
+{
+#ifndef __rtems__
+ put_page(virt_to_head_page(addr));
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+}
+
+#endif /* __DPA_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
new file mode 100644
index 00000000..9a4a2184
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
@@ -0,0 +1,1491 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#ifndef __rtems__
+#include <linux/highmem.h>
+#include <linux/sort.h>
+#endif /* __rtems__ */
+#include <soc/fsl/qman.h>
+#ifndef __rtems__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#endif /* __rtems__ */
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "mac.h"
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (which
+ * risks firing the netdev watchdog - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
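+
+/* In plain units: DPAA_CS_THRESHOLD_1G (0x06000000) is 96 MiB and
+ * DPAA_CS_THRESHOLD_10G (0x10000000) is 256 MiB.
+ */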
+
+static struct dpa_bp *dpa_bp_array[64];
+
+#ifndef __rtems__
+int dpa_max_frm;
+
+int dpa_rx_extra_headroom;
+#endif /* __rtems__ */
+
+static const struct fqid_cell tx_confirm_fqids[] = {
+ {0, DPAA_ETH_TX_QUEUES}
+};
+
+static const struct fqid_cell default_fqids[][3] = {
+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
+};
+
+#ifndef __rtems__
+int dpa_netdev_init(struct net_device *net_dev,
+ const u8 *mac_addr,
+ u16 tx_timeout)
+{
+ int err;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ /* we do not want shared skbs on TX */
+ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ net_dev->ethtool_ops = &dpa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ /* start without the RUNNING flag, phylib controls it later */
+ netif_carrier_off(net_dev);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+#endif /* __rtems__ */
+
+int dpa_start(struct net_device *net_dev)
+{
+ int err, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+ err = mac_dev->init_phy(net_dev, priv->mac_dev);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err);
+ return err;
+ }
+#endif /* __rtems__ */
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ err = fman_port_enable(mac_dev->port[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+#ifndef __rtems__
+ netif_tx_start_all_queues(net_dev);
+#endif /* __rtems__ */
+
+ return 0;
+
+mac_start_failed:
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+ fman_port_disable(mac_dev->port[i]);
+
+ return err;
+}
+
+int dpa_stop(struct net_device *net_dev)
+{
+ int i, err, error;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+ netif_tx_stop_all_queues(net_dev);
+#endif /* __rtems__ */
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ usleep_range(5000, 10000);
+
+ err = mac_dev->stop(mac_dev);
+ if (err < 0)
+ netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
+ err);
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ error = fman_port_disable(mac_dev->port[i]);
+ if (error)
+ err = error;
+ }
+
+#ifndef __rtems__
+ if (mac_dev->phy_dev)
+ phy_disconnect(mac_dev->phy_dev);
+ mac_dev->phy_dev = NULL;
+#endif /* __rtems__ */
+
+ return err;
+}
+
+#ifndef __rtems__
+void dpa_timeout(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+ netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - net_dev->trans_start));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return stats;
+}
+#endif /* __rtems__ */
+
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ const int max_mtu = dpa_get_max_mtu();
+
+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */
+ if (new_mtu < 68 || new_mtu > max_mtu) {
+ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
+ new_mtu, 68, max_mtu);
+ return -EINVAL;
+ }
+#ifndef __rtems__
+ net_dev->mtu = new_mtu;
+#endif /* __rtems__ */
+
+ return 0;
+}
+
+#ifndef __rtems__
+/* .ndo_init callback */
+int dpa_ndo_init(struct net_device *net_dev)
+{
+ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
+ */
+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
+
+ netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+ init_mtu);
+ net_dev->mtu = init_mtu;
+
+ return 0;
+}
+
+int dpa_set_features(struct net_device *dev, netdev_features_t features)
+{
+ /* Not much to do here for now */
+ dev->features = features;
+ return 0;
+}
+
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t unsupported_features = 0;
+
+ /* In theory we should never be requested to enable features that
+ * we didn't set in netdev->features and netdev->hw_features at probe
+ * time, but double check just to be on the safe side.
+ * We don't support enabling Rx csum through ethtool yet
+ */
+ unsupported_features |= NETIF_F_RXCSUM;
+
+ features &= ~unsupported_features;
+
+ return features;
+}
+
+int dpa_remove(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+
+ dev = &pdev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpa_private_napi_del(net_dev);
+
+ dpa_bp_free(priv);
+
+ if (priv->buf_layout)
+ devm_kfree(dev, priv->buf_layout);
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+struct mac_device *dpa_mac_dev_get(struct platform_device *pdev)
+{
+ struct device *dpa_dev, *dev;
+ struct device_node *mac_node;
+ struct platform_device *of_dev;
+ struct mac_device *mac_dev;
+ struct dpaa_eth_data *eth_data;
+
+ dpa_dev = &pdev->dev;
+ eth_data = dpa_dev->platform_data;
+ if (!eth_data)
+ return ERR_PTR(-ENODEV);
+
+ mac_node = eth_data->mac_node;
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (!of_dev) {
+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (!mac_dev) {
+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mac_dev;
+}
+
+int dpa_mac_hw_index_get(struct platform_device *pdev)
+{
+ struct device *dpa_dev;
+ struct dpaa_eth_data *eth_data;
+
+ dpa_dev = &pdev->dev;
+ eth_data = dpa_dev->platform_data;
+
+ return eth_data->mac_hw_id;
+}
+
+int dpa_mac_fman_index_get(struct platform_device *pdev)
+{
+ struct device *dpa_dev;
+ struct dpaa_eth_data *eth_data;
+
+ dpa_dev = &pdev->dev;
+ eth_data = dpa_dev->platform_data;
+
+ return eth_data->fman_hw_id;
+}
+
+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv;
+ int err;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+ return err;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+void dpa_set_rx_mode(struct net_device *net_dev)
+{
+ int err;
+ const struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+ priv->mac_dev->promisc);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ err);
+ }
+
+ err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (err < 0)
+ netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ err);
+}
+#endif /* __rtems__ */
+
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout)
+{
+ /* Rx */
+ layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;
+ layout[RX].parse_results = true;
+ layout[RX].hash_results = true;
+ layout[RX].data_align = DPA_FD_DATA_ALIGNMENT;
+
+ /* Tx */
+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ layout[TX].parse_results = true;
+ layout[TX].hash_results = true;
+ layout[TX].data_align = DPA_FD_DATA_ALIGNMENT;
+}
+
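+/* Create the Bman pool backing @dpa_bp, or take a reference on an existing
+ * pool with the same bpid. A bpid of 0 requests a dynamically allocated id.
+ * On Linux a platform device is registered for the pool so DMA mappings have
+ * a device to attach to; the pool may optionally be seeded via seed_cb.
+ */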
+int dpa_bp_alloc(struct dpa_bp *dpa_bp)
+{
+ int err;
+ struct bman_pool_params bp_params;
+#ifndef __rtems__
+ struct platform_device *pdev;
+#endif /* __rtems__ */
+
+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
+		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
+ return -EINVAL;
+ }
+
+ memset(&bp_params, 0, sizeof(struct bman_pool_params));
+
+	/* Pools are shared per bpid: if a pool already exists for this bpid,
+	 * just take a reference on it instead of creating a new one.
+	 */
+ if (dpa_bpid2pool_use(dpa_bp->bpid))
+ return 0;
+
+ if (dpa_bp->bpid == 0)
+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+ else
+ bp_params.bpid = dpa_bp->bpid;
+
+ dpa_bp->pool = bman_new_pool(&bp_params);
+ if (!dpa_bp->pool) {
+ pr_err("bman_new_pool() failed\n");
+ return -ENODEV;
+ }
+
+ dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid;
+
+#ifndef __rtems__
+ pdev = platform_device_register_simple("DPAA_bpool",
+ dpa_bp->bpid, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_register_failed;
+ }
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+
+ dpa_bp->dev = &pdev->dev;
+#endif /* __rtems__ */
+
+ if (dpa_bp->seed_cb) {
+ err = dpa_bp->seed_cb(dpa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
+
+ return 0;
+
+pool_seed_failed:
+#ifndef __rtems__
+pdev_mask_failed:
+ platform_device_unregister(pdev);
+pdev_register_failed:
+#endif /* __rtems__ */
+ bman_free_pool(dpa_bp->pool);
+
+ return err;
+}
+
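+/* Drain all buffers from @bp back to the driver: acquire them from Bman in
+ * batches of eight, falling back to one-by-one for the remainder, unmap them
+ * and hand each one to free_buf_cb.
+ */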
+void dpa_bp_drain(struct dpa_bp *bp)
+{
+ int ret;
+ u8 num = 8;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num, 0);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have less than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+#ifndef __rtems__
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_BIDIRECTIONAL);
+#endif /* __rtems__ */
+
+ bp->free_buf_cb(phys_to_virt(addr));
+ }
+ } while (ret > 0);
+}
+
+static void _dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+	/* The mapping between bpid and dpa_bp is done very late in the
+	 * allocation procedure; if something failed before the mapping, the
+	 * pool was never configured, so there is nothing to tear down here.
+	 */
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpa_bp_drain(bp);
+
+ dpa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+
+#ifndef __rtems__
+ if (bp->dev)
+ platform_device_unregister(to_platform_device(bp->dev));
+#endif /* __rtems__ */
+}
+
+void dpa_bp_free(struct dpa_priv_s *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ _dpa_bp_free(&priv->dpa_bp[i]);
+}
+
+struct dpa_bp *dpa_bpid2pool(int bpid)
+{
+ return dpa_bp_array[bpid];
+}
+
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
+{
+ dpa_bp_array[bpid] = dpa_bp;
+ atomic_set(&dpa_bp->refs, 1);
+}
+
+bool dpa_bpid2pool_use(int bpid)
+{
+ if (dpa_bpid2pool(bpid)) {
+ atomic_inc(&dpa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return dpa_get_queue_mapping(skb);
+}
+#endif
+
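+/* Allocate fqids->count FQ descriptors of the given type, chain them to
+ * @list and assign each one its work queue; an fqids->start of 0 leaves the
+ * FQIDs to be dynamically allocated later.
+ */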
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ const struct fqid_cell *fqids,
+ struct list_head *list,
+ enum dpa_fq_type fq_type)
+{
+ int i;
+ struct dpa_fq *dpa_fq;
+
+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
+ if (!dpa_fq)
+ return NULL;
+
+ for (i = 0; i < fqids->count; i++) {
+ dpa_fq[i].fq_type = fq_type;
+ dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
+ list_add_tail(&dpa_fq[i].list, list);
+ }
+
+ for (i = 0; i < fqids->count; i++)
+ _dpa_assign_wq(dpa_fq + i);
+
+ return dpa_fq;
+}
+
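+/* Allocate the standard set of FQs for one port from default_fqids: a
+ * single-entry error FQ, a single-entry default FQ and the remaining range
+ * as Tx FQs; the error/default FQs are recorded in @port_fqs. For Tx ports,
+ * the per-core Tx confirmation FQs may be allocated as well.
+ */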
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+ bool alloc_tx_conf_fqs,
+ enum port_type ptype)
+{
+ const struct fqid_cell *fqids;
+ struct dpa_fq *dpa_fq;
+ int num_ranges;
+ int i;
+
+ if (ptype == TX && alloc_tx_conf_fqs) {
+ if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
+ FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+ }
+
+ fqids = default_fqids[ptype];
+ num_ranges = 3;
+
+ for (i = 0; i < num_ranges; i++) {
+ switch (i) {
+ case 0:
+ /* The first queue is the error queue */
+ if (fqids[i].count != 1)
+ goto invalid_error_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+ ptype == RX ?
+ FQ_TYPE_RX_ERROR :
+ FQ_TYPE_TX_ERROR);
+ if (!dpa_fq)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_errq = &dpa_fq[0];
+ else
+ port_fqs->tx_errq = &dpa_fq[0];
+ break;
+ case 1:
+			/* The second queue is the default queue */
+ if (fqids[i].count != 1)
+ goto invalid_default_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+ ptype == RX ?
+ FQ_TYPE_RX_DEFAULT :
+ FQ_TYPE_TX_CONFIRM);
+ if (!dpa_fq)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_defq = &dpa_fq[0];
+ else
+ port_fqs->tx_defq = &dpa_fq[0];
+ break;
+ default:
+ /* all subsequent queues are Tx */
+ if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX))
+ goto fq_alloc_failed;
+ break;
+ }
+ }
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+
+invalid_default_queue:
+invalid_error_queue:
+	dev_err(dev, "Wrong number of default or error queues\n");
+ return -EINVAL;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
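+/* Lazily allocate a single QMan pool channel that is shared by all Rx frame
+ * queues of the private interfaces; subsequent callers get the same channel.
+ */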
+int dpa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret = qman_alloc_pool(&pool);
+
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
+void dpa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+
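+/* Subscribe every (affine) software portal to static dequeue from the pool
+ * channel passed in @__arg, so that Rx traffic on that channel can be
+ * serviced from any CPU.
+ */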
+int dpaa_eth_add_channel(void *__arg)
+{
+#ifndef __rtems__
+ const cpumask_t *cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
+ int cpu;
+ struct qman_portal *portal;
+
+#ifndef __rtems__
+ for_each_cpu(cpu, cpus) {
+#else /* __rtems__ */
+ for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
+#endif /* __rtems__ */
+
+ portal = (struct qman_portal *)qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ }
+ return 0;
+}
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
+ struct dpa_priv_s, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+#ifndef __rtems__
+ netif_tx_stop_all_queues(priv->net_dev);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+#ifndef __rtems__
+ netif_tx_wake_all_queues(priv->net_dev);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ }
+}
+
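+/* Allocate and create the egress congestion group for this interface, with a
+ * congestion state threshold derived from the MAC speed and CSCN/taildrop
+ * enabled; dpaa_eth_cgscn() is installed as the state change callback.
+ */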
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+	/* Zero the descriptor first so that fields not selected by we_mask
+	 * do not carry stack garbage.
+	 */
+	memset(&initcgr, 0, sizeof(initcgr));
+
+	/* Enable Congestion State Change Notifications and CS taildrop */
+	initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
+ */
+#ifndef __rtems__
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ else
+ cs_th = DPAA_CS_THRESHOLD_1G;
+#else /* __rtems__ */
+ /* FIXME */
+ cs_th = DPAA_CS_THRESHOLD_1G;
+#endif /* __rtems__ */
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ pr_err("Error %d creating CGR with ID %d\n", err,
+ priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ struct fman_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (u16)fman_port_get_qman_channel_id(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
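+/* Walk the interface's FQ list and attach the proper callbacks and channel
+ * to each FQ according to its type, recording the egress and Tx confirmation
+ * FQs in the private area. If fewer Tx FQs exist than DPAA_ETH_TX_QUEUES,
+ * reuse existing Tx FQs until every queue index is populated.
+ */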
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+ struct fman_port *tx_port)
+{
+ struct dpa_fq *fq;
+#ifndef __rtems__
+ u16 portals[NR_CPUS];
+ int cpu, num_portals = 0;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+ int egress_cnt = 0, conf_cnt = 0;
+
+#ifndef __rtems__
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+	if (num_portals == 0)
+		dev_err(priv->net_dev->dev.parent,
+			"No QMan software (affine) channels found\n");
+#else /* __rtems__ */
+ /* FIXME */
+#endif /* __rtems__ */
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ DPA_ERR_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ DPA_ERR_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_TX:
+ dpa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ DPA_ERR_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ DPA_ERR_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_ERROR:
+ DPA_ERR_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+#ifndef __rtems__
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ break;
+ }
+ }
+
+ /* The number of Tx queues may be smaller than the number of cores, if
+ * the Tx queue range is specified in the device tree instead of being
+ * dynamically allocated.
+ * Make sure all CPUs receive a corresponding Tx queue.
+ */
+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
+ break;
+ }
+ }
+}
+
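+/* Create a frame queue in QMan for @dpa_fq and, unless the FQ was marked
+ * NO_MODIFY, initialize and schedule it: placement, congestion group
+ * membership, optional taildrop and, for ingress FQs, annotation stashing.
+ */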
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
+{
+ int err;
+ const struct dpa_priv_s *priv;
+#ifndef __rtems__
+ struct device *dev;
+#endif /* __rtems__ */
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+ struct qman_fq *confq = NULL;
+ int queue_id;
+
+ priv = netdev_priv(dpa_fq->net_dev);
+#ifndef __rtems__
+ dev = dpa_fq->net_dev->dev.parent;
+#endif /* __rtems__ */
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+ if (err) {
+#ifndef __rtems__
+ dev_err(dev, "qman_create_fq() failed\n");
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ return err;
+ }
+ fq = &dpa_fq->fq_base;
+
+ if (dpa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ /* Note: we may get to keep an empty FQ in cache */
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+
+ /* FQ placement */
+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= QM_INITFQ_WE_CGID;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.fqd.oac_init.oac = QM_OAC_CG;
+#ifndef __rtems__
+ initfq.fqd.oac_init.oal =
+ (signed char)(min(sizeof(struct sk_buff) +
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+#else /* __rtems__ */
+ /* FIXME */
+ initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
+#endif /* __rtems__ */
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ qm_fqd_taildrop_set(&initfq.fqd.td,
+ DPA_FQ_TD, 1);
+			/* Use |= so enabling taildrop does not clobber the
+			 * control flags set above.
+			 */
+			initfq.fqd.fq_ctrl |= QM_FQCTRL_TDE;
+ }
+
+ /* Configure the Tx confirmation queue, now that we know
+ * which Tx queue it pairs with.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
+ if (queue_id >= 0)
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ initfq.fqd.context_a.hi = 0x1e000000;
+ initfq.fqd.context_a.lo = 0x80000000;
+ }
+ }
+
+ /* Put all *private* ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+ initfq.we_mask |= QM_INITFQ_WE_CGID;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.fqd.oac_init.oac = QM_OAC_CG;
+#ifndef __rtems__
+ initfq.fqd.oac_init.oal =
+ (signed char)(min(sizeof(struct sk_buff) +
+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
+#else /* __rtems__ */
+ /* FIXME */
+ initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
+#endif /* __rtems__ */
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |=
+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+ }
+
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (err < 0) {
+#ifndef __rtems__
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+#endif /* __rtems__ */
+ qman_destroy_fq(fq, 0);
+ return err;
+ }
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+#ifndef __rtems__
+static int _dpa_fq_free(struct device *dev, struct qman_fq *fq)
+{
+ int err, error;
+ struct dpa_fq *dpa_fq;
+ const struct dpa_priv_s *priv;
+
+ err = 0;
+
+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
+ priv = netdev_priv(dpa_fq->net_dev);
+
+ if (dpa_fq->init) {
+ err = qman_retire_fq(fq, NULL);
+ if (err < 0 && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+
+ error = qman_oos_fq(fq);
+ if (error < 0 && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), error);
+ if (err >= 0)
+ err = error;
+ }
+ }
+
+ qman_destroy_fq(fq, 0);
+ list_del(&dpa_fq->list);
+
+ return err;
+}
+
+int dpa_fq_free(struct device *dev, struct list_head *list)
+{
+ int err, error;
+ struct dpa_fq *dpa_fq, *tmp;
+
+ err = 0;
+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
+ error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+ if (error < 0 && err >= 0)
+ err = error;
+ }
+
+ return err;
+}
+#endif /* __rtems__ */
+
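+/* Configure and initialize a Tx port on the FMan side: the default and error
+ * FQIDs plus the buffer prefix layout the hardware writes in front of each
+ * frame.
+ */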
+static void
+dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq,
+ struct dpa_fq *defq,
+ struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fman_port_params params;
+ struct fman_buffer_prefix_content buf_prefix_content;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = buf_layout->parse_results;
+ buf_prefix_content.pass_hash_result = buf_layout->hash_results;
+ buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
+ buf_prefix_content.data_align = buf_layout->data_align;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+ params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_info("fman_port_config failed\n");
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_info("fman_port_cfg_buf_prefix_content failed\n");
+
+ err = fman_port_init(port);
+ if (err)
+		pr_err("fman_port_init failed\n");
+}
+
+static void
+dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp,
+ size_t count, struct dpa_fq *errq, struct dpa_fq *defq,
+ struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fman_port_params params;
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_rx_params *rx_p;
+ int i, err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = buf_layout->parse_results;
+ buf_prefix_content.pass_hash_result = buf_layout->hash_results;
+ buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
+ buf_prefix_content.data_align = buf_layout->data_align;
+
+ rx_p = &params.specific_params.rx_params;
+ rx_p->err_fqid = errq->fqid;
+ rx_p->dflt_fqid = defq->fqid;
+
+ count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
+ rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
+ for (i = 0; i < count; i++) {
+ rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size;
+ }
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_info("fman_port_config failed\n");
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_info("fman_port_cfg_buf_prefix_content failed\n");
+
+ err = fman_port_init(port);
+ if (err)
+		pr_err("fman_port_init failed\n");
+}
+
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev)
+{
+ struct fman_port *rxport = mac_dev->port[RX];
+ struct fman_port *txport = mac_dev->port[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+}
+
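+/* Walk an S/G table and release its data buffers back to their Bman pools,
+ * batching consecutive entries that share a bpid into a single release.
+ */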
+void dpa_release_sgt(struct qm_sg_entry *sgt)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
+ u8 i = 0, j;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ do {
+ dpa_bp = dpa_bpid2pool(sgt[i].bpid);
+ DPA_ERR_ON(!dpa_bp);
+
+ j = 0;
+ do {
+ DPA_ERR_ON(sgt[i].extension);
+
+ bmb[j].hi = sgt[i].addr_hi;
+ bmb[j].lo = be32_to_cpu(sgt[i].addr_lo);
+
+			j++;
+			i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !sgt[i - 1].final &&
+ sgt[i - 1].bpid == sgt[i].bpid);
+
+ while (bman_release(dpa_bp->pool, bmb, j, 0))
+ cpu_relax();
+ } while (!sgt[i - 1].final);
+}
+
+void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ memset(&bmb, 0, sizeof(bmb));
+ bm_buffer_set64(&bmb, fd->addr);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ DPA_ERR_ON(!dpa_bp);
+
+ if (fd->format == qm_fd_sg) {
+ vaddr = phys_to_virt(fd->addr);
+ sgt = vaddr + dpa_fd_offset(fd);
+
+#ifndef __rtems__
+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+#endif /* __rtems__ */
+
+ dpa_release_sgt(sgt);
+
+#ifndef __rtems__
+ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dpa_bp->dev, addr)) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ return;
+ }
+#else /* __rtems__ */
+ addr = (dma_addr_t)vaddr;
+#endif /* __rtems__ */
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+}
+
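+/* Classify an enqueue rejection (ERN) message by its rejection code and bump
+ * the matching per-CPU counter.
+ */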
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+#ifndef __rtems__
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results)
+{
+ struct fman_prs_result *parse_result;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h = NULL;
+ u8 l4_proto;
+ u16 ethertype = ntohs(skb->protocol);
+ int retval = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+	/* Note: the L3 csum seems to be already computed in software, but we
+	 * can't select L4-only checksumming in the FMan configuration anyway.
+	 */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (struct fman_prs_result *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ DPA_ERR_ON(!iph);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ DPA_ERR_ON(!ipv6h);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+		/* This could just as well be a BUG() */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+ parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+	/* On P1023 and similar platforms fd->cmd interpretation could be
+	 * disabled by setting the CONTEXT_A bit ICMD. That bit is currently
+	 * not set, so there is nothing to check here; if/when we start using
+	 * context_a, this bit must be checked.
+	 */
+
+return_error:
+ return retval;
+}
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
new file mode 100644
index 00000000..954de393
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
@@ -0,0 +1,113 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_COMMON_H
+#define __DPAA_ETH_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <soc/fsl/bman.h>
+#include <linux/of_platform.h>
+
+#include "dpaa_eth.h"
+
+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+/* used in napi related functions */
+extern u16 qman_portal_max;
+
+/* from dpa_ethtool.c */
+extern const struct ethtool_ops dpa_ethtool_ops;
+
+int dpa_netdev_init(struct net_device *net_dev,
+ const u8 *mac_addr,
+ u16 tx_timeout);
+int dpa_start(struct net_device *net_dev);
+int dpa_stop(struct net_device *net_dev);
+void dpa_timeout(struct net_device *net_dev);
+struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
+int dpa_ndo_init(struct net_device *net_dev);
+#ifndef __rtems__
+int dpa_set_features(struct net_device *dev, netdev_features_t features);
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features);
+#endif /* __rtems__ */
+int dpa_remove(struct platform_device *pdev);
+struct mac_device *dpa_mac_dev_get(struct platform_device *pdev);
+int dpa_mac_hw_index_get(struct platform_device *pdev);
+int dpa_mac_fman_index_get(struct platform_device *pdev);
+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
+void dpa_set_rx_mode(struct net_device *net_dev);
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout);
+int dpa_bp_alloc(struct dpa_bp *dpa_bp);
+void dpa_bp_free(struct dpa_priv_s *priv);
+struct dpa_bp *dpa_bpid2pool(int bpid);
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
+bool dpa_bpid2pool_use(int bpid);
+void dpa_bp_drain(struct dpa_bp *bp);
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+#endif
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ const struct fqid_cell *fqids,
+ struct list_head *list,
+ enum dpa_fq_type fq_type);
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+		     bool alloc_tx_conf_fqs,
+ enum port_type ptype);
+int dpa_get_channel(void);
+void dpa_release_channel(void);
+int dpaa_eth_add_channel(void *__arg);
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+ struct fman_port *tx_port);
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
+int dpa_fq_free(struct device *dev, struct list_head *list);
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev);
+void dpa_release_sgt(struct qm_sg_entry *sgt);
+void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg);
+#ifndef __rtems__
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results);
+#endif /* __rtems__ */
+#endif /* __DPAA_ETH_COMMON_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
new file mode 100644
index 00000000..2d0903e3
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
@@ -0,0 +1,710 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/highmem.h>
+#include <soc/fsl/bman.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+
+/* Convenience macros for storing/retrieving the skb back-pointers.
+ *
+ * NB: @off is an offset from a (struct sk_buff **) pointer!
+ */
+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
+ { \
+ skbh = (struct sk_buff **)addr; \
+ *(skbh + (off)) = skb; \
+ }
+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
+ { \
+ skbh = (struct sk_buff **)addr; \
+ skb = *(skbh + (off)); \
+ }
+
+/* DMA map and add a page frag back into the bpool.
+ * @vaddr fragment must have been allocated with netdev_alloc_frag(),
+ * specifically for fitting into @dpa_bp.
+ */
+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
+ int *count_ptr)
+{
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+
+ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ return;
+ }
+
+ bm_buffer_set64(&bmb, addr);
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+
+ (*count_ptr)++;
+}
+
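+/* Allocate up to eight buffers from page frags, build an skb around each,
+ * store the skb back-pointer just before the buffer start, DMA map the
+ * buffers and release them into the pool. Returns the number of buffers
+ * actually released, which may be zero under memory pressure.
+ */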
+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
+{
+ struct bm_buffer bmb[8];
+ void *new_buf;
+ dma_addr_t addr;
+ u8 i;
+ struct device *dev = dpa_bp->dev;
+ struct sk_buff *skb, **skbh;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ for (i = 0; i < 8; i++) {
+ /* We'll prepend the skb back-pointer; can't use the DPA
+ * priv space, because FMan will overwrite it (from offset 0)
+ * if it ends up being the second, third, etc. fragment
+ * in a S/G frame.
+ *
+ * We only need enough space to store a pointer, but allocate
+ * an entire cacheline for performance reasons.
+ */
+ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
+ if (unlikely(!new_buf))
+ goto netdev_alloc_failed;
+ new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
+
+ skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ put_page(virt_to_head_page(new_buf));
+ goto build_skb_failed;
+ }
+ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
+
+ addr = dma_map_single(dev, new_buf,
+ dpa_bp->size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto dma_map_failed;
+
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ /* Release the buffers. In case bman is busy, keep trying
+ * until successful. bman_release() is guaranteed to succeed
+ * in a reasonable amount of time
+ */
+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+ cpu_relax();
+ return i;
+
+dma_map_failed:
+ kfree_skb(skb);
+
+build_skb_failed:
+netdev_alloc_failed:
+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
+ WARN_ONCE(1, "Memory allocation failure on Rx\n");
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
+{
+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
+}
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpa_bp->config_count; j += 8)
+ dpa_bp_add_8_bufs(dpa_bp, i);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd)
+{
+ const struct qm_sg_entry *sgt;
+ int i;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct sk_buff **skbh;
+ struct sk_buff *skb = NULL;
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ int nr_frags;
+
+ /* retrieve skb back pointer */
+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
+
+ if (unlikely(fd->format == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
+ sgt[0].length, dma_dir);
+
+ /* remaining pages were mapped with dma_map_page() */
+ for (i = 1; i < nr_frags; i++) {
+ DPA_ERR_ON(sgt[i].extension);
+
+ dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
+ sgt[i].length, dma_dir);
+ }
+
+ /* Free the page frag that we allocated on Tx */
+ put_page(virt_to_head_page(sgt));
+ } else {
+ dma_unmap_single(dpa_bp->dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ }
+
+ return skb;
+}
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd)
+{
+ struct sk_buff *skb = NULL, **skbh;
+ ssize_t fd_off = dpa_fd_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr;
+
+ vaddr = phys_to_virt(addr);
+ DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Retrieve the skb and adjust data and tail pointers, to make sure
+ * forwarded skbs will have enough space on Tx if extra headers
+ * are added.
+ */
+ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
+
+ DPA_ERR_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, dpa_fd_length(fd));
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd,
+ int *count_ptr)
+{
+ const struct qm_sg_entry *sgt;
+ dma_addr_t addr = qm_fd_addr(fd);
+ ssize_t fd_off = dpa_fd_offset(fd);
+ dma_addr_t sg_addr;
+ void *vaddr, *sg_vaddr;
+ struct dpa_bp *dpa_bp;
+ struct page *page, *head_page;
+ int frag_offset, frag_len;
+ int page_offset;
+ int i;
+ struct sk_buff *skb = NULL, *skb_tmp, **skbh;
+
+ vaddr = phys_to_virt(addr);
+ DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpa_bp = priv->dpa_bp;
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ DPA_ERR_ON(sgt[i].extension);
+
+ /* We use a single global Rx pool */
+ DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+ SMP_CACHE_BYTES));
+
+ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+ if (i == 0) {
+ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
+ DPA_ERR_ON(skb->head != sg_vaddr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ DPA_ERR_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, sgt[i].length);
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Free (only) the skbuff shell because its data buffer
+ * is already a frag in the main skb.
+ */
+ get_page(head_page);
+ dev_kfree_skb(skb_tmp);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
+ */
+ frag_offset = sgt[i].offset + page_offset;
+ frag_len = sgt[i].length;
+			/* skb_add_rx_frag() does no checking on the page; if
+			 * we pass it a tail page, we'll end up with
+			 * bad page accounting and eventually with segfaults.
+			 */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
+ frag_len, dpa_bp->size);
+ }
+ /* Update the pool count for the current {cpu x bpool} */
+ (*count_ptr)--;
+
+ if (sgt[i].final)
+ break;
+ }
+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* recycle the SGT fragment */
+ DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
+ return skb;
+}
+
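+/* Rx fast path: unmap the frame buffer, convert the frame descriptor to an
+ * skb (contiguous or S/G), update the pool counters and hand the skb to the
+ * stack; frames with error status are counted and released back to Bman.
+ */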
+void _dpa_rx(struct net_device *net_dev,
+ struct qman_portal *portal,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid,
+ int *count_ptr)
+{
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ dma_addr_t addr = qm_fd_addr(fd);
+ u32 fd_status = fd->status;
+ unsigned int skb_len;
+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
+
+	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ goto _release_frame;
+ }
+
+ dpa_bp = priv->dpa_bp;
+ DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
+ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
+
+ /* The only FD types that we may receive are contig and S/G */
+ DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
+
+ if (likely(fd->format == qm_fd_contig))
+ skb = contig_fd_to_skb(priv, fd);
+ else
+ skb = sg_fd_to_skb(priv, fd, count_ptr);
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ (*count_ptr)--;
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* IP Reassembled frames are allowed to be larger than MTU */
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+ !(fd_status & FM_FD_IPR))) {
+ percpu_stats->rx_dropped++;
+ goto drop_bad_frame;
+ }
+
+ skb_len = skb->len;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ goto packet_dropped;
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+
+packet_dropped:
+ return;
+
+drop_bad_frame:
+ dev_kfree_skb(skb);
+ return;
+
+_release_frame:
+ dpa_fd_release(net_dev, fd);
+}
+
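+/* Build a contiguous frame descriptor around a linear skb: store the skb
+ * back-pointer in the Tx headroom, enable hardware checksumming if needed
+ * and DMA map everything FMan may look at.
+ */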
+static int skb_to_contig_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *count_ptr, int *offset)
+{
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ struct net_device *net_dev = priv->net_dev;
+ int err;
+ enum dma_data_direction dma_dir;
+ unsigned char *buffer_start;
+
+	/* We are guaranteed to have at least tx_headroom bytes
+	 * available, so just use that for offset.
+	 */
+	fd->bpid = 0xff;
+	buffer_start = skb->data - priv->tx_headroom;
+	fd->offset = priv->tx_headroom;
+	dma_dir = DMA_TO_DEVICE;
+
+	DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ fd->format = qm_fd_contig;
+ fd->length20 = skb->len;
+ fd->cmd |= FM_FD_CMD_FCO;
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dpa_bp->dev, skbh,
+ skb_tail_pointer(skb) - buffer_start, dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ fd->addr_hi = (u8)upper_32_bits(addr);
+ fd->addr_lo = lower_32_bits(addr);
+
+ return 0;
+}
+
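+/* Build a scatter/gather frame descriptor: the S/G table lives in a page
+ * frag together with the Tx headroom and skb back-pointer; entry 0 covers
+ * the linear part of the skb and the remaining entries cover its page
+ * fragments.
+ */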
+static int skb_to_sg_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ dma_addr_t addr;
+ struct sk_buff **skbh;
+ struct net_device *net_dev = priv->net_dev;
+ int err;
+
+ struct qm_sg_entry *sgt;
+ void *sgt_buf;
+ void *buffer_start;
+ skb_frag_t *frag;
+ int i, j;
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ fd->format = qm_fd_sg;
+
+ /* get a page frag to store the SGTable */
+ sgt_buf = netdev_alloc_frag(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ if (unlikely(!sgt_buf)) {
+ netdev_err(net_dev, "netdev_alloc_frag() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt[0].bpid = 0xff;
+ sgt[0].offset = 0;
+ sgt[0].length = cpu_to_be32(skb_headlen(skb));
+ sgt[0].extension = 0;
+ sgt[0].final = 0;
+ addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+ sgt[0].addr_hi = (u8)upper_32_bits(addr);
+ sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
+
+ /* populate the rest of SGT entries */
+ for (i = 1; i <= nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ sgt[i].bpid = 0xff;
+ sgt[i].offset = 0;
+ sgt[i].length = cpu_to_be32(frag->size);
+ sgt[i].extension = 0;
+ sgt[i].final = 0;
+
+ DPA_ERR_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ /* keep the offset in the address */
+ sgt[i].addr_hi = (u8)upper_32_bits(addr);
+ sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
+ }
+ sgt[i - 1].final = 1;
+
+ fd->length20 = skb->len;
+ fd->offset = priv->tx_headroom;
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - priv->tx_headroom;
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+
+ addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ dev_err(dpa_bp->dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = 0xff;
+ fd->cmd |= FM_FD_CMD_FCO;
+ fd->addr_hi = (u8)upper_32_bits(addr);
+ fd->addr_lo = lower_32_bits(addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
+ cpu_to_be32(sgt[j].length), dma_dir);
+sg0_map_failed:
+csum_failed:
+ put_page(virt_to_head_page(sgt_buf));
+
+ return err;
+}
+
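+/* Transmit entry point: convert the skb to a contiguous or S/G frame
+ * descriptor, enqueue it on the queue selected by dpa_get_queue_mapping()
+ * and clean up on failure. Always returns NETDEV_TX_OK; errors are counted
+ * in the per-CPU stats.
+ */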
+int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct rtnl_link_stats64 *percpu_stats;
+ int err = 0;
+ const int queue_mapping = dpa_get_queue_mapping(skb);
+ bool nonlinear = skb_is_nonlinear(skb);
+ int *countptr, offset = 0;
+
+ priv = netdev_priv(net_dev);
+ /* Non-migratable context, safe to use raw_cpu_ptr */
+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ clear_fd(&fd);
+
+ if (!nonlinear) {
+		/* We're going to store the skb backpointer at the beginning
+		 * of the data buffer, so we need a privately owned skb.
+		 *
+		 * We've made sure the skb is not shared via dev->priv_flags,
+		 * but we still need to verify that the skb head is not cloned.
+		 */
+ if (skb_cow_head(skb, priv->tx_headroom))
+ goto enomem;
+
+ BUG_ON(skb_is_nonlinear(skb));
+ }
+
+	/* MAX_SKB_FRAGS is equal to or larger than our DPA_SGT_MAX_ENTRIES;
+	 * make sure we don't feed FMan with more fragments than it supports.
+	 * Note that we use the first sgt entry to store the linear part of
+	 * the skb, so we're one extra frag short.
+	 */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* If the egress skb contains more fragments than we support
+ * we have no choice but to linearize it ourselves.
+ */
+ if (unlikely(nonlinear) && __skb_linearize(skb))
+ goto enomem;
+
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+ /* dpa_xmit failed */
+ if (fd.bpid != 0xff) {
+ (*countptr)--;
+ dpa_fd_release(net_dev, &fd);
+ percpu_stats->tx_errors++;
+ return NETDEV_TX_OK;
+ }
+ _dpa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
new file mode 100644
index 00000000..3edc70c2
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -0,0 +1,171 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none");
+}
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+ int i = 0;
+ char *str;
+ struct dpa_fq *fq;
+ struct dpa_fq *tmp;
+ struct dpa_fq *prev = NULL;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ char *prevstr = NULL;
+
+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ str = "Rx default";
+ break;
+ case FQ_TYPE_RX_ERROR:
+ str = "Rx error";
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
+ break;
+ case FQ_TYPE_TX_ERROR:
+ str = "Tx error";
+ break;
+ case FQ_TYPE_TX:
+ str = "Tx";
+ break;
+ default:
+ str = "Unknown";
+ }
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 &&
+ str == prevstr) {
+ last_fqid = fq->fqid;
+ } else {
+ first_fqid = fq->fqid;
+ last_fqid = fq->fqid;
+ }
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (prev) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+ prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ return bytes;
+}
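/*
 * Illustrative sketch (not part of the imported file): the
 * range-coalescing logic above, reduced to a self-contained userspace
 * form. Runs of adjacent FQIDs print as "first - last"; isolated ones
 * print alone. A plain int array (n >= 1 assumed) stands in for the
 * driver's dpa_fq list.
 */
#include <stdio.h>
#include <stdlib.h>

static void print_fqid_ranges(const int *fqid, int n)
{
	int first = fqid[0], last = fqid[0];
	int i;

	for (i = 1; i <= n; i++) {
		if (i < n && abs(fqid[i] - last) == 1) {
			last = fqid[i];		/* extend the current run */
			continue;
		}
		if (first == last)
			printf("%d\n", first);
		else
			printf("%d - %d\n", first, last);
		if (i < n)
			first = last = fqid[i];	/* start a new run */
	}
}

/* print_fqid_ranges((int[]){256, 257, 258, 260}, 4)
 * prints "256 - 258" followed by "260".
 */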
+
+static ssize_t dpaa_eth_show_bpids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t bytes = 0;
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ int i = 0;
+
+ for (i = 0; i < priv->bp_count; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ dpa_bp[i].bpid);
+
+ return bytes;
+}
+
+static struct device_attribute dpaa_eth_attrs[] = {
+ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
+ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
+ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
+};
+
+void dpaa_eth_sysfs_init(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
+ dev_err(dev, "Error creating sysfs file\n");
+ while (i > 0)
+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
+ return;
+ }
+}
+
+void dpaa_eth_sysfs_remove(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ device_remove_file(dev, &dpaa_eth_attrs[i]);
+}
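/*
 * Illustrative sketch (not part of the imported file): the two hooks
 * above are paired around the netdev lifetime. A hypothetical probe
 * path would expose the attributes once the netdev is registered:
 *
 *	err = register_netdev(net_dev);
 *	if (err)
 *		return err;
 *	dpaa_eth_sysfs_init(&net_dev->dev);
 *
 * with dpaa_eth_sysfs_remove(&net_dev->dev) called before
 * unregister_netdev() on teardown.
 */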
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
new file mode 100644
index 00000000..46eca272
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -0,0 +1,143 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __rtems__
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa_eth
+
+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa_eth.h"
+#include <linux/tracepoint.h>
+
+#define fd_format_name(format) { qm_fd_##format, #format }
+#define fd_format_list \
+ fd_format_name(contig), \
+ fd_format_name(sg)
+
+/* This is used to declare a class of events.
+ * Individual events of this type are defined below.
+ */
+
+/* Store details about a frame descriptor and the FQ on which it was
+ * transmitted/received.
+ */
+DECLARE_EVENT_CLASS(dpaa_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fq, fd),
+
+ /* A structure containing the relevant information we want to record.
+ * Declare name and type for each normal element, name, type and size
+ * for arrays. Use __string for variable length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u32, fqid)
+ __field(u64, fd_addr)
+ __field(u8, fd_format)
+ __field(u16, fd_offset)
+ __field(u32, fd_length)
+ __field(u32, fd_status)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared fields */
+ TP_fast_assign(
+ __entry->fqid = fq->fqid;
+ __entry->fd_addr = qm_fd_addr_get64(fd);
+ __entry->fd_format = fd->format;
+ __entry->fd_offset = dpa_fd_offset(fd);
+ __entry->fd_length = dpa_fd_length(fd);
+ __entry->fd_status = fd->status;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is triggered */
+ TP_printk("[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u, status=0x%08x",
+ __get_str(name), __entry->fqid, __entry->fd_addr,
+ __print_symbolic(__entry->fd_format, fd_format_list),
+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
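/*
 * Illustrative sketch (not part of the imported file): each
 * DEFINE_EVENT() above generates a trace_<event>() inline that the
 * datapath calls at the matching spot, e.g. just before enqueueing an
 * egress frame (variable names here are illustrative):
 *
 *	trace_dpa_tx_fd(net_dev, egress_fq, &fd);
 *
 * The event is compiled in but dormant until enabled, e.g. via
 * /sys/kernel/debug/tracing/events/dpaa_eth/dpa_tx_fd/enable.
 */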
+
+#endif /* _DPAA_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa_eth_trace
+#include <trace/define_trace.h>
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
new file mode 100644
index 00000000..edf8d665
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -0,0 +1,417 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+#include "dpaa_eth_common.h"
+
+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx confirm",
+ "tx S/G",
+ "tx error",
+ "rx error",
+ "bp count"
+};
+
+static char dpa_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+
+ /* congestion related stats */
+ "congestion time (ms)",
+ "entered congestion",
+ "congested (0/1)"
+};
+
+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
+
+static int dpa_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!priv->mac_dev->phy_dev) {
+ netdev_dbg(net_dev, "phy device not initialized\n");
+ return 0;
+ }
+
+ err = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
+
+ return err;
+}
+
+static int dpa_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!priv->mac_dev->phy_dev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ err = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
+ if (err < 0)
+ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err);
+
+ return err;
+}
+
+static void dpa_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ int len;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver));
+ len = snprintf(drvinfo->version, sizeof(drvinfo->version),
+ "%X", 0);
+ len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%X", 0);
+
+ if (len >= sizeof(drvinfo->fw_version)) {
+ /* Truncated output */
+ netdev_notice(net_dev, "snprintf() = %d\n", len);
+ }
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static u32 dpa_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void dpa_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+static int dpa_nway_reset(struct net_device *net_dev)
+{
+ int err;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!priv->mac_dev->phy_dev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ err = 0;
+ if (priv->mac_dev->phy_dev->autoneg) {
+ err = phy_start_aneg(priv->mac_dev->phy_dev);
+ if (err < 0)
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static void dpa_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phy_dev = mac_dev->phy_dev;
+ if (!phy_dev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return;
+ }
+
+ epause->autoneg = mac_dev->autoneg_pause;
+ epause->rx_pause = mac_dev->rx_pause_active;
+ epause->tx_pause = mac_dev->tx_pause_active;
+}
+
+static int dpa_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+ int err;
+ u32 newadv, oldadv;
+ bool rx_pause, tx_pause;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phy_dev = mac_dev->phy_dev;
+ if (!phy_dev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!(phy_dev->supported & SUPPORTED_Pause) ||
+ (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ /* The MAC should know how to handle PAUSE frame autonegotiation before
+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
+ * settings.
+ */
+ mac_dev->autoneg_pause = !!epause->autoneg;
+ mac_dev->rx_pause_req = !!epause->rx_pause;
+ mac_dev->tx_pause_req = !!epause->tx_pause;
+
+ /* Determine the sym/asym advertised PAUSE capabilities from the desired
+ * rx/tx pause settings.
+ */
+ newadv = 0;
+ if (epause->rx_pause)
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (epause->tx_pause)
+ newadv |= ADVERTISED_Asym_Pause;
+
+ oldadv = phy_dev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ /* If there are differences between the old and the new advertised
+ * values, restart PHY autonegotiation and advertise the new values.
+ */
+ if (oldadv != newadv) {
+ phy_dev->advertising &= ~(ADVERTISED_Pause
+ | ADVERTISED_Asym_Pause);
+ phy_dev->advertising |= newadv;
+ if (phy_dev->autoneg) {
+ err = phy_start_aneg(phy_dev);
+ if (err < 0)
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ err);
+ }
+ }
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
+
+ return err;
+}
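/*
 * Illustrative note: the rx/tx pause to advertisement mapping computed
 * above follows the usual 802.3 flow-control scheme:
 *
 *	rx_pause  tx_pause	newadv
 *	   0	     0		0
 *	   0	     1		Asym_Pause
 *	   1	     0		Pause | Asym_Pause
 *	   1	     1		Pause | Asym_Pause
 */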
+
+static int dpa_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
+ int crr_cpu, u64 bp_count, u64 *data)
+{
+ int num_values = num_cpus + 1;
+ int crr = 0;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
+}
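/*
 * Illustrative note: copy_stats() fills an interleaved layout. With
 * num_cpus = 2 (so num_values = 3), statistic row r for CPU c lands at
 * data[r * 3 + c] and its running total at data[r * 3 + 2]; e.g.
 * "rx packets" (row 1) for CPU1 is data[4] and its total data[5]. The
 * global stats then start at data[3 * DPA_STATS_PERCPU_LEN].
 */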
+
+static void dpa_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 bp_count, cg_time, cg_num, cg_status;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct qm_mcr_querycgr query_cgr;
+ struct dpa_rx_errors rx_errors;
+ struct dpa_ern_cnt ern_cnt;
+ struct dpa_priv_s *priv;
+ unsigned int num_cpus, offset;
+ struct dpa_bp *dpa_bp;
+ int total_stats, i;
+
+ total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ dpa_bp = priv->dpa_bp;
+ num_cpus = num_online_cpus();
+ bp_count = 0;
+
+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ if (dpa_bp->percpu_count)
+ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+ }
+
+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+ /* gather congestion related counters */
+ cg_num = 0;
+ cg_status = 0;
+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+ cg_num = priv->cgr_data.cgr_congested_count;
+ cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like the QMan API does) */
+ priv->cgr_data.congested_jiffies = 0;
+ priv->cgr_data.cgr_congested_count = 0;
+ }
+
+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
+ data[offset++] = cg_time;
+ data[offset++] = cg_num;
+ data[offset++] = cg_status;
+}
+
+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpa_stats_percpu[i], j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpa_stats_percpu[i]);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ memcpy(strings, dpa_stats_global, size);
+}
+
+const struct ethtool_ops dpa_ethtool_ops = {
+ .get_settings = dpa_get_settings,
+ .set_settings = dpa_set_settings,
+ .get_drvinfo = dpa_get_drvinfo,
+ .get_msglevel = dpa_get_msglevel,
+ .set_msglevel = dpa_set_msglevel,
+ .nway_reset = dpa_nway_reset,
+ .get_pauseparam = dpa_get_pauseparam,
+ .set_pauseparam = dpa_set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_sset_count = dpa_get_sset_count,
+ .get_ethtool_stats = dpa_get_ethtool_stats,
+ .get_strings = dpa_get_strings,
+};
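/*
 * Illustrative sketch (not part of the imported file): the ops table is
 * attached during netdev setup with a plain assignment:
 *
 *	net_dev->ethtool_ops = &dpa_ethtool_ops;
 *
 * after which e.g. "ethtool -S <iface>" exercises get_sset_count(),
 * get_strings() and get_ethtool_stats() above.
 */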
diff --git a/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h b/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h
new file mode 100644
index 00000000..92f2e879
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Define a macro that calculates the CRC value of an Ethernet MAC address
+ * (48-bit address), using the standard CRC-32 lookup table below.
+ */
+
+#ifndef __crc_mac_addr_ext_h
+#define __crc_mac_addr_ext_h
+
+#include <linux/types.h>
+
+static u32 crc_table[256] = {
+ 0x00000000,
+ 0x77073096,
+ 0xee0e612c,
+ 0x990951ba,
+ 0x076dc419,
+ 0x706af48f,
+ 0xe963a535,
+ 0x9e6495a3,
+ 0x0edb8832,
+ 0x79dcb8a4,
+ 0xe0d5e91e,
+ 0x97d2d988,
+ 0x09b64c2b,
+ 0x7eb17cbd,
+ 0xe7b82d07,
+ 0x90bf1d91,
+ 0x1db71064,
+ 0x6ab020f2,
+ 0xf3b97148,
+ 0x84be41de,
+ 0x1adad47d,
+ 0x6ddde4eb,
+ 0xf4d4b551,
+ 0x83d385c7,
+ 0x136c9856,
+ 0x646ba8c0,
+ 0xfd62f97a,
+ 0x8a65c9ec,
+ 0x14015c4f,
+ 0x63066cd9,
+ 0xfa0f3d63,
+ 0x8d080df5,
+ 0x3b6e20c8,
+ 0x4c69105e,
+ 0xd56041e4,
+ 0xa2677172,
+ 0x3c03e4d1,
+ 0x4b04d447,
+ 0xd20d85fd,
+ 0xa50ab56b,
+ 0x35b5a8fa,
+ 0x42b2986c,
+ 0xdbbbc9d6,
+ 0xacbcf940,
+ 0x32d86ce3,
+ 0x45df5c75,
+ 0xdcd60dcf,
+ 0xabd13d59,
+ 0x26d930ac,
+ 0x51de003a,
+ 0xc8d75180,
+ 0xbfd06116,
+ 0x21b4f4b5,
+ 0x56b3c423,
+ 0xcfba9599,
+ 0xb8bda50f,
+ 0x2802b89e,
+ 0x5f058808,
+ 0xc60cd9b2,
+ 0xb10be924,
+ 0x2f6f7c87,
+ 0x58684c11,
+ 0xc1611dab,
+ 0xb6662d3d,
+ 0x76dc4190,
+ 0x01db7106,
+ 0x98d220bc,
+ 0xefd5102a,
+ 0x71b18589,
+ 0x06b6b51f,
+ 0x9fbfe4a5,
+ 0xe8b8d433,
+ 0x7807c9a2,
+ 0x0f00f934,
+ 0x9609a88e,
+ 0xe10e9818,
+ 0x7f6a0dbb,
+ 0x086d3d2d,
+ 0x91646c97,
+ 0xe6635c01,
+ 0x6b6b51f4,
+ 0x1c6c6162,
+ 0x856530d8,
+ 0xf262004e,
+ 0x6c0695ed,
+ 0x1b01a57b,
+ 0x8208f4c1,
+ 0xf50fc457,
+ 0x65b0d9c6,
+ 0x12b7e950,
+ 0x8bbeb8ea,
+ 0xfcb9887c,
+ 0x62dd1ddf,
+ 0x15da2d49,
+ 0x8cd37cf3,
+ 0xfbd44c65,
+ 0x4db26158,
+ 0x3ab551ce,
+ 0xa3bc0074,
+ 0xd4bb30e2,
+ 0x4adfa541,
+ 0x3dd895d7,
+ 0xa4d1c46d,
+ 0xd3d6f4fb,
+ 0x4369e96a,
+ 0x346ed9fc,
+ 0xad678846,
+ 0xda60b8d0,
+ 0x44042d73,
+ 0x33031de5,
+ 0xaa0a4c5f,
+ 0xdd0d7cc9,
+ 0x5005713c,
+ 0x270241aa,
+ 0xbe0b1010,
+ 0xc90c2086,
+ 0x5768b525,
+ 0x206f85b3,
+ 0xb966d409,
+ 0xce61e49f,
+ 0x5edef90e,
+ 0x29d9c998,
+ 0xb0d09822,
+ 0xc7d7a8b4,
+ 0x59b33d17,
+ 0x2eb40d81,
+ 0xb7bd5c3b,
+ 0xc0ba6cad,
+ 0xedb88320,
+ 0x9abfb3b6,
+ 0x03b6e20c,
+ 0x74b1d29a,
+ 0xead54739,
+ 0x9dd277af,
+ 0x04db2615,
+ 0x73dc1683,
+ 0xe3630b12,
+ 0x94643b84,
+ 0x0d6d6a3e,
+ 0x7a6a5aa8,
+ 0xe40ecf0b,
+ 0x9309ff9d,
+ 0x0a00ae27,
+ 0x7d079eb1,
+ 0xf00f9344,
+ 0x8708a3d2,
+ 0x1e01f268,
+ 0x6906c2fe,
+ 0xf762575d,
+ 0x806567cb,
+ 0x196c3671,
+ 0x6e6b06e7,
+ 0xfed41b76,
+ 0x89d32be0,
+ 0x10da7a5a,
+ 0x67dd4acc,
+ 0xf9b9df6f,
+ 0x8ebeeff9,
+ 0x17b7be43,
+ 0x60b08ed5,
+ 0xd6d6a3e8,
+ 0xa1d1937e,
+ 0x38d8c2c4,
+ 0x4fdff252,
+ 0xd1bb67f1,
+ 0xa6bc5767,
+ 0x3fb506dd,
+ 0x48b2364b,
+ 0xd80d2bda,
+ 0xaf0a1b4c,
+ 0x36034af6,
+ 0x41047a60,
+ 0xdf60efc3,
+ 0xa867df55,
+ 0x316e8eef,
+ 0x4669be79,
+ 0xcb61b38c,
+ 0xbc66831a,
+ 0x256fd2a0,
+ 0x5268e236,
+ 0xcc0c7795,
+ 0xbb0b4703,
+ 0x220216b9,
+ 0x5505262f,
+ 0xc5ba3bbe,
+ 0xb2bd0b28,
+ 0x2bb45a92,
+ 0x5cb36a04,
+ 0xc2d7ffa7,
+ 0xb5d0cf31,
+ 0x2cd99e8b,
+ 0x5bdeae1d,
+ 0x9b64c2b0,
+ 0xec63f226,
+ 0x756aa39c,
+ 0x026d930a,
+ 0x9c0906a9,
+ 0xeb0e363f,
+ 0x72076785,
+ 0x05005713,
+ 0x95bf4a82,
+ 0xe2b87a14,
+ 0x7bb12bae,
+ 0x0cb61b38,
+ 0x92d28e9b,
+ 0xe5d5be0d,
+ 0x7cdcefb7,
+ 0x0bdbdf21,
+ 0x86d3d2d4,
+ 0xf1d4e242,
+ 0x68ddb3f8,
+ 0x1fda836e,
+ 0x81be16cd,
+ 0xf6b9265b,
+ 0x6fb077e1,
+ 0x18b74777,
+ 0x88085ae6,
+ 0xff0f6a70,
+ 0x66063bca,
+ 0x11010b5c,
+ 0x8f659eff,
+ 0xf862ae69,
+ 0x616bffd3,
+ 0x166ccf45,
+ 0xa00ae278,
+ 0xd70dd2ee,
+ 0x4e048354,
+ 0x3903b3c2,
+ 0xa7672661,
+ 0xd06016f7,
+ 0x4969474d,
+ 0x3e6e77db,
+ 0xaed16a4a,
+ 0xd9d65adc,
+ 0x40df0b66,
+ 0x37d83bf0,
+ 0xa9bcae53,
+ 0xdebb9ec5,
+ 0x47b2cf7f,
+ 0x30b5ffe9,
+ 0xbdbdf21c,
+ 0xcabac28a,
+ 0x53b39330,
+ 0x24b4a3a6,
+ 0xbad03605,
+ 0xcdd70693,
+ 0x54de5729,
+ 0x23d967bf,
+ 0xb3667a2e,
+ 0xc4614ab8,
+ 0x5d681b02,
+ 0x2a6f2b94,
+ 0xb40bbe37,
+ 0xc30c8ea1,
+ 0x5a05df1b,
+ 0x2d02ef8d
+};
+
+/* CRC calculation */
+#define GET_MAC_ADDR_CRC(addr, crc) \
+{ \
+ u32 i; \
+ u8 data; \
+ crc = 0xffffffff; \
+ for (i = 0; i < 6; i++) { \
+ data = (u8)(addr >> ((5 - i) * 8)); \
+ crc = crc ^ data; \
+ crc = crc_table[crc & 0xff] ^ (crc >> 8); \
+ } \
+}
+
+#endif /* __crc_mac_addr_ext_h */
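/*
 * Illustrative sketch (not part of the imported file): the macro takes
 * the six MAC bytes packed into a u64 with the first octet in bits
 * 47:40, as the (5 - i) * 8 shift above implies:
 *
 *	u64 addr = 0x0180c2000001ULL;	// 01:80:c2:00:00:01
 *	u32 crc;
 *
 *	GET_MAC_ADDR_CRC(addr, crc);
 *
 * A hash-filter user would then keep a few of the top bits,
 * e.g. crc >> 26 for a 64-bucket table.
 */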
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.c b/linux/drivers/net/ethernet/freescale/fman/fman.c
new file mode 100644
index 00000000..5119b400
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.c
@@ -0,0 +1,2957 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman.h"
+#include "fman_muram.h"
+#include <asm/mpc85xx.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#ifdef __rtems__
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+/* General defines */
+#define FMAN_LIODN_TBL 64 /* size of LIODN table */
+#define MAX_NUM_OF_MACS 10
+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
+#define BASE_RX_PORTID 0x08
+#define BASE_TX_PORTID 0x28
+
+/* Modules registers offsets */
+#define BMI_OFFSET 0x00080000
+#define QMI_OFFSET 0x00080400
+#define DMA_OFFSET 0x000C2000
+#define FPM_OFFSET 0x000C3000
+#define IMEM_OFFSET 0x000C4000
+#define CGP_OFFSET 0x000DB000
+
+/* Exceptions bit map */
+#define EX_DMA_BUS_ERROR 0x80000000
+#define EX_DMA_READ_ECC 0x40000000
+#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
+#define EX_DMA_FM_WRITE_ECC 0x10000000
+#define EX_FPM_STALL_ON_TASKS 0x08000000
+#define EX_FPM_SINGLE_ECC 0x04000000
+#define EX_FPM_DOUBLE_ECC 0x02000000
+#define EX_QMI_SINGLE_ECC 0x01000000
+#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
+#define EX_QMI_DOUBLE_ECC 0x00400000
+#define EX_BMI_LIST_RAM_ECC 0x00200000
+#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
+#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
+#define EX_IRAM_ECC 0x00040000
+#define EX_MURAM_ECC 0x00020000
+#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
+#define EX_DMA_SINGLE_PORT_ECC 0x00008000
+
+#define DFLT_EXCEPTIONS \
+ ((EX_DMA_BUS_ERROR) | \
+ (EX_DMA_READ_ECC) | \
+ (EX_DMA_SYSTEM_WRITE_ECC) | \
+ (EX_DMA_FM_WRITE_ECC) | \
+ (EX_FPM_STALL_ON_TASKS) | \
+ (EX_FPM_SINGLE_ECC) | \
+ (EX_FPM_DOUBLE_ECC) | \
+ (EX_QMI_DEQ_FROM_UNKNOWN_PORTID) | \
+ (EX_BMI_LIST_RAM_ECC) | \
+ (EX_BMI_STORAGE_PROFILE_ECC) | \
+ (EX_BMI_STATISTICS_RAM_ECC) | \
+ (EX_MURAM_ECC) | \
+ (EX_BMI_DISPATCH_RAM_ECC) | \
+ (EX_QMI_DOUBLE_ECC) | \
+ (EX_QMI_SINGLE_ECC))
+
+/* DMA defines */
+/* masks */
+#define DMA_MODE_AID_OR 0x20000000
+#define DMA_MODE_SBER 0x10000000
+#define DMA_MODE_BER 0x00200000
+#define DMA_MODE_ECC 0x00000020
+#define DMA_MODE_SECURE_PROT 0x00000800
+#define DMA_MODE_EMER_READ 0x00080000
+#define DMA_MODE_AXI_DBG_MASK 0x0F000000
+
+#define DMA_TRANSFER_PORTID_MASK 0xFF000000
+#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
+#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
+
+#define DMA_STATUS_BUS_ERR 0x08000000
+#define DMA_STATUS_READ_ECC 0x04000000
+#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
+#define DMA_STATUS_FM_WRITE_ECC 0x01000000
+#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
+
+#define DMA_MODE_CACHE_OR_SHIFT 30
+#define DMA_MODE_AXI_DBG_SHIFT 24
+#define DMA_MODE_CEN_SHIFT 13
+#define DMA_MODE_CEN_MASK 0x00000007
+#define DMA_MODE_DBG_SHIFT 7
+#define DMA_MODE_EMER_LVL_SHIFT 6
+#define DMA_MODE_AID_MODE_SHIFT 4
+
+#define DMA_THRESH_COMMQ_SHIFT 24
+#define DMA_THRESH_READ_INT_BUF_SHIFT 16
+#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
+#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f
+
+#define DMA_TRANSFER_PORTID_SHIFT 24
+#define DMA_TRANSFER_TNUM_SHIFT 16
+
+#define DMA_CAM_SIZEOF_ENTRY 0x40
+#define DMA_CAM_UNITS 8
+
+#define DMA_LIODN_SHIFT 16
+#define DMA_LIODN_BASE_MASK 0x00000FFF
+
+/* FPM defines */
+#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
+#define FPM_EV_MASK_STALL 0x40000000
+#define FPM_EV_MASK_SINGLE_ECC 0x20000000
+#define FPM_EV_MASK_RELEASE_FM 0x00010000
+#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
+#define FPM_EV_MASK_STALL_EN 0x00004000
+#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
+#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
+#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
+
+#define FPM_RAM_MURAM_ECC 0x00008000
+#define FPM_RAM_IRAM_ECC 0x00004000
+#define FPM_RAM_MURAM_TEST_ECC 0x20000000
+#define FPM_RAM_IRAM_TEST_ECC 0x10000000
+#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
+#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
+#define FPM_RAM_IRAM_ECC_EN 0x40000000
+#define FPM_RAM_RAMS_ECC_EN 0x80000000
+#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
+
+#define FPM_REV1_MAJOR_MASK 0x0000FF00
+#define FPM_REV1_MINOR_MASK 0x000000FF
+
+#define FPM_DISP_LIMIT_SHIFT 24
+
+#define FPM_PRT_FM_CTL1 0x00000001
+#define FPM_PRT_FM_CTL2 0x00000002
+#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
+#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
+
+#define FPM_THR1_PRS_SHIFT 24
+#define FPM_THR1_KG_SHIFT 16
+#define FPM_THR1_PLCR_SHIFT 8
+#define FPM_THR1_BMI_SHIFT 0
+
+#define FPM_THR2_QMI_ENQ_SHIFT 24
+#define FPM_THR2_QMI_DEQ_SHIFT 0
+#define FPM_THR2_FM_CTL1_SHIFT 16
+#define FPM_THR2_FM_CTL2_SHIFT 8
+
+#define FPM_EV_MASK_CAT_ERR_SHIFT 1
+#define FPM_EV_MASK_DMA_ERR_SHIFT 0
+
+#define FPM_REV1_MAJOR_SHIFT 8
+
+#define FPM_RSTC_FM_RESET 0x80000000
+#define FPM_RSTC_MAC0_RESET 0x40000000
+#define FPM_RSTC_MAC1_RESET 0x20000000
+#define FPM_RSTC_MAC2_RESET 0x10000000
+#define FPM_RSTC_MAC3_RESET 0x08000000
+#define FPM_RSTC_MAC8_RESET 0x04000000
+#define FPM_RSTC_MAC4_RESET 0x02000000
+#define FPM_RSTC_MAC5_RESET 0x01000000
+#define FPM_RSTC_MAC6_RESET 0x00800000
+#define FPM_RSTC_MAC7_RESET 0x00400000
+#define FPM_RSTC_MAC9_RESET 0x00200000
+
+#define FPM_TS_INT_SHIFT 16
+#define FPM_TS_CTL_EN 0x80000000
+
+/* BMI defines */
+#define BMI_INIT_START 0x80000000
+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
+#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
+#define BMI_NUM_OF_TASKS_MASK 0x3F000000
+#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
+#define BMI_NUM_OF_DMAS_MASK 0x00000F00
+#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
+#define BMI_FIFO_SIZE_MASK 0x000003FF
+#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
+#define BMI_CFG2_DMAS_MASK 0x0000003F
+#define BMI_CFG2_TASKS_MASK 0x0000003F
+
+#define BMI_CFG2_TASKS_SHIFT 16
+#define BMI_CFG2_DMAS_SHIFT 0
+#define BMI_CFG1_FIFO_SIZE_SHIFT 16
+#define BMI_NUM_OF_TASKS_SHIFT 24
+#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
+#define BMI_NUM_OF_DMAS_SHIFT 8
+#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
+
+#define BMI_FIFO_ALIGN 0x100
+
+#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
+
+/* QMI defines */
+#define QMI_CFG_ENQ_EN 0x80000000
+#define QMI_CFG_DEQ_EN 0x40000000
+#define QMI_CFG_EN_COUNTERS 0x10000000
+#define QMI_CFG_DEQ_MASK 0x0000003F
+#define QMI_CFG_ENQ_MASK 0x00003F00
+#define QMI_CFG_ENQ_SHIFT 8
+
+#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
+#define QMI_INTR_EN_SINGLE_ECC 0x80000000
+
+#define QMI_TAPC_TAP 22
+
+#define QMI_GS_HALT_NOT_BUSY 0x00000002
+
+/* IRAM defines */
+#define IRAM_IADD_AIE 0x80000000
+#define IRAM_READY 0x80000000
+
+/* Default values */
+#define DEFAULT_CATASTROPHIC_ERR 0
+#define DEFAULT_DMA_ERR 0
+#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
+#define DEFAULT_DMA_COMM_Q_LOW 0x2A
+#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
+#define DEFAULT_CACHE_OVERRIDE 0
+#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
+#define DEFAULT_DMA_DBG_CNT_MODE 0
+#define DEFAULT_DMA_SOS_EMERGENCY 0
+#define DEFAULT_DMA_WATCHDOG 0
+#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
+#define DEFAULT_DISP_LIMIT 0
+#define DEFAULT_PRS_DISP_TH 16
+#define DEFAULT_PLCR_DISP_TH 16
+#define DEFAULT_KG_DISP_TH 16
+#define DEFAULT_BMI_DISP_TH 16
+#define DEFAULT_QMI_ENQ_DISP_TH 16
+#define DEFAULT_QMI_DEQ_DISP_TH 16
+#define DEFAULT_FM_CTL1_DISP_TH 16
+#define DEFAULT_FM_CTL2_DISP_TH 16
+
+#define DFLT_AXI_DBG_NUM_OF_BEATS 1
+
+#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+
+#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
+#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) / 2)
+#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
+ DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
+
+#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
+#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) * 3 / 4)
+#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
+ DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
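/*
 * Illustrative note: worked example of the defaults above. With
 * dma_thresh_max_buf = 127, the internal read/write buffer thresholds
 * come out as low = (127 + 1) / 2 = 64 and
 * high = (127 + 1) * 3 / 4 = 96; an FMan v3 (major == 6) uses the
 * fixed command-queue thresholds 0x2A/0x3F instead of the scaled ones.
 */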
+
+#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
+#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
+#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
+ ((major == 6) ? ((minor == 1 || minor == 4) ? \
+ TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
+ bmi_max_num_of_tasks)
+
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
+#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
+ (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
+ DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
+
+#define FM_TIMESTAMP_1_USEC_BIT 8
+
+/* Defines used for enabling/disabling FMan interrupts */
+#define ERR_INTR_EN_DMA 0x00010000
+#define ERR_INTR_EN_FPM 0x80000000
+#define ERR_INTR_EN_BMI 0x00800000
+#define ERR_INTR_EN_QMI 0x00400000
+#define ERR_INTR_EN_MURAM 0x00040000
+#define ERR_INTR_EN_MAC0 0x00004000
+#define ERR_INTR_EN_MAC1 0x00002000
+#define ERR_INTR_EN_MAC2 0x00001000
+#define ERR_INTR_EN_MAC3 0x00000800
+#define ERR_INTR_EN_MAC4 0x00000400
+#define ERR_INTR_EN_MAC5 0x00000200
+#define ERR_INTR_EN_MAC6 0x00000100
+#define ERR_INTR_EN_MAC7 0x00000080
+#define ERR_INTR_EN_MAC8 0x00008000
+#define ERR_INTR_EN_MAC9 0x00000040
+
+#define INTR_EN_QMI 0x40000000
+#define INTR_EN_MAC0 0x00080000
+#define INTR_EN_MAC1 0x00040000
+#define INTR_EN_MAC2 0x00020000
+#define INTR_EN_MAC3 0x00010000
+#define INTR_EN_MAC4 0x00000040
+#define INTR_EN_MAC5 0x00000020
+#define INTR_EN_MAC6 0x00000008
+#define INTR_EN_MAC7 0x00000002
+#define INTR_EN_MAC8 0x00200000
+#define INTR_EN_MAC9 0x00100000
+#define INTR_EN_REV0 0x00008000
+#define INTR_EN_REV1 0x00004000
+#define INTR_EN_REV2 0x00002000
+#define INTR_EN_REV3 0x00001000
+#define INTR_EN_TMR 0x01000000
+
+enum fman_dma_aid_mode {
+ FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */
+ FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */
+};
+
+struct fman_iram_regs {
+ u32 iadd; /* FM IRAM instruction address register */
+ u32 idata; /* FM IRAM instruction data register */
+ u32 itcfg; /* FM IRAM timing config register */
+ u32 iready; /* FM IRAM ready register */
+};
+
+struct fman_fpm_regs {
+ u32 fmfp_tnc; /* FPM TNUM Control 0x00 */
+ u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */
+ u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */
+ u32 fmfp_mxd; /* FPM Flush Control 0x0c */
+ u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */
+ u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */
+ u32 fm_epi; /* FM Error Pending Interrupts 0x18 */
+ u32 fm_rie; /* FM Error Interrupt Enable 0x1c */
+ u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */
+ u32 res0030[4]; /* res 0x30 - 0x3f */
+	u32 fmfp_cee[4];	/* FPM FMan-Controller Event 1-4 0x40-0x4f */
+ u32 res0050[4]; /* res 0x50-0x5f */
+ u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */
+ u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */
+ u32 fmfp_tsp; /* FPM Time Stamp 0x68 */
+ u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */
+ u32 fm_rcr; /* FM Rams Control 0x70 */
+ u32 fmfp_extc; /* FPM External Requests Control 0x74 */
+ u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */
+ u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */
+ u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
+ u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */
+ u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */
+ u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */
+ u32 fm_rstc; /* FM Reset Command 0xcc */
+ u32 fm_cld; /* FM Classifier Debug 0xd0 */
+ u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */
+ u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */
+ u32 fmfp_ee; /* FPM Event&Mask 0xdc */
+ u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */
+ u32 res00f0[4]; /* res 0xf0-0xff */
+ u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */
+ u32 res01c8[14]; /* res 0x1c8-0x1ff */
+ u32 fmfp_clfabc; /* FPM CLFABC 0x200 */
+ u32 fmfp_clfcc; /* FPM CLFCC 0x204 */
+ u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */
+ u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */
+ u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */
+ u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */
+ u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */
+ u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */
+ u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */
+ u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */
+ u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */
+ u32 fmfp_decceh; /* FPM DECCEH 0x22c */
+ u32 res0230[116]; /* res 0x230 - 0x3ff */
+	u32 fmfp_ts[128];	/* FPM Task Status 0x400 - 0x5ff */
+ u32 res0600[0x400 - 384];
+};
+
+struct fman_bmi_regs {
+ u32 fmbm_init; /* BMI Initialization 0x00 */
+ u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */
+ u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */
+ u32 res000c[5]; /* 0x0c - 0x1f */
+ u32 fmbm_ievr; /* Interrupt Event Register 0x20 */
+ u32 fmbm_ier; /* Interrupt Enable Register 0x24 */
+ u32 fmbm_ifr; /* Interrupt Force Register 0x28 */
+ u32 res002c[5]; /* 0x2c - 0x3f */
+ u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */
+ u32 res0060[12]; /* 0x60 - 0x8f */
+ u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */
+ u32 res009c; /* 0x9c */
+ u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */
+ u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */
+ u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */
+ u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */
+ u32 res0200; /* 0x200 */
+ u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */
+ u32 res0300; /* 0x300 */
+ u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */
+};
+
+struct fman_qmi_regs {
+ u32 fmqm_gc; /* General Configuration Register 0x00 */
+ u32 res0004; /* 0x04 */
+ u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */
+ u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */
+ u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */
+ u32 fmqm_ie; /* Interrupt Event Register 0x14 */
+ u32 fmqm_ien; /* Interrupt Enable Register 0x18 */
+ u32 fmqm_if; /* Interrupt Force Register 0x1c */
+ u32 fmqm_gs; /* Global Status Register 0x20 */
+ u32 fmqm_ts; /* Task Status Register 0x24 */
+ u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */
+ u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */
+ u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */
+ u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */
+ u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */
+ u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */
+ u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */
+ u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */
+ u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */
+ u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */
+ u32 res0050[7]; /* 0x50 - 0x6b */
+ u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */
+ u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */
+ u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */
+ u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */
+ u32 res007c; /* 0x7c */
+	u32 fmqm_dtc;		/* Debug Trap Counter 0x80 */
+	u32 fmqm_efddd;		/* Enqueue Frame desc Dynamic dbg 0x84 */
+ u32 res0088[2]; /* 0x88 - 0x8f */
+ struct {
+ u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */
+ u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */
+ u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */
+ u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */
+ u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
+ u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */
+ u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */
+ u32 res001c; /* 0x1c */
+ } dbg_traps[3]; /* 0x90 - 0xef */
+ u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */
+};
+
+struct fman_dma_regs {
+ u32 fmdmsr; /* FM DMA status register 0x00 */
+ u32 fmdmmr; /* FM DMA mode register 0x04 */
+ u32 fmdmtr; /* FM DMA bus threshold register 0x08 */
+ u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */
+ u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */
+ u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */
+ u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */
+ u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */
+ u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */
+ u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */
+ u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */
+ u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */
+ u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */
+ u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
+ u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
+ u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */
+ u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */
+ u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */
+ u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
+ u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
+ u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */
+ u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */
+ u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */
+ u32 res005c; /* 0x5c */
+ u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */
+ u32 res00e0[0x400 - 56];
+};
+
+struct fman_rg {
+ struct fman_fpm_regs __iomem *fpm_rg;
+ struct fman_dma_regs __iomem *dma_rg;
+ struct fman_bmi_regs __iomem *bmi_rg;
+ struct fman_qmi_regs __iomem *qmi_rg;
+};
+
+struct fman_state_struct {
+ u8 fm_id;
+ u16 fm_clk_freq;
+ struct fman_rev_info rev_info;
+ bool enabled_time_stamp;
+ u8 count1_micro_bit;
+ u8 total_num_of_tasks;
+ u8 accumulated_num_of_tasks;
+ u32 accumulated_fifo_size;
+ u8 accumulated_num_of_open_dmas;
+ u8 accumulated_num_of_deq_tnums;
+ bool low_end_restriction;
+ u32 exceptions;
+ u32 extra_fifo_pool_size;
+ u8 extra_tasks_pool_size;
+ u8 extra_open_dmas_pool_size;
+ u16 port_mfl[MAX_NUM_OF_MACS];
+ u16 mac_mfl[MAX_NUM_OF_MACS];
+
+ /* SOC specific */
+ u32 fm_iram_size;
+ /* DMA */
+ u32 dma_thresh_max_commq;
+ u32 dma_thresh_max_buf;
+ u32 max_num_of_open_dmas;
+ /* QMI */
+ u32 qmi_max_num_of_tnums;
+ u32 qmi_def_tnums_thresh;
+ /* BMI */
+ u32 bmi_max_num_of_tasks;
+ u32 bmi_max_fifo_size;
+ /* General */
+ u32 fm_port_num_of_cg;
+ u32 num_of_rx_ports;
+ u32 total_fifo_size;
+
+ u32 qman_channel_base;
+ u32 num_of_qman_channels;
+
+ struct resource *res;
+};
+
+struct fman_cfg {
+ u8 disp_limit_tsh;
+ u8 prs_disp_tsh;
+ u8 plcr_disp_tsh;
+ u8 kg_disp_tsh;
+ u8 bmi_disp_tsh;
+ u8 qmi_enq_disp_tsh;
+ u8 qmi_deq_disp_tsh;
+ u8 fm_ctl1_disp_tsh;
+ u8 fm_ctl2_disp_tsh;
+ int dma_cache_override;
+ enum fman_dma_aid_mode dma_aid_mode;
+ bool dma_aid_override;
+ u32 dma_axi_dbg_num_of_beats;
+ u32 dma_cam_num_of_entries;
+ u32 dma_watchdog;
+ u8 dma_comm_qtsh_asrt_emer;
+ u32 dma_write_buf_tsh_asrt_emer;
+ u32 dma_read_buf_tsh_asrt_emer;
+ u8 dma_comm_qtsh_clr_emer;
+ u32 dma_write_buf_tsh_clr_emer;
+ u32 dma_read_buf_tsh_clr_emer;
+ u32 dma_sos_emergency;
+ int dma_dbg_cnt_mode;
+ bool dma_stop_on_bus_error;
+ bool dma_en_emergency;
+ u32 dma_emergency_bus_select;
+ int dma_emergency_level;
+ bool dma_en_emergency_smoother;
+ u32 dma_emergency_switch_counter;
+ bool halt_on_external_activ;
+ bool halt_on_unrecov_ecc_err;
+ int catastrophic_err;
+ int dma_err;
+ bool en_muram_test_mode;
+ bool en_iram_test_mode;
+ bool external_ecc_rams_enable;
+ u16 tnum_aging_period;
+ u32 exceptions;
+ u16 clk_freq;
+ bool pedantic_dma;
+ u32 cam_base_addr;
+ u32 fifo_base_addr;
+ u32 total_fifo_size;
+ u32 total_num_of_tasks;
+ bool qmi_deq_option_support;
+ u32 qmi_def_tnums_thresh;
+};
+
+struct fman_dts_params {
+ void __iomem *base_addr; /* FMan virtual address */
+#ifndef __rtems__
+ struct resource *res; /* FMan memory resource */
+#endif /* __rtems__ */
+ u8 id; /* FMan ID */
+
+ int err_irq; /* FMan Error IRQ */
+
+ u16 clk_freq; /* FMan clock freq (In Mhz) */
+
+ u32 qman_channel_base; /* QMan channels base */
+ u32 num_of_qman_channels; /* Number of QMan channels */
+
+ phys_addr_t muram_phy_base_addr; /* MURAM physical address */
+ resource_size_t muram_size; /* MURAM size */
+};
+
+struct fman {
+ struct device *dev;
+ void __iomem *base_addr;
+ struct fman_intr_src intr_mng[FMAN_EV_CNT];
+
+ struct fman_fpm_regs __iomem *fpm_regs;
+ struct fman_bmi_regs __iomem *bmi_regs;
+ struct fman_qmi_regs __iomem *qmi_regs;
+ struct fman_dma_regs __iomem *dma_regs;
+ fman_exceptions_cb *exception_cb;
+ fman_bus_error_cb *bus_error_cb;
+ /* Spinlock for FMan use */
+ spinlock_t spinlock;
+ struct fman_state_struct *state;
+
+ struct fman_cfg *cfg;
+ struct muram_info *muram;
+ /* cam section in muram */
+ int cam_offset;
+ size_t cam_size;
+ /* Fifo in MURAM */
+ int fifo_offset;
+ size_t fifo_size;
+ bool reset_on_init;
+
+ u32 liodn_base[64];
+ u32 liodn_offset[64];
+
+ struct fman_dts_params dts_params;
+};
+
+static void fman_exceptions(struct fman *fman, enum fman_exceptions exception)
+{
+ pr_debug("FMan[%d] exception %d\n",
+ fman->state->fm_id, exception);
+}
+
+static void fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr, u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
+{
+ pr_debug("FMan[%d] bus error: port_id[%d]\n",
+ fman->state->fm_id, port_id);
+}
+
+static inline void call_mac_isr(struct fman *fman, u8 id)
+{
+ if (fman->intr_mng[id].isr_cb)
+ fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+}
+
+static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
+{
+ u8 sw_port_id = 0;
+
+ if (hw_port_id >= BASE_TX_PORTID) {
+ sw_port_id = hw_port_id - BASE_TX_PORTID;
+ } else if (hw_port_id >= BASE_RX_PORTID) {
+ sw_port_id = hw_port_id - BASE_RX_PORTID;
+	} else {
+		sw_port_id = 0;
+		/* hw_port_id below BASE_RX_PORTID is unexpected */
+		WARN_ON(1);
+	}
+
+ return sw_port_id;
+}
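/*
 * Illustrative note: with BASE_RX_PORTID = 0x08 and
 * BASE_TX_PORTID = 0x28, hardware port 0x0a maps to Rx port 2 and
 * hardware port 0x2a maps to Tx port 2; anything below 0x08 is invalid
 * and hits the warning above.
 */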
+
+static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
+ u8 port_id)
+{
+ u32 tmp = 0;
+
+ tmp = (u32)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
+
+ tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
+
+ /* order restoration */
+ if (port_id % 2)
+ tmp |= (FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ else
+ tmp |= (FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+
+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
+}
+
+static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
+ u32 liodn_base, u32 liodn_ofst)
+{
+ u32 tmp;
+
+ /* set LIODN base for this port */
+ tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
+ if (port_id % 2) {
+ tmp &= ~DMA_LIODN_BASE_MASK;
+ tmp |= liodn_base;
+ } else {
+ tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
+ tmp |= liodn_base << DMA_LIODN_SHIFT;
+ }
+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
+}
+
+static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
+ FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+}
+
+static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
+ &fpm_rg->fm_rcr);
+}
+
+static void fman_defconfig(struct fman_cfg *cfg)
+{
+ memset(cfg, 0, sizeof(struct fman_cfg));
+
+ cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
+ cfg->dma_err = DEFAULT_DMA_ERR;
+ cfg->halt_on_external_activ = false;
+ cfg->halt_on_unrecov_ecc_err = false;
+ cfg->en_iram_test_mode = false;
+ cfg->en_muram_test_mode = false;
+ cfg->external_ecc_rams_enable = false;
+ cfg->dma_aid_override = false;
+ cfg->dma_aid_mode = DEFAULT_AID_MODE;
+ cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
+ cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
+ cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
+ cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
+ cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
+ cfg->dma_en_emergency = false;
+ cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
+ cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
+ cfg->dma_en_emergency_smoother = false;
+ cfg->dma_emergency_switch_counter =
+ DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
+ cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
+ cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
+ cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
+ cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
+ cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
+ cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
+ cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
+ cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
+ cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
+
+ cfg->pedantic_dma = false;
+ cfg->tnum_aging_period = 0;
+ cfg->dma_stop_on_bus_error = false;
+ cfg->qmi_deq_option_support = false;
+}
+
+static int dma_init(struct fman *fman)
+{
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ struct fman_cfg *cfg = fman->cfg;
+ u32 tmp_reg;
+
+ /* Init DMA Registers */
+
+ /* clear status reg events */
+ tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+ iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
+
+ /* configure mode register */
+ tmp_reg = 0;
+ tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
+ if (cfg->dma_aid_override)
+ tmp_reg |= DMA_MODE_AID_OR;
+ if (cfg->exceptions & EX_DMA_BUS_ERROR)
+ tmp_reg |= DMA_MODE_BER;
+ if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
+ (cfg->exceptions & EX_DMA_READ_ECC) |
+ (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
+ tmp_reg |= DMA_MODE_ECC;
+ if (cfg->dma_stop_on_bus_error)
+ tmp_reg |= DMA_MODE_SBER;
+ if (cfg->dma_axi_dbg_num_of_beats)
+ tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
+ ((cfg->dma_axi_dbg_num_of_beats - 1)
+ << DMA_MODE_AXI_DBG_SHIFT));
+
+ if (cfg->dma_en_emergency) {
+ tmp_reg |= cfg->dma_emergency_bus_select;
+ tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
+ if (cfg->dma_en_emergency_smoother)
+ iowrite32be(cfg->dma_emergency_switch_counter,
+ &dma_rg->fmdmemsr);
+ }
+ tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
+ DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
+ tmp_reg |= DMA_MODE_SECURE_PROT;
+ tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
+ tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
+
+ if (cfg->pedantic_dma)
+ tmp_reg |= DMA_MODE_EMER_READ;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmmr);
+
+ /* configure thresholds register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmtr);
+
+ /* configure hysteresis register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmhy);
+
+ /* configure emergency threshold */
+ iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
+
+ /* configure Watchdog */
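+ /* (dma_watchdog appears to be in usec while clk_freq is in MHz,
+ * so the product below would be the period in FM clock cycles)
+ */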
+ iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
+
+ iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
+
+ /* Allocate MURAM for CAM */
+ fman->cam_size =
+ (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
+ fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ pr_err("MURAM alloc for DMA CAM failed\n");
+ return -ENOMEM;
+ }
+
+ if (fman->state->rev_info.major == 2) {
+ u32 __iomem *cam_base_addr;
+
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+
+ fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
+ fman->cam_offset = fman_muram_alloc(fman->muram,
+ fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ pr_err("MURAM alloc for DMA CAM failed\n");
+ return -ENOMEM;
+ }
+
+ if (fman->cfg->dma_cam_num_of_entries % 8 ||
+ fman->cfg->dma_cam_num_of_entries > 32) {
+ pr_err("wrong dma_cam_num_of_entries\n");
+ return -EINVAL;
+ }
+
+ cam_base_addr = (u32 __iomem *)
+ fman_muram_offset_to_vbase(fman->muram,
+ fman->cam_offset);
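+ /* Mark each valid CAM entry with a bit in the MS bits of the
+ * first MURAM word; e.g. 8 entries yield the mask 0xff000000
+ */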
+ out_be32(cam_base_addr,
+ ~((1 << (32 - fman->cfg->dma_cam_num_of_entries)) -
+ 1));
+ }
+
+ fman->cfg->cam_base_addr = fman->cam_offset;
+
+ return 0;
+}
+
+static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+ int i;
+
+ /* Init FPM Registers */
+
+ tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
+
+ tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
+ ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
+ ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
+ ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
+
+ tmp_reg =
+ (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
+ ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
+ ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
+ ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
+
+ /* define exceptions and error behavior */
+ tmp_reg = 0;
+ /* Clear events */
+ tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_SINGLE_ECC);
+ /* enable interrupts */
+ if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
+ tmp_reg |= FPM_EV_MASK_STALL_EN;
+ if (cfg->exceptions & EX_FPM_SINGLE_ECC)
+ tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
+ if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
+ tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
+ tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
+ if (!cfg->halt_on_external_activ)
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+ if (!cfg->halt_on_unrecov_ecc_err)
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
+
+ /* clear all fmCtls event registers */
+ for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
+ iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
+
+ /* RAM ECC - enable and clear events */
+ /* first we need to clear all parser memory,
+ * as it is uninitialized and may cause ECC errors
+ */
+ /* event bits */
+ tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
+ /* RAMs ECC enable is not affected by the RCR bit,
+ * but by a COP configuration
+ */
+ if (cfg->external_ecc_rams_enable)
+ tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
+
+ /* enable test mode */
+ if (cfg->en_muram_test_mode)
+ tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
+ if (cfg->en_iram_test_mode)
+ tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
+ iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
+
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_IRAM_ECC) {
+ tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ if (cfg->exceptions & EX_MURAM_ECC) {
+ tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ iowrite32be(tmp_reg, &fpm_rg->fm_rie);
+}
+
+static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+
+ /* Init BMI Registers */
+
+ /* define common resources */
+ tmp_reg = cfg->fifo_base_addr;
+ tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
+
+ tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
+ BMI_CFG1_FIFO_SIZE_SHIFT);
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
+
+ tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
+ BMI_CFG2_TASKS_SHIFT;
+ /* num of DMAs will be dynamically updated when each port is set */
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
+
+ /* define unmaskable exceptions, enable and clear events */
+ tmp_reg = 0;
+ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
+ BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
+ BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
+ BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
+
+ if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
+}
+
+static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+ u16 period_in_fm_clocks;
+ u8 remainder;
+
+ /* Init QMI Registers */
+
+ /* Clear error interrupt events */
+
+ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
+ &qmi_rg->fmqm_eie);
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
+ tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
+ tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
+
+ if (cfg->tnum_aging_period) {
+ /* tnum_aging_period is in units of usec, clk_freq in MHz */
+ period_in_fm_clocks = (u16)
+ (cfg->tnum_aging_period * cfg->clk_freq);
+ /* period_in_fm_clocks must be a multiple of 64 */
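+ /* e.g. a 10 usec aging period at 600 MHz is 6000 clocks, which
+ * rounds up to 94 TAP units (94 * 64 = 6016 clocks)
+ */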
+ remainder = (u8)(period_in_fm_clocks % 64);
+ if (remainder) {
+ tmp_reg = (u32)((period_in_fm_clocks / 64) + 1);
+ } else {
+ tmp_reg = (u32)(period_in_fm_clocks / 64);
+ if (!tmp_reg)
+ tmp_reg = 1;
+ }
+ tmp_reg <<= QMI_TAPC_TAP;
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
+ }
+ tmp_reg = 0;
+ /* Clear interrupt events */
+ iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
+ if (cfg->exceptions & EX_QMI_SINGLE_ECC)
+ tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
+}
+
+static int enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
+{
+ u32 cfg_reg = 0;
+
+ /* Enable all modules */
+
+ /* clear&enable global counters - calculate reg and save for later,
+ * because it's the same reg for QMI enable
+ */
+ cfg_reg = QMI_CFG_EN_COUNTERS;
+ if (cfg->qmi_deq_option_support)
+ cfg_reg |= (u32)(((cfg->qmi_def_tnums_thresh) << 8) |
+ cfg->qmi_def_tnums_thresh);
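+ /* e.g. a qmi_def_tnums_thresh of 32 (0x20) programs both the
+ * enqueue and dequeue threshold fields, giving 0x2020
+ */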
+
+ iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
+ iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
+ &fman_rg->qmi_rg->fmqm_gc);
+
+ return 0;
+}
+
+static int set_exception(struct fman_rg *fman_rg,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 tmp;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_BER;
+ else
+ tmp &= ~DMA_MODE_BER;
+ /* update the bus error setting */
+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_ECC;
+ else
+ tmp &= ~DMA_MODE_ECC;
+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_STALL_EN;
+ else
+ tmp &= ~FPM_EV_MASK_STALL_EN;
+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
+ if (enable)
+ tmp |= QMI_INTR_EN_SINGLE_ECC;
+ else
+ tmp &= ~QMI_INTR_EN_SINGLE_ECC;
+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ break;
+ case FMAN_EX_IRAM_ECC:
+ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman_rg->fpm_rg);
+ /* enable ECC interrupts */
+ tmp |= FPM_IRAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman_rg->fpm_rg);
+ tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ break;
+ case FMAN_EX_MURAM_ECC:
+ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman_rg->fpm_rg);
+ /* enable ECC interrupts */
+ tmp |= FPM_MURAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman_rg->fpm_rg);
+ tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void resume(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear the event bits in tmp so that standing events are not acknowledged */
+ tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
+ tmp |= FPM_EV_MASK_RELEASE_FM;
+
+ iowrite32be(tmp, &fpm_rg->fmfp_ee);
+}
+
+static int fill_soc_specific_params(struct fman_state_struct *state)
+{
+ u8 minor = state->rev_info.minor;
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (state->rev_info.major) {
+ case 3:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 6;
+ state->total_fifo_size = 122 * 1024;
+ break;
+
+ case 2:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 5;
+ state->total_fifo_size = 100 * 1024;
+ break;
+
+ case 6:
+ state->dma_thresh_max_commq = 83;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 32;
+ state->fm_port_num_of_cg = 256;
+
+ /* FManV3L */
+ if (minor == 1 || minor == 4) {
+ state->bmi_max_fifo_size = 192 * 1024;
+ state->bmi_max_num_of_tasks = 64;
+ state->max_num_of_open_dmas = 32;
+ state->num_of_rx_ports = 5;
+ if (minor == 1)
+ state->fm_iram_size = 32 * 1024;
+ else
+ state->fm_iram_size = 64 * 1024;
+ state->total_fifo_size = 156 * 1024;
+ }
+ /* FManV3H */
+ else if (minor == 0 || minor == 2 || minor == 3) {
+ state->bmi_max_fifo_size = 384 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 84;
+ state->num_of_rx_ports = 8;
+ state->total_fifo_size = 295 * 1024;
+ } else {
+ pr_err("Unsupported FManv3 version\n");
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ pr_err("Unsupported FMan version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool is_init_done(struct fman_cfg *cfg)
+{
+ /* The driver parameters (cfg) are freed and set to NULL once
+ * initialization completes, so a NULL cfg means init is done
+ */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static void free_init_resources(struct fman *fman)
+{
+ if (fman->cam_offset)
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+ if (fman->fifo_offset)
+ fman_muram_free_mem(fman->muram, fman->fifo_offset,
+ fman->fifo_size);
+}
+
+static void bmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+
+ event = ioread32be(&bmi_rg->fmbm_ievr);
+ mask = ioread32be(&bmi_rg->fmbm_ier);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&bmi_rg->fmbm_ifr);
+ if (force & event)
+ iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
+ /* clear the acknowledged events */
+ iowrite32be(event, &bmi_rg->fmbm_ievr);
+
+ if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
+ fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
+ fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
+ fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
+ fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+}
+
+static void qmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+
+ event = ioread32be(&qmi_rg->fmqm_eie);
+ mask = ioread32be(&qmi_rg->fmqm_eien);
+ event &= mask;
+
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_eif);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_eie);
+
+ if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
+ fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
+ fman->exception_cb(fman, FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+}
+
+static void dma_err_event(struct fman *fman)
+{
+ u32 status, mask, com_id;
+ u8 tnum, port_id, relative_port_id;
+ u16 liodn;
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+
+ status = ioread32be(&dma_rg->fmdmsr);
+ mask = ioread32be(&dma_rg->fmdmmr);
+
+ /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
+ if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
+ status &= ~DMA_STATUS_BUS_ERR;
+
+ /* clear relevant bits if mask has no DMA_MODE_ECC */
+ if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
+ status &= ~(DMA_STATUS_FM_SPDAT_ECC |
+ DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC |
+ DMA_STATUS_FM_WRITE_ECC);
+
+ /* clear set events */
+ iowrite32be(status, &dma_rg->fmdmsr);
+
+ if (status & DMA_STATUS_BUS_ERR) {
+ u64 addr;
+
+ addr = (u64)ioread32be(&dma_rg->fmdmtal);
+ addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
+
+ com_id = ioread32be(&dma_rg->fmdmtcid);
+ port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
+ DMA_TRANSFER_PORTID_SHIFT));
+ relative_port_id =
+ hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+ tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
+ DMA_TRANSFER_TNUM_SHIFT);
+ liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
+ fman->bus_error_cb(fman, relative_port_id, addr, tnum, liodn);
+ }
+ if (status & DMA_STATUS_FM_SPDAT_ECC)
+ fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ if (status & DMA_STATUS_READ_ECC)
+ fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
+ fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ if (status & DMA_STATUS_FM_WRITE_ECC)
+ fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+}
+
+static void fpm_err_event(struct fman *fman)
+{
+ u32 event;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+
+ event = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear all occurred events */
+ iowrite32be(event, &fpm_rg->fmfp_ee);
+
+ if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
+ (event & FPM_EV_MASK_DOUBLE_ECC_EN))
+ fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
+ fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ if ((event & FPM_EV_MASK_SINGLE_ECC) &&
+ (event & FPM_EV_MASK_SINGLE_ECC_EN))
+ fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+}
+
+static void muram_err_intr(struct fman *fman)
+{
+ u32 event, mask;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+
+ event = ioread32be(&fpm_rg->fm_rcr);
+ mask = ioread32be(&fpm_rg->fm_rie);
+
+ /* clear MURAM event bit (do not clear IRAM event) */
+ iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
+
+ if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
+ fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+}
+
+static void qmi_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+
+ event = ioread32be(&qmi_rg->fmqm_ie);
+ mask = ioread32be(&qmi_rg->fmqm_ien);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_if);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_if);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_ie);
+
+ if (event & QMI_INTR_EN_SINGLE_ECC)
+ fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+}
+
+static void enable_time_stamp(struct fman *fman)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u16 fm_clk_freq = fman->state->fm_clk_freq;
+ u32 tmp, intgr, ts_freq;
+ u64 frac;
+
+ ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+ /* configure timestamp so that bit 8 will count 1 microsecond
+ * Find effective count rate at TIMESTAMP least significant bits:
+ * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
+ * Find frequency ratio between effective count rate and the clock:
+ * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
+ * 256/600 = 0.4266666...
+ */
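+ /* Worked example for a 600 MHz clock: intgr = 256 / 600 = 0 and
+ * frac = ceil((256 << 16) / 600) = 27963 (0x6d3b), computed below
+ */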
+
+ intgr = ts_freq / fm_clk_freq;
+ /* multiply by 2^16 to keep the fraction of the division;
+ * we do not divide back, since this value is written as a
+ * fraction (see the spec)
+ */
+
+ frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
+ /* check the remainder of the division in order to round up if it is not an integer */
+ if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
+ frac++;
+
+ tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
+ iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
+
+ /* enable timestamp with original clock */
+ iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
+ fman->state->enabled_time_stamp = true;
+}
+
+static int clear_iram(struct fman *fman)
+{
+ struct fman_iram_regs __iomem *iram;
+ int i;
+
+ iram = (struct fman_iram_regs __iomem *)(fman->base_addr + IMEM_OFFSET);
+
+ /* Enable the auto-increment */
+ out_be32(&iram->iadd, IRAM_IADD_AIE);
+ while (in_be32(&iram->iadd) != IRAM_IADD_AIE)
+ ;
+
+ for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
+ out_be32(&iram->idata, 0xffffffff);
+
+ out_be32(&iram->iadd, fman->state->fm_iram_size - 4);
+ /* Memory barrier */
+ mb();
+ while (in_be32(&iram->idata) != 0xffffffff)
+ ;
+
+ return 0;
+}
+
+static u32 get_exception_flag(enum fman_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ bit_mask = EX_DMA_BUS_ERROR;
+ break;
+ case FMAN_EX_DMA_SINGLE_PORT_ECC:
+ bit_mask = EX_DMA_SINGLE_PORT_ECC;
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ bit_mask = EX_DMA_READ_ECC;
+ break;
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
+ break;
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ bit_mask = EX_DMA_FM_WRITE_ECC;
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ bit_mask = EX_FPM_STALL_ON_TASKS;
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ bit_mask = EX_FPM_SINGLE_ECC;
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ bit_mask = EX_FPM_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ bit_mask = EX_QMI_SINGLE_ECC;
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ bit_mask = EX_QMI_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ bit_mask = EX_BMI_LIST_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ bit_mask = EX_BMI_STATISTICS_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ bit_mask = EX_BMI_DISPATCH_RAM_ECC;
+ break;
+ case FMAN_EX_MURAM_ECC:
+ bit_mask = EX_MURAM_ECC;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static int get_module_event(enum fman_event_modules module, u8 mod_id,
+ enum fman_intr_type intr_type)
+{
+ int event;
+
+ switch (module) {
+ case FMAN_MOD_MAC:
+ event = (intr_type == FMAN_INTR_TYPE_ERR) ?
+ (FMAN_EV_ERR_MAC0 + mod_id) :
+ (FMAN_EV_MAC0 + mod_id);
+ break;
+ case FMAN_MOD_FMAN_CTRL:
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_CNT;
+ else
+ event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
+ break;
+ case FMAN_MOD_DUMMY_LAST:
+ event = FMAN_EV_CNT;
+ break;
+ default:
+ event = FMAN_EV_CNT;
+ break;
+ }
+
+ return event;
+}
+
+static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
+ u32 *extra_size_of_fifo)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u32 fifo = *size_of_fifo;
+ u32 extra_fifo = *extra_size_of_fifo;
+ u32 tmp;
+
+ /* if this is the first time a port requires extra_fifo_pool_size,
+ * the total extra_fifo_pool_size must be initialized to 1 buffer per
+ * port
+ */
+ if (extra_fifo && !fman->state->extra_fifo_pool_size)
+ fman->state->extra_fifo_pool_size =
+ fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
+
+ fman->state->extra_fifo_pool_size =
+ max(fman->state->extra_fifo_pool_size, extra_fifo);
+
+ /* check that there is enough uncommitted FIFO size */
+ if ((fman->state->accumulated_fifo_size + fifo) >
+ (fman->state->total_fifo_size -
+ fman->state->extra_fifo_pool_size)) {
+ pr_err("Requested fifo size and extra size exceed total FIFO size.\n");
+ return -EAGAIN;
+ }
+
+ /* Read, modify and write to HW */
+ tmp = (u32)((fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT));
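+ /* e.g. assuming 256-byte FIFO units (FMAN_BMI_FIFO_UNITS), a
+ * 16 KiB FIFO with no extra FIFO is encoded as 63 (0x3f)
+ */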
+ iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
+
+ /* update accumulated */
+ fman->state->accumulated_fifo_size += fifo;
+
+ return 0;
+}
+
+static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
+ u8 *num_of_extra_tasks)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 tasks = *num_of_tasks;
+ u8 extra_tasks = *num_of_extra_tasks;
+ u32 tmp;
+
+ if (extra_tasks)
+ fman->state->extra_tasks_pool_size =
+ (u8)max(fman->state->extra_tasks_pool_size, extra_tasks);
+
+ /* check that there are enough uncommitted tasks */
+ if ((fman->state->accumulated_num_of_tasks + tasks) >
+ (fman->state->total_num_of_tasks -
+ fman->state->extra_tasks_pool_size)) {
+ pr_err("Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ fman->state->fm_id);
+ return -EAGAIN;
+ }
+ /* update accumulated */
+ fman->state->accumulated_num_of_tasks += tasks;
+
+ /* Write to HW */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
+ tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
+ (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ return 0;
+}
+
+static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
+ u8 *num_of_open_dmas,
+ u8 *num_of_extra_open_dmas)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 open_dmas = *num_of_open_dmas;
+ u8 extra_open_dmas = *num_of_extra_open_dmas;
+ u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
+ u32 tmp;
+
+ if (!open_dmas) {
+ /* Configure according to the values in the HW:
+ * read the current number of open DMAs
+ */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
+ BMI_EXTRA_NUM_OF_DMAS_SHIFT);
+
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
+ BMI_NUM_OF_DMAS_SHIFT) + 1);
+
+ /* This is the first configuration and the user did not
+ * specify a value (!open_dmas); the reset values will be
+ * used, so we just save them for resource management
+ */
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ current_extra_val);
+ fman->state->accumulated_num_of_open_dmas += current_val;
+ *num_of_open_dmas = current_val;
+ *num_of_extra_open_dmas = current_extra_val;
+ return 0;
+ }
+
+ if (extra_open_dmas > current_extra_val)
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ extra_open_dmas);
+
+ if ((fman->state->rev_info.major < 6) &&
+ (fman->state->accumulated_num_of_open_dmas - current_val +
+ open_dmas > fman->state->max_num_of_open_dmas)) {
+ pr_err("Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ fman->state->fm_id);
+ return -EAGAIN;
+ } else if ((fman->state->rev_info.major >= 6) &&
+ !((fman->state->rev_info.major == 6) &&
+ (fman->state->rev_info.minor == 0)) &&
+ (fman->state->accumulated_num_of_open_dmas -
+ current_val + open_dmas >
+ fman->state->dma_thresh_max_commq + 1)) {
+ pr_err("Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ fman->state->fm_id,
+ fman->state->dma_thresh_max_commq + 1);
+ return -EAGAIN;
+ }
+
+ WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
+ /* update accumulated */
+ fman->state->accumulated_num_of_open_dmas -= current_val;
+ fman->state->accumulated_num_of_open_dmas += open_dmas;
+
+ if (fman->state->rev_info.major < 6)
+ total_num_dmas =
+ (u8)(fman->state->accumulated_num_of_open_dmas +
+ fman->state->extra_open_dmas_pool_size);
+
+ /* calculate reg */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+ tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
+ (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ /* update total num of DMA's with committed number of open DMAS,
+ * and max uncommitted pool.
+ */
+ if (total_num_dmas) {
+ tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+ tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
+ iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
+ }
+
+ return 0;
+}
+
+static int fman_config(struct fman *fman)
+{
+ void __iomem *base_addr;
+ int err;
+
+ base_addr = fman->dts_params.base_addr;
+
+ fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
+ if (!fman->state)
+ goto err_fm_state;
+
+ /* Allocate the FM driver's parameters structure */
+ fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
+ if (!fman->cfg)
+ goto err_fm_drv;
+
+ /* Initialize MURAM block */
+ fman->muram = fman_muram_init(fman->dts_params.muram_phy_base_addr,
+ fman->dts_params.muram_size);
+ if (!fman->muram)
+ goto err_fm_soc_specific;
+
+ /* Initialize FM parameters which will be kept by the driver */
+ fman->state->fm_id = fman->dts_params.id;
+ fman->state->fm_clk_freq = fman->dts_params.clk_freq;
+ fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
+ fman->state->num_of_qman_channels =
+ fman->dts_params.num_of_qman_channels;
+#ifndef __rtems__
+ fman->state->res = fman->dts_params.res;
+#endif /* __rtems__ */
+ fman->exception_cb = fman_exceptions;
+ fman->bus_error_cb = fman_bus_error;
+ fman->fpm_regs =
+ (struct fman_fpm_regs __iomem *)(base_addr + FPM_OFFSET);
+ fman->bmi_regs =
+ (struct fman_bmi_regs __iomem *)(base_addr + BMI_OFFSET);
+ fman->qmi_regs =
+ (struct fman_qmi_regs __iomem *)(base_addr + QMI_OFFSET);
+ fman->dma_regs =
+ (struct fman_dma_regs __iomem *)(base_addr + DMA_OFFSET);
+ fman->base_addr = base_addr;
+
+ spin_lock_init(&fman->spinlock);
+ fman_defconfig(fman->cfg);
+
+ fman->cfg->qmi_deq_option_support = true;
+
+ fman->state->extra_fifo_pool_size = 0;
+ fman->state->exceptions = DFLT_EXCEPTIONS;
+ fman->reset_on_init = true;
+
+ /* Read FMan revision for future use*/
+ fman_get_revision(fman, &fman->state->rev_info);
+
+ err = fill_soc_specific_params(fman->state);
+ if (err)
+ goto err_fm_soc_specific;
+
+ /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
+ if (fman->state->rev_info.major >= 6)
+ fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
+
+ fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
+
+ fman->state->total_num_of_tasks =
+ (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
+ fman->state->rev_info.minor,
+ fman->state->bmi_max_num_of_tasks);
+
+ if (fman->state->rev_info.major < 6) {
+ fman->cfg->dma_comm_qtsh_clr_emer =
+ (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_comm_qtsh_asrt_emer =
+ (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_cam_num_of_entries =
+ DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
+
+ fman->cfg->dma_read_buf_tsh_clr_emer =
+ DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_read_buf_tsh_asrt_emer =
+ DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_clr_emer =
+ DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_asrt_emer =
+ DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_axi_dbg_num_of_beats =
+ DFLT_AXI_DBG_NUM_OF_BEATS;
+ }
+
+ return 0;
+
+err_fm_soc_specific:
+ kfree(fman->cfg);
+err_fm_drv:
+ kfree(fman->state);
+err_fm_state:
+ kfree(fman);
+ return -EINVAL;
+}
+
+static int fman_init(struct fman *fman)
+{
+ struct fman_cfg *cfg = NULL;
+ struct fman_rg fman_rg;
+ int err = 0, i;
+
+ if (is_init_done(fman->cfg))
+ return -EINVAL;
+
+ fman_rg.bmi_rg = fman->bmi_regs;
+ fman_rg.qmi_rg = fman->qmi_regs;
+ fman_rg.fpm_rg = fman->fpm_regs;
+ fman_rg.dma_rg = fman->dma_regs;
+
+ fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
+
+ cfg = fman->cfg;
+
+ /* clear revision-dependent non-existing exceptions */
+ if (fman->state->rev_info.major < 6)
+ fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
+
+ if (fman->state->rev_info.major >= 6)
+ fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
+
+ /* clear CPG */
+ memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+ fman->state->fm_port_num_of_cg);
+
+ /* Save LIODN info before FMan reset
+ * Skipping non-existent port 0 (i = 1)
+ */
+ for (i = 1; i < FMAN_LIODN_TBL; i++) {
+ u32 liodn_base;
+
+ fman->liodn_offset[i] =
+ ioread32be(&fman_rg.bmi_rg->fmbm_spliodn[i - 1]);
+ liodn_base = ioread32be(&fman_rg.dma_rg->fmdmplr[i / 2]);
+ if (i % 2) {
+ /* FMDM_PLR LSB holds LIODN base for odd ports */
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ } else {
+ /* FMDM_PLR MSB holds LIODN base for even ports */
+ liodn_base >>= DMA_LIODN_SHIFT;
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ }
+ fman->liodn_base[i] = liodn_base;
+ }
+
+ /* Reset the FM if required. */
+ if (fman->reset_on_init) {
+ if (fman->state->rev_info.major >= 6) {
+ /* Errata A007273 */
+ pr_debug("FManV3 reset is not supported!\n");
+ } else {
+ out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
+ /* Memory barrier */
+ mb();
+ usleep_range(100, 300);
+ }
+
+ if (!!(ioread32be(&fman_rg.qmi_rg->fmqm_gs) &
+ QMI_GS_HALT_NOT_BUSY)) {
+ resume(fman->fpm_regs);
+ usleep_range(100, 300);
+ }
+ }
+
+ if (clear_iram(fman) != 0)
+ return -EINVAL;
+
+ cfg->exceptions = fman->state->exceptions;
+
+ /* Init DMA Registers */
+
+ err = dma_init(fman);
+ if (err != 0) {
+ free_init_resources(fman);
+ return err;
+ }
+
+ /* Init FPM Registers */
+ fpm_init(fman->fpm_regs, fman->cfg);
+
+ /* define common resources */
+ /* allocate MURAM for FIFO according to total size */
+ fman->fifo_offset = fman_muram_alloc(fman->muram,
+ fman->state->total_fifo_size);
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
+ free_init_resources(fman);
+ pr_err("MURAM alloc for BMI FIFO failed\n");
+ return -ENOMEM;
+ }
+
+ cfg->fifo_base_addr = fman->fifo_offset;
+ cfg->total_fifo_size = fman->state->total_fifo_size;
+ cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
+ cfg->clk_freq = fman->state->fm_clk_freq;
+
+ /* Init BMI Registers */
+ bmi_init(fman->bmi_regs, fman->cfg);
+
+ /* Init QMI Registers */
+ qmi_init(fman->qmi_regs, fman->cfg);
+
+ err = enable(&fman_rg, cfg);
+ if (err != 0)
+ return err;
+
+ enable_time_stamp(fman);
+
+ kfree(fman->cfg);
+ fman->cfg = NULL;
+
+ return 0;
+}
+
+static int fman_set_exception(struct fman *fman,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+ struct fman_rg fman_rg;
+
+ if (!is_init_done(fman->cfg))
+ return -EINVAL;
+
+ fman_rg.bmi_rg = fman->bmi_regs;
+ fman_rg.qmi_rg = fman->qmi_regs;
+ fman_rg.fpm_rg = fman->fpm_regs;
+ fman_rg.dma_rg = fman->dma_regs;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ fman->state->exceptions |= bit_mask;
+ else
+ fman->state->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+
+ return set_exception(&fman_rg, exception, enable);
+}
+
+void fman_register_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*isr_cb)(void *src_arg), void *src_arg)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(!(event < FMAN_EV_CNT));
+
+ /* register in local FM structure */
+ fman->intr_mng[event].isr_cb = isr_cb;
+ fman->intr_mng[event].src_handle = src_arg;
+}
+
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(!(event < FMAN_EV_CNT));
+
+ fman->intr_mng[event].isr_cb = NULL;
+ fman->intr_mng[event].src_handle = NULL;
+}
+
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params)
+{
+ int err;
+ unsigned long int_flags;
+ u8 port_id = port_params->port_id, mac_id;
+ struct fman_rg fman_rg;
+
+ fman_rg.bmi_rg = fman->bmi_regs;
+ fman_rg.qmi_rg = fman->qmi_regs;
+ fman_rg.fpm_rg = fman->fpm_regs;
+ fman_rg.dma_rg = fman->dma_regs;
+
+ spin_lock_irqsave(&fman->spinlock, int_flags);
+
+ err = set_num_of_tasks(fman, port_params->port_id,
+ &port_params->num_of_tasks,
+ &port_params->num_of_extra_tasks);
+ if (err) {
+ spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ return err;
+ }
+
+ /* TX Ports */
+ if (port_params->port_type != FMAN_PORT_TYPE_RX) {
+ u32 enq_th, deq_th, reg;
+
+ /* update qmi ENQ/DEQ threshold */
+ fman->state->accumulated_num_of_deq_tnums +=
+ port_params->deq_pipeline_depth;
+ enq_th = (ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
+ /* if enq_th is too big, reduce it to the maximum value
+ * that still leaves room for the accumulated dequeue TNUMs
+ */
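+ /* e.g. with 64 TNUMs and 10 accumulated dequeue TNUMs,
+ * enq_th is capped at 64 - 10 - 1 = 53
+ */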
+ if (enq_th >= (fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums)) {
+ enq_th =
+ fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums - 1;
+
+ reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ reg &= ~QMI_CFG_ENQ_MASK;
+ reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
+ iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ }
+
+ deq_th = ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ QMI_CFG_DEQ_MASK;
+ /* if deq_th is too small, enlarge it to the minimum
+ * value that still exceeds the accumulated dequeue TNUMs.
+ * deq_th may not be larger than 63
+ * (fman->state->qmi_max_num_of_tnums - 1).
+ */
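+ /* e.g. with 10 accumulated dequeue TNUMs, deq_th is raised to 11 */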
+ if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
+ (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
+ deq_th =
+ fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ reg &= ~QMI_CFG_DEQ_MASK;
+ reg |= deq_th;
+ iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ }
+ }
+
+ err = set_size_of_fifo(fman, port_params->port_id,
+ &port_params->size_of_fifo,
+ &port_params->extra_size_of_fifo);
+ if (err) {
+ spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ return err;
+ }
+
+ err = set_num_of_open_dmas(fman, port_params->port_id,
+ &port_params->num_of_open_dmas,
+ &port_params->num_of_extra_open_dmas);
+ if (err) {
+ spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ return err;
+ }
+
+ set_port_liodn(&fman_rg, port_id, fman->liodn_base[port_id],
+ fman->liodn_offset[port_id]);
+
+ if (fman->state->rev_info.major < 6)
+ set_port_order_restoration(fman_rg.fpm_rg, port_id);
+
+ mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+
+ if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
+ fman->state->port_mfl[mac_id] = port_params->max_frame_length;
+ } else {
+ pr_warn("Port max_frame_length is smaller than MAC current MTU\n");
+ spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ return -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&fman->spinlock, int_flags);
+
+ return 0;
+}
+
+int fman_reset_mac(struct fman *fman, u8 mac_id)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u32 msk, timeout = 100;
+
+ if (fman->state->rev_info.major >= 6) {
+ pr_warn("FMan MAC reset no available for FMan V3!\n");
+ return -EINVAL;
+ }
+
+ /* Get the relevant bit mask */
+ switch (mac_id) {
+ case 0:
+ msk = FPM_RSTC_MAC0_RESET;
+ break;
+ case 1:
+ msk = FPM_RSTC_MAC1_RESET;
+ break;
+ case 2:
+ msk = FPM_RSTC_MAC2_RESET;
+ break;
+ case 3:
+ msk = FPM_RSTC_MAC3_RESET;
+ break;
+ case 4:
+ msk = FPM_RSTC_MAC4_RESET;
+ break;
+ case 5:
+ msk = FPM_RSTC_MAC5_RESET;
+ break;
+ case 6:
+ msk = FPM_RSTC_MAC6_RESET;
+ break;
+ case 7:
+ msk = FPM_RSTC_MAC7_RESET;
+ break;
+ case 8:
+ msk = FPM_RSTC_MAC8_RESET;
+ break;
+ case 9:
+ msk = FPM_RSTC_MAC9_RESET;
+ break;
+ default:
+ pr_warn("Illegal MAC Id\n");
+ return -EINVAL;
+ }
+
+ /* reset */
+ iowrite32be(msk, &fpm_rg->fm_rstc);
+ while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
+ udelay(10);
+
+ if (!timeout)
+ return -EIO;
+
+ return 0;
+}
+
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
+{
+ /* if the port is already initialized, check that MaxFrameLength is
+ * smaller than or equal to the port's max
+ */
+ if ((!fman->state->port_mfl[mac_id]) ||
+ (fman->state->port_mfl[mac_id] &&
+ (mfl <= fman->state->port_mfl[mac_id]))) {
+ fman->state->mac_mfl[mac_id] = mfl;
+ } else {
+ pr_warn("MAC max_frame_length is larger than Port max_frame_length\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+u16 fman_get_clock_freq(struct fman *fman)
+{
+ return fman->state->fm_clk_freq;
+}
+
+u32 fman_get_bmi_max_fifo_size(struct fman *fman)
+{
+ return fman->state->bmi_max_fifo_size;
+}
+
+static void fman_event_isr(struct fman *fman)
+{
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+
+ if (!is_init_done(fman->cfg))
+ return;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return;
+
+ if (pending & INTR_EN_QMI)
+ qmi_event(fman);
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (pending & INTR_EN_MAC1)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (pending & INTR_EN_MAC2)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (pending & INTR_EN_MAC3)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (pending & INTR_EN_MAC4)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (pending & INTR_EN_MAC5)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (pending & INTR_EN_MAC6)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (pending & INTR_EN_MAC7)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (pending & INTR_EN_MAC8)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (pending & INTR_EN_MAC9)
+ call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+}
+
+static int fman_error_isr(struct fman *fman)
+{
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+
+ if (!is_init_done(fman->cfg))
+ return -EINVAL;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return -EINVAL;
+
+ if (pending & ERR_INTR_EN_BMI)
+ bmi_err_event(fman);
+ if (pending & ERR_INTR_EN_QMI)
+ qmi_err_event(fman);
+ if (pending & ERR_INTR_EN_FPM)
+ fpm_err_event(fman);
+ if (pending & ERR_INTR_EN_DMA)
+ dma_err_event(fman);
+ if (pending & ERR_INTR_EN_MURAM)
+ muram_err_intr(fman);
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (pending & ERR_INTR_EN_MAC1)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (pending & ERR_INTR_EN_MAC2)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (pending & ERR_INTR_EN_MAC3)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (pending & ERR_INTR_EN_MAC4)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (pending & ERR_INTR_EN_MAC5)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (pending & ERR_INTR_EN_MAC6)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (pending & ERR_INTR_EN_MAC7)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (pending & ERR_INTR_EN_MAC8)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (pending & ERR_INTR_EN_MAC9)
+ call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+
+ return 0;
+}
+
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
+ rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
+ FPM_REV1_MAJOR_SHIFT);
+ rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
+}
+
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
+{
+ int i;
+
+ if (fman->state->rev_info.major >= 6) {
+ u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ } else {
+ u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+ 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ }
+
+ if (i == fman->state->num_of_qman_channels)
+ return 0;
+
+ return fman->state->qman_channel_base + i;
+}
+
+struct resource *fman_get_mem_region(struct fman *fman)
+{
+ return fman->state->res;
+}
+
+/* Bootargs defines */
+/* Extra headroom for RX buffers - Default, min and max */
+#define FSL_FM_RX_EXTRA_HEADROOM 64
+#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
+#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
+
+/* Maximum frame length */
+#define FSL_FM_MAX_FRAME_SIZE 1522
+#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600
+#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64
+
+/* Extra headroom for Rx buffers.
+ * FMan is instructed to allocate, on the Rx path, this amount of
+ * space at the beginning of a data buffer, beside the DPA private
+ * data area and the IC fields.
+ * Does not impact Tx buffer layout.
+ * Configurable from bootargs. 64 by default; it's needed in
+ * particular forwarding scenarios that add extra headers to the
+ * forwarded frame.
+ */
+int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+module_param(fsl_fm_rx_extra_headroom, int, 0);
+MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
+
+/* Max frame size, across all interfaces.
+ * Configurable from bootargs, to avoid allocating oversized (socket)
+ * buffers when not using jumbo frames.
+ * Must be large enough to accommodate the network MTU, but small enough
+ * to avoid wasting skb memory.
+ *
+ * Could be overridden once, at boot-time, via the
+ * fm_set_max_frm() callback.
+ */
+#ifndef __rtems__
+int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+#else /* __rtems__ */
+int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
+#endif /* __rtems__ */
+module_param(fsl_fm_max_frm, int, 0);
+MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+
+u16 fman_get_max_frm(void)
+{
+ static bool fm_check_mfl;
+
+ if (!fm_check_mfl) {
+ if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
+ fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
+ pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_max_frm,
+ FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_FRAME_SIZE);
+ fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+ }
+ fm_check_mfl = true;
+ }
+
+ return fsl_fm_max_frm;
+}
+EXPORT_SYMBOL(fman_get_max_frm);
+
+int fman_get_rx_extra_headroom(void)
+{
+ static bool fm_check_rx_extra_headroom;
+
+ if (!fm_check_rx_extra_headroom) {
+ if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
+ fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
+ pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_rx_extra_headroom,
+ FSL_FM_RX_EXTRA_HEADROOM_MIN,
+ FSL_FM_RX_EXTRA_HEADROOM_MAX,
+ FSL_FM_RX_EXTRA_HEADROOM);
+ fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+ }
+
+ fm_check_rx_extra_headroom = true;
+ fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
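+ /* e.g. a bootarg value of 70 is rounded up to 80 */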
+ }
+
+ return fsl_fm_rx_extra_headroom;
+}
+EXPORT_SYMBOL(fman_get_rx_extra_headroom);
+
+struct fman *fman_bind(struct device *fm_dev)
+{
+ return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
+}
+
+void fman_unbind(struct fman *fman)
+{
+ put_device(fman->dev);
+}
+
+struct device *fman_get_device(struct fman *fman)
+{
+ return fman->dev;
+}
+
+static irqreturn_t fman_irq(int irq, void *fman)
+{
+ fman_event_isr(fman);
+
+ return IRQ_HANDLED;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_muram_match[] = {
+ {
+ .compatible = "fsl,fman-muram"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, fman_muram_match);
+#endif /* __rtems__ */
+
+static struct fman *read_dts_node(struct platform_device *of_dev)
+{
+ struct fman *fman;
+#ifndef __rtems__
+ struct device_node *fm_node, *muram_node;
+ struct resource *res;
+#else /* __rtems__ */
+ const char *fdt = bsp_fdt_get();
+ struct device_node *fm_node;
+#endif /* __rtems__ */
+ const u32 *u32_prop;
+ int lenp, err, irq;
+#ifndef __rtems__
+ struct clk *clk;
+ u32 clk_rate;
+#endif /* __rtems__ */
+ phys_addr_t phys_base_addr;
+#ifndef __rtems__
+ resource_size_t mem_size;
+#endif /* __rtems__ */
+
+ fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+ if (!fman)
+ return NULL;
+
+ fm_node = of_node_get(of_dev->dev.of_node);
+
+ u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ pr_err("of_get_property(%s, cell-index) failed\n",
+ fm_node->full_name);
+ goto fman_node_put;
+ }
+ if (WARN_ON(lenp != sizeof(u32)))
+ goto fman_node_put;
+
+ fman->dts_params.id = (u8)*u32_prop;
+
+#ifndef __rtems__
+ /* Get the FM interrupt */
+ res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ pr_err("Can't get FMan IRQ resource\n");
+ goto fman_node_put;
+ }
+ irq = res->start;
+
+ /* Get the FM error interrupt */
+ res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
+ if (!res) {
+ pr_err("Can't get FMan Error IRQ resource\n");
+ goto fman_node_put;
+ }
+ fman->dts_params.err_irq = res->start;
+
+ /* Get the FM address */
+ res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("Can't get FMan memory resouce\n");
+ goto fman_node_put;
+ }
+
+ phys_base_addr = res->start;
+ mem_size = res->end + 1 - res->start;
+#else /* __rtems__ */
+ irq = of_irq_to_resource(fm_node, 0, NULL);
+ fman->dts_params.err_irq = of_irq_to_resource(fm_node, 1, NULL);
+ phys_base_addr = of_dev->dev.base;
+ fman->dts_params.base_addr = (void *)(uintptr_t)phys_base_addr;
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+ clk = of_clk_get_by_name(fm_node, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get FM%d clock structure\n",
+ fman->dts_params.id);
+ goto fman_node_put;
+ }
+
+ clk_rate = clk_get_rate(clk);
+ if (!clk_rate) {
+ pr_err("Failed to determine FM%d clock rate\n",
+ fman->dts_params.id);
+ goto fman_node_put;
+ }
+ /* Rounding to MHz */
+ fman->dts_params.clk_freq = (u16)((clk_rate + 500000) / 1000000);
+#else /* __rtems__ */
+ /* FIXME */
+ fman->dts_params.clk_freq = 733;
+#endif /* __rtems__ */
+
+ u32_prop = (const u32 *)of_get_property(fm_node,
+ "fsl,qman-channel-range",
+ &lenp);
+ if (!u32_prop) {
+ pr_err("of_get_property(%s, fsl,qman-channel-range) failed\n",
+ fm_node->full_name);
+ goto fman_node_put;
+ }
+ if (WARN_ON(lenp != sizeof(u32) * 2))
+ goto fman_node_put;
+ fman->dts_params.qman_channel_base = u32_prop[0];
+ fman->dts_params.num_of_qman_channels = u32_prop[1];
+
+ /* Get the MURAM base address and size */
+#ifndef __rtems__
+ /* FIXME */
+ muram_node = of_find_matching_node(fm_node, fman_muram_match);
+ if (!muram_node) {
+ pr_err("could not find MURAM node\n");
+ goto fman_node_put;
+ }
+
+ err = of_address_to_resource(muram_node, 0, res);
+ if (err) {
+ of_node_put(muram_node);
+ pr_err("of_address_to_resource() = %d\n", err);
+ goto fman_node_put;
+ }
+
+ fman->dts_params.muram_phy_base_addr = res->start;
+ fman->dts_params.muram_size = res->end + 1 - res->start;
+#else /* __rtems__ */
+ {
+ int node = fdt_node_offset_by_compatible(fdt,
+ fm_node->offset, "fsl,fman-muram");
+ struct device_node muram_node = {
+ .offset = node
+ };
+ struct resource res;
+
+ err = of_address_to_resource(&muram_node, 0, &res);
+ if (err != 0) {
+ pr_err("could not find MURAM node\n");
+ goto fman_node_put;
+ }
+ fman->dts_params.muram_phy_base_addr = phys_base_addr +
+ res.start;
+ fman->dts_params.muram_size = res.end - res.start;
+ }
+#endif /* __rtems__ */
+ {
+ /* In B4 rev 2.0 (and above) the MURAM size is 512KB.
+ * Check the SVR and update MURAM size if required.
+ */
+ u32 svr;
+
+ svr = mfspr(SPRN_SVR);
+
+ if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) >= 2))
+ fman->dts_params.muram_size = 0x80000;
+ }
+
+#ifndef __rtems__
+ of_node_put(muram_node);
+#endif /* __rtems__ */
+ of_node_put(fm_node);
+
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq,
+ IRQF_NO_SUSPEND, "fman", fman);
+ if (err < 0) {
+ pr_err("Error: allocating irq %d (error = %d)\n", irq, err);
+ goto fman_free;
+ }
+
+#ifndef __rtems__
+ fman->dts_params.res =
+ devm_request_mem_region(&of_dev->dev, phys_base_addr,
+ mem_size, "fman");
+ if (!fman->dts_params.res) {
+ pr_err("request_mem_region() failed\n");
+ goto fman_free;
+ }
+
+ fman->dts_params.base_addr =
+ devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
+ if (fman->dts_params.base_addr == 0) {
+ pr_err("devm_ioremap() failed\n");
+ goto fman_free;
+ }
+#endif /* __rtems__ */
+
+ return fman;
+
+fman_node_put:
+ of_node_put(fm_node);
+fman_free:
+ kfree(fman);
+ return NULL;
+}
+
+static irqreturn_t fman_err_irq(int irq, void *fman)
+{
+ if (fman_error_isr(fman) == 0)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static int fman_probe(struct platform_device *of_dev)
+{
+ struct fman *fman;
+ struct device *dev;
+ int err;
+
+ dev = &of_dev->dev;
+
+ fman = read_dts_node(of_dev);
+ if (!fman)
+ return -EIO;
+
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(dev, fman->dts_params.err_irq,
+ fman_err_irq,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "fman-err", fman);
+ if (err < 0) {
+ pr_err("Error: allocating irq %d (error = %d)\n",
+ fman->dts_params.err_irq, err);
+ return -EINVAL;
+ }
+ }
+
+ err = fman_config(fman);
+ if (err) {
+ pr_err("FMan config failed\n");
+ return -EINVAL;
+ }
+
+ if (fman_init(fman) != 0) {
+ pr_err("FMan init failed\n");
+ return -EINVAL;
+ }
+
+ if (fman->dts_params.err_irq == 0) {
+ fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
+ fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
+ fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
+ fman_set_exception(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
+ fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
+ false);
+ fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
+ }
+
+ dev_set_drvdata(dev, fman);
+
+ fman->dev = dev;
+
+ pr_debug("FM%d probed\n", fman->dts_params.id);
+
+ return 0;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_match[] = {
+ {
+ .compatible = "fsl,fman"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_match);
+
+static struct platform_driver fman_driver = {
+ .driver = {
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
+ .probe = fman_probe,
+};
+
+builtin_platform_driver(fman_driver);
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+void
+fman_reset(struct fman *fman)
+{
+
+ /*
+ * Ignore errata A007273, since we do not disable the Ethernet MAC
+ * clocks.
+ */
+
+ out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
+ /* Memory barrier */
+ mb();
+ usleep_range(100, 300);
+
+ if (!!(ioread32be(&fman->qmi_regs->fmqm_gs) &
+ QMI_GS_HALT_NOT_BUSY)) {
+ usleep_range(100, 300);
+ }
+}
+
+struct fman_softc {
+ struct platform_device of_dev;
+ struct device_node dn;
+};
+
+static int
+fman_dev_probe_fdt(struct fman_softc *sc, int unit)
+{
+ const char *fdt = bsp_fdt_get();
+ const char *name = "fsl,fman";
+ int node = 0;
+
+ while (1) {
+ node = fdt_node_offset_by_compatible(fdt, node, name);
+ if (node >= 0) {
+ int len;
+ const fdt32_t *p = fdt_getprop(fdt, node, "cell-index", &len);
+
+ if (p != NULL && len == sizeof(*p)) {
+ if (fdt32_to_cpu(*p) == (uint32_t)unit) {
+ sc->dn.offset = node;
+ sc->dn.full_name = name;
+ sc->of_dev.dev.of_node = &sc->dn;
+ sc->of_dev.dev.base = (uintptr_t)&qoriq.fman[unit];
+ return (BUS_PROBE_DEFAULT);
+ }
+ } else {
+ return (ENXIO);
+ }
+ } else {
+ return (ENXIO);
+ }
+ }
+}
+
+static int
+fman_dev_probe(device_t dev)
+{
+ struct fman_softc *sc = device_get_softc(dev);
+
+ device_set_desc(dev, "FMan");
+
+ return (fman_dev_probe_fdt(sc, device_get_unit(dev)));
+}
+
+static int
+fman_dev_attach(device_t dev)
+{
+ const char *fdt = bsp_fdt_get();
+ struct fman_softc *sc = device_get_softc(dev);
+ int node;
+ int err;
+
+ err = fman_probe(&sc->of_dev);
+ if (err != 0) {
+ return (ENXIO);
+ }
+
+ node = fdt_first_subnode(fdt, sc->dn.offset);
+ while (node >= 0) {
+ struct fman_ivars *ivars =
+ kzalloc(sizeof(*ivars), GFP_KERNEL);
+ device_t child;
+
+ if (ivars == NULL) {
+ return (ENOMEM);
+ }
+
+ ivars->dn.offset = node;
+ ivars->of_dev.dev.of_node = &ivars->dn;
+ ivars->of_dev.dev.base = sc->of_dev.dev.base;
+ ivars->fman = dev_get_drvdata(&sc->of_dev.dev);
+
+ child = device_add_child(dev, NULL, -1);
+ if (child == NULL) {
+ kfree(ivars);
+ return (ENOMEM);
+ }
+
+ device_set_ivars(child, ivars);
+
+ err = device_probe_and_attach(child);
+ if (err != 0) {
+ kfree(ivars);
+ }
+
+ node = fdt_next_subnode(fdt, node);
+ }
+
+ return (0);
+}
+
+static int
+fman_dev_detach(device_t dev)
+{
+ struct fman_softc *sc = device_get_softc(dev);
+ int err;
+
+ err = bus_generic_detach(dev);
+ if (err == 0) {
+ fman_reset(dev_get_drvdata(&sc->of_dev.dev));
+ }
+
+ return (err);
+}
+
+static device_method_t fman_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, fman_dev_probe),
+ DEVMETHOD(device_attach, fman_dev_attach),
+ DEVMETHOD(device_detach, fman_dev_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ DEVMETHOD_END
+};
+
+driver_t fman_driver = {
+ .name = "fman",
+ .methods = fman_methods,
+ .size = sizeof(struct fman_softc),
+};
+
+static devclass_t fman_devclass;
+
+DRIVER_MODULE(fman, nexus, fman_driver, fman_devclass, 0, 0);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.h b/linux/drivers/net/ethernet/freescale/fman/fman.h
new file mode 100644
index 00000000..291d263a
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.h
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_H
+#define __FM_H
+
+#include <linux/io.h>
+
+/* FM Frame descriptor macros */
+/* Frame queue Context Override */
+#define FM_FD_CMD_FCO 0x80000000
+#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
+
+/* TX-Port: Unsupported Format */
+#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000
+/* TX Port: Length Error */
+#define FM_FD_ERR_LENGTH 0x02000000
+#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */
+
+/* IPR frame (not error) */
+#define FM_FD_IPR 0x00000001
+/* IPR non-consistent-sp */
+#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR)
+/* IPR error */
+#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR)
+/* IPR timeout */
+#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR)
+/* IPR error, without the IPR frame indication bit */
+#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+
+
+/* Rx FIFO overflow, FCS error, code error, running disparity error
+ * (SGMII and TBI modes), FIFO parity error, PHY sequence error,
+ * PHY error control character detected.
+ */
+#define FM_FD_ERR_PHYSICAL 0x00080000
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_FD_ERR_SIZE 0x00040000
+/* classification discard */
+#define FM_FD_ERR_CLS_DISCARD 0x00020000
+/* Extract Out of Frame */
+#define FM_FD_ERR_EXTRACTION 0x00008000
+/* No Scheme Selected */
+#define FM_FD_ERR_NO_SCHEME 0x00004000
+/* Keysize Overflow */
+#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000
+/* Frame color is red */
+#define FM_FD_ERR_COLOR_RED 0x00000800
+/* Frame color is yellow */
+#define FM_FD_ERR_COLOR_YELLOW 0x00000400
+/* Parser Time out Exceed */
+#define FM_FD_ERR_PRS_TIMEOUT 0x00000080
+/* Invalid Soft Parser instruction */
+#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040
+/* Header error was identified during parsing */
+#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
+/* Frame parsed beyond the first 256 bytes */
+#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
+
+/* non Frame-Manager error */
+#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000
+
+/* FMan driver defines */
+#define FMAN_BMI_FIFO_UNITS 0x100
+#define OFFSET_UNITS 16
+
+/* BMan defines */
+#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+
+struct fman; /* FMan data */
+#ifdef __rtems__
+#include <linux/platform_device.h>
+
+struct fman_ivars {
+ struct platform_device of_dev;
+ struct device_node dn;
+ struct fman *fman;
+};
+#endif /* __rtems__ */
+
+/* Enum for defining port types */
+enum fman_port_type {
+ FMAN_PORT_TYPE_TX = 0, /* TX Port */
+ FMAN_PORT_TYPE_RX, /* RX Port */
+};
+
+struct fman_rev_info {
+ u8 major; /* Major revision */
+ u8 minor; /* Minor revision */
+};
+
+enum fman_exceptions {
+ FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */
+ FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */
+ FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */
+ FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */
+ FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */
+ FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */
+ FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */
+ FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */
+ FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
+ FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
+ FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */
+ FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
+ FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
+ FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */
+ FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */
+ FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */
+};
+
+/* Parse results memory layout */
+struct fman_prs_result {
+ u8 lpid; /* Logical port id */
+ u8 shimr; /* Shim header result */
+ u16 l2r; /* Layer 2 result */
+ u16 l3r; /* Layer 3 result */
+ u8 l4r; /* Layer 4 result */
+ u8 cplan; /* Classification plan id */
+ u16 nxthdr; /* Next Header */
+ u16 cksum; /* Running-sum */
+ /* Flags&fragment-offset field of the last IP-header */
+ u16 flags_frag_off;
+ /* Routing type field of a IPV6 routing extension header */
+ u8 route_type;
+ /* Routing Extension Header Present; last bit is IP valid */
+ u8 rhp_ip_valid;
+ u8 shim_off[2]; /* Shim offset */
+ u8 ip_pid_off; /* IP PID (last IP-proto) offset */
+ u8 eth_off; /* ETH offset */
+ u8 llc_snap_off; /* LLC_SNAP offset */
+ u8 vlan_off[2]; /* VLAN offset */
+ u8 etype_off; /* ETYPE offset */
+ u8 pppoe_off; /* PPP offset */
+ u8 mpls_off[2]; /* MPLS offset */
+ u8 ip_off[2]; /* IP offset */
+ u8 gre_off; /* GRE offset */
+ u8 l4_off; /* Layer 4 offset */
+	u8 nxthdr_off;		/* Parser end point */
+} __attribute__((__packed__));
+
+/* A structure for defining buffer prefix area content. */
+struct fman_buffer_prefix_content {
+ /* Number of bytes to be left at the beginning of the external
+ * buffer; Note that the private-area will start from the base
+ * of the buffer address.
+ */
+ u16 priv_data_size;
+ /* true to pass the parse result to/from the FM;
+ * User may use FM_PORT_GetBufferPrsResult() in
+ * order to get the parser-result from a buffer.
+ */
+ bool pass_prs_result;
+ /* true to pass the timeStamp to/from the FM User */
+ bool pass_time_stamp;
+ /* true to pass the KG hash result to/from the FM User may
+ * use FM_PORT_GetBufferHashResult() in order to get the
+ * parser-result from a buffer.
+ */
+ bool pass_hash_result;
+ /* Add all other Internal-Context information: AD,
+ * hash-result, key, etc.
+ */
+ u16 data_align;
+};
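+
+/*
+ * Illustrative sketch only (not part of the imported sources): a minimal
+ * buffer prefix configuration that requests the parse result and a 64-byte
+ * data alignment; all values are hypothetical.
+ *
+ *	struct fman_buffer_prefix_content prefix = {
+ *		.priv_data_size = 16,
+ *		.pass_prs_result = true,
+ *		.pass_time_stamp = false,
+ *		.pass_hash_result = false,
+ *		.data_align = 64,
+ *	};
+ */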
+
+/* A structure of information about each of the external
+ * buffer pools used by a port or storage-profile.
+ */
+struct fman_ext_pool_params {
+ u8 id; /* External buffer pool id */
+ u16 size; /* External buffer pool buffer size */
+};
+
+/* A structure for informing the driver about the external
+ * buffer pools allocated in the BM and used by a port or a
+ * storage-profile.
+ */
+struct fman_ext_pools {
+ u8 num_of_pools_used; /* Number of pools use by this port */
+ struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /* Parameters for each port */
+};
+
+/* A structure for defining BM pool depletion criteria */
+struct fman_buf_pool_depletion {
+ /* select mode in which pause frames will be sent after a
+ * number of pools (all together!) are depleted
+ */
+ bool pools_grp_mode_enable;
+ /* the number of depleted pools that will invoke pause
+ * frames transmission.
+ */
+ u8 num_of_pools;
+ /* For each pool, true if it should be considered for
+ * depletion (Note - this pool must be used by this port!).
+ */
+ bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
+ /* select mode in which pause frames will be sent
+ * after a single-pool is depleted;
+ */
+ bool single_pool_mode_enable;
+ /* For each pool, true if it should be considered
+ * for depletion (Note - this pool must be used by this port!)
+ */
+ bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
+};
+
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine, will be called upon an exception
+ * passing the exception identification.
+ */
+typedef void (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine, will be called upon bus error,
+ * passing parameters describing the errors and the owner.
+ */
+typedef void (fman_bus_error_cb)(struct fman *fman, u8 port_id, u64 addr,
+ u8 tnum, u16 liodn);
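+
+/*
+ * Illustrative sketch only (not part of the imported sources): callbacks
+ * matching the two typedefs above; the function names are hypothetical.
+ *
+ *	static void ex_cb(struct fman *fman, enum fman_exceptions exception)
+ *	{
+ *		pr_warn("FMan exception %d\n", (int)exception);
+ *	}
+ *
+ *	static void bus_err_cb(struct fman *fman, u8 port_id, u64 addr,
+ *			       u8 tnum, u16 liodn)
+ *	{
+ *		pr_err("FMan bus error: port %u, addr 0x%llx\n", port_id,
+ *		       (unsigned long long)addr);
+ *	}
+ */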
+
+/* Enum for inter-module interrupts registration */
+enum fman_event_modules {
+ FMAN_MOD_MAC = 0, /* MAC event */
+ FMAN_MOD_FMAN_CTRL, /* FMAN Controller */
+ FMAN_MOD_DUMMY_LAST
+};
+
+/* Enum for interrupts types */
+enum fman_intr_type {
+ FMAN_INTR_TYPE_ERR,
+ FMAN_INTR_TYPE_NORMAL
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_inter_module_event {
+ FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */
+ FMAN_EV_ERR_MAC1, /* MAC 1 error event */
+ FMAN_EV_ERR_MAC2, /* MAC 2 error event */
+ FMAN_EV_ERR_MAC3, /* MAC 3 error event */
+ FMAN_EV_ERR_MAC4, /* MAC 4 error event */
+ FMAN_EV_ERR_MAC5, /* MAC 5 error event */
+ FMAN_EV_ERR_MAC6, /* MAC 6 error event */
+ FMAN_EV_ERR_MAC7, /* MAC 7 error event */
+ FMAN_EV_ERR_MAC8, /* MAC 8 error event */
+ FMAN_EV_ERR_MAC9, /* MAC 9 error event */
+ FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */
+ FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */
+ FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */
+ FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */
+ FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */
+ FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */
+ FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */
+ FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */
+ FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */
+ FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */
+ FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */
+ FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */
+ FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */
+ FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */
+ FMAN_EV_CNT
+};
+
+struct fman_intr_src {
+ void (*isr_cb)(void *src_arg);
+ void *src_handle;
+};
+
+/* Structure for port-FM communication during fman_port_init. */
+struct fman_port_init_params {
+ u8 port_id; /* port Id */
+ enum fman_port_type port_type; /* Port type */
+ u16 port_speed; /* Port speed */
+ u16 liodn_offset; /* Port's requested resource */
+ u8 num_of_tasks; /* Port's requested resource */
+ u8 num_of_extra_tasks; /* Port's requested resource */
+ u8 num_of_open_dmas; /* Port's requested resource */
+ u8 num_of_extra_open_dmas; /* Port's requested resource */
+ u32 size_of_fifo; /* Port's requested resource */
+ u32 extra_size_of_fifo; /* Port's requested resource */
+ u8 deq_pipeline_depth; /* Port's requested resource */
+ u16 max_frame_length; /* Port's max frame length. */
+ u16 liodn_base;
+ /* LIODN base for this port, to be used together with LIODN offset. */
+};
+
+/**
+ * fman_get_revision
+ * @fman: A Pointer to the FMan module
+ * @rev_info: A structure of revision information parameters.
+ *
+ * Fills @rev_info with the FM revision.
+ *
+ * Allowed only following fman_init().
+ */
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
+
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @f_isr: The interrupt service routine.
+ * @h_src_arg: Argument to be passed to f_isr.
+ *
+ * Used to register an event handler to be processed by FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*f_isr)(void *h_src_arg), void *h_src_arg);
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister a previously registered event handler.
+ */
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type);
+
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params);
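+
+/*
+ * Illustrative sketch only (not part of the imported sources): how a port
+ * might hand its resource request to the FMan; the values are hypothetical
+ * and error handling is left to the caller.
+ *
+ *	struct fman_port_init_params params = {
+ *		.port_id = 8,
+ *		.port_type = FMAN_PORT_TYPE_RX,
+ *		.port_speed = 1000,
+ *		.num_of_tasks = 4,
+ *		.size_of_fifo = 4 * 1024,
+ *		.max_frame_length = 1518,
+ *	};
+ *	int err = fman_set_port_params(fman, &params);
+ */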
+
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_reset_mac(struct fman *fman, u8 mac_id);
+
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
+u16 fman_get_clock_freq(struct fman *fman);
+
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
+u32 fman_get_bmi_max_fifo_size(struct fman *fman);
+
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
+
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
+
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
+struct resource *fman_get_mem_region(struct fman *fman);
+
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
+u16 fman_get_max_frm(void);
+
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
+int fman_get_rx_extra_headroom(void);
+
+/**
+ * fman_bind
+ * @dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
+struct fman *fman_bind(struct device *dev);
+
+/**
+ * fman_unbind
+ * @fman: Pointer to the FMan device
+ *
+ * Un-bind from a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ */
+void fman_unbind(struct fman *fman);
+
+/**
+ * fman_get_device
+ * @fman: A pointer to the FMan device.
+ *
+ * Get the FMan device pointer
+ *
+ * Return: Pointer to FMan device.
+ */
+struct device *fman_get_device(struct fman *fman);
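+
+/*
+ * Illustrative sketch only (not part of the imported sources): a typical
+ * lookup chain from an OF device to FMan attributes; dev is assumed to be
+ * the FMan platform device.
+ *
+ *	struct fman *fman = fman_bind(dev);
+ *	u16 clk_mhz = fman_get_clock_freq(fman);
+ *	u32 max_fifo = fman_get_bmi_max_fifo_size(fman);
+ */
+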
+#ifdef __rtems__
+void fman_reset(struct fman *fman);
+#endif /* __rtems__ */
+
+#endif /* __FM_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
new file mode 100644
index 00000000..5be951b8
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -0,0 +1,1786 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "crc_mac_addr_ext.h"
+
+#include "fman_dtsec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+/* MII Management Command Register */
+#define MIIMCOM_READ_CYCLE 0x00000001
+
+/* MII Management Address Register */
+#define MIIMADD_PHY_ADDR_SHIFT 8
+
+/* MII Management Indicator Register */
+#define MIIMIND_BUSY 0x00000001
+
+/* PHY Control Register */
+#define PHY_CR_PHY_RESET 0x8000
+#define PHY_CR_SPEED0 0x2000
+#define PHY_CR_ANE 0x1000
+#define PHY_CR_RESET_AN 0x0200
+#define PHY_CR_FULLDUPLEX 0x0100
+#define PHY_CR_SPEED1 0x0040
+
+#define PHY_TBICON_SRESET 0x8000
+#define PHY_TBICON_CLK_SEL 0x0020
+#define PHY_TBIANA_SGMII 0x4001
+#define PHY_TBIANA_1000X 0x01a0
+
+#define DTSEC_TO_MII_OFFSET 0x1000
+
+/* Interrupt Mask Register (IMASK) */
+#define DTSEC_IMASK_BREN 0x80000000
+#define DTSEC_IMASK_RXCEN 0x40000000
+#define DTSEC_IMASK_MSROEN 0x04000000
+#define DTSEC_IMASK_GTSCEN 0x02000000
+#define DTSEC_IMASK_BTEN 0x01000000
+#define DTSEC_IMASK_TXCEN 0x00800000
+#define DTSEC_IMASK_TXEEN 0x00400000
+#define DTSEC_IMASK_LCEN 0x00040000
+#define DTSEC_IMASK_CRLEN 0x00020000
+#define DTSEC_IMASK_XFUNEN 0x00010000
+#define DTSEC_IMASK_ABRTEN 0x00008000
+#define DTSEC_IMASK_IFERREN 0x00004000
+#define DTSEC_IMASK_MAGEN 0x00000800
+#define DTSEC_IMASK_MMRDEN 0x00000400
+#define DTSEC_IMASK_MMWREN 0x00000200
+#define DTSEC_IMASK_GRSCEN 0x00000100
+#define DTSEC_IMASK_TDPEEN 0x00000002
+#define DTSEC_IMASK_RDPEEN 0x00000001
+
+#define DTSEC_EVENTS_MASK \
+ ((u32)(DTSEC_IMASK_BREN | \
+ DTSEC_IMASK_RXCEN | \
+ DTSEC_IMASK_BTEN | \
+ DTSEC_IMASK_TXCEN | \
+ DTSEC_IMASK_TXEEN | \
+ DTSEC_IMASK_ABRTEN | \
+ DTSEC_IMASK_LCEN | \
+ DTSEC_IMASK_CRLEN | \
+ DTSEC_IMASK_XFUNEN | \
+ DTSEC_IMASK_IFERREN | \
+ DTSEC_IMASK_MAGEN | \
+ DTSEC_IMASK_TDPEEN | \
+ DTSEC_IMASK_RDPEEN))
+
+/* dtsec timestamp event bits */
+#define TMR_PEMASK_TSREEN 0x00010000
+#define TMR_PEVENT_TSRE 0x00010000
+
+/* Group address bit indication */
+#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
+
+/* Defaults */
+#define DEFAULT_HALFDUP_RETRANSMIT 0xf
+#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
+#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
+#define DEFAULT_TX_PAUSE_TIME 0xf000
+#define DEFAULT_TBIPA 5
+#define DEFAULT_RX_PREPEND 0
+#define DEFAULT_PREAMBLE_LEN 7
+#define DEFAULT_TX_PAUSE_TIME_EXTD 0
+#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
+#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
+#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
+#define DEFAULT_BACK_TO_BACK_IPG 0x60
+#define DEFAULT_MAXIMUM_FRAME 0x600
+#define DEFAULT_TBI_PHY_ADDR 5
+
+#define DTSEC_DEFAULT_EXCEPTIONS \
+ ((u32)((DTSEC_IMASK_BREN) |\
+ (DTSEC_IMASK_RXCEN) |\
+ (DTSEC_IMASK_BTEN) |\
+ (DTSEC_IMASK_TXCEN) |\
+ (DTSEC_IMASK_TXEEN) |\
+ (DTSEC_IMASK_ABRTEN) |\
+ (DTSEC_IMASK_LCEN) |\
+ (DTSEC_IMASK_CRLEN) |\
+ (DTSEC_IMASK_XFUNEN) |\
+ (DTSEC_IMASK_IFERREN) |\
+ (DTSEC_IMASK_MAGEN) |\
+ (DTSEC_IMASK_TDPEEN) |\
+ (DTSEC_IMASK_RDPEEN)))
+
+/* register related defines (bits, field offsets..) */
+#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
+
+#define DTSEC_ECNTRL_GMIIM 0x00000040
+#define DTSEC_ECNTRL_TBIM 0x00000020
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
+#define DTSEC_ECNTRL_RPM 0x00000010
+#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+
+#define DTSEC_TCTRL_THDF 0x00000800
+#define DTSEC_TCTRL_TTSE 0x00000040
+#define DTSEC_TCTRL_GTS 0x00000020
+
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_PAL_SHIFT 16
+#define RCTRL_CFA 0x00008000
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
+#define RCTRL_GRS 0x00000020
+#define RCTRL_BC_REJ 0x00000010
+#define RCTRL_MPROM 0x00000008
+#define RCTRL_RSF 0x00000004
+#define RCTRL_UPROM 0x00000001
+#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)
+
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_TX_EN 0x00000001
+#define MACCFG1_RX_EN 0x00000004
+
+#define MACCFG2_NIBBLE_MODE 0x00000100
+#define MACCFG2_BYTE_MODE 0x00000200
+#define MACCFG2_PRE_AM_RX_EN 0x00000080
+#define MACCFG2_PRE_AM_TX_EN 0x00000040
+#define MACCFG2_LENGTH_CHECK 0x00000010
+#define MACCFG2_PAD_CRC_EN 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
+#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
+#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
+#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
+#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
+
+#define HAFDUP_ALT_BEB 0x00080000
+#define HAFDUP_BP_NO_BACKOFF 0x00040000
+#define HAFDUP_NO_BACKOFF 0x00020000
+#define HAFDUP_EXCESS_DEFER 0x00010000
+#define HAFDUP_COLLISION_WINDOW 0x000003ff
+#define HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK 0x00f00000
+#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
+#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
+#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
+
+#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
+
+#define PTV_PTE_MASK 0xffff0000
+#define PTV_PT_MASK 0x0000ffff
+#define PTV_PTE_SHIFT 16
+
+#define MAX_PACKET_ALIGNMENT 31
+#define MAX_INTER_PACKET_GAP 0x7f
+#define MAX_INTER_PALTERNATE_BEB 0x0f
+#define MAX_RETRANSMISSION 0x0f
+#define MAX_COLLISION_WINDOW 0x03ff
+
+/* Hash table size (32 bits*8 regs) */
+#define DTSEC_HASH_TABLE_SIZE 256
+/* Extended Hash table size (32 bits*16 regs) */
+#define EXTENDED_HASH_TABLE_SIZE 512
+
+/* maximum number of phys */
+#define MAX_PHYS 32
+
+/* MII Configuration Control Memory Map Registers */
+struct dtsec_mii_regs {
+ u32 reserved1[72];
+ u32 miimcfg; /* MII Mgmt:configuration */
+ u32 miimcom; /* MII Mgmt:command */
+ u32 miimadd; /* MII Mgmt:address */
+	u32 miimcon;	/* MII Mgmt:control */
+ u32 miimstat; /* MII Mgmt:status */
+ u32 miimind; /* MII Mgmt:indicators */
+};
+
+/* dTSEC Memory Map registers */
+struct dtsec_regs {
+ /* dTSEC General Control and Status Registers */
+ u32 tsec_id; /* 0x000 ETSEC_ID register */
+ u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
+ u32 ievent; /* 0x008 Interrupt event register */
+ u32 imask; /* 0x00C Interrupt mask register */
+ u32 reserved0010[1];
+ u32 ecntrl; /* 0x014 E control register */
+ u32 ptv; /* 0x018 Pause time value register */
+ u32 tbipa; /* 0x01C TBI PHY address register */
+ u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
+ u32 tmr_pevent; /* 0x024 Time-stamp event register */
+ u32 tmr_pemask; /* 0x028 Timer event mask register */
+ u32 reserved002c[5];
+ u32 tctrl; /* 0x040 Transmit control register */
+ u32 reserved0044[3];
+ u32 rctrl; /* 0x050 Receive control register */
+ u32 reserved0054[11];
+ u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
+ u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
+ u32 reserved00c0[16];
+ u32 maccfg1; /* 0x100 MAC configuration #1 */
+ u32 maccfg2; /* 0x104 MAC configuration #2 */
+ u32 ipgifg; /* 0x108 IPG/IFG */
+ u32 hafdup; /* 0x10C Half-duplex */
+ u32 maxfrm; /* 0x110 Maximum frame */
+ u32 reserved0114[10];
+ u32 ifstat; /* 0x13C Interface status */
+ u32 macstnaddr1; /* 0x140 Station Address,part 1 */
+ u32 macstnaddr2; /* 0x144 Station Address,part 2 */
+ struct {
+ u32 exact_match1; /* octets 1-4 */
+ u32 exact_match2; /* octets 5-6 */
+ } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
+ u32 reserved01c0[16];
+ u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
+ u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
+ u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
+ u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
+ u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
+ u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
+ u32 trmgv;
+ /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
+ u32 rbyt; /* 0x21C receive byte counter */
+ u32 rpkt; /* 0x220 receive packet counter */
+ u32 rfcs; /* 0x224 receive FCS error counter */
+ u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
+ u32 rbca; /* 0x22C Rx broadcast packet counter */
+ u32 rxcf; /* 0x230 Rx control frame packet counter */
+ u32 rxpf; /* 0x234 Rx pause frame packet counter */
+ u32 rxuo; /* 0x238 Rx unknown OP code counter */
+ u32 raln; /* 0x23C Rx alignment error counter */
+ u32 rflr; /* 0x240 Rx frame length error counter */
+ u32 rcde; /* 0x244 Rx code error counter */
+ u32 rcse; /* 0x248 Rx carrier sense error counter */
+ u32 rund; /* 0x24C Rx undersize packet counter */
+ u32 rovr; /* 0x250 Rx oversize packet counter */
+ u32 rfrg; /* 0x254 Rx fragments counter */
+ u32 rjbr; /* 0x258 Rx jabber counter */
+ u32 rdrp; /* 0x25C Rx drop */
+ u32 tbyt; /* 0x260 Tx byte counter */
+ u32 tpkt; /* 0x264 Tx packet counter */
+ u32 tmca; /* 0x268 Tx multicast packet counter */
+ u32 tbca; /* 0x26C Tx broadcast packet counter */
+ u32 txpf; /* 0x270 Tx pause control frame counter */
+ u32 tdfr; /* 0x274 Tx deferral packet counter */
+ u32 tedf; /* 0x278 Tx excessive deferral packet counter */
+ u32 tscl; /* 0x27C Tx single collision packet counter */
+ u32 tmcl; /* 0x280 Tx multiple collision packet counter */
+ u32 tlcl; /* 0x284 Tx late collision packet counter */
+ u32 txcl; /* 0x288 Tx excessive collision packet counter */
+ u32 tncl; /* 0x28C Tx total collision counter */
+ u32 reserved0290[1];
+ u32 tdrp; /* 0x294 Tx drop frame counter */
+ u32 tjbr; /* 0x298 Tx jabber frame counter */
+ u32 tfcs; /* 0x29C Tx FCS error counter */
+ u32 txcf; /* 0x2A0 Tx control frame counter */
+ u32 tovr; /* 0x2A4 Tx oversize frame counter */
+ u32 tund; /* 0x2A8 Tx undersize frame counter */
+ u32 tfrg; /* 0x2AC Tx fragments frame counter */
+ u32 car1; /* 0x2B0 carry register one register* */
+ u32 car2; /* 0x2B4 carry register two register* */
+ u32 cam1; /* 0x2B8 carry register one mask register */
+ u32 cam2; /* 0x2BC carry register two mask register */
+ u32 reserved02c0[848];
+};
+
+/* struct dtsec_cfg - dTSEC configuration
+ * halfdup_on:
+ * Transmit half-duplex flow control, under software control for 10/100-Mbps
+ * half-duplex media. If set, back pressure is applied to media by raising
+ * carrier.
+ * halfdup_retransmit:
+ * Number of retransmission attempts following a collision.
+ * If this is exceeded dTSEC aborts transmission due to excessive collisions.
+ * The standard specifies the attempt limit to be 15.
+ * halfdup_coll_window:
+ * The number of bytes of the frame during which collisions may occur.
+ * The default value of 55 corresponds to the frame byte at the end of the
+ * standard 512-bit slot time window. If collisions are detected after this
+ * byte, the late collision event is asserted and transmission of current
+ * frame is aborted.
+ * rx_drop_bcast:
+ * Discard broadcast frames. If set, all broadcast frames will be discarded
+ * by dTSEC.
+ * rx_short_frm:
+ * Accept short frames. If set, dTSEC will accept frames of length 14-63 bytes.
+ * rx_len_check:
+ * Length check for received frames. If set, the MAC checks the frame's length
+ * field on receive to ensure it matches the actual data field length.
+ * This only works for received frames with length field less than 1500.
+ * No check is performed for larger frames.
+ * tx_pad_crc:
+ * Pad and append CRC. If set, the MAC pads all transmitted short frames and
+ * appends a CRC to every frame regardless of padding requirement.
+ * tx_crc:
+ * Transmission CRC enable. If set, the MAC appends a CRC to all frames.
+ * If frames presented to the MAC have a valid length and contain a valid CRC,
+ * tx_crc should be reset. This field is ignored if tx_pad_crc is set.
+ * rx_ctrl_acc:
+ * Control frame accept. If set, this overrides 802.3 standard control frame
+ * behavior, and all Ethernet frames that have an ethertype of 0x8808 are
+ * treated as normal Ethernet frames and passed up to the packet interface on
+ * a DA match. Received pause control frames are passed to the packet
+ * interface only if Rx flow control is also disabled.
+ * See dtsec_accept_rx_pause_frames() function.
+ * tx_pause_time:
+ * Transmit pause time value. This pause value is used as part of the pause
+ * frame to be sent when a transmit pause frame is initiated.
+ * If set to 0 this disables transmission of pause frames.
+ * rx_preamble:
+ * Receive preamble enable. If set, the MAC recovers the received Ethernet
+ * 7-byte preamble and passes it to the packet interface at the start of each
+ * received frame.
+ * This field should be reset for internal MAC loop-back mode.
+ * tx_preamble:
+ * User defined preamble enable for transmitted frames.
+ * If set, a user-defined preamble must be passed to the MAC and it is
+ * transmitted instead of the standard preamble.
+ * preamble_len:
+ * Length, in bytes, of the preamble field preceding each Ethernet
+ * start-of-frame delimiter byte. The default value of 0x7 should be used in
+ * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
+ * rx_prepend:
+ * Packet alignment padding length. The specified number of bytes (1-31)
+ * of zero padding are inserted before the start of each received frame.
+ * For Ethernet, where optional preamble extraction is enabled, the padding
+ * appears before the preamble, otherwise the padding precedes the
+ * layer 2 header.
+ *
+ * This structure contains basic dTSEC configuration and must be passed to
+ * init() function. A default set of configuration values can be
+ * obtained by calling set_dflts().
+ */
+struct dtsec_cfg {
+ bool halfdup_on;
+ bool halfdup_alt_backoff_en;
+ bool halfdup_excess_defer;
+ bool halfdup_no_backoff;
+ bool halfdup_bp_no_backoff;
+ u32 halfdup_alt_backoff_val;
+ u16 halfdup_retransmit;
+ u16 halfdup_coll_window;
+ bool rx_drop_bcast;
+ bool rx_short_frm;
+ bool rx_len_check;
+ bool tx_pad_crc;
+ bool tx_crc;
+ bool rx_ctrl_acc;
+ u16 tx_pause_time;
+ u16 tbipa;
+ bool ptp_tsu_en;
+ bool ptp_exception_en;
+ bool rx_preamble;
+ bool tx_preamble;
+ u32 preamble_len;
+ u32 rx_prepend;
+ bool loopback;
+ bool rx_time_stamp_en;
+ bool tx_time_stamp_en;
+ bool rx_flow;
+ bool tx_flow;
+ bool rx_group_hash_exd;
+ bool rx_promisc;
+ u8 tbi_phy_addr;
+ u16 tx_pause_time_extd;
+ u16 maximum_frame;
+ u32 non_back_to_back_ipg1;
+ u32 non_back_to_back_ipg2;
+ u32 min_ifg_enforcement;
+ u32 back_to_back_ipg;
+};
+
+struct fman_mac {
+ /* pointer to dTSEC memory mapped registers */
+ struct dtsec_regs __iomem *regs;
+ /* pointer to dTSEC MII memory mapped registers */
+ struct dtsec_mii_regs __iomem *mii_regs;
+ /* MII management clock */
+ u16 mii_mgmt_clk;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Number of individual addresses in registers for this station */
+ u8 num_of_ind_addr_in_regs;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u8 tbi_phy_addr;
+ u32 exceptions;
+ bool ptp_tsu_enabled;
+ bool en_tsu_err_exeption;
+ struct dtsec_cfg *dtsec_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+};
+
+static u32 calc_mii_mgmt_clk(struct fman_mac *dtsec)
+{
+ u16 fm_clk_freq, dtsec_freq;
+ u32 mgmt_clk;
+
+ fm_clk_freq = fman_get_clock_freq(dtsec->fm);
+ if (fm_clk_freq == 0) {
+ pr_err("Can't get clock for MAC!\n");
+ return 0;
+ }
+
+ dtsec_freq = (u16)(fm_clk_freq >> 1);
+
+ if (dtsec_freq < 80)
+ mgmt_clk = 1;
+ else if (dtsec_freq < 120)
+ mgmt_clk = 2;
+ else if (dtsec_freq < 160)
+ mgmt_clk = 3;
+ else if (dtsec_freq < 200)
+ mgmt_clk = 4;
+ else if (dtsec_freq < 280)
+ mgmt_clk = 5;
+ else if (dtsec_freq < 400)
+ mgmt_clk = 6;
+ else
+ mgmt_clk = 7;
+
+ return mgmt_clk;
+}
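+
+/*
+ * Worked example (illustrative only): assuming fman_get_clock_freq()
+ * reports the FM clock in MHz, a 600 MHz FM clock gives dtsec_freq = 300,
+ * which falls in the "< 400" range and selects mgmt_clk = 6.
+ */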
+
+static int mii_write_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 data)
+{
+ struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
+ u32 tmp;
+ int count;
+
+ /* Setup the MII Mgmt clock speed */
+ iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
+
+ /* Stop the MII management read cycle */
+ iowrite32be(0, &regs->miimcom);
+ /* Dummy read to make sure MIIMCOM is written */
+ tmp = ioread32be(&regs->miimcom);
+
+ /* Setting up MII Management Address Register */
+ tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
+ iowrite32be(tmp, &regs->miimadd);
+
+ /* Setting up MII Management Control Register with data */
+ iowrite32be((u32)data, &regs->miimcon);
+ /* Dummy read to make sure MIIMCON is written */
+ tmp = ioread32be(&regs->miimcon);
+
+ /* Wait until MII management write is complete */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int mii_read_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 *data)
+{
+ struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
+ u32 tmp;
+ int count;
+
+ /* Setup the MII Mgmt clock speed */
+ iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
+
+ /* Setting up the MII Management Address Register */
+ tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
+ iowrite32be(tmp, &regs->miimadd);
+
+ /* Perform an MII management read cycle */
+ iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
+ /* Dummy read to make sure MIIMCOM is written */
+ tmp = ioread32be(&regs->miimcom);
+
+ /* Wait until MII management write is complete */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
+
+ if (count == 0)
+ return -EBUSY;
+
+ /* Read MII management status */
+ *data = (u16)ioread32be(&regs->miimstat);
+
+ iowrite32be(0, &regs->miimcom);
+ /* Dummy read to make sure MIIMCOM is written */
+ tmp = ioread32be(&regs->miimcom);
+
+ if (*data == 0xffff) {
+		pr_warn("Read wrong data (0xffff): phy_addr 0x%x, reg 0x%x\n",
+			addr, reg);
+ return -ENXIO;
+ }
+
+ return 0;
+}
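+
+/*
+ * Illustrative sketch only (not part of the imported sources): reading the
+ * PHY identifier register (MII register 2) through the helper above; the
+ * PHY address 0x1 is hypothetical.
+ *
+ *	u16 phy_id1;
+ *
+ *	if (mii_read_reg(dtsec, 0x1, 2, &phy_id1) == 0)
+ *		pr_info("PHY ID1: 0x%04x\n", phy_id1);
+ */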
+
+static void set_dflts(struct dtsec_cfg *cfg)
+{
+ cfg->halfdup_on = false;
+ cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
+ cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
+ cfg->halfdup_excess_defer = true;
+ cfg->halfdup_no_backoff = false;
+ cfg->halfdup_bp_no_backoff = false;
+ cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
+ cfg->halfdup_alt_backoff_en = false;
+ cfg->rx_drop_bcast = false;
+ cfg->rx_short_frm = true;
+ cfg->rx_len_check = false;
+ cfg->tx_pad_crc = true;
+ cfg->tx_crc = false;
+ cfg->rx_ctrl_acc = false;
+ cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
+ /* PHY address 0 is reserved (DPAA RM) */
+ cfg->tbipa = DEFAULT_TBIPA;
+ cfg->rx_prepend = DEFAULT_RX_PREPEND;
+ cfg->ptp_tsu_en = true;
+ cfg->ptp_exception_en = true;
+ cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
+ cfg->rx_preamble = false;
+ cfg->tx_preamble = false;
+ cfg->loopback = false;
+ cfg->rx_time_stamp_en = false;
+ cfg->tx_time_stamp_en = false;
+ cfg->rx_flow = true;
+ cfg->tx_flow = true;
+ cfg->rx_group_hash_exd = false;
+ cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
+ cfg->rx_promisc = false;
+ cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
+ cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
+ cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
+ cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
+ cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
+ cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
+}
+
+static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
+ phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+ u32 exception_mask)
+{
+ bool is_rgmii, is_sgmii, is_qsgmii;
+ int i;
+ u32 tmp;
+
+ /* Soft reset */
+ iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
+ iowrite32be(0, &regs->maccfg1);
+
+ /* dtsec_id2 */
+ tmp = ioread32be(&regs->tsec_id2);
+
+ /* check RGMII support */
+ if (iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RMII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ if (iface == PHY_INTERFACE_MODE_SGMII ||
+ iface == PHY_INTERFACE_MODE_MII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+ is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
+ is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
+
+ tmp = 0;
+ if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
+ tmp |= DTSEC_ECNTRL_GMIIM;
+ if (is_sgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
+ if (is_qsgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
+ DTSEC_ECNTRL_QSGMIIM);
+ if (is_rgmii)
+ tmp |= DTSEC_ECNTRL_RPM;
+ if (iface_speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+
+ iowrite32be(tmp, &regs->ecntrl);
+
+ tmp = 0;
+ if (cfg->halfdup_on)
+ tmp |= DTSEC_TCTRL_THDF;
+ if (cfg->tx_time_stamp_en)
+ tmp |= DTSEC_TCTRL_TTSE;
+
+ iowrite32be(tmp, &regs->tctrl);
+
+ tmp = 0;
+
+ if (cfg->tx_pause_time)
+ tmp |= cfg->tx_pause_time;
+ if (cfg->tx_pause_time_extd)
+ tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
+ iowrite32be(tmp, &regs->ptv);
+
+ tmp = 0;
+ tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
+ if (cfg->rx_ctrl_acc)
+ tmp |= RCTRL_CFA;
+ if (cfg->rx_group_hash_exd)
+ tmp |= RCTRL_GHTX;
+ if (cfg->rx_time_stamp_en)
+ tmp |= RCTRL_RTSE;
+ if (cfg->rx_drop_bcast)
+ tmp |= RCTRL_BC_REJ;
+ if (cfg->rx_short_frm)
+ tmp |= RCTRL_RSF;
+ if (cfg->rx_promisc)
+ tmp |= RCTRL_PROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Assign a Phy Address to the TBI (TBIPA).
+ * Done also in cases where TBI is not selected to avoid conflict with
+ * the external PHY's Physical address
+ */
+ iowrite32be(cfg->tbipa, &regs->tbipa);
+
+ iowrite32be(0, &regs->tmr_ctrl);
+
+ if (cfg->ptp_tsu_en) {
+ tmp = 0;
+ tmp |= TMR_PEVENT_TSRE;
+ iowrite32be(tmp, &regs->tmr_pevent);
+
+ if (cfg->ptp_exception_en) {
+ tmp = 0;
+ tmp |= TMR_PEMASK_TSREEN;
+ iowrite32be(tmp, &regs->tmr_pemask);
+ }
+ }
+
+ tmp = 0;
+ if (cfg->loopback)
+ tmp |= MACCFG1_LOOPBACK;
+ if (cfg->rx_flow)
+ tmp |= MACCFG1_RX_FLOW;
+ if (cfg->tx_flow)
+ tmp |= MACCFG1_TX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ tmp = 0;
+
+ if (iface_speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (iface_speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+
+ tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
+ MACCFG2_PREAMBLE_LENGTH_MASK;
+ if (cfg->rx_preamble)
+ tmp |= MACCFG2_PRE_AM_RX_EN;
+ if (cfg->tx_preamble)
+ tmp |= MACCFG2_PRE_AM_TX_EN;
+ if (cfg->rx_len_check)
+ tmp |= MACCFG2_LENGTH_CHECK;
+ if (cfg->tx_pad_crc)
+ tmp |= MACCFG2_PAD_CRC_EN;
+ if (cfg->tx_crc)
+ tmp |= MACCFG2_CRC_EN;
+ if (!cfg->halfdup_on)
+ tmp |= MACCFG2_FULL_DUPLEX;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = (((cfg->non_back_to_back_ipg1 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_1)
+ | ((cfg->non_back_to_back_ipg2 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_2)
+ | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
+ & IPGIFG_MIN_IFG_ENFORCEMENT)
+ | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
+ iowrite32be(tmp, &regs->ipgifg);
+
+ tmp = 0;
+
+ if (cfg->halfdup_alt_backoff_en) {
+ tmp = HAFDUP_ALT_BEB;
+ tmp |= (cfg->halfdup_alt_backoff_val <<
+ HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT) &
+ HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK;
+ }
+ if (cfg->halfdup_bp_no_backoff)
+ tmp |= HAFDUP_BP_NO_BACKOFF;
+ if (cfg->halfdup_no_backoff)
+ tmp |= HAFDUP_NO_BACKOFF;
+ if (cfg->halfdup_excess_defer)
+ tmp |= HAFDUP_EXCESS_DEFER;
+ tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
+ & HAFDUP_RETRANSMISSION_MAX);
+ tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
+
+ iowrite32be(tmp, &regs->hafdup);
+
+ /* Initialize Maximum frame length */
+ iowrite32be(cfg->maximum_frame, &regs->maxfrm);
+
+ iowrite32be(0xffffffff, &regs->cam1);
+ iowrite32be(0xffffffff, &regs->cam2);
+
+ iowrite32be(exception_mask, &regs->imask);
+
+ iowrite32be(0xffffffff, &regs->ievent);
+
+ tmp = (u32)((macaddr[5] << 24) |
+ (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+
+ /* HASH */
+ for (i = 0; i < NUM_OF_HASH_REGS; i++) {
+ /* Initialize IADDRx */
+ iowrite32be(0, &regs->igaddr[i]);
+ /* Initialize GADDRx */
+ iowrite32be(0, &regs->gaddr[i]);
+ }
+
+ return 0;
+}
+
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
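+
+/*
+ * Worked example (illustrative only): for the MAC address 00:04:9f:01:02:03
+ * the bytes are programmed in reverse order, i.e. MACSTNADDR1 = 0x0302019f
+ * and MACSTNADDR2 = 0x04000000.
+ */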
+
+static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
+ bool enable)
+{
+ int reg_idx = (bucket >> 5) & 0xf;
+ int bit_idx = bucket & 0x1f;
+ u32 bit_mask = 0x80000000 >> bit_idx;
+ u32 __iomem *reg;
+
+ if (reg_idx > 7)
+ reg = &regs->gaddr[reg_idx - 8];
+ else
+ reg = &regs->igaddr[reg_idx];
+
+ if (enable)
+ iowrite32be(ioread32be(reg) | bit_mask, reg);
+ else
+ iowrite32be(ioread32be(reg) & (~bit_mask), reg);
+}
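+
+/*
+ * Worked example (illustrative only): bucket 291 (0x123) yields
+ * reg_idx = 9 and bit_idx = 3, so gaddr[1] is updated with the
+ * mask 0x10000000.
+ */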
+
+static int check_init_parameters(struct fman_mac *dtsec)
+{
+ if (dtsec->max_speed >= SPEED_10000) {
+ pr_err("1G MAC driver supports 1G or lower speeds\n");
+ return -EINVAL;
+ }
+ if (dtsec->addr == 0) {
+		pr_err("Ethernet MAC must have a valid MAC address\n");
+ return -EINVAL;
+ }
+ if (dtsec->max_speed >= SPEED_1000 &&
+ dtsec->dtsec_drv_param->halfdup_on) {
+ pr_err("Ethernet MAC 1G can't work in half duplex\n");
+ return -EINVAL;
+ }
+
+ /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 Errata workaround */
+ if (dtsec->dtsec_drv_param->rx_preamble) {
+		pr_err("rx_preamble is not supported (errata DTSEC_A001)\n");
+ return -EINVAL;
+ }
+
+ if (((dtsec->dtsec_drv_param)->tx_preamble ||
+ (dtsec->dtsec_drv_param)->rx_preamble) &&
+ ((dtsec->dtsec_drv_param)->preamble_len != 0x7)) {
+ pr_err("Preamble length should be 0x7 bytes\n");
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_on &&
+ (dtsec->dtsec_drv_param->tx_time_stamp_en ||
+ dtsec->dtsec_drv_param->rx_time_stamp_en)) {
+ pr_err("1588 timeStamp disabled in half duplex mode\n");
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->rx_flow &&
+ (dtsec->dtsec_drv_param)->rx_ctrl_acc) {
+ pr_err("Receive control frame can not be accepted\n");
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->rx_prepend >
+ MAX_PACKET_ALIGNMENT) {
+		pr_err("packetAlignmentPadding can't be greater than %d\n",
+ MAX_PACKET_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->back_to_back_ipg >
+ MAX_INTER_PACKET_GAP)) {
+ pr_err("Inter packet gap can't be greater than %d\n",
+ MAX_INTER_PACKET_GAP);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_alt_backoff_val >
+ MAX_INTER_PALTERNATE_BEB) {
+ pr_err("alternateBackoffVal can't be greater than %d\n",
+ MAX_INTER_PALTERNATE_BEB);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
+ MAX_RETRANSMISSION) {
+ pr_err("maxRetransmission can't be greater than %d\n",
+ MAX_RETRANSMISSION);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
+ MAX_COLLISION_WINDOW) {
+ pr_err("collisionWindow can't be greater than %d\n",
+ MAX_COLLISION_WINDOW);
+ return -EINVAL;
+	}
+	/* If the auto-negotiation process is disabled, the PHY must be set up
+	 * via the MII management interface.
+	 */
+ if (dtsec->dtsec_drv_param->tbipa > MAX_PHYS) {
+		pr_err("PHY address out of range (should be 0-%d)\n", MAX_PHYS);
+ return -ERANGE;
+ }
+ if (!dtsec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!dtsec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+ if (dtsec->dtsec_drv_param->rx_len_check) {
+		pr_warn("rx_len_check is not supported (errata FMAN_SW002)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_1G_BAB_RX:
+ bit_mask = DTSEC_IMASK_BREN;
+ break;
+ case FM_MAC_EX_1G_RX_CTL:
+ bit_mask = DTSEC_IMASK_RXCEN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GTSCEN;
+ break;
+ case FM_MAC_EX_1G_BAB_TX:
+ bit_mask = DTSEC_IMASK_BTEN;
+ break;
+ case FM_MAC_EX_1G_TX_CTL:
+ bit_mask = DTSEC_IMASK_TXCEN;
+ break;
+ case FM_MAC_EX_1G_TX_ERR:
+ bit_mask = DTSEC_IMASK_TXEEN;
+ break;
+ case FM_MAC_EX_1G_LATE_COL:
+ bit_mask = DTSEC_IMASK_LCEN;
+ break;
+ case FM_MAC_EX_1G_COL_RET_LMT:
+ bit_mask = DTSEC_IMASK_CRLEN;
+ break;
+ case FM_MAC_EX_1G_TX_FIFO_UNDRN:
+ bit_mask = DTSEC_IMASK_XFUNEN;
+ break;
+ case FM_MAC_EX_1G_MAG_PCKT:
+ bit_mask = DTSEC_IMASK_MAGEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
+ bit_mask = DTSEC_IMASK_MMRDEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
+ bit_mask = DTSEC_IMASK_MMWREN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GRSCEN;
+ break;
+ case FM_MAC_EX_1G_DATA_ERR:
+ bit_mask = DTSEC_IMASK_TDPEEN;
+ break;
+ case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
+ bit_mask = DTSEC_IMASK_MSROEN;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
+{
+	/* The configuration structure is freed once init() completes, so a
+	 * NULL pointer indicates that initialization is done.
+	 */
+ if (!dtsec_drv_params)
+ return true;
+
+ return false;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+ u32 crc;
+
+ /* CRC calculation */
+ GET_MAC_ADDR_CRC(eth_addr, crc);
+
+ crc = bitrev32(crc);
+
+ return crc;
+}
+
+static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return 0;
+
+ return (u16)ioread32be(&regs->maxfrm);
+}
+
+static void dtsec_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & DTSEC_IMASK_BREN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
+ if (event & DTSEC_IMASK_RXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
+ if (event & DTSEC_IMASK_GTSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
+ if (event & DTSEC_IMASK_BTEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
+ if (event & DTSEC_IMASK_TXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
+ if (event & DTSEC_IMASK_TXEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
+ if (event & DTSEC_IMASK_LCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
+ if (event & DTSEC_IMASK_CRLEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
+ if (event & DTSEC_IMASK_XFUNEN) {
+ /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2) {
+ u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
+ /* a. Write 0x00E0_0C00 to DTSEC_ID
+ * This is a read only register
+ * b. Read and save the value of TPKT
+ */
+ tpkt1 = in_be32(&regs->tpkt);
+
+ /* c. Read the register at dTSEC address offset 0x32C */
+ tmp_reg1 = in_be32(&regs->reserved02c0[27]);
+
+ /* d. Compare bits [9:15] to bits [25:31] of the
+ * register at address offset 0x32C.
+ */
+ if ((tmp_reg1 & 0x007F0000) !=
+ (tmp_reg1 & 0x0000007F)) {
+ /* If they are not equal, save the value of
+ * this register and wait for at least
+ * MAXFRM*16 ns
+ */
+				usleep_range((u32)min(dtsec_get_max_frame_length(dtsec) *
+						      16 / 1000, 1),
+					     (u32)(min(dtsec_get_max_frame_length(dtsec) *
+						       16 / 1000, 1) + 1));
+ }
+
+ /* e. Read and save TPKT again and read the register
+ * at dTSEC address offset 0x32C again
+ */
+ tpkt2 = in_be32(&regs->tpkt);
+ tmp_reg2 = in_be32(&regs->reserved02c0[27]);
+
+ /* f. Compare the value of TPKT saved in step b to
+ * value read in step e. Also compare bits [9:15] of
+ * the register at offset 0x32C saved in step d to the
+ * value of bits [9:15] saved in step e. If the two
+ * registers values are unchanged, then the transmit
+ * portion of the dTSEC controller is locked up and
+ * the user should proceed to the recover sequence.
+ */
+ if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
+ (tmp_reg2 & 0x007F0000))) {
+ /* recover sequence */
+
+ /* a.Write a 1 to RCTRL[GRS] */
+
+ out_be32(&regs->rctrl,
+ in_be32(&regs->rctrl) | RCTRL_GRS);
+
+ /* b.Wait until IEVENT[GRSC]=1, or at least
+ * 100 us has elapsed.
+ */
+ for (i = 0; i < 100; i++) {
+ if (in_be32(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ break;
+ udelay(1);
+ }
+ if (in_be32(&regs->ievent) & DTSEC_IMASK_GRSCEN)
+ out_be32(&regs->ievent,
+ DTSEC_IMASK_GRSCEN);
+ else
+ pr_debug("Rx lockup due to Tx lockup\n");
+
+ /* c.Write a 1 to bit n of FM_RSTC
+ * (offset 0x0CC of FPM)
+ */
+ fman_reset_mac(dtsec->fm, dtsec->mac_id);
+
+ /* d.Wait 4 Tx clocks (32 ns) */
+ udelay(1);
+
+ /* e.Write a 0 to bit n of FM_RSTC. */
+ /* cleared by FMAN
+ */
+ }
+ }
+
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
+ }
+ if (event & DTSEC_IMASK_MAGEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
+ if (event & DTSEC_IMASK_GRSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
+ if (event & DTSEC_IMASK_TDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
+ if (event & DTSEC_IMASK_RDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
+
+ /* masked interrupts */
+ WARN_ON(event & DTSEC_IMASK_ABRTEN);
+ WARN_ON(event & DTSEC_IMASK_IFERREN);
+}
+
+static void dtsec_1588_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ if (dtsec->ptp_tsu_enabled) {
+ event = ioread32be(&regs->tmr_pevent);
+ event &= ioread32be(&regs->tmr_pemask);
+
+ if (event) {
+ iowrite32be(event, &regs->tmr_pevent);
+ WARN_ON(event & TMR_PEVENT_TSRE);
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_1588_TS_RX_ERR);
+ }
+ }
+}
+
+static void free_init_resources(struct fman_mac *dtsec)
+{
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(dtsec->multicast_addr_hash);
+ dtsec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(dtsec->unicast_addr_hash);
+ dtsec->unicast_addr_hash = NULL;
+}
+
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+{
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ dtsec->dtsec_drv_param->maximum_frame = new_val;
+
+ return 0;
+}
+
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+{
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+
+ return 0;
+}
+
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ if (mode & COMM_MODE_RX)
+ tmp |= MACCFG1_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= MACCFG1_TX_EN;
+
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful receive stop bit */
+ if (mode & COMM_MODE_TX)
+ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
+ &regs->tctrl);
+ if (mode & COMM_MODE_RX)
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
+ &regs->rctrl);
+
+ return 0;
+}
+
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+	/* Graceful stop - assert the graceful receive stop bit */
+ if (mode & COMM_MODE_RX) {
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
+
+ if (dtsec->fm_rev_info.major == 2)
+ usleep_range(100, 200);
+ else
+ udelay(10);
+ }
+
+ if (mode & COMM_MODE_TX) {
+ if (dtsec->fm_rev_info.major == 2)
+ pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
+ else
+ pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
+ }
+
+ tmp = ioread32be(&regs->maccfg1);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~MACCFG1_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~MACCFG1_TX_EN;
+
+ iowrite32be(tmp, &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time, u16 __maybe_unused thresh_time)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 ptv = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2)
+ if (0 < pause_time && pause_time <= 320) {
+ pr_warn("pause-time: %d illegal.Should be > 320\n",
+ pause_time);
+ return -EINVAL;
+ }
+
+ if (pause_time) {
+ ptv = ioread32be(&regs->ptv);
+ ptv &= PTV_PTE_MASK;
+ ptv |= pause_time & PTV_PT_MASK;
+ iowrite32be(ptv, &regs->ptv);
+
+ /* trigger the transmission of a flow-control pause frame */
+ iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+ } else
+ iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->maccfg1);
+ if (en)
+ tmp |= MACCFG1_RX_FLOW;
+ else
+ tmp &= ~MACCFG1_RX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ return 0;
+}
+
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+{
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Initialize MAC Station Address registers (1 & 2).
+ * The station address has to be swapped (big endian to little endian).
+ */
+ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
+ set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+
+ return 0;
+}
+
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct eth_hash_entry *hash_entry;
+ u64 addr;
+ s32 bucket;
+ u32 crc;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+ mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
+ crc = get_mac_addr_hash_code(addr);
+
+ /* Considering the 9 highest order bits in crc H[8:0]:
+ * if ghtx = 0, H[8:6] (the highest order 3 bits) identify the hash
+ * register and H[5:1] (the next 5 bits) identify the hash bit;
+ * if ghtx = 1, H[8:5] (the highest order 4 bits) identify the hash
+ * register and H[4:0] (the next 5 bits) identify the hash bit.
+ *
+ * In the bucket index output the low 5 bits identify the hash
+ * register bit, while the higher 4 bits identify the hash register.
+ */
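+ /* Worked example (illustrative values, not from the data sheet): for a
+ * multicast address whose CRC is 0xB1000000, ghtx = 0 gives bucket
+ * (0xB1000000 >> 24) & 0xff = 0xB1, plus 0x100 for the group-address
+ * registers, i.e. 0x1B1; ghtx = 1 gives (0xB1000000 >> 23) & 0x1ff =
+ * 0x162.
+ */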
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast the bit must be set in gaddr instead
+ * of igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ set_bucket(dtsec->regs, bucket, true);
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ if (addr & MAC_GROUP_ADDRESS)
+ /* Group Address */
+ list_add_tail(&hash_entry->node,
+ &dtsec->multicast_addr_hash->lsts[bucket]);
+ else
+ list_add_tail(&hash_entry->node,
+ &dtsec->unicast_addr_hash->lsts[bucket]);
+
+ return 0;
+}
+
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct list_head *pos;
+ struct eth_hash_entry *hash_entry = NULL;
+ u64 addr;
+ s32 bucket;
+ u32 crc;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+ mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
+ crc = get_mac_addr_hash_code(addr);
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast the bit must be set
+ * in gaddr instead of igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ if (addr & MAC_GROUP_ADDRESS) {
+ /* Group Address */
+ list_for_each(pos,
+ &dtsec->multicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ } else {
+ /* Individual Address */
+ list_for_each(pos,
+ &dtsec->unicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ }
+
+ /* address does not exist */
+ WARN_ON(!hash_entry);
+
+ return 0;
+}
+
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Set unicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_UPROM;
+ else
+ tmp &= ~RCTRL_UPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Set multicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_MPROM;
+ else
+ tmp &= ~RCTRL_MPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ return 0;
+}
+
+int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->maccfg2);
+
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
+
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
+ if (speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
+
+ return 0;
+}
+
+int dtsec_restart_autoneg(struct fman_mac *dtsec)
+{
+ u16 tmp_reg16;
+ int err;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ err = mii_read_reg(dtsec, dtsec->tbi_phy_addr, 0, &tmp_reg16);
+ if (err) {
+ pr_err("Autonegotiation restart failed\n");
+ return err;
+ }
+
+ tmp_reg16 &= ~(PHY_CR_SPEED0 | PHY_CR_SPEED1);
+ tmp_reg16 |=
+ (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
+
+ mii_write_reg(dtsec, dtsec->tbi_phy_addr, 0, tmp_reg16);
+
+ return 0;
+}
+
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ *mac_version = ioread32be(&regs->tsec_id);
+
+ return 0;
+}
+
+int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ dtsec->exceptions |= bit_mask;
+ else
+ dtsec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask,
+ &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
+ &regs->imask);
+ } else {
+ if (!dtsec->ptp_tsu_enabled) {
+ pr_err("Exception valid for 1588 only\n");
+ return -EINVAL;
+ }
+ switch (exception) {
+ case FM_MAC_EX_1G_1588_TS_RX_ERR:
+ if (enable) {
+ dtsec->en_tsu_err_exeption = true;
+ iowrite32be(ioread32be(&regs->tmr_pemask) |
+ TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ } else {
+ dtsec->en_tsu_err_exeption = false;
+ iowrite32be(ioread32be(&regs->tmr_pemask) &
+ ~TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ }
+ break;
+ default:
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int dtsec_init(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct dtsec_cfg *dtsec_drv_param;
+ int err;
+ u16 max_frm_ln;
+ enet_addr_t eth_addr;
+
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(dtsec);
+ if (err)
+ return err;
+
+ dtsec_drv_param = dtsec->dtsec_drv_param;
+
+ MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
+
+ err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
+ dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions);
+ if (err) {
+ free_init_resources(dtsec);
+ pr_err("DTSEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ u16 tmp_reg16;
+
+ /* Configure the TBI PHY Control Register */
+ tmp_reg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
+ mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
+ tmp_reg16);
+
+ tmp_reg16 = PHY_TBICON_CLK_SEL;
+ mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
+ tmp_reg16);
+
+ tmp_reg16 =
+ (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX |
+ PHY_CR_SPEED1);
+ mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+
+ if (dtsec->basex_if)
+ tmp_reg16 = PHY_TBIANA_1000X;
+ else
+ tmp_reg16 = PHY_TBIANA_SGMII;
+ mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 4, tmp_reg16);
+
+ tmp_reg16 =
+ (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX |
+ PHY_CR_SPEED1);
+
+ mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+ }
+
+ /* Max Frame Length */
+ max_frm_ln = (u16)ioread32be(&regs->maxfrm);
+ err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
+ if (err) {
+ pr_err("Setting max frame length failed\n");
+ free_init_resources(dtsec);
+ return -EINVAL;
+ }
+
+ dtsec->multicast_addr_hash =
+ alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
+ if (!dtsec->multicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("MC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
+ if (!dtsec->unicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("UC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ /* register err intr handler for dtsec to FPM (err) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
+ /* register 1588 intr handler for TMR to FPM (normal) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
+
+ kfree(dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+
+ return 0;
+}
+
+int dtsec_free(struct fman_mac *dtsec)
+{
+ free_init_resources(dtsec);
+
+ kfree(dtsec->dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+ kfree(dtsec);
+
+ return 0;
+}
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params)
+{
+ struct fman_mac *dtsec;
+ struct dtsec_cfg *dtsec_drv_param;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+
+ /* allocate memory for the dTSEC data structure. */
+ dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
+ if (!dtsec)
+ return NULL;
+
+ /* allocate memory for the dTSEC driver parameters data structure. */
+ dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
+ if (!dtsec_drv_param)
+ goto err_dtsec;
+
+ /* Plant parameter structure pointer */
+ dtsec->dtsec_drv_param = dtsec_drv_param;
+
+ set_dflts(dtsec_drv_param);
+
+ dtsec->regs = (struct dtsec_regs __iomem *)(base_addr);
+ dtsec->mii_regs = (struct dtsec_mii_regs __iomem *)
+ (base_addr + DTSEC_TO_MII_OFFSET);
+ dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->max_speed = params->max_speed;
+ dtsec->phy_if = params->phy_if;
+ dtsec->mac_id = params->mac_id;
+ dtsec->exceptions = DTSEC_DEFAULT_EXCEPTIONS;
+ dtsec->exception_cb = params->exception_cb;
+ dtsec->event_cb = params->event_cb;
+ dtsec->dev_id = params->dev_id;
+ dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
+ dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
+ dtsec->tbi_phy_addr = dtsec->dtsec_drv_param->tbi_phy_addr;
+
+ dtsec->fm = params->fm;
+ dtsec->basex_if = params->basex_if;
+ dtsec->mii_mgmt_clk = calc_mii_mgmt_clk(dtsec);
+ if (dtsec->mii_mgmt_clk == 0) {
+ pr_err("Can't calculate MII management clock\n");
+ goto err_dtsec;
+ }
+
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h
new file mode 100644
index 00000000..c4467c07
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DTSEC_H
+#define __DTSEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params);
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_adjust_link(struct fman_mac *dtsec,
+ u16 speed);
+int dtsec_restart_autoneg(struct fman_mac *dtsec);
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_init(struct fman_mac *dtsec);
+int dtsec_free(struct fman_mac *dtsec);
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable);
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
+
+#endif /* __DTSEC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
new file mode 100644
index 00000000..7a5e752e
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* FM MAC ... */
+#ifndef __FM_MAC_H
+#define __FM_MAC_H
+
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+struct fman_mac;
+
+/* Ethernet Address */
+typedef u8 enet_addr_t[ETH_ALEN];
+
+#define ENET_ADDR_TO_UINT64(_enet_addr) \
+ (u64)(((u64)(_enet_addr)[0] << 40) | \
+ ((u64)(_enet_addr)[1] << 32) | \
+ ((u64)(_enet_addr)[2] << 24) | \
+ ((u64)(_enet_addr)[3] << 16) | \
+ ((u64)(_enet_addr)[4] << 8) | \
+ ((u64)(_enet_addr)[5]))
+
+#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
+ do { \
+ int i; \
+ for (i = 0; i < ETH_ALEN; i++) \
+ (_enet_addr)[i] = \
+ (u8)((_addr64) >> ((5 - i) * 8)); \
+ } while (0)
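+
+/* Example (illustrative): for the address 00:04:9f:01:02:03,
+ * ENET_ADDR_TO_UINT64() yields 0x00049f010203, and
+ * MAKE_ENET_ADDR_FROM_UINT64() restores the six octets with the first
+ * octet back in element 0.
+ */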
+
+/* defaults */
+#define DEFAULT_RESET_ON_INIT false
+
+/* PFC defines */
+#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
+#define FSL_FM_PAUSE_TIME_DISABLE 0
+#define FSL_FM_PAUSE_THRESH_DEFAULT 0
+
+#define FM_MAC_NO_PFC 0xff
+
+/* HASH defines */
+#define ETH_HASH_ENTRY_OBJ(ptr) \
+ hlist_entry_safe(ptr, struct eth_hash_entry, node)
+
+/* Enumeration (bit flags) of communication modes (Transmit,
+ * receive or both).
+ */
+enum comm_mode {
+ COMM_MODE_NONE = 0, /* No transmit/receive communication */
+ COMM_MODE_RX = 1, /* Only receive communication */
+ COMM_MODE_TX = 2, /* Only transmit communication */
+ COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
+};
+
+/* FM MAC Exceptions */
+enum fman_mac_exceptions {
+ FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
+ /* 10GEC MDIO scan event interrupt */
+ , FM_MAC_EX_10G_MDIO_CMD_CMPL
+ /* 10GEC MDIO command completion interrupt */
+ , FM_MAC_EX_10G_REM_FAULT
+ /* 10GEC, mEMAC Remote fault interrupt */
+ , FM_MAC_EX_10G_LOC_FAULT
+ /* 10GEC, mEMAC Local fault interrupt */
+ , FM_MAC_EX_10G_TX_ECC_ER
+ /* 10GEC, mEMAC Transmit frame ECC error interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_UNFL
+ /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_OVFL
+ /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
+ , FM_MAC_EX_10G_TX_ER
+ /* 10GEC Transmit frame error interrupt */
+ , FM_MAC_EX_10G_RX_FIFO_OVFL
+ /* 10GEC, mEMAC Receive FIFO overflow interrupt */
+ , FM_MAC_EX_10G_RX_ECC_ER
+ /* 10GEC, mEMAC Receive frame ECC error interrupt */
+ , FM_MAC_EX_10G_RX_JAB_FRM
+ /* 10GEC Receive jabber frame interrupt */
+ , FM_MAC_EX_10G_RX_OVRSZ_FRM
+ /* 10GEC Receive oversized frame interrupt */
+ , FM_MAC_EX_10G_RX_RUNT_FRM
+ /* 10GEC Receive runt frame interrupt */
+ , FM_MAC_EX_10G_RX_FRAG_FRM
+ /* 10GEC Receive fragment frame interrupt */
+ , FM_MAC_EX_10G_RX_LEN_ER
+ /* 10GEC Receive payload length error interrupt */
+ , FM_MAC_EX_10G_RX_CRC_ER
+ /* 10GEC Receive CRC error interrupt */
+ , FM_MAC_EX_10G_RX_ALIGN_ER
+ /* 10GEC Receive alignment error interrupt */
+ , FM_MAC_EX_1G_BAB_RX
+ /* dTSEC Babbling receive error */
+ , FM_MAC_EX_1G_RX_CTL
+ /* dTSEC Receive control (pause frame) interrupt */
+ , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
+ /* dTSEC Graceful transmit stop complete */
+ , FM_MAC_EX_1G_BAB_TX
+ /* dTSEC Babbling transmit error */
+ , FM_MAC_EX_1G_TX_CTL
+ /* dTSEC Transmit control (pause frame) interrupt */
+ , FM_MAC_EX_1G_TX_ERR
+ /* dTSEC Transmit error */
+ , FM_MAC_EX_1G_LATE_COL
+ /* dTSEC Late collision */
+ , FM_MAC_EX_1G_COL_RET_LMT
+ /* dTSEC Collision retry limit */
+ , FM_MAC_EX_1G_TX_FIFO_UNDRN
+ /* dTSEC Transmit FIFO underrun */
+ , FM_MAC_EX_1G_MAG_PCKT
+ /* dTSEC Magic Packet detection */
+ , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
+ /* dTSEC MII management read completion */
+ , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
+ /* dTSEC MII management write completion */
+ , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
+ /* dTSEC Graceful receive stop complete */
+ , FM_MAC_EX_1G_DATA_ERR
+ /* dTSEC Internal data error on transmit */
+ , FM_MAC_1G_RX_DATA_ERR
+ /* dTSEC Internal data error on receive */
+ , FM_MAC_EX_1G_1588_TS_RX_ERR
+ /* dTSEC Time-Stamp Receive Error */
+ , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
+ /* dTSEC MIB counter overflow */
+ , FM_MAC_EX_TS_FIFO_ECC_ERR
+ /* mEMAC Time-stamp FIFO ECC error interrupt;
+ * not supported on T4240/B4860 rev1 chips
+ */
+ , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
+ /* mEMAC Magic Packet Indication Interrupt */
+};
+
+struct eth_hash_entry {
+ u64 addr; /* Ethernet Address */
+ struct list_head node;
+};
+
+typedef void (fman_mac_exception_cb)(void *dev_id,
+ enum fman_mac_exceptions exceptions);
+
+/* FMan MAC config input */
+struct fman_mac_params {
+ /* Base of memory mapped FM MAC registers */
+ void __iomem *base_addr;
+ /* MAC address of device; First octet is sent first */
+ enet_addr_t addr;
+ /* MAC ID; numbering of dTSEC and 1G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_1G_MACS;
+ * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_10G_MACS
+ */
+ u8 mac_id;
+ /* PHY interface */
+ phy_interface_t phy_if;
+ /* Note that the speed should indicate the maximum rate that
+ * this MAC should support rather than the actual speed;
+ */
+ u16 max_speed;
+ /* A handle to the FM object this port related to */
+ void *fm;
+ /* MDIO exceptions interrupt source - not valid for all
+ * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+ * mdio-irq, or for polling
+ */
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
+ fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
+ /* SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
+ * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
+ * to interface between MAC and phy/backplane, SGMII phy can still
+ * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
+ */
+ bool basex_if;
+};
+
+struct eth_hash_t {
+ u16 size;
+ struct list_head *lsts;
+};
+
+static inline struct eth_hash_entry
+*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
+{
+ struct eth_hash_entry *hash_entry = NULL;
+
+ if (!list_empty(addr_lst)) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
+ list_del_init(&hash_entry->node);
+ }
+ return hash_entry;
+}
+
+static inline void free_hash_table(struct eth_hash_t *hash)
+{
+ struct eth_hash_entry *hash_entry;
+ int i = 0;
+
+ if (hash) {
+ if (hash->lsts) {
+ for (i = 0; i < hash->size; i++) {
+ hash_entry =
+ dequeue_addr_from_hash_entry(&hash->lsts[i]);
+ while (hash_entry) {
+ kfree(hash_entry);
+ hash_entry =
+ dequeue_addr_from_hash_entry(&hash->
+ lsts[i]);
+ }
+ }
+
+ kfree(hash->lsts);
+ }
+
+ kfree(hash);
+ }
+}
+
+static inline struct eth_hash_t *alloc_hash_table(u16 size)
+{
+ u32 i;
+ struct eth_hash_t *hash;
+
+ /* Allocate address hash table */
+ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return NULL;
+
+ hash->size = size;
+
+ hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!hash->lsts) {
+ kfree(hash);
+ return NULL;
+ }
+
+ for (i = 0; i < hash->size; i++)
+ INIT_LIST_HEAD(&hash->lsts[i]);
+
+ return hash;
+}
+
+#endif /* __FM_MAC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
new file mode 100644
index 00000000..5730194a
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -0,0 +1,1382 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_memac.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/phy.h>
+
+/* MII Management Registers */
+#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
+#define MDIO_CFG_HOLD_MASK 0x0000001c
+#define MDIO_CFG_ENC45 0x00000040
+#define MDIO_CFG_BSY 0x00000001
+
+#define MDIO_CTL_PHY_ADDR_SHIFT 5
+
+#define MDIO_DATA_BSY 0x80000000
+
+/* Internal PHY access */
+#define PHY_MDIO_ADDR 0
+
+/* Internal PHY Registers - SGMII */
+#define PHY_SGMII_CR_RESET_AN 0x0200
+#define PHY_SGMII_CR_AN_ENABLE 0x1000
+#define PHY_SGMII_CR_DEF_VAL 0x1140
+#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
+#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
+#define PHY_SGMII_IF_MODE_DUPLEX_FULL 0x0000
+#define PHY_SGMII_IF_MODE_DUPLEX_HALF 0x0010
+#define PHY_SGMII_IF_MODE_SPEED_GB 0x0008
+#define PHY_SGMII_IF_MODE_SPEED_100M 0x0004
+#define PHY_SGMII_IF_MODE_SPEED_10M 0x0000
+#define PHY_SGMII_IF_MODE_AN 0x0002
+#define PHY_SGMII_IF_MODE_SGMII 0x0001
+#define PHY_SGMII_IF_MODE_1000X 0x0000
+
+/* Offset from the MEM map to the MDIO mem map */
+#define MEMAC_TO_MII_OFFSET 0x030
+/* Num of additional exact match MAC adr regs */
+#define MEMAC_NUM_OF_PADDRS 7
+
+/* Control and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
+#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
+#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
+#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
+#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
+#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
+#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
+#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
+#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
+#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
+#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
+#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
+#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
+#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
+#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
+#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
+#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
+#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
+#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
+
+/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
+#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
+#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
+#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
+#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
+#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
+
+#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
+do { \
+ _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
+ ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
+} while (0)
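+
+/* A reading of the macro above (derived, not a documented contract): the
+ * TX_EMPTY field is cleared first; if the remaining TX_AVAIL field equals
+ * the 10G threshold (0x19), the 10G TX_EMPTY default is OR'ed in,
+ * otherwise the 1G default is used.
+ */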
+
+/* Interface Mode Register (IF_MODE) */
+
+#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
+#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
+#define IF_MODE_RGMII 0x00000004
+#define IF_MODE_RGMII_AUTO 0x00008000
+#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
+#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
+#define IF_MODE_HD 0x00000040 /* Half duplex operation */
+
+/* Hash table Control Register (HASHTABLE_CTRL) */
+#define HASH_CTRL_MCAST_EN 0x00000100
+/* 26-31 Hash table address code */
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+/* MAC mcast indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_TABLE_SIZE 64 /* Hash tbl size */
+
+/* Interrupt Mask Register (IMASK) */
+#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
+#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
+#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
+#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
+
+#define MEMAC_ALL_ERRS_IMASK \
+ ((u32)(MEMAC_IMASK_TSECC_ER | \
+ MEMAC_IMASK_TECC_ER | \
+ MEMAC_IMASK_RECC_ER | \
+ MEMAC_IMASK_MGI))
+
+#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
+#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
+#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
+#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
+#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/
+#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
+#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
+#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
+#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
+#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
+#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
+#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
+#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
+#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
+#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
+#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
+#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
+
+#define DEFAULT_PAUSE_QUANTA 0xf000
+#define DEFAULT_FRAME_LENGTH 0x600
+#define DEFAULT_TX_IPG_LENGTH 12
+
+#define MEMAC_DEFAULT_EXCEPTIONS \
+ ((u32)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | \
+ MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
+
+#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
+#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
+#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
+#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000
+
+struct mac_addr {
+ /* Lower 32 bits of 48-bit MAC address */
+ u32 mac_addr_l;
+ /* Upper 16 bits of 48-bit MAC address */
+ u32 mac_addr_u;
+};
+
+/* MII Configuration Control Memory Map Registers */
+struct memac_mii_regs {
+ u32 mdio_cfg; /* 0x030 */
+ u32 mdio_ctrl; /* 0x034 */
+ u32 mdio_data; /* 0x038 */
+ u32 mdio_addr; /* 0x03c */
+};
+
+/* memory map */
+struct memac_regs {
+ u32 res0000[2]; /* General Control and Status */
+ u32 command_config; /* 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
+ u32 maxfrm; /* 0x014 Max frame length */
+ u32 res0018[1];
+ u32 rx_fifo_sections; /* Receive FIFO configuration reg */
+ u32 tx_fifo_sections; /* Transmit FIFO configuration reg */
+ u32 res0024[2];
+ u32 hashtable_ctrl; /* 0x02C Hash table control */
+ u32 res0030[4];
+ u32 ievent; /* 0x040 Interrupt event */
+ u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
+ u32 res0048;
+ u32 imask; /* 0x04C Interrupt mask */
+ u32 res0050;
+ u32 pause_quanta[4]; /* 0x054 Pause quanta */
+ u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */
+ u32 rx_pause_status; /* 0x074 Receive pause status */
+ u32 res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
+ u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
+ u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
+ u32 res00c0[8];
+ u32 statn_config; /* 0x0E0 Statistics configuration */
+ u32 res00e4[7];
+ /* Rx Statistics Counter */
+ u32 reoct_l;
+ u32 reoct_u;
+ u32 roct_l;
+ u32 roct_u;
+ u32 raln_l;
+ u32 raln_u;
+ u32 rxpf_l;
+ u32 rxpf_u;
+ u32 rfrm_l;
+ u32 rfrm_u;
+ u32 rfcs_l;
+ u32 rfcs_u;
+ u32 rvlan_l;
+ u32 rvlan_u;
+ u32 rerr_l;
+ u32 rerr_u;
+ u32 ruca_l;
+ u32 ruca_u;
+ u32 rmca_l;
+ u32 rmca_u;
+ u32 rbca_l;
+ u32 rbca_u;
+ u32 rdrp_l;
+ u32 rdrp_u;
+ u32 rpkt_l;
+ u32 rpkt_u;
+ u32 rund_l;
+ u32 rund_u;
+ u32 r64_l;
+ u32 r64_u;
+ u32 r127_l;
+ u32 r127_u;
+ u32 r255_l;
+ u32 r255_u;
+ u32 r511_l;
+ u32 r511_u;
+ u32 r1023_l;
+ u32 r1023_u;
+ u32 r1518_l;
+ u32 r1518_u;
+ u32 r1519x_l;
+ u32 r1519x_u;
+ u32 rovr_l;
+ u32 rovr_u;
+ u32 rjbr_l;
+ u32 rjbr_u;
+ u32 rfrg_l;
+ u32 rfrg_u;
+ u32 rcnp_l;
+ u32 rcnp_u;
+ u32 rdrntp_l;
+ u32 rdrntp_u;
+ u32 res01d0[12];
+ /* Tx Statistics Counter */
+ u32 teoct_l;
+ u32 teoct_u;
+ u32 toct_l;
+ u32 toct_u;
+ u32 res0210[2];
+ u32 txpf_l;
+ u32 txpf_u;
+ u32 tfrm_l;
+ u32 tfrm_u;
+ u32 tfcs_l;
+ u32 tfcs_u;
+ u32 tvlan_l;
+ u32 tvlan_u;
+ u32 terr_l;
+ u32 terr_u;
+ u32 tuca_l;
+ u32 tuca_u;
+ u32 tmca_l;
+ u32 tmca_u;
+ u32 tbca_l;
+ u32 tbca_u;
+ u32 res0258[2];
+ u32 tpkt_l;
+ u32 tpkt_u;
+ u32 tund_l;
+ u32 tund_u;
+ u32 t64_l;
+ u32 t64_u;
+ u32 t127_l;
+ u32 t127_u;
+ u32 t255_l;
+ u32 t255_u;
+ u32 t511_l;
+ u32 t511_u;
+ u32 t1023_l;
+ u32 t1023_u;
+ u32 t1518_l;
+ u32 t1518_u;
+ u32 t1519x_l;
+ u32 t1519x_u;
+ u32 res02a8[6];
+ u32 tcnp_l;
+ u32 tcnp_u;
+ u32 res02c8[14];
+ /* Line Interface Control */
+ u32 if_mode; /* 0x300 Interface Mode Control */
+ u32 if_status; /* 0x304 Interface Status */
+ u32 res0308[14];
+ /* HiGig/2 */
+ u32 hg_config; /* 0x340 Control and cfg */
+ u32 res0344[3];
+ u32 hg_pause_quanta; /* 0x350 Pause quanta */
+ u32 res0354[3];
+ u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */
+ u32 res0364[3];
+ u32 hgrx_pause_status; /* 0x370 Receive pause status */
+ u32 hg_fifos_status; /* 0x374 fifos status */
+ u32 rhm; /* 0x378 rx messages counter */
+ u32 thm; /* 0x37C tx messages counter */
+};
+
+struct memac_cfg {
+ bool reset_on_init;
+ bool rx_error_discard;
+ bool pause_ignore;
+ bool pause_forward_enable;
+ bool no_length_check_enable;
+ bool cmd_frame_enable;
+ bool send_idle_enable;
+ bool wan_mode_enable;
+ bool promiscuous_mode_enable;
+ bool tx_addr_ins_enable;
+ bool loopback_enable;
+ bool lgth_check_nostdr;
+ bool time_stamp_enable;
+ bool pad_enable;
+ bool phy_tx_ena_on;
+ bool rx_sfd_any;
+ bool rx_pbl_fwd;
+ bool tx_pbl_fwd;
+ bool debug_mode;
+ bool wake_on_lan;
+ struct fixed_phy_status *fixed_link;
+ u16 max_frame_length;
+ u16 pause_quanta;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to MAC memory mapped registers */
+ struct memac_regs __iomem *regs;
+ /* Pointer to MII memory mapped registers */
+ struct memac_mii_regs __iomem *mii_regs;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* Pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ bool debug_mode;
+ u8 mac_id;
+ u32 exceptions;
+ struct memac_cfg *memac_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+};
+
+static int write_phy_reg_10g(struct memac_mii_regs __iomem *mii_regs,
+ u8 phy_addr, u8 reg, u16 data)
+{
+ u32 tmp_reg;
+ int count;
+
+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
+ /* Leave only MDIO_CLK_DIV bits set on */
+ tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
+ /* Set maximum MDIO_HOLD value to allow phy to see
+ * change of data signal
+ */
+ tmp_reg |= MDIO_CFG_HOLD_MASK;
+ /* Add 10G interface mode */
+ tmp_reg |= MDIO_CFG_ENC45;
+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
+
+ /* Wait for command completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ /* Specify phy and register to be accessed */
+ iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
+ iowrite32be(reg, &mii_regs->mdio_addr);
+
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ /* Write data */
+ iowrite32be(data, &mii_regs->mdio_data);
+
+ /* Wait for write transaction end */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
+ --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int write_phy_reg_1g(struct memac_mii_regs __iomem *mii_regs,
+ u8 phy_addr, u8 reg, u16 data)
+{
+ u32 tmp_reg;
+ int count;
+
+ /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
+ tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
+
+ /* Wait for command completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ /* Write transaction */
+ tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
+ tmp_reg |= reg;
+ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
+
+ /* Wait for command completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ iowrite32be(data, &mii_regs->mdio_data);
+
+ /* Wait for write transaction to end */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
+ --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int mii_write_phy_reg(struct fman_mac *memac, u8 phy_addr, u8 reg,
+ u16 data)
+{
+ int err = 0;
+ /* Figure out the interface type - 10G vs 1G.
+ * In the 10G interface both phy_addr and devAddr are present.
+ */
+ if (memac->max_speed == SPEED_10000)
+ err = write_phy_reg_10g(memac->mii_regs, phy_addr, reg, data);
+ else
+ err = write_phy_reg_1g(memac->mii_regs, phy_addr, reg, data);
+
+ return err;
+}
+
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+ u8 paddr_num)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+
+ if (paddr_num == 0) {
+ iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
+ } else {
+ iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
+ }
+}
+
+static int reset(struct memac_regs __iomem *regs)
+{
+ u32 tmp;
+ int count;
+
+ tmp = ioread32be(&regs->command_config);
+
+ tmp |= CMD_CFG_SW_RESET;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
+ --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void set_exception(struct memac_regs __iomem *regs, u32 val,
+ bool enable)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&regs->imask);
+ if (enable)
+ tmp |= val;
+ else
+ tmp &= ~val;
+
+ iowrite32be(tmp, &regs->imask);
+}
+
+static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
+ phy_interface_t phy_if, u16 speed, bool slow_10g_if,
+ u32 exceptions)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = 0;
+ if (cfg->wan_mode_enable)
+ tmp |= CMD_CFG_WAN_MODE;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_forward_enable)
+ tmp |= CMD_CFG_PAUSE_FWD;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ if (cfg->tx_addr_ins_enable)
+ tmp |= CMD_CFG_TX_ADDR_INS;
+ if (cfg->loopback_enable)
+ tmp |= CMD_CFG_LOOPBACK_EN;
+ if (cfg->cmd_frame_enable)
+ tmp |= CMD_CFG_CNT_FRM_EN;
+ if (cfg->send_idle_enable)
+ tmp |= CMD_CFG_SEND_IDLE;
+ if (cfg->no_length_check_enable)
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ if (cfg->rx_sfd_any)
+ tmp |= CMD_CFG_SFD_ANY;
+ if (cfg->pad_enable)
+ tmp |= CMD_CFG_TX_PAD_EN;
+ if (cfg->wake_on_lan)
+ tmp |= CMD_CFG_MG;
+
+ tmp |= CMD_CFG_CRC_FWD;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+
+ /* Pause Time */
+ iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
+ iowrite32be((u32)0, &regs->pause_thresh[0]);
+
+ /* IF_MODE */
+ tmp = 0;
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_XGMII:
+ tmp |= IF_MODE_XGMII;
+ break;
+ default:
+ tmp |= IF_MODE_GMII;
+ if (phy_if == PHY_INTERFACE_MODE_RGMII && !cfg->loopback_enable)
+ tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
+ }
+ iowrite32be(tmp, &regs->if_mode);
+
+ /* TX_FIFO_SECTIONS */
+ tmp = 0;
+ if (phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (slow_10g_if) {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ }
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ set_exception(regs, exceptions, true);
+
+ return 0;
+}
+
+static void set_dflts(struct memac_cfg *cfg)
+{
+ cfg->reset_on_init = false;
+ cfg->wan_mode_enable = false;
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_forward_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_addr_ins_enable = false;
+ cfg->loopback_enable = false;
+ cfg->cmd_frame_enable = false;
+ cfg->rx_error_discard = false;
+ cfg->send_idle_enable = false;
+ cfg->no_length_check_enable = true;
+ cfg->lgth_check_nostdr = false;
+ cfg->time_stamp_enable = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
+ cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
+ cfg->pad_enable = true;
+ cfg->phy_tx_ena_on = false;
+ cfg->rx_sfd_any = false;
+ cfg->rx_pbl_fwd = false;
+ cfg->tx_pbl_fwd = false;
+ cfg->debug_mode = false;
+ cfg->wake_on_lan = false;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+ u64 mask1, mask2;
+ u32 xor_val = 0;
+ u8 i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (u64)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (u64)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xor_val |= (mask1 << (5 - i));
+ }
+
+ return xor_val;
+}
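+
+/* A reading of the loop above (derived, not from the data sheet): each of
+ * the six hash bits is the XOR (parity) of eight consecutive address bits;
+ * bit 5 covers the last octet and bit 0 the first, since
+ * ENET_ADDR_TO_UINT64() places the first octet in the most significant
+ * byte and the loop consumes the least significant bits first.
+ */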
+
+static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
+ struct fixed_phy_status *fixed_link)
+{
+ u16 tmp_reg16, speed;
+
+ /* In case the higher MACs are used (i.e. the MACs that should
+ * support 10G), speed=10000 is provided for SGMII ports.
+ * Temporarily switch the enet mode to 1G so that the MII
+ * functions work correctly.
+ */
+ speed = memac->max_speed;
+ memac->max_speed = SPEED_1000;
+
+ /* SGMII mode */
+ tmp_reg16 = PHY_SGMII_IF_MODE_SGMII;
+ if (!fixed_link)
+ /* AN enable */
+ tmp_reg16 |= PHY_SGMII_IF_MODE_AN;
+ else {
+#ifndef __rtems__
+ switch (fixed_link->speed) {
+ case 10:
+ tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_10M;
+ break;
+ case 100:
+ tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_100M;
+ break;
+ case 1000: /* fallthrough */
+ default:
+ tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_GB;
+ break;
+ }
+ if (fixed_link->duplex)
+ tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_FULL;
+ else
+ tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_HALF;
+#endif /* __rtems__ */
+ }
+ mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+
+ /* Device ability according to SGMII specification */
+ tmp_reg16 = PHY_SGMII_DEV_ABILITY_SGMII;
+ mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * According to Cisco SGMII specification the timer should be 1.6 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
+ * Since the 1G SGMII link_timer value would be too short for 2.5G
+ * SGMII, we always program the 2.5G SGMII value here.
+ */
+ mii_write_phy_reg(memac, phy_addr, 0x13, 0x0007);
+ mii_write_phy_reg(memac, phy_addr, 0x12, 0xa120);
+
+ if (!fixed_link)
+ /* Restart AN */
+ tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+ else
+ /* AN disabled */
+ tmp_reg16 = PHY_SGMII_CR_DEF_VAL & ~PHY_SGMII_CR_AN_ENABLE;
+ mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
+
+ /* Restore original speed */
+ memac->max_speed = speed;
+}
+
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
+{
+ u16 tmp_reg16, speed;
+
+ /* In case the higher MACs are used (i.e. the MACs that
+ * should support 10G), speed=10000 is provided for SGMII ports.
+ * Temporarily switch the enet mode to 1G so that the MII
+ * functions work correctly.
+ */
+ speed = memac->max_speed;
+ memac->max_speed = SPEED_1000;
+
+ /* 1000BaseX mode */
+ tmp_reg16 = PHY_SGMII_IF_MODE_1000X;
+ mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+
+ /* AN Device capability */
+ tmp_reg16 = PHY_SGMII_DEV_ABILITY_1000X;
+ mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
+ * Since the 1G SGMII link_timer value would be too short for 2.5G
+ * SGMII, we always program the 2.5G SGMII value here.
+ */
+ mii_write_phy_reg(memac, phy_addr, 0x13, 0x002f);
+ mii_write_phy_reg(memac, phy_addr, 0x12, 0xaf08);
+
+ /* Restart AN */
+ tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+ mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
+
+ /* Restore original speed */
+ memac->max_speed = speed;
+}
+
+static int check_init_parameters(struct fman_mac *memac)
+{
+ if (memac->addr == 0) {
+ pr_err("Ethernet MAC must have a valid MAC address\n");
+ return -EINVAL;
+ }
+ if (!memac->exception_cb) {
+ pr_err("Uninitialized exception handler\n");
+ return -EINVAL;
+ }
+ if (!memac->event_cb) {
+ pr_warn("Uninitialize event handler\n");
+ return -EINVAL;
+ }
+
+ /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+ if (!memac->memac_drv_param->no_length_check_enable) {
+ pr_err("Length Check!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = MEMAC_IMASK_TECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = MEMAC_IMASK_RECC_ER;
+ break;
+ case FM_MAC_EX_TS_FIFO_ECC_ERR:
+ bit_mask = MEMAC_IMASK_TSECC_ER;
+ break;
+ case FM_MAC_EX_MAGIC_PACKET_INDICATION:
+ bit_mask = MEMAC_IMASK_MGI;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static void memac_err_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+ /* The imask includes both error and notification/event bits.
+ * Keep only the error bits that are enabled by imask.
+ * The imask error bits are offset by 16 bits from their
+ * corresponding locations in ievent - hence the >> 16.
+ */
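+ /* For example, MEMAC_IMASK_TECC_ER (0x02000000) >> 16 equals
+ * MEMAC_IEVNT_TX_ECC_ER (0x00000200); the same relation holds for the
+ * other error bits used here.
+ */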
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_TS_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
+ if (event & MEMAC_IEVNT_TX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & MEMAC_IEVNT_RX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+}
+
+static void memac_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+ /* The imask includes both error and notification/event bits.
+ * Keep only the error bits that are enabled by imask.
+ * The imask error bits are offset by 16 bits from their
+ * corresponding locations in ievent - hence the >> 16.
+ */
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_MGI)
+ memac->exception_cb(memac->dev_id,
+ FM_MAC_EX_MAGIC_PACKET_INDICATION);
+}
+
+static void free_init_resources(struct fman_mac *memac)
+{
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(memac->multicast_addr_hash);
+ memac->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(memac->unicast_addr_hash);
+ memac->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct memac_cfg *memac_drv_params)
+{
+ /* Init is done once the driver parameters have been consumed and
+ * freed, so a NULL pointer means init is complete.
+ */
+ if (!memac_drv_params)
+ return true;
+
+ return false;
+}
+
+int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp |= CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= CMD_CFG_TX_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~CMD_CFG_TX_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_adjust_link(struct fman_mac *memac, u16 speed)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->if_mode);
+
+ /* Set full duplex */
+ tmp &= ~IF_MODE_HD;
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
+ /* Configure RGMII in manual mode */
+ tmp &= ~IF_MODE_RGMII_AUTO;
+ tmp &= ~IF_MODE_RGMII_SP_MASK;
+ /* Full duplex */
+ tmp |= IF_MODE_RGMII_FD;
+
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ default:
+ break;
+ }
+ }
+
+ iowrite32be(tmp, &regs->if_mode);
+
+ return 0;
+}
+
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->max_frame_length = new_val;
+
+ return 0;
+}
+
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->reset_on_init = enable;
+
+ return 0;
+}
+
+#ifndef __rtems__
+int memac_cfg_fixed_link(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link)
+{
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ memac->memac_drv_param->fixed_link = fixed_link;
+
+ return 0;
+}
+#endif /* __rtems__ */
+
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->tx_fifo_sections);
+
+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~CMD_CFG_PFC_MODE;
+ priority = 0;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+ else
+ tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+ tmp |= ((u32)pause_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+ else
+ tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+ tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
+ return 0;
+}
+
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (en)
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+{
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+
+ return 0;
+}
+
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
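+	/* The group/multicast (I/G) bit is the least significant bit of the
+	 * first octet of the MAC address; only group addresses can be
+	 * matched through the hash table.
+	 */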
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+ pr_err("Unicast Address\n");
+ return -EINVAL;
+ }
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &memac->multicast_addr_hash->lsts[hash]);
+ iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
+ iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ memac->exceptions |= bit_mask;
+ else
+ memac->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ set_exception(memac->regs, bit_mask, enable);
+
+ return 0;
+}
+
+int memac_init(struct fman_mac *memac)
+{
+ struct memac_cfg *memac_drv_param;
+ u8 i, phy_addr;
+ enet_addr_t eth_addr;
+ bool slow_10g_if = false;
+ struct fixed_phy_status *fixed_link;
+ int err;
+ u32 reg32 = 0;
+
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ err = check_init_parameters(memac);
+ if (err)
+ return err;
+
+ memac_drv_param = memac->memac_drv_param;
+
+ if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
+ slow_10g_if = true;
+
+ /* First, reset the MAC if desired. */
+ if (memac_drv_param->reset_on_init) {
+ err = reset(memac->regs);
+ if (err) {
+ pr_err("mEMAC reset failed\n");
+ return err;
+ }
+ }
+
+ /* MAC Address */
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+
+ fixed_link = memac_drv_param->fixed_link;
+
+ init(memac->regs, memac->memac_drv_param, memac->phy_if,
+ memac->max_speed, slow_10g_if, memac->exceptions);
+
+ /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
+ * Exists only in FMan 6.0 and 6.3.
+ */
+ if ((memac->fm_rev_info.major == 6) &&
+ ((memac->fm_rev_info.minor == 0) ||
+ (memac->fm_rev_info.minor == 3))) {
+ /* MAC strips CRC from received frames - this workaround
+ * should decrease the likelihood of bug appearance
+ */
+ reg32 = in_be32(&memac->regs->command_config);
+ reg32 &= ~CMD_CFG_CRC_FWD;
+ out_be32(&memac->regs->command_config, reg32);
+ }
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ /* Configure internal SGMII PHY */
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac, PHY_MDIO_ADDR);
+ else
+ setup_sgmii_internal_phy(memac, PHY_MDIO_ADDR,
+ fixed_link);
+ } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ /* Configure 4 internal SGMII PHYs */
+ for (i = 0; i < 4; i++) {
+ /* QSGMII PHY address occupies 3 upper bits of 5-bit
+ * phy_address; the lower 2 bits are used to extend
+ * register address space and access each one of 4
+ * ports inside QSGMII.
+ */
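+			/* For instance, a base PHY_MDIO_ADDR of 5 (0b101)
+			 * would place the four QSGMII ports at addresses
+			 * (5 << 2) | i = 20..23.
+			 */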
+ phy_addr = (u8)((PHY_MDIO_ADDR << 2) | i);
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac,
+ phy_addr);
+ else
+ setup_sgmii_internal_phy(memac, phy_addr,
+ fixed_link);
+ }
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
+ memac_drv_param->max_frame_length);
+ if (err) {
+		pr_err("setting MAC max frame length failed\n");
+ return err;
+ }
+
+ memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->multicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("multicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->unicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("unicast hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
+
+ kfree(memac_drv_param);
+ memac->memac_drv_param = NULL;
+
+ return 0;
+}
+
+int memac_free(struct fman_mac *memac)
+{
+ free_init_resources(memac);
+
+ kfree(memac->memac_drv_param);
+ kfree(memac);
+
+ return 0;
+}
+
+struct fman_mac *memac_config(struct fman_mac_params *params)
+{
+ struct fman_mac *memac;
+ struct memac_cfg *memac_drv_param;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+ /* allocate memory for the m_emac data structure */
+ memac = kzalloc(sizeof(*memac), GFP_KERNEL);
+ if (!memac)
+ return NULL;
+
+ /* allocate memory for the m_emac driver parameters data structure */
+ memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
+ if (!memac_drv_param) {
+ memac_free(memac);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ memac->memac_drv_param = memac_drv_param;
+
+ set_dflts(memac_drv_param);
+
+ memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+
+ memac->regs = (struct memac_regs __iomem *)(base_addr);
+ memac->mii_regs = (struct memac_mii_regs __iomem *)
+ (base_addr + MEMAC_TO_MII_OFFSET);
+ memac->max_speed = params->max_speed;
+ memac->phy_if = params->phy_if;
+ memac->mac_id = params->mac_id;
+ memac->exceptions = MEMAC_DEFAULT_EXCEPTIONS;
+ memac->exception_cb = params->exception_cb;
+ memac->event_cb = params->event_cb;
+ memac->dev_id = params->dev_id;
+ memac->fm = params->fm;
+ memac->basex_if = params->basex_if;
+
+ /* Save FMan revision */
+ fman_get_revision(memac->fm, &memac->fm_rev_info);
+
+ return memac;
+}
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.h b/linux/drivers/net/ethernet/freescale/fman/fman_memac.h
new file mode 100644
index 00000000..ae01dd06
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEMAC_H
+#define __MEMAC_H
+
+#include "fman_mac.h"
+
+#include <linux/netdevice.h>
+
+struct fman_mac *memac_config(struct fman_mac_params *params);
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_adjust_link(struct fman_mac *memac, u16 speed);
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
+#ifndef __rtems__
+int memac_cfg_fixed_link(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link);
+#endif /* __rtems__ */
+int memac_enable(struct fman_mac *memac, enum comm_mode mode);
+int memac_disable(struct fman_mac *memac, enum comm_mode mode);
+int memac_init(struct fman_mac *memac);
+int memac_free(struct fman_mac *memac);
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable);
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+
+#endif /* __MEMAC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.c b/linux/drivers/net/ethernet/freescale/fman/fman_muram.c
new file mode 100644
index 00000000..9762d725
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -0,0 +1,124 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_muram.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+struct muram_info {
+ struct gen_pool *pool;
+ void __iomem *vbase;
+ size_t size;
+ phys_addr_t pbase;
+};
+
+static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
+ unsigned long vaddr)
+{
+ return vaddr - (unsigned long)muram->vbase;
+}
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+ struct muram_info *muram;
+ void __iomem *vaddr;
+ int ret;
+
+ muram = kzalloc(sizeof(*muram), GFP_KERNEL);
+ if (!muram)
+ return NULL;
+
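+	/* A minimum allocation order of ilog2(64) makes the pool hand out
+	 * memory in 64-byte granules; the -1 node id places no NUMA node
+	 * restriction on the pool's metadata allocations.
+	 */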
+ muram->pool = gen_pool_create(ilog2(64), -1);
+ if (!muram->pool) {
+ pr_err("%s(): MURAM pool create failed\n", __func__);
+ goto muram_free;
+ }
+
+ vaddr = ioremap(base, size);
+ if (!vaddr) {
+ pr_err("%s(): MURAM ioremap failed\n", __func__);
+ goto pool_destroy;
+ }
+
+ ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
+ base, size, -1);
+ if (ret < 0) {
+ pr_err("%s(): MURAM pool add failed\n", __func__);
+ iounmap(vaddr);
+ goto pool_destroy;
+ }
+
+ memset_io(vaddr, 0, (int)size);
+
+ muram->vbase = vaddr;
+ muram->pbase = base;
+ return muram;
+
+pool_destroy:
+ gen_pool_destroy(muram->pool);
+muram_free:
+ kfree(muram);
+ return NULL;
+}
+
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset)
+{
+ return offset + (unsigned long)muram->vbase;
+}
+
+int fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+ unsigned long vaddr;
+
+ vaddr = gen_pool_alloc(muram->pool, size);
+ if (!vaddr)
+ return -ENOMEM;
+
+ memset_io((void __iomem *)vaddr, 0, size);
+
+ return fman_muram_vbase_to_offset(muram, vaddr);
+}
+
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+{
+ unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
+
+ gen_pool_free(muram->pool, addr, size);
+}
+
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
new file mode 100644
index 00000000..c715795b
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FM_MURAM_EXT
+#define __FM_MURAM_EXT
+
+#include <linux/types.h>
+
+#define FM_MURAM_INVALID_ALLOCATION -1
+
+/* Structure for FM MURAM information */
+struct muram_info;
+
+/**
+ * fman_muram_init
+ * @base: Pointer to base of memory mapped FM-MURAM.
+ * @size: Size of the FM-MURAM partition.
+ *
+ * Creates a partition in the MURAM.
+ * The routine returns a pointer to the MURAM partition.
+ * This pointer must be passed to all other FM-MURAM function calls.
+ * No actual initialization or configuration of FM_MURAM hardware is done by
+ * this routine.
+ *
+ * Return: pointer to the FM-MURAM object, or NULL on failure.
+ */
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
+
+/**
+ * fman_muram_offset_to_vbase
+ * @muram: FM-MURAM module pointer.
+ * @offset: the offset of the memory block
+ *
+ * Gives the virtual address of the memory region at the specified offset.
+ *
+ * Return: The virtual address of the memory block.
+ */
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset);
+
+/**
+ * fman_muram_alloc
+ * @muram: FM-MURAM module pointer.
+ * @size: Size of the memory to be allocated.
+ *
+ * Allocates memory from the FM-MURAM partition.
+ *
+ * Return: offset of the allocated memory within the MURAM on success;
+ * -ENOMEM otherwise.
+ */
+int fman_muram_alloc(struct muram_info *muram, size_t size);
+
+/**
+ * fman_muram_free_mem
+ * @muram: FM-MURAM module pointer.
+ * @offset: offset of the memory region to be freed.
+ * @size: size of the memory to be freed.
+ *
+ * Frees a previously allocated memory region in the FM-MURAM partition.
+ */
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
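+
+/* Illustrative call sequence (sizes and variable names are examples only):
+ *
+ *	struct muram_info *muram;
+ *	int offset;
+ *
+ *	muram = fman_muram_init(muram_phys_base, muram_size);
+ *	if (!muram)
+ *		return -ENOMEM;
+ *	offset = fman_muram_alloc(muram, 0x200);
+ *	if (offset >= 0) {
+ *		... access fman_muram_offset_to_vbase(muram, offset) ...
+ *		fman_muram_free_mem(muram, offset, 0x200);
+ *	}
+ */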
+
+#endif /* __FM_MURAM_EXT */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.c b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
new file mode 100644
index 00000000..e42ac1c1
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -0,0 +1,1827 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_sp.h"
+
+#include <asm/mpc85xx.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+/* Queue ID */
+#define DFLT_FQ_ID 0x00FFFFFF
+
+/* General defines */
+#define PORT_BMI_FIFO_UNITS 0x100
+
+#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \
+ min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
+
+#define PORT_CG_MAP_NUM 8
+#define PORT_PRS_RESULT_WORDS_NUM 8
+#define PORT_IC_OFFSET_UNITS 0x10
+
+#define MIN_EXT_BUF_SIZE 64
+
+#define BMI_PORT_REGS_OFFSET 0
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* Default values */
+#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
+ DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
+
+#define DFLT_PORT_CUT_BYTES_FROM_END 4
+
+#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD
+#define DFLT_PORT_MAX_FRAME_LENGTH 9600
+
+#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
+
+#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \
+ (major == 6 ? \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \
+	(MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4))
+
+#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0
+
+/* QMI defines */
+#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
+
+#define QMI_PORT_CFG_EN 0x80000000
+#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
+
+#define QMI_DEQ_CFG_PRI 0x80000000
+#define QMI_DEQ_CFG_TYPE1 0x10000000
+#define QMI_DEQ_CFG_TYPE2 0x20000000
+#define QMI_DEQ_CFG_TYPE3 0x30000000
+#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
+#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
+#define QMI_DEQ_CFG_SP_MASK 0xf
+#define QMI_DEQ_CFG_SP_SHIFT 20
+
+#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \
+ (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
+
+/* BMI defines */
+#define BMI_EBD_EN 0x80000000
+
+#define BMI_PORT_CFG_EN 0x80000000
+#define BMI_PORT_CFG_FDOVR 0x02000000
+
+#define BMI_PORT_STATUS_BSY 0x80000000
+
+#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
+#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
+#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
+#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
+#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
+
+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
+#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
+
+#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
+#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f
+
+#define BMI_RX_FRAME_END_CUT_SHIFT 16
+#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f
+
+#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
+#define BMI_IC_TO_EXT_MASK 0x0000001f
+#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
+#define BMI_IC_FROM_INT_MASK 0x0000000f
+#define BMI_IC_SIZE_MASK 0x0000001f
+
+#define BMI_INT_BUF_MARG_SHIFT 28
+#define BMI_INT_BUF_MARG_MASK 0x0000000f
+#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
+#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff
+#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff
+
+#define BMI_CMD_MR_LEAC 0x00200000
+#define BMI_CMD_MR_SLEAC 0x00100000
+#define BMI_CMD_MR_MA 0x00080000
+#define BMI_CMD_MR_DEAS 0x00040000
+#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
+ BMI_CMD_MR_SLEAC | \
+ BMI_CMD_MR_MA | \
+ BMI_CMD_MR_DEAS)
+#define BMI_CMD_TX_MR_DEF 0
+
+#define BMI_CMD_ATTR_ORDER 0x80000000
+#define BMI_CMD_ATTR_SYNC 0x02000000
+#define BMI_CMD_ATTR_COLOR_SHIFT 26
+
+#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
+#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f
+#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
+
+#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
+#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
+#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
+#define BMI_EXT_BUF_POOL_ID_SHIFT 16
+#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
+#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
+
+#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
+
+#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE
+
+#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+
+#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
+ ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
+
+#define BMI_PORT_RFNE_FRWD_RPD 0x40000000
+
+#define RX_ERRS_TO_ENQ \
+ (FM_PORT_FRM_ERR_DMA | \
+ FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | \
+ FM_PORT_FRM_ERR_EXTRACTION | \
+ FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
+ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
+ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
+ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
+ FM_PORT_FRM_ERR_IPRE)
+
+/* NIA defines */
+#define NIA_ORDER_RESTOR 0x00800000
+#define NIA_ENG_FM_CTL 0x00000000
+#define NIA_ENG_BMI 0x00500000
+#define NIA_ENG_QMI_ENQ 0x00540000
+#define NIA_ENG_QMI_DEQ 0x00580000
+
+#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
+#define NIA_BMI_AC_TX_RELEASE 0x000002C0
+#define NIA_BMI_AC_RELEASE 0x000000C0
+#define NIA_BMI_AC_TX 0x00000274
+#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
+
+/* Port IDs */
+#define TX_10G_PORT_BASE 0x30
+#define RX_10G_PORT_BASE 0x10
+
+/* BMI Rx port register map */
+struct fman_port_rx_bmi_regs {
+ u32 fmbm_rcfg; /* Rx Configuration */
+ u32 fmbm_rst; /* Rx Status */
+ u32 fmbm_rda; /* Rx DMA attributes */
+ u32 fmbm_rfp; /* Rx FIFO Parameters */
+ u32 fmbm_rfed; /* Rx Frame End Data */
+ u32 fmbm_ricp; /* Rx Internal Context Parameters */
+ u32 fmbm_rim; /* Rx Internal Buffer Margins */
+ u32 fmbm_rebm; /* Rx External Buffer Margins */
+ u32 fmbm_rfne; /* Rx Frame Next Engine */
+ u32 fmbm_rfca; /* Rx Frame Command Attributes. */
+ u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */
+ u32 fmbm_rpso; /* Rx Parse Start Offset */
+ u32 fmbm_rpp; /* Rx Policer Profile */
+ u32 fmbm_rccb; /* Rx Coarse Classification Base */
+ u32 fmbm_reth; /* Rx Excessive Threshold */
+ u32 reserved003c[1]; /* (0x03C 0x03F) */
+ u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
+ /* Rx Parse Results Array Init */
+ u32 fmbm_rfqid; /* Rx Frame Queue ID */
+ u32 fmbm_refqid; /* Rx Error Frame Queue ID */
+ u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */
+ u32 fmbm_rfsem; /* Rx Frame Status Error Mask */
+ u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */
+ u32 reserved0074[0x2]; /* (0x074-0x07C) */
+ u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */
+ u32 reserved0080[0x20]; /* (0x080 0x0FF) */
+ u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+	/* Buffer Manager pool Information */
+	u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter */
+	u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved */
+ u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
+ u32 fmbm_mpd; /* BM Pool Depletion */
+ u32 reserved0184[0x1F]; /* (0x184 0x1FF) */
+ u32 fmbm_rstc; /* Rx Statistics Counters */
+ u32 fmbm_rfrc; /* Rx Frame Counter */
+ u32 fmbm_rfbc; /* Rx Bad Frames Counter */
+ u32 fmbm_rlfc; /* Rx Large Frames Counter */
+ u32 fmbm_rffc; /* Rx Filter Frames Counter */
+ u32 fmbm_rfdc; /* Rx Frame Discard Counter */
+ u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */
+	u32 fmbm_rodc;		/* Rx Out of Buffers Discard Counter */
+	u32 fmbm_rbdc;		/* Rx Buffers Deallocate Counter */
+	u32 fmbm_rpec;		/* Rx Prepare to enqueue Counter */
+ u32 reserved0224[0x16]; /* (0x224 0x27F) */
+ u32 fmbm_rpc; /* Rx Performance Counters */
+ u32 fmbm_rpcp; /* Rx Performance Count Parameters */
+ u32 fmbm_rccn; /* Rx Cycle Counter */
+ u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */
+ u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */
+ u32 fmbm_rduc; /* Rx DMA Utilization Counter */
+ u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */
+ u32 fmbm_rpac; /* Rx Pause Activation Counter */
+ u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */
+ u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */
+ u32 fmbm_rgpr; /* Rx General Purpose Register */
+ u32 reserved0310[0x3a];
+};
+
+/* BMI Tx port register map */
+struct fman_port_tx_bmi_regs {
+ u32 fmbm_tcfg; /* Tx Configuration */
+ u32 fmbm_tst; /* Tx Status */
+ u32 fmbm_tda; /* Tx DMA attributes */
+ u32 fmbm_tfp; /* Tx FIFO Parameters */
+ u32 fmbm_tfed; /* Tx Frame End Data */
+ u32 fmbm_ticp; /* Tx Internal Context Parameters */
+ u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */
+ u32 fmbm_tfca; /* Tx Frame Command attribute. */
+ u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. */
+ u32 fmbm_tefqid; /* Tx Frame Error Queue ID */
+ u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */
+ u32 fmbm_trlmts; /* Tx Rate Limiter Scale */
+ u32 fmbm_trlmt; /* Tx Rate Limiter */
+ u32 reserved0034[0x0e]; /* (0x034-0x6c) */
+ u32 fmbm_tccb; /* Tx Coarse Classification base */
+ u32 fmbm_tfne; /* Tx Frame Next Engine */
+ u32 fmbm_tpfcm[0x02];
+ /* Tx Priority based Flow Control (PFC) Mapping */
+ u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */
+ u32 reserved0080[0x60]; /* (0x080-0x200) */
+ u32 fmbm_tstc; /* Tx Statistics Counters */
+ u32 fmbm_tfrc; /* Tx Frame Counter */
+ u32 fmbm_tfdc; /* Tx Frames Discard Counter */
+	u32 fmbm_tfledc;	/* Tx Frame length error discard counter */
+	u32 fmbm_tfufdc;	/* Tx Frame unsupported format discard counter */
+ u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */
+ u32 reserved0218[0x1A]; /* (0x218-0x280) */
+ u32 fmbm_tpc; /* Tx Performance Counters */
+ u32 fmbm_tpcp; /* Tx Performance Count Parameters */
+ u32 fmbm_tccn; /* Tx Cycle Counter */
+ u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */
+ u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */
+ u32 fmbm_tduc; /* Tx DMA Utilization Counter */
+ u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */
+ u32 reserved029c[16]; /* (0x29C-0x2FF) */
+ u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */
+ u32 fmbm_tgpr; /* Tx General Purpose Register */
+ u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
+};
+
+/* BMI port register map */
+union fman_port_bmi_regs {
+ struct fman_port_rx_bmi_regs rx;
+ struct fman_port_tx_bmi_regs tx;
+};
+
+/* QMI port register map */
+struct fman_port_qmi_regs {
+ u32 fmqm_pnc; /* PortID n Configuration Register */
+ u32 fmqm_pns; /* PortID n Status Register */
+ u32 fmqm_pnts; /* PortID n Task Status Register */
+ u32 reserved00c[4]; /* 0xn00C - 0xn01B */
+ u32 fmqm_pnen; /* PortID n Enqueue NIA Register */
+ u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */
+ u32 reserved024[2]; /* 0xn024 - 0x02B */
+ u32 fmqm_pndn; /* PortID n Dequeue NIA Register */
+ u32 fmqm_pndc; /* PortID n Dequeue Config Register */
+ u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */
+ u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */
+ u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
+};
+
+/* QMI dequeue prefetch modes */
+enum fman_port_deq_prefetch {
+ FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
+ FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
+ FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
+};
+
+/* A structure for defining FM port resources */
+struct fman_port_rsrc {
+ u32 num; /* Committed required resource */
+ u32 extra; /* Extra (not committed) required resource */
+};
+
+enum fman_port_dma_swap {
+ FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */
+ FMAN_PORT_DMA_SWAP_LE,
+ /* The transferred data should be swapped in PPC Little Endian mode */
+ FMAN_PORT_DMA_SWAP_BE
+ /* The transferred data should be swapped in Big Endian mode */
+};
+
+/* Default port color */
+enum fman_port_color {
+ FMAN_PORT_COLOR_GREEN, /* Default port color is green */
+ FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
+ FMAN_PORT_COLOR_RED, /* Default port color is red */
+ FMAN_PORT_COLOR_OVERRIDE /* Ignore color */
+};
+
+/* QMI dequeue from the SP channel - types */
+enum fman_port_deq_type {
+ FMAN_PORT_DEQ_BY_PRI,
+ /* Priority precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ,
+ /* Active FQ precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
+ /* Active FQ precedence and override Intra-Class scheduling */
+};
+
+/* External buffer pools configuration */
+struct fman_port_bpools {
+ u8 count; /* Num of pools to set up */
+ bool counters_enable; /* Enable allocate counters */
+ u8 grp_bp_depleted_num;
+ /* Number of depleted pools - if reached the BMI indicates
+ * the MAC to send a pause frame
+ */
+ struct {
+ u8 bpid; /* BM pool ID */
+ u16 size;
+ /* Pool's size - must be in ascending order */
+ bool is_backup;
+ /* If this is a backup pool */
+ bool grp_bp_depleted;
+ /* Consider this buffer in multiple pools depletion criteria */
+ bool single_bp_depleted;
+ /* Consider this buffer in single pool depletion criteria */
+ } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+};
+
+struct fman_port_cfg {
+ u32 dflt_fqid;
+ u32 err_fqid;
+ u8 deq_sp;
+ bool deq_high_priority;
+ enum fman_port_deq_type deq_type;
+ enum fman_port_deq_prefetch deq_prefetch_option;
+ u16 deq_byte_cnt;
+ u8 cheksum_last_bytes_ignore;
+ u8 rx_cut_end_bytes;
+ struct fman_buf_pool_depletion buf_pool_depletion;
+ bool discard_override;
+ bool en_buf_pool_depletion;
+ struct fman_ext_pools ext_buf_pools;
+ u32 tx_fifo_min_level;
+ u32 tx_fifo_low_comf_level;
+ u32 rx_pri_elevation;
+ u32 rx_fifo_thr;
+ struct fman_sp_buf_margins buf_margins;
+ u32 int_buf_start_margin;
+ struct fman_sp_int_context_data_copy int_context;
+ u32 discard_mask;
+ u32 err_mask;
+ bool forward_reuse_int_context;
+ struct fman_buffer_prefix_content buffer_prefix_content;
+ bool dont_release_buf;
+ bool set_num_of_tasks;
+ bool set_num_of_open_dmas;
+ bool set_size_of_fifo;
+ bool bcb_workaround;
+
+ u8 rx_fd_bits;
+ u32 tx_fifo_deq_pipeline_depth;
+ bool errata_A006675;
+ bool errata_A006320;
+ bool excessive_threshold_register;
+ bool fmbm_rebm_has_sgd;
+ bool fmbm_tfne_has_features;
+ bool qmi_deq_options_support;
+
+ enum fman_port_dma_swap dma_swap_data;
+ bool dma_ic_stash_on;
+ bool dma_header_stash_on;
+ bool dma_sg_stash_on;
+ bool dma_write_optimize;
+ enum fman_port_color color;
+ bool sync_req;
+
+ bool no_scatter_gather;
+};
+
+struct fman_port_rx_pools_params {
+ u8 num_of_pools;
+ u16 second_largest_buf_size;
+ u16 largest_buf_size;
+};
+
+struct fman_port_dts_params {
+ void __iomem *base_addr; /* FMan port virtual memory */
+ enum fman_port_type type; /* Port type */
+ u16 speed; /* Port speed */
+ u8 id; /* HW Port Id */
+ u32 qman_channel_id; /* QMan channel id (non RX only) */
+ struct fman *fman; /* FMan Handle */
+};
+
+struct fman_port {
+ void *fm;
+ struct fman_rev_info rev_info;
+ u8 port_id;
+ enum fman_port_type port_type;
+ u16 port_speed;
+
+ union fman_port_bmi_regs __iomem *bmi_regs;
+ struct fman_port_qmi_regs __iomem *qmi_regs;
+
+ struct fman_sp_buffer_offsets buffer_offsets;
+
+ u8 internal_buf_offset;
+ struct fman_ext_pools ext_buf_pools;
+
+ u16 max_frame_length;
+ struct fman_port_rsrc open_dmas;
+ struct fman_port_rsrc tasks;
+ struct fman_port_rsrc fifo_bufs;
+ struct fman_port_rx_pools_params rx_pools_params;
+
+ struct fman_port_cfg *cfg;
+ struct fman_port_dts_params dts_params;
+
+ u8 ext_pools_num;
+ u32 max_port_fifo_size;
+ u32 max_num_of_ext_pools;
+ u32 max_num_of_sub_portals;
+ u32 bm_max_num_of_pools;
+};
+
+static int init_bmi_rx(struct fman_port *port)
+{
+ struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Rx Configuration register */
+ tmp = 0;
+ if (cfg->discard_override)
+ tmp |= BMI_PORT_CFG_FDOVR;
+ iowrite32be(tmp, &regs->fmbm_rcfg);
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ if (cfg->dma_ic_stash_on)
+ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
+ if (cfg->dma_header_stash_on)
+ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
+ if (cfg->dma_sg_stash_on)
+ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
+ if (cfg->dma_write_optimize)
+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+ iowrite32be(tmp, &regs->fmbm_rda);
+
+ /* Rx FIFO parameters */
+ tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
+ BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
+ tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
+ iowrite32be(tmp, &regs->fmbm_rfp);
+
+ if (cfg->excessive_threshold_register)
+ /* always allow access to the extra resources */
+ iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
+ BMI_RX_FRAME_END_CUT_SHIFT;
+ if (cfg->errata_A006320)
+ tmp &= 0xffe0ffff;
+ iowrite32be(tmp, &regs->fmbm_rfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ricp);
+
+ /* Internal buffer offset */
+ tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
+ BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_rim);
+
+ /* External buffer margins */
+ tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
+ BMI_EXT_BUF_MARG_START_SHIFT;
+ tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
+ if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
+ tmp |= BMI_SG_DISABLE;
+ iowrite32be(tmp, &regs->fmbm_rebm);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_RX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ if (cfg->sync_req)
+ tmp |= BMI_CMD_ATTR_SYNC;
+
+ iowrite32be(tmp, &regs->fmbm_rfca);
+
+ /* NIA */
+ tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
+
+ if (cfg->errata_A006675)
+ tmp |= NIA_ENG_FM_CTL |
+ NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
+ else
+ tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ iowrite32be(tmp, &regs->fmbm_rfne);
+
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
+
+ /* Default/error queues */
+ iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);
+
+ /* Discard/error masks */
+ iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
+ iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);
+
+ return 0;
+}
+
+static int init_bmi_tx(struct fman_port *port)
+{
+ struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Tx Configuration register */
+ tmp = 0;
+ iowrite32be(tmp, &regs->fmbm_tcfg);
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ if (cfg->dma_ic_stash_on)
+ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
+ if (cfg->dma_header_stash_on)
+ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
+ if (cfg->dma_sg_stash_on)
+ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
+ iowrite32be(tmp, &regs->fmbm_tda);
+
+ /* Tx FIFO parameters */
+ tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
+ BMI_TX_FIFO_MIN_FILL_SHIFT;
+ tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
+ BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
+ tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
+ iowrite32be(tmp, &regs->fmbm_tfp);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ticp);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_TX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfca);
+
+ /* Dequeue NIA + enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(!cfg->dflt_fqid ?
+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
+ NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
+ &regs->fmbm_tfne);
+ }
+
+ /* Confirmation/error queues */
+ if (cfg->dflt_fqid || !cfg->dont_release_buf)
+ iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);
+
+ return 0;
+}
+
+static int init_qmi(struct fman_port *port)
+{
+ struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Rx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
+ return 0;
+ }
+
+ /* Continue with Tx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmqm_pnen);
+ /* Dequeue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
+ }
+
+ /* Dequeue Configuration register */
+ tmp = 0;
+ if (cfg->deq_high_priority)
+ tmp |= QMI_DEQ_CFG_PRI;
+
+ switch (cfg->deq_type) {
+ case FMAN_PORT_DEQ_BY_PRI:
+ tmp |= QMI_DEQ_CFG_TYPE1;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ:
+ tmp |= QMI_DEQ_CFG_TYPE2;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
+ tmp |= QMI_DEQ_CFG_TYPE3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->qmi_deq_options_support) {
+ switch (cfg->deq_prefetch_option) {
+ case FMAN_PORT_DEQ_NO_PREFETCH:
+ break;
+ case FMAN_PORT_DEQ_PART_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+ break;
+ case FMAN_PORT_DEQ_FULL_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
+ tmp |= cfg->deq_byte_cnt;
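+	/* For illustration: a Tx port with high-priority dequeue, TYPE1
+	 * (by-priority) dequeue, full prefetch, say sub-portal 3, and the
+	 * default Tx byte count of 0x1400 would compose
+	 * 0x80000000 | 0x10000000 | 0x03000000 | (3 << 20) | 0x1400
+	 * = 0x93301400.
+	 */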
+ iowrite32be(tmp, &regs->fmqm_pndc);
+
+ return 0;
+}
+
+static int init(struct fman_port *port)
+{
+ int err;
+
+ /* Init BMI registers */
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ err = init_bmi_rx(port);
+ break;
+ case FMAN_PORT_TYPE_TX:
+ err = init_bmi_tx(port);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ /* Init QMI registers */
+	err = init_qmi(port);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int set_bpools(const struct fman_port *port,
+ const struct fman_port_bpools *bp)
+{
+ u32 __iomem *bp_reg, *bp_depl_reg;
+ u32 tmp;
+ u8 i, max_bp_num;
+ bool grp_depl_used = false, rx_port;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ max_bp_num = port->ext_pools_num;
+ rx_port = true;
+ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
+ bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rx_port) {
+ /* Check buffers are provided in ascending order */
+ for (i = 0; (i < (bp->count - 1) &&
+ (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
+ if (bp->bpool[i].size > bp->bpool[i + 1].size)
+ return -EINVAL;
+ }
+ }
+
+ /* Set up external buffers pools */
+ for (i = 0; i < bp->count; i++) {
+ tmp = BMI_EXT_BUF_POOL_VALID;
+ tmp |= ((u32)bp->bpool[i].bpid <<
+ BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
+
+ if (rx_port) {
+ if (bp->counters_enable)
+ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
+
+ if (bp->bpool[i].is_backup)
+ tmp |= BMI_EXT_BUF_POOL_BACKUP;
+
+ tmp |= (u32)bp->bpool[i].size;
+ }
+
+ iowrite32be(tmp, &bp_reg[i]);
+ }
+
+ /* Clear unused pools */
+ for (i = bp->count; i < max_bp_num; i++)
+ iowrite32be(0, &bp_reg[i]);
+
+ /* Pools depletion */
+ tmp = 0;
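+	/* Per the shifts used below, the group-depletion pool selector
+	 * occupies the most significant byte of FMBM_RMPD (0x80000000 >> i)
+	 * and the single-pool selector the least significant byte
+	 * (0x80 >> i).
+	 */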
+ for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
+ if (bp->bpool[i].grp_bp_depleted) {
+ grp_depl_used = true;
+ tmp |= 0x80000000 >> i;
+ }
+
+ if (bp->bpool[i].single_bp_depleted)
+ tmp |= 0x80 >> i;
+ }
+
+ if (grp_depl_used)
+ tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
+ BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
+
+ iowrite32be(tmp, bp_depl_reg);
+ return 0;
+}
+
+static bool is_init_done(struct fman_port_cfg *cfg)
+{
+	/* The port config structure is freed once initialization completes,
+	 * so a NULL cfg means the FMan port driver parameters were already
+	 * consumed and the port is initialized.
+	 */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static int verify_size_of_fifo(struct fman_port *port)
+{
+ u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
+
+ /* TX Ports */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
+
+ min_fifo_size_required +=
+ port->cfg->tx_fifo_deq_pipeline_depth *
+ FMAN_BMI_FIFO_UNITS;
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance; this allows the hardware to pipeline the DMA
+		 * of a new frame while the previous frame is not yet
+		 * transmitted.
+		 */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
+ }
+
+ /* RX Ports */
+ else if (port->port_type == FMAN_PORT_TYPE_RX) {
+ if (port->rev_info.major >= 6)
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) +
+ (5 * FMAN_BMI_FIFO_UNITS));
+ /* 4 according to spec + 1 for FOF>0 */
+ else
+ min_fifo_size_required = (u32)
+ (roundup(min(port->max_frame_length,
+ port->rx_pools_params.largest_buf_size),
+ FMAN_BMI_FIFO_UNITS) +
+ (7 * FMAN_BMI_FIFO_UNITS));
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance; this allows the hardware to pipeline the DMA
+		 * of a new frame while the previous frame is not yet
+		 * transmitted.
+		 */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ }
+
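+	/* Worked example (Tx, 1G, FMan v3), assuming FMAN_BMI_FIFO_UNITS is
+	 * 0x100 and the default 9600-byte max frame length:
+	 * roundup(9600, 256) + 3 * 256 = 9728 + 768 = 10496 bytes, plus a
+	 * dequeue pipeline depth of 2 units (512 bytes) gives a minimum of
+	 * 11008 bytes; the back-to-back margin adds 2 more units for 11520.
+	 */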
+ WARN_ON(min_fifo_size_required <= 0);
+ WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
+
+ /* Verify the size */
+ if (port->fifo_bufs.num < min_fifo_size_required)
+ pr_debug("FIFO size should be enlarged to %d bytes\n",
+ min_fifo_size_required);
+ else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
+ pr_debug("For b2b processing,FIFO may be enlarged to %d bytes\n",
+ opt_fifo_size_for_b2b);
+
+ return 0;
+}
+
+static int set_ext_buffer_pools(struct fman_port *port)
+{
+ struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
+ struct fman_buf_pool_depletion *buf_pool_depletion =
+ &port->cfg->buf_pool_depletion;
+ u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ u16 sizes_array[BM_MAX_NUM_OF_POOLS];
+ int i = 0, j = 0, err;
+ struct fman_port_bpools bpools;
+
+ memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
+ memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
+ memcpy(&port->ext_buf_pools, ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+
+ fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
+ ordered_array,
+ sizes_array);
+
+ memset(&bpools, 0, sizeof(struct fman_port_bpools));
+ bpools.count = ext_buf_pools->num_of_pools_used;
+ bpools.counters_enable = true;
+ for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
+ bpools.bpool[i].bpid = ordered_array[i];
+ bpools.bpool[i].size = sizes_array[ordered_array[i]];
+ }
+
+ /* save pools parameters for later use */
+ port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
+ port->rx_pools_params.largest_buf_size =
+ sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
+ port->rx_pools_params.second_largest_buf_size =
+ sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
+
+ /* FMBM_RMPD reg. - pool depletion */
+ if (buf_pool_depletion->pools_grp_mode_enable) {
+ bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
+ for (i = 0; i < port->bm_max_num_of_pools; i++) {
+ if (buf_pool_depletion->pools_to_consider[i]) {
+ for (j = 0; j < ext_buf_pools->
+ num_of_pools_used; j++) {
+ if (i == ordered_array[j]) {
+ bpools.bpool[j].
+ grp_bp_depleted = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (buf_pool_depletion->single_pool_mode_enable) {
+ for (i = 0; i < port->bm_max_num_of_pools; i++) {
+ if (buf_pool_depletion->
+ pools_to_consider_for_single_mode[i]) {
+ for (j = 0; j < ext_buf_pools->
+ num_of_pools_used; j++) {
+ if (i == ordered_array[j]) {
+ bpools.bpool[j].
+ single_bp_depleted = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ err = set_bpools(port, &bpools);
+ if (err != 0) {
+		pr_err("FMan port: set_bpools() failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int init_low_level_driver(struct fman_port *port)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp_val;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
+ if (cfg->forward_reuse_int_context)
+ cfg->rx_fd_bits = (u8)(BMI_PORT_RFNE_FRWD_RPD >> 24);
+ break;
+ default:
+ break;
+ }
+
+ tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
+ (port->internal_buf_offset / OFFSET_UNITS + 1) :
+ (port->internal_buf_offset / OFFSET_UNITS));
+ port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
+ port->cfg->int_buf_start_margin = port->internal_buf_offset;
+
+ if (init(port) != 0) {
+		pr_err("FMan port: init() failed\n");
+ return -ENODEV;
+ }
+
+	/* The code below is a trick so the FM will not release the buffer
+	 * to the BM nor try to enqueue the frame to the QM
+	 */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ /* override fmbm_tcfqid 0 with a false non-0 value.
+ * This will force FM to act according to tfene.
+ * Otherwise, if fmbm_tcfqid is 0 the FM will release
+ * buffers to BM regardless of fmbm_tfene
+ */
+ out_be32(&port->bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
+ out_be32(&port->bmi_regs->tx.fmbm_tfene,
+ NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
+ }
+ }
+
+ return 0;
+}
+
+static int fill_soc_specific_params(struct fman_port *port)
+{
+ u32 bmi_max_fifo_size;
+
+ bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
+ port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
+ port->bm_max_num_of_pools = 64;
+
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (port->rev_info.major) {
+ case 2:
+ case 3:
+ port->max_num_of_ext_pools = 4;
+ port->max_num_of_sub_portals = 12;
+ break;
+
+ case 6:
+ port->max_num_of_ext_pools = 8;
+ port->max_num_of_sub_portals = 16;
+ break;
+
+ default:
+ pr_err("Unsupported FMan version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 4;
+ case 1000:
+ if (major >= 6)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 16;
+ case 1000:
+ if (major >= 6)
+ return 4;
+ else
+ return 3;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ if (speed == 10000)
+ return 8;
+ else
+ return 2;
+ case FMAN_PORT_TYPE_TX:
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 12;
+ else
+ val = 3;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 2;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 1;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ return 8;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 64;
+ else
+ val = 50;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 96;
+ else
+ val = 50;
+ break;
+ default:
+ val = 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 44;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 45;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static void set_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+
+ cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
+ cfg->dma_write_optimize = true;
+ cfg->color = FMAN_PORT_COLOR_GREEN;
+ cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
+ cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
+ cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
+ cfg->tx_fifo_low_comf_level = (5 * 1024);
+ cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
+ cfg->sync_req = true;
+ cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
+ cfg->tx_fifo_deq_pipeline_depth =
+ BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
+ cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
+
+ cfg->rx_pri_elevation =
+ DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
+ port->cfg->rx_fifo_thr =
+ DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
+ port->max_port_fifo_size);
+
+ if ((port->rev_info.major == 6) &&
+ ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
+ cfg->errata_A006320 = true;
+
+ /* Excessive Threshold register - exists for pre-FMv3 chips only */
+ if (port->rev_info.major < 6) {
+ cfg->excessive_threshold_register = true;
+ } else {
+ cfg->fmbm_rebm_has_sgd = true;
+ cfg->fmbm_tfne_has_features = true;
+ }
+
+ cfg->qmi_deq_options_support = true;
+
+ cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+}
+
+static void set_rx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
+
+ memcpy(&port->cfg->ext_buf_pools,
+ &port_params->specific_params.rx_params.ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+ port->cfg->err_fqid =
+ port_params->specific_params.rx_params.err_fqid;
+ port->cfg->dflt_fqid =
+ port_params->specific_params.rx_params.dflt_fqid;
+
+ /* Set BCB workaround on Rx ports, only for B4860 rev1 */
+ if (port->rev_info.major >= 6) {
+ unsigned int svr;
+
+ svr = mfspr(SPRN_SVR);
+ if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) == 1))
+ port->cfg->bcb_workaround = true;
+ }
+}
+
+static void set_tx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params,
+ struct fman_port_dts_params *dts_params)
+{
+ port->cfg->tx_fifo_deq_pipeline_depth =
+ get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
+ port->port_type,
+ port->port_speed);
+ port->cfg->err_fqid =
+ port_params->specific_params.non_rx_params.err_fqid;
+ port->cfg->deq_sp =
+ (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
+ port->cfg->dflt_fqid =
+ port_params->specific_params.non_rx_params.dflt_fqid;
+ port->cfg->deq_high_priority = true;
+}
+
+int fman_port_config(struct fman_port *port, struct fman_port_params *params)
+{
+ void __iomem *base_addr = port->dts_params.base_addr;
+ int err;
+
+ /* Allocate the FM driver's parameters structure */
+ port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
+ if (!port->cfg)
+ goto err_params;
+
+ /* Initialize FM port parameters which will be kept by the driver */
+ port->port_type = port->dts_params.type;
+ port->port_speed = port->dts_params.speed;
+ port->port_id = port->dts_params.id;
+ port->fm = port->dts_params.fman;
+ port->ext_pools_num = (u8)8;
+
+ /* get FM revision */
+ fman_get_revision(port->fm, &port->rev_info);
+
+ err = fill_soc_specific_params(port);
+ if (err)
+ goto err_port_cfg;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ set_rx_dflt_cfg(port, params);
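+		/* fall through */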
+ case FMAN_PORT_TYPE_TX:
+ set_tx_dflt_cfg(port, params, &port->dts_params);
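+		/* fall through */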
+ default:
+ set_dflt_cfg(port, params);
+ }
+
+ /* Continue with other parameters */
+ /* set memory map pointers */
+ port->bmi_regs = (union fman_port_bmi_regs __iomem *)
+ (base_addr + BMI_PORT_REGS_OFFSET);
+ port->qmi_regs = (struct fman_port_qmi_regs __iomem *)
+ (base_addr + QMI_PORT_REGS_OFFSET);
+
+ port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
+ /* resource distribution. */
+
+ port->fifo_bufs.num =
+ get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
+ port->port_speed) * FMAN_BMI_FIFO_UNITS;
+ port->fifo_bufs.extra =
+ DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
+
+ port->open_dmas.num =
+ get_dflt_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->open_dmas.extra =
+ get_dflt_extra_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.num =
+ get_dflt_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.extra =
+ get_dflt_extra_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+
+ /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
+ * workaround
+ */
+	if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
+	    (port->port_type == FMAN_PORT_TYPE_TX) &&
+	    (port->port_speed == 1000)) {
+ port->open_dmas.num = 16;
+ port->open_dmas.extra = 0;
+ }
+
+	if (port->rev_info.major >= 6 &&
+	    port->port_type == FMAN_PORT_TYPE_TX &&
+	    port->port_speed == 1000) {
+		/* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
+		 * workaround
+		 */
+		u32 reg = 0x00001013;
+
+		out_be32(&port->bmi_regs->tx.fmbm_tfp, reg);
+	}
+
+ return 0;
+
+err_port_cfg:
+ kfree(port->cfg);
+err_params:
+ kfree(port);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fman_port_config);
+
+int fman_port_init(struct fman_port *port)
+{
+ struct fman_port_cfg *cfg;
+ int err;
+ struct fman_port_init_params params;
+
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ err = fman_sp_build_buffer_struct(&port->cfg->int_context,
+ &port->cfg->buffer_prefix_content,
+ &port->cfg->buf_margins,
+ &port->buffer_offsets,
+ &port->internal_buf_offset);
+ if (err)
+ return err;
+
+ /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 Errata workaround */
+ if (port->rev_info.major >= 6 && (port->cfg->bcb_workaround) &&
+ ((port->port_type == FMAN_PORT_TYPE_RX) &&
+ (port->port_speed == 1000))) {
+ port->cfg->discard_mask |= FM_PORT_FRM_ERR_PHYSICAL;
+ port->fifo_bufs.num += 4 * 1024;
+ }
+
+ cfg = port->cfg;
+
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Call the external Buffer routine which also checks fifo
+ * size and updates it if necessary
+ */
+ /* define external buffer pools and pool depletion */
+ err = set_ext_buffer_pools(port);
+ if (err)
+ return err;
+ /* check if the largest external buffer pool is large enough */
+ if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
+ cfg->buf_margins.end_margins >
+ port->rx_pools_params.largest_buf_size) {
+ pr_err("buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+ cfg->buf_margins.start_margins,
+ cfg->buf_margins.end_margins,
+ port->rx_pools_params.largest_buf_size);
+ return -EINVAL;
+ }
+ }
+
+ /* Call FM module routine for communicating parameters */
+ memset(&params, 0, sizeof(params));
+ params.port_id = port->port_id;
+ params.port_type = port->port_type;
+ params.port_speed = port->port_speed;
+ params.num_of_tasks = (u8)port->tasks.num;
+ params.num_of_extra_tasks = (u8)port->tasks.extra;
+ params.num_of_open_dmas = (u8)port->open_dmas.num;
+ params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
+
+ if (port->fifo_bufs.num) {
+ err = verify_size_of_fifo(port);
+ if (err)
+ return err;
+ }
+ params.size_of_fifo = port->fifo_bufs.num;
+ params.extra_size_of_fifo = port->fifo_bufs.extra;
+ params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
+ params.max_frame_length = port->max_frame_length;
+
+ err = fman_set_port_params(port->fm, &params);
+ if (err)
+ return err;
+
+ err = init_low_level_driver(port);
+ if (err)
+ return err;
+
+ kfree(port->cfg);
+ port->cfg = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_init);
+
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content)
+{
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ memcpy(&port->cfg->buffer_prefix_content,
+ buffer_prefix_content,
+ sizeof(struct fman_buffer_prefix_content));
+ /* if data_align was not initialized by user,
+ * we return to driver's default
+ */
+ if (!port->cfg->buffer_prefix_content.data_align)
+ port->cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
+
+int fman_port_disable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+ bool rx_port, failure = false;
+ int count;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+
+ /* Wait for QMI to finish FD handling */
+ count = 100;
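+		/* up to ~1 ms: 100 polls, 10 us apart */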
+ do {
+ udelay(10);
+ tmp = ioread32be(&port->qmi_regs->fmqm_pns);
+ } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+ }
+
+ /* Disable BMI */
+ tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ /* Wait for graceful stop end */
+ count = 500;
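+	/* up to ~5 ms: 500 polls, 10 us apart */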
+ do {
+ udelay(10);
+ tmp = ioread32be(bmi_status_reg);
+ } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+
+ if (failure)
+ pr_debug("FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+ port->port_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_disable);
+
+int fman_port_enable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg, tmp;
+ bool rx_port;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+ }
+
+ /* Enable BMI */
+ tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_enable);
+
+struct fman_port *fman_port_bind(struct device *dev)
+{
+ return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
+}
+EXPORT_SYMBOL(fman_port_bind);
+
+u32 fman_port_get_qman_channel_id(struct fman_port *port)
+{
+ return port->dts_params.qman_channel_id;
+}
+EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+
+#ifndef __rtems__
+static int fman_port_probe(struct platform_device *of_dev)
+#else /* __rtems__ */
+static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
+#endif /* __rtems__ */
+{
+ struct fman_port *port;
+#ifndef __rtems__
+ struct fman *fman;
+ struct device_node *fm_node, *port_node;
+#else /* __rtems__ */
+ struct device_node *port_node;
+#endif /* __rtems__ */
+ struct resource res;
+#ifndef __rtems__
+ struct resource *dev_res;
+#endif /* __rtems__ */
+ const u32 *u32_prop;
+ int err = 0, lenp;
+ enum fman_port_type port_type;
+ u16 port_speed;
+ u8 port_id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port_node = of_node_get(of_dev->dev.of_node);
+
+ /* Get the FM node */
+#ifndef __rtems__
+ fm_node = of_get_parent(port_node);
+ if (!fm_node) {
+ pr_err("of_get_parent() failed\n");
+ err = -ENODEV;
+ goto return_err;
+ }
+
+ fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
+ of_node_put(fm_node);
+ if (!fman) {
+ err = -EINVAL;
+ goto return_err;
+ }
+#endif /* __rtems__ */
+
+ u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ pr_err("of_get_property(%s, cell-index) failed\n",
+ port_node->full_name);
+ err = -EINVAL;
+ goto return_err;
+ }
+ if (WARN_ON(lenp != sizeof(u32))) {
+ err = -EINVAL;
+ goto return_err;
+ }
+ port_id = (u8)*u32_prop;
+
+ port->dts_params.id = port_id;
+
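+	/* FMan v3 ports advertise 10G speed via the "fsl,fman-10g-port"
+	 * property; FMan v2 ports are identified by their cell-index range
+	 */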
+ if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
+ port_type = FMAN_PORT_TYPE_TX;
+ port_speed = 1000;
+ u32_prop = (const u32 *)of_get_property(port_node,
+ "fsl,fman-10g-port",
+ &lenp);
+ if (u32_prop)
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
+ if (port_id >= TX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_TX;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
+ port_type = FMAN_PORT_TYPE_RX;
+ port_speed = 1000;
+ u32_prop = (const u32 *)of_get_property(port_node,
+ "fsl,fman-10g-port", &lenp);
+ if (u32_prop)
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
+ if (port_id >= RX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_RX;
+
+ } else {
+ pr_err("Illegal port type\n");
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ port->dts_params.type = port_type;
+ port->dts_params.speed = port_speed;
+
+ if (port_type == FMAN_PORT_TYPE_TX) {
+ u32 qman_channel_id;
+
+ qman_channel_id = fman_get_qman_channel_id(fman, port_id);
+ if (qman_channel_id == 0) {
+ pr_err("incorrect qman-channel-id\n");
+ err = -EINVAL;
+ goto return_err;
+ }
+ port->dts_params.qman_channel_id = qman_channel_id;
+ }
+
+ err = of_address_to_resource(port_node, 0, &res);
+ if (err < 0) {
+ pr_err("of_address_to_resource() failed\n");
+ err = -ENOMEM;
+ goto return_err;
+ }
+
+ port->dts_params.fman = fman;
+
+ of_node_put(port_node);
+
+#ifndef __rtems__
+ dev_res = __devm_request_region(fman_get_device(fman), &res,
+ res.start, (res.end + 1 - res.start),
+ "fman-port");
+ if (!dev_res) {
+ pr_err("__devm_request_region() failed\n");
+ err = -EINVAL;
+ goto free_port;
+ }
+#endif /* __rtems__ */
+
+ port->dts_params.base_addr = devm_ioremap(fman_get_device(fman),
+ res.start,
+ (res.end + 1 - res.start));
+	if (!port->dts_params.base_addr)
+ pr_err("devm_ioremap() failed\n");
+
+ dev_set_drvdata(&of_dev->dev, port);
+
+ return 0;
+
+return_err:
+ of_node_put(port_node);
+#ifndef __rtems__
+free_port:
+#endif /* __rtems__ */
+ kfree(port);
+ return err;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_port_match[] = {
+ {.compatible = "fsl,fman-v3-port-rx"},
+ {.compatible = "fsl,fman-v2-port-rx"},
+ {.compatible = "fsl,fman-v3-port-tx"},
+ {.compatible = "fsl,fman-v2-port-tx"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_port_match);
+
+static struct platform_driver fman_port_driver = {
+ .driver = {
+ .name = "fsl-fman-port",
+ .of_match_table = fman_port_match,
+ },
+ .probe = fman_port_probe,
+};
+
+builtin_platform_driver(fman_port_driver);
+
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+static int
+fman_port_dev_probe(device_t dev)
+{
+ struct fman_ivars *ivars = device_get_ivars(dev);
+ int err;
+
+ err = fman_port_probe(&ivars->of_dev, ivars->fman);
+ if (err == 0) {
+ device_set_desc(dev, "FMan Port");
+ return (BUS_PROBE_DEFAULT);
+ } else {
+ return (ENXIO);
+ }
+}
+
+static device_method_t fman_port_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, fman_port_dev_probe),
+ DEVMETHOD(device_attach, bus_generic_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ DEVMETHOD_END
+};
+
+driver_t fman_port_driver = {
+ .name = "fman_port",
+ .methods = fman_port_methods
+};
+
+static devclass_t fman_port_devclass;
+
+DRIVER_MODULE(fman_port, fman_mac, fman_port_driver, fman_port_devclass, 0, 0);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.h b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
new file mode 100644
index 00000000..56c1d023
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FMAN_PORT_H
+#define __FMAN_PORT_H
+
+#include "fman.h"
+
+/* FM Port API
+ * The FM uses a general module called "port" to represent either a Tx port
+ * (MAC) or an Rx port (MAC).
+ * The number of ports in an FM varies between SoCs.
+ * The SW driver manages these ports as sub-modules of the FM, i.e. after an
+ * FM is initialized, its ports may be initialized and operated upon.
+ * The port is initialized aware of its type, but other functions on a port
+ * may be indifferent to its type. When necessary, the driver verifies
+ * coherence and returns an error if applicable.
+ * On initialization, the user specifies the port type and its index
+ * (relative to the port's type) - always starting at 0.
+ */
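+
+/* Typical call sequence (a sketch; error handling omitted):
+ *
+ *	err = fman_port_config(port, &params);
+ *	err = fman_port_cfg_buf_prefix_content(port, &prefix); (optional)
+ *	err = fman_port_init(port);
+ *	err = fman_port_enable(port);
+ */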
+
+/* FM Frame error */
+/* Frame Descriptor errors */
+/* Not for Rx-Port! Unsupported Format */
+#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT
+/* Not for Rx-Port! Length Error */
+#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH
+/* DMA Data error */
+#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA
+/* non Frame-Manager error; probably comes from a SEC chained to the FM */
+#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM
+/* IPR error */
+#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+/* IPR non-consistent-sp */
+#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \
+ ~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity
+ * error (SGMII and TBI modes), FIFO parity error.
+ * PHY Sequence error, PHY error control character detected.
+ */
+#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE
+/* indicates a classifier "drop" operation */
+#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD
+/* Extract Out of Frame */
+#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION
+/* No Scheme Selected */
+#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME
+/* Keysize Overflow */
+#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW
+/* Frame color is red */
+#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED
+/* Frame color is yellow */
+#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW
+/* Parser Time out Exceed */
+#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT
+/* Invalid Soft Parser instruction */
+#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT
+/* Header error was identified during parsing */
+#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
+/* Frame parsed beyond the first 256 bytes */
+#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
+/* FPM Frame Processing Timeout Exceeded */
+#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
+
+struct fman_port;
+
+/* A structure for additional Rx port parameters */
+struct fman_port_rx_params {
+ u32 err_fqid; /* Error Queue Id. */
+ u32 dflt_fqid; /* Default Queue Id. */
+ /* Which external buffer pools are used
+ * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
+ */
+ struct fman_ext_pools ext_buf_pools;
+};
+
+/* A structure for additional non-Rx port parameters */
+struct fman_port_non_rx_params {
+ /* Error Queue Id. */
+ u32 err_fqid;
+ /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
+ * for processed frames. For OP port - default Rx queue.
+ */
+ u32 dflt_fqid;
+};
+
+/* A union for additional parameters depending on port type */
+union fman_port_specific_params {
+ /* Rx port parameters structure */
+ struct fman_port_rx_params rx_params;
+ /* Non-Rx port parameters structure */
+ struct fman_port_non_rx_params non_rx_params;
+};
+
+/* A structure representing FM port initialization parameters */
+struct fman_port_params {
+	/* A handle to the FM device this port belongs to */
+	void *fm;
+	/* Additional parameters depending on port type */
+	union fman_port_specific_params specific_params;
+};
+
+/**
+ * fman_port_config
+ * @port: Pointer to the port structure
+ * @params: Pointer to data structure of parameters
+ *
+ * Configures the FM PORT module descriptor. The configured port structure
+ * must be passed as the first parameter to all other FM PORT function
+ * calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_config(struct fman_port *port, struct fman_port_params *params);
+
+/**
+ * fman_port_init
+ * @port: A pointer to a FM Port module.
+ *
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_init(struct fman_port *port);
+
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
+ *			   the structure of the buffer.
+ *			   Out parameter:
+ *			   Start margin - offset of data from
+ *			   start of external buffer.
+ *
+ * Defines the structure, size and content of the application buffer.
+ * In the buffer prefix, the FM first saves 'priv_data_size' bytes of
+ * private data and then, depending on 'pass_prs_result' and
+ * 'pass_time_stamp', copies the parse result and the timestamp (in this
+ * order) ahead of the packet data itself.
+ * Calling this routine changes the buffer margins definitions in the
+ * internal driver data base from its default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT]
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP]
+ * May be used for all ports.
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content);
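+
+/* Resulting buffer prefix layout (a sketch; the actual offsets are
+ * computed by the driver and depend on the configuration):
+ *
+ *	0                  private data ('priv_data_size' bytes)
+ *	prs_result_offset  parse results, if 'pass_prs_result' is set
+ *	time_stamp_offset  timestamp, if 'pass_time_stamp' is set
+ *	data_offset        frame data, aligned to 'data_align'
+ */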
+
+/**
+ * fman_port_disable
+ * @port: A pointer to a FM Port module.
+ *
+ * Gracefully disables an FM port: the port stops accepting new tasks and
+ * the routine returns once all tasks associated with the port have
+ * terminated.
+ *
+ * This is a blocking routine; it returns after the port is gracefully
+ * stopped, i.e. the port will not accept new frames, but it will finish
+ * all frames or tasks that had already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_disable(struct fman_port *port);
+
+/**
+ * fman_port_enable
+ * @port: A pointer to a FM Port module.
+ *
+ * A runtime routine provided to allow disable/enable of port.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_enable(struct fman_port *port);
+
+/**
+ * fman_port_get_qman_channel_id
+ * @port: Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_port_get_qman_channel_id(struct fman_port *port);
+
+/**
+ * fman_port_bind
+ * @dev: FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
+struct fman_port *fman_port_bind(struct device *dev);
+
+#endif /* __FMAN_PORT_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
new file mode 100644
index 00000000..2fcfa6c0
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -0,0 +1,171 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_sp.h"
+#include "fman.h"
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array)
+{
+ u16 buf_size = 0;
+ int i = 0, j = 0, k = 0;
+
+	/* First we copy the external buffer pools information
+ * to an ordered local array
+ */
+ for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
+ /* get pool size */
+ buf_size = fm_ext_pools->ext_buf_pool[i].size;
+
+ /* keep sizes in an array according to poolId
+ * for direct access
+ */
+ sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
+
+ /* save poolId in an ordered array according to size */
+ for (j = 0; j <= i; j++) {
+ /* this is the next free place in the array */
+ if (j == i)
+ ordered_array[i] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ else {
+ /* find the right place for this poolId */
+ if (buf_size < sizes_array[ordered_array[j]]) {
+ /* move the pool_ids one place ahead
+ * to make room for this poolId
+ */
+ for (k = i; k > j; k--)
+ ordered_array[k] =
+ ordered_array[k - 1];
+
+ /* now k==j, this is the place for
+ * the new size
+ */
+ ordered_array[k] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ break;
+ }
+ }
+ }
+ }
+}
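+
+/* Example (illustrative): pools {id 0, size 1024}, {id 1, size 256} and
+ * {id 2, size 512} yield ordered_array = {1, 2, 0} (pool ids by ascending
+ * buffer size) and sizes_array = {1024, 256, 512} (sizes by pool id).
+ */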
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
+ int_context_data_copy,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets *buffer_offsets,
+ u8 *internal_buf_offset)
+{
+ u32 tmp;
+
+	/* Align start of internal context data to a 16-byte boundary */
+ int_context_data_copy->ext_buf_offset = (u16)
+ ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
+ ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
+ ~(u16)(OFFSET_UNITS - 1)) :
+ buffer_prefix_content->priv_data_size);
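+	/* e.g. priv_data_size = 20 rounds up to ext_buf_offset = 32 */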
+
+ /* Translate margin and int_context params to FM parameters */
+ /* Initialize with illegal value. Later we'll set legal values. */
+ buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
+
+	/* Internally the driver supports the following options:
+	 * 1. prsResult/timestamp/hashResult selection (in fact 8
+	 *    combinations, but for simplicity we relate to them as one).
+	 * 2. All IC context (from AD), not including debug.
+	 */
+
+ /* This case covers the options under 1 */
+ /* Copy size must be in 16-byte granularity. */
+ int_context_data_copy->size =
+ (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 16 : 0));
+
+	/* Align start of internal context data to a 16-byte boundary */
+ int_context_data_copy->int_context_offset =
+ (u8)(buffer_prefix_content->pass_prs_result ? 32 :
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 64 : 0));
+
+ if (buffer_prefix_content->pass_prs_result)
+ buffer_offsets->prs_result_offset =
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_time_stamp)
+ buffer_offsets->time_stamp_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result)) :
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_hash_result)
+		/* Whether or not a timestamp is requested, the hash
+		 * result is placed 8 bytes after the timestamp slot
+		 */
+ buffer_offsets->hash_result_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result) + 8) :
+ int_context_data_copy->ext_buf_offset + 8;
+
+ if (int_context_data_copy->size)
+ buf_margins->start_margins =
+ (u16)(int_context_data_copy->ext_buf_offset +
+ int_context_data_copy->size);
+ else
+		/* No internal context passing, so the start margin is
+		 * immediately after the private info
+		 */
+ buf_margins->start_margins =
+ buffer_prefix_content->priv_data_size;
+
+ /* align data start */
+ tmp = (u32)(buf_margins->start_margins %
+ buffer_prefix_content->data_align);
+ if (tmp)
+ buf_margins->start_margins +=
+ (buffer_prefix_content->data_align - tmp);
+ buffer_offsets->data_offset = buf_margins->start_margins;
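+	/* e.g. start_margins = 48 with data_align = 64 yields
+	 * data_offset = 64
+	 */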
+
+ return 0;
+}
+
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.h b/linux/drivers/net/ethernet/freescale/fman/fman_sp.h
new file mode 100644
index 00000000..820b7f63
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_SP_H
+#define __FM_SP_H
+
+#include "fman.h"
+#include <linux/types.h>
+
+#define ILLEGAL_BASE (~0)
+
+/* defaults */
+#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64
+
+/* Registers bit fields */
+#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
+#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
+#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
+#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
+#define FMAN_SP_SG_DISABLE 0x80000000
+
+/* shifts */
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
+#define FMAN_SP_IC_TO_EXT_SHIFT 16
+#define FMAN_SP_IC_FROM_INT_SHIFT 8
+
+/* structure for defining internal context copying */
+struct fman_sp_int_context_data_copy {
+	/* Offset in external buffer to which internal
+ * context is copied to (Rx) or taken from (Tx, Op).
+ */
+ u16 ext_buf_offset;
+ /* Offset within internal context to copy from
+ * (Rx) or to copy to (Tx, Op).
+ */
+ u8 int_context_offset;
+ /* Internal offset size to be copied */
+ u16 size;
+};
+
+/* struct for defining external buffer margins */
+struct fman_sp_buf_margins {
+ /* Number of bytes to be left at the beginning
+ * of the external buffer (must be divisible by 16)
+ */
+ u16 start_margins;
+ /* number of bytes to be left at the end
+	 * of the external buffer (must be divisible by 16)
+ */
+ u16 end_margins;
+};
+
+struct fman_sp_buffer_offsets {
+ u32 data_offset;
+ u32 prs_result_offset;
+ u32 time_stamp_offset;
+ u32 hash_result_offset;
+};
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
+ *int_context_data_copy,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets
+ *buffer_offsets,
+ u8 *internal_buf_offset);
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array);
+
+#endif /* __FM_SP_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
new file mode 100644
index 00000000..5b22a044
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -0,0 +1,853 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "crc_mac_addr_ext.h"
+
+#include "fman_tgec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+
+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
+#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
+
+/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
+#define CMD_CFG_NO_LEN_CHK 0x00020000
+#define CMD_CFG_SEND_IDLE 0x00010000
+#define CMD_CFG_RX_ER_DISC 0x00004000
+#define CMD_CFG_CMD_FRM_EN 0x00002000
+#define CMD_CFG_LOOPBACK_EN 0x00000400
+#define CMD_CFG_TX_ADDR_INS 0x00000200
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+#define CMD_CFG_PAUSE_FWD 0x00000080
+#define CMF_CFG_CRC_FWD 0x00000040
+#define CMD_CFG_PROMIS_EN 0x00000010
+#define CMD_CFG_WAN_MODE 0x00000008
+#define CMD_CFG_RX_EN 0x00000002
+#define CMD_CFG_TX_EN 0x00000001
+
+/* Interrupt Mask Register (IMASK) */
+#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
+#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
+#define TGEC_IMASK_REM_FAULT 0x00004000
+#define TGEC_IMASK_LOC_FAULT 0x00002000
+#define TGEC_IMASK_TX_ECC_ER 0x00001000
+#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
+#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
+#define TGEC_IMASK_TX_ER 0x00000200
+#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
+#define TGEC_IMASK_RX_ECC_ER 0x00000080
+#define TGEC_IMASK_RX_JAB_FRM 0x00000040
+#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
+#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
+#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
+#define TGEC_IMASK_RX_LEN_ER 0x00000004
+#define TGEC_IMASK_RX_CRC_ER 0x00000002
+#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
+
+/* Hashtable Control Register (HASHTABLE_CTRL) */
+#define TGEC_HASH_MCAST_SHIFT 23
+#define TGEC_HASH_MCAST_EN 0x00000200
+#define TGEC_HASH_ADR_MSK 0x000001ff
+
+#define DEFAULT_TX_IPG_LENGTH 12
+#define DEFAULT_MAX_FRAME_LENGTH 0x600
+#define DEFAULT_PAUSE_QUANT 0xf000
+
+#define TGEC_DEFAULT_EXCEPTIONS \
+ ((u32)((TGEC_IMASK_MDIO_SCAN_EVENT) |\
+ (TGEC_IMASK_REM_FAULT) |\
+ (TGEC_IMASK_LOC_FAULT) |\
+ (TGEC_IMASK_TX_ECC_ER) |\
+ (TGEC_IMASK_TX_FIFO_UNFL) |\
+ (TGEC_IMASK_TX_FIFO_OVFL) |\
+ (TGEC_IMASK_TX_ER) |\
+ (TGEC_IMASK_RX_FIFO_OVFL) |\
+ (TGEC_IMASK_RX_ECC_ER) |\
+ (TGEC_IMASK_RX_JAB_FRM) |\
+ (TGEC_IMASK_RX_OVRSZ_FRM) |\
+ (TGEC_IMASK_RX_RUNT_FRM) |\
+ (TGEC_IMASK_RX_FRAG_FRM) |\
+ (TGEC_IMASK_RX_CRC_ER) |\
+ (TGEC_IMASK_RX_ALIGN_ER)))
+
+/* number of pattern match registers (entries) */
+#define TGEC_NUM_OF_PADDRS 1
+
+/* Group address bit indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+
+/* Hash table size (9-bit hash index -> 512 entries) */
+#define TGEC_HASH_TABLE_SIZE 512
+
+/* tGEC memory map */
+struct tgec_regs {
+ u32 tgec_id; /* 0x000 Controller ID */
+ u32 reserved001[1]; /* 0x004 */
+ u32 command_config; /* 0x008 Control and configuration */
+ u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
+ u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
+ u32 maxfrm; /* 0x014 Maximum frame length */
+ u32 pause_quant; /* 0x018 Pause quanta */
+ u32 rx_fifo_sections; /* 0x01c */
+ u32 tx_fifo_sections; /* 0x020 */
+ u32 rx_fifo_almost_f_e; /* 0x024 */
+ u32 tx_fifo_almost_f_e; /* 0x028 */
+ u32 hashtable_ctrl; /* 0x02c Hash table control */
+ u32 mdio_cfg_status; /* 0x030 */
+ u32 mdio_command; /* 0x034 */
+ u32 mdio_data; /* 0x038 */
+ u32 mdio_regaddr; /* 0x03c */
+ u32 status; /* 0x040 */
+ u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
+ u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
+ u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
+ u32 rx_fifo_ptr_rd; /* 0x050 */
+ u32 rx_fifo_ptr_wr; /* 0x054 */
+ u32 tx_fifo_ptr_rd; /* 0x058 */
+ u32 tx_fifo_ptr_wr; /* 0x05c */
+ u32 imask; /* 0x060 Interrupt mask */
+ u32 ievent; /* 0x064 Interrupt event */
+ u32 udp_port; /* 0x068 Defines a UDP Port number */
+ u32 type_1588v2; /* 0x06c Type field for 1588v2 */
+ u32 reserved070[4]; /* 0x070 */
+ /* 10Ge Statistics Counter */
+ u32 tfrm_u; /* 80 aFramesTransmittedOK */
+ u32 tfrm_l; /* 84 aFramesTransmittedOK */
+ u32 rfrm_u; /* 88 aFramesReceivedOK */
+ u32 rfrm_l; /* 8c aFramesReceivedOK */
+ u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */
+ u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */
+ u32 raln_u; /* 98 aAlignmentErrors */
+ u32 raln_l; /* 9c aAlignmentErrors */
+ u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
+ u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
+ u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
+ u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
+ u32 rlong_u; /* B0 aFrameTooLongErrors */
+ u32 rlong_l; /* B4 aFrameTooLongErrors */
+ u32 rflr_u; /* B8 aInRangeLengthErrors */
+ u32 rflr_l; /* Bc aInRangeLengthErrors */
+ u32 tvlan_u; /* C0 VLANTransmittedOK */
+ u32 tvlan_l; /* C4 VLANTransmittedOK */
+ u32 rvlan_u; /* C8 VLANReceivedOK */
+ u32 rvlan_l; /* Cc VLANReceivedOK */
+ u32 toct_u; /* D0 if_out_octets */
+ u32 toct_l; /* D4 if_out_octets */
+ u32 roct_u; /* D8 if_in_octets */
+ u32 roct_l; /* Dc if_in_octets */
+ u32 ruca_u; /* E0 if_in_ucast_pkts */
+ u32 ruca_l; /* E4 if_in_ucast_pkts */
+ u32 rmca_u; /* E8 ifInMulticastPkts */
+ u32 rmca_l; /* Ec ifInMulticastPkts */
+ u32 rbca_u; /* F0 ifInBroadcastPkts */
+ u32 rbca_l; /* F4 ifInBroadcastPkts */
+ u32 terr_u; /* F8 if_out_errors */
+ u32 terr_l; /* Fc if_out_errors */
+ u32 reserved100[2]; /* 100-108 */
+ u32 tuca_u; /* 108 if_out_ucast_pkts */
+ u32 tuca_l; /* 10c if_out_ucast_pkts */
+ u32 tmca_u; /* 110 ifOutMulticastPkts */
+ u32 tmca_l; /* 114 ifOutMulticastPkts */
+ u32 tbca_u; /* 118 ifOutBroadcastPkts */
+ u32 tbca_l; /* 11c ifOutBroadcastPkts */
+ u32 rdrp_u; /* 120 etherStatsDropEvents */
+ u32 rdrp_l; /* 124 etherStatsDropEvents */
+ u32 reoct_u; /* 128 etherStatsOctets */
+ u32 reoct_l; /* 12c etherStatsOctets */
+ u32 rpkt_u; /* 130 etherStatsPkts */
+ u32 rpkt_l; /* 134 etherStatsPkts */
+ u32 trund_u; /* 138 etherStatsUndersizePkts */
+ u32 trund_l; /* 13c etherStatsUndersizePkts */
+ u32 r64_u; /* 140 etherStatsPkts64Octets */
+ u32 r64_l; /* 144 etherStatsPkts64Octets */
+ u32 r127_u; /* 148 etherStatsPkts65to127Octets */
+ u32 r127_l; /* 14c etherStatsPkts65to127Octets */
+ u32 r255_u; /* 150 etherStatsPkts128to255Octets */
+ u32 r255_l; /* 154 etherStatsPkts128to255Octets */
+ u32 r511_u; /* 158 etherStatsPkts256to511Octets */
+ u32 r511_l; /* 15c etherStatsPkts256to511Octets */
+ u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */
+ u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */
+ u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */
+ u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */
+ u32 r1519x_u; /* 170 etherStatsPkts1519toX */
+ u32 r1519x_l; /* 174 etherStatsPkts1519toX */
+ u32 trovr_u; /* 178 etherStatsOversizePkts */
+ u32 trovr_l; /* 17c etherStatsOversizePkts */
+ u32 trjbr_u; /* 180 etherStatsJabbers */
+ u32 trjbr_l; /* 184 etherStatsJabbers */
+ u32 trfrg_u; /* 188 etherStatsFragments */
+ u32 trfrg_l; /* 18C etherStatsFragments */
+ u32 rerr_u; /* 190 if_in_errors */
+ u32 rerr_l; /* 194 if_in_errors */
+};
+
+struct tgec_cfg {
+ bool rx_error_discard;
+ bool pause_ignore;
+ bool pause_forward_enable;
+ bool no_length_check_enable;
+ bool cmd_frame_enable;
+ bool send_idle_enable;
+ bool wan_mode_enable;
+ bool promiscuous_mode_enable;
+ bool tx_addr_ins_enable;
+ bool loopback_enable;
+ bool time_stamp_enable;
+ u16 max_frame_length;
+ u16 pause_quant;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to the memory mapped registers. */
+ struct tgec_regs __iomem *regs;
+	/* MAC address of device */
+ u64 addr;
+ u16 max_speed;
+ void *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ struct tgec_cfg *cfg;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+};
+
+static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+ iowrite32be(tmp0, &regs->mac_addr_0);
+ iowrite32be(tmp1, &regs->mac_addr_1);
+}
+
+static void set_dflts(struct tgec_cfg *cfg)
+{
+ cfg->wan_mode_enable = false;
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_forward_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_addr_ins_enable = false;
+ cfg->loopback_enable = false;
+ cfg->cmd_frame_enable = false;
+ cfg->rx_error_discard = false;
+ cfg->send_idle_enable = false;
+ cfg->no_length_check_enable = true;
+ cfg->time_stamp_enable = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
+ cfg->pause_quant = DEFAULT_PAUSE_QUANT;
+}
+
+static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
+ u32 exception_mask)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = CMF_CFG_CRC_FWD;
+ if (cfg->wan_mode_enable)
+ tmp |= CMD_CFG_WAN_MODE;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_forward_enable)
+ tmp |= CMD_CFG_PAUSE_FWD;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ if (cfg->tx_addr_ins_enable)
+ tmp |= CMD_CFG_TX_ADDR_INS;
+ if (cfg->loopback_enable)
+ tmp |= CMD_CFG_LOOPBACK_EN;
+ if (cfg->cmd_frame_enable)
+ tmp |= CMD_CFG_CMD_FRM_EN;
+ if (cfg->rx_error_discard)
+ tmp |= CMD_CFG_RX_ER_DISC;
+ if (cfg->send_idle_enable)
+ tmp |= CMD_CFG_SEND_IDLE;
+ if (cfg->no_length_check_enable)
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ if (cfg->time_stamp_enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+ /* Pause Time */
+ iowrite32be(cfg->pause_quant, &regs->pause_quant);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ iowrite32be(ioread32be(&regs->imask) | exception_mask, &regs->imask);
+
+ return 0;
+}
+
+static int check_init_parameters(struct fman_mac *tgec)
+{
+ if (tgec->max_speed < SPEED_10000) {
+		pr_err("10G MAC driver supports only 10G speed\n");
+ return -EINVAL;
+ }
+ if (tgec->addr == 0) {
+		pr_err("Ethernet 10G MAC must have a valid MAC address\n");
+ return -EINVAL;
+ }
+ if (!tgec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!tgec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+ if (!tgec->cfg->no_length_check_enable) {
+		pr_warn("Length check must stay disabled (FM_LEN_CHECK_ERRATA_FMAN_SW002)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
+ bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
+ break;
+ case FM_MAC_EX_10G_MDIO_CMD_CMPL:
+ bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
+ break;
+ case FM_MAC_EX_10G_REM_FAULT:
+ bit_mask = TGEC_IMASK_REM_FAULT;
+ break;
+ case FM_MAC_EX_10G_LOC_FAULT:
+ bit_mask = TGEC_IMASK_LOC_FAULT;
+ break;
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = TGEC_IMASK_TX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_UNFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_TX_ER:
+ bit_mask = TGEC_IMASK_TX_ER;
+ break;
+ case FM_MAC_EX_10G_RX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = TGEC_IMASK_RX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_JAB_FRM:
+ bit_mask = TGEC_IMASK_RX_JAB_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_OVRSZ_FRM:
+ bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_RUNT_FRM:
+ bit_mask = TGEC_IMASK_RX_RUNT_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_FRAG_FRM:
+ bit_mask = TGEC_IMASK_RX_FRAG_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_LEN_ER:
+ bit_mask = TGEC_IMASK_RX_LEN_ER;
+ break;
+ case FM_MAC_EX_10G_RX_CRC_ER:
+ bit_mask = TGEC_IMASK_RX_CRC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ALIGN_ER:
+ bit_mask = TGEC_IMASK_RX_ALIGN_ER;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+ u32 crc;
+
+ /* CRC calculation */
+ GET_MAC_ADDR_CRC(eth_addr, crc);
+
+ crc = bitrev32(crc);
+
+ return crc;
+}
+
+static void tgec_err_exception(void *handle)
+{
+ struct fman_mac *tgec = (struct fman_mac *)handle;
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ ~(TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_MDIO_CMD_CMPL);
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & TGEC_IMASK_REM_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
+ if (event & TGEC_IMASK_LOC_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
+ if (event & TGEC_IMASK_TX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & TGEC_IMASK_TX_FIFO_UNFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
+ if (event & TGEC_IMASK_TX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
+ if (event & TGEC_IMASK_TX_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
+ if (event & TGEC_IMASK_RX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
+ if (event & TGEC_IMASK_RX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+ if (event & TGEC_IMASK_RX_JAB_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
+ if (event & TGEC_IMASK_RX_OVRSZ_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
+ if (event & TGEC_IMASK_RX_RUNT_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
+ if (event & TGEC_IMASK_RX_FRAG_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
+ if (event & TGEC_IMASK_RX_LEN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
+ if (event & TGEC_IMASK_RX_CRC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
+ if (event & TGEC_IMASK_RX_ALIGN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
+}
+
+static void free_init_resources(struct fman_mac *tgec)
+{
+ fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ /* release the driver's group hash table */
+ free_hash_table(tgec->multicast_addr_hash);
+ tgec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(tgec->unicast_addr_hash);
+ tgec->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct tgec_cfg *cfg)
+{
+	/* The cfg structure is freed once tgec_init() completes, so a NULL
+	 * pointer means the tGEC driver parameters were already consumed
+	 */
+	return !cfg;
+}
+
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp |= CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp |= CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (mode & COMM_MODE_RX)
+ tmp &= ~CMD_CFG_RX_EN;
+ if (mode & COMM_MODE_TX)
+ tmp &= ~CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
+{
+ if (is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tgec->cfg->max_frame_length = new_val;
+
+ return 0;
+}
+
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
+ u16 pause_time, u16 __maybe_unused thresh_time)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ iowrite32be((u32)pause_time, &regs->pause_quant);
+
+ return 0;
+}
+
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (!en)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+{
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
+ set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+
+ return 0;
+}
+
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 crc, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+ pr_err("Unicast Address\n");
+ return -EINVAL;
+ }
+ /* CRC calculation */
+ crc = get_mac_addr_hash_code(addr);
+
+	/* Take the 9 most-significant bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &tgec->multicast_addr_hash->lsts[hash]);
+ iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 crc, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ((*(u64 *)eth_addr) >> 16);
+
+ /* CRC calculation */
+ crc = get_mac_addr_hash_code(addr);
+	/* Take the 9 most-significant bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
+ iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
+ &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ *mac_version = ioread32be(&regs->tgec_id);
+
+ return 0;
+}
+
+int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ tgec->exceptions |= bit_mask;
+ else
+ tgec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
+
+ return 0;
+}
+
+int tgec_init(struct fman_mac *tgec)
+{
+ struct tgec_cfg *cfg;
+ enet_addr_t eth_addr;
+ int err;
+
+ if (is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(tgec);
+ if (err)
+ return err;
+
+ cfg = tgec->cfg;
+
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (u8 *)eth_addr);
+
+ /* interrupts */
+ /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
+ if (tgec->fm_rev_info.major <= 2)
+ tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT);
+
+ err = init(tgec->regs, cfg, tgec->exceptions);
+ if (err) {
+ free_init_resources(tgec);
+ pr_err("TGEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
+ cfg->max_frame_length);
+ if (err) {
+ pr_err("Setting max frame length FAILED\n");
+ free_init_resources(tgec);
+ return -EINVAL;
+ }
+
+ /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
+ if (tgec->fm_rev_info.major == 2) {
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+		/* restore the default Tx IPG length */
+		tmp = (ioread32be(&regs->tx_ipg_len) &
+		       ~TGEC_TX_IPG_LENGTH_MASK) | DEFAULT_TX_IPG_LENGTH;
+
+ iowrite32be(tmp, &regs->tx_ipg_len);
+ }
+
+ tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->multicast_addr_hash) {
+ free_init_resources(tgec);
+		pr_err("multicast hash table allocation FAILED\n");
+ return -ENOMEM;
+ }
+
+ tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->unicast_addr_hash) {
+ free_init_resources(tgec);
+		pr_err("unicast hash table allocation FAILED\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
+
+ kfree(cfg);
+ tgec->cfg = NULL;
+
+ return 0;
+}
+
+int tgec_free(struct fman_mac *tgec)
+{
+	free_init_resources(tgec);
+
+	/* free the parameter structure, if tgec_init() has not already
+	 * consumed it, and then the MAC object itself
+	 */
+	kfree(tgec->cfg);
+	kfree(tgec);
+
+	return 0;
+}
+
+struct fman_mac *tgec_config(struct fman_mac_params *params)
+{
+ struct fman_mac *tgec;
+ struct tgec_cfg *cfg;
+ void __iomem *base_addr;
+
+ base_addr = params->base_addr;
+	/* allocate memory for the tGEC data structure. */
+ tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
+ if (!tgec)
+ return NULL;
+
+ /* allocate memory for the 10G MAC driver parameters data structure. */
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ tgec_free(tgec);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ tgec->cfg = cfg;
+
+ set_dflts(cfg);
+
+ tgec->regs = (struct tgec_regs __iomem *)(base_addr);
+ tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->max_speed = params->max_speed;
+ tgec->mac_id = params->mac_id;
+ tgec->exceptions = TGEC_DEFAULT_EXCEPTIONS;
+ tgec->exception_cb = params->exception_cb;
+ tgec->event_cb = params->event_cb;
+ tgec->dev_id = params->dev_id;
+ tgec->fm = params->fm;
+
+ /* Save FMan revision */
+ fman_get_revision(tgec->fm, &tgec->fm_rev_info);
+
+ return tgec;
+}
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h
new file mode 100644
index 00000000..514bba9f
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TGEC_H
+#define __TGEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *tgec_config(struct fman_mac_params *params);
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_init(struct fman_mac *tgec);
+int tgec_free(struct fman_mac *tgec);
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
+ u16 pause_time, u16 thresh_time);
+int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable);
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
+
+#endif /* __TGEC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.c b/linux/drivers/net/ethernet/freescale/fman/mac.c
new file mode 100644
index 00000000..4b26211e
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.c
@@ -0,0 +1,1180 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef __rtems__
+#include <sys/types.h>
+#include <net/if_dl.h>
+#include <bsp/fdt.h>
+#include "../../../../../../rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h"
+#endif /* __rtems__ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/etherdevice.h>
+
+#include "mac.h"
+#include "fman_mac.h"
+#include "fman_dtsec.h"
+#include "fman_tgec.h"
+#include "fman_memac.h"
+
+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
+
+MODULE_DESCRIPTION(MAC_DESCRIPTION);
+
+struct mac_priv_s {
+ struct device *dev;
+ void __iomem *vaddr;
+ u8 cell_index;
+ phy_interface_t phy_if;
+ struct fman *fman;
+ struct device_node *phy_node;
+ /* List of multicast addresses */
+ struct list_head mc_addr_list;
+ struct platform_device *eth_dev;
+ struct fixed_phy_status *fixed_link;
+ u16 speed;
+ u16 max_speed;
+
+ int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
+ int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
+};
+
+struct mac_address {
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
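+/* Shared callback registered as both the exception and the event handler
+ * for every FMan MAC type.
+ */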
+static void mac_exception(void *_mac_dev, enum fman_mac_exceptions ex)
+{
+ struct mac_device *mac_dev;
+ struct mac_priv_s *priv;
+
+ mac_dev = (struct mac_device *)_mac_dev;
+ priv = mac_dev->priv;
+
+	if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
+		/* mask further RX FIFO overflow exceptions after the first */
+		mac_dev->set_exception(mac_dev->fman_mac,
+				       FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+		dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ }
+
+#ifndef __rtems__
+ dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ __func__, ex);
+#endif /* __rtems__ */
+}
+
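+/* Fill a fman_mac_params structure from the probed mac_device; on Linux the
+ * MAC register window is ioremapped here, on RTEMS the mapping created at
+ * probe time is reused.
+ */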
+static void set_fman_mac_params(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
+{
+ struct mac_priv_s *priv = mac_dev->priv;
+
+#ifndef __rtems__
+ params->base_addr = (typeof(params->base_addr))
+ devm_ioremap(priv->dev, mac_dev->res->start, 0x2000);
+#else /* __rtems__ */
+ params->base_addr = priv->vaddr;
+#endif /* __rtems__ */
+ memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
+ params->max_speed = priv->max_speed;
+ params->phy_if = priv->phy_if;
+ params->basex_if = false;
+ params->mac_id = priv->cell_index;
+ params->fm = (void *)priv->fman;
+ params->exception_cb = mac_exception;
+ params->event_cb = mac_exception;
+ params->dev_id = mac_dev;
+}
+
+static int tgec_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+ u32 version;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ mac_dev->fman_mac = tgec_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = tgec_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = tgec_get_version(mac_dev->fman_mac, &version);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int dtsec_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+ u32 version;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ mac_dev->fman_mac = dtsec_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = dtsec_get_version(mac_dev->fman_mac, &version);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int memac_initialization(struct mac_device *mac_dev)
+{
+ int err;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+
+ priv = mac_dev->priv;
+
+ set_fman_mac_params(mac_dev, &params);
+
+ if (priv->max_speed == SPEED_10000)
+ params.phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(&params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+#ifndef __rtems__
+ err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
+ if (err < 0)
+ goto _return_fm_mac_free;
+#endif /* __rtems__ */
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(priv->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
+
+static int start(struct mac_device *mac_dev)
+{
+ int err;
+#ifndef __rtems__
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+#endif /* __rtems__ */
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+#ifndef __rtems__
+ if (!err && phy_dev)
+ phy_start(phy_dev);
+#endif /* __rtems__ */
+
+ return err;
+}
+
+static int stop(struct mac_device *mac_dev)
+{
+ struct mac_priv_s *priv = mac_dev->priv;
+
+#ifndef __rtems__
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+#endif /* __rtems__ */
+
+ return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+}
+
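+/* Synchronize the MAC hash filter with the interface's multicast list:
+ * remove all previously programmed addresses, then add the current set.
+ */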
+static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+{
+ struct mac_priv_s *priv;
+ struct mac_address *old_addr, *tmp;
+#ifndef __rtems__
+ struct netdev_hw_addr *ha;
+#endif /* __rtems__ */
+ int err;
+ enet_addr_t *addr;
+#ifdef __rtems__
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+#endif /* __rtems__ */
+
+ priv = mac_dev->priv;
+
+ /* Clear previous address list */
+ list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
+ addr = (enet_addr_t *)old_addr->addr;
+ err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ list_del(&old_addr->list);
+ kfree(old_addr);
+ }
+
+ /* Add all the addresses from the new list */
+#ifndef __rtems__
+ netdev_for_each_mc_addr(ha, net_dev) {
+ addr = (enet_addr_t *)ha->addr;
+#else /* __rtems__ */
+ ifp = mac_dev->net_dev.ifp;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ addr = (enet_addr_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+#endif /* __rtems__ */
+ err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ ether_addr_copy(tmp->addr, *addr);
+ list_add(&tmp->list, &priv->mc_addr_list);
+ }
+#ifdef __rtems__
+ if_maddr_runlock(ifp);
+#endif /* __rtems__ */
+ return 0;
+}
+
+/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+ * active PAUSE settings. Otherwise, the new active settings should be reflected
+ * in FMan.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+ struct fman_mac *fman_mac = mac_dev->fman_mac;
+ int err = 0;
+
+ if (rx != mac_dev->rx_pause_active) {
+ err = mac_dev->set_rx_pause(fman_mac, rx);
+ if (likely(err == 0))
+ mac_dev->rx_pause_active = rx;
+ }
+
+ if (tx != mac_dev->tx_pause_active) {
+ u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE);
+
+ err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
+
+ if (likely(err == 0))
+ mac_dev->tx_pause_active = tx;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(fman_set_mac_active_pause);
+
+#ifndef __rtems__
+/* Determine the MAC RX/TX PAUSE frames settings based on PHY
+ * autonegotiation or values set by ethtool.
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+
+ *rx_pause = *tx_pause = false;
+
+ if (!phy_dev->duplex)
+ return;
+
+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+ * are those set by ethtool.
+ */
+ if (!mac_dev->autoneg_pause) {
+ *rx_pause = mac_dev->rx_pause_req;
+ *tx_pause = mac_dev->tx_pause_req;
+ return;
+ }
+
+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
+ * settings depend on the result of the link negotiation.
+ */
+
+ /* get local capabilities */
+ lcl_adv = 0;
+ if (phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phy_dev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ /* Calculate TX/RX settings based on local and peer advertised
+ * symmetric/asymmetric PAUSE capabilities.
+ */
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_RX)
+ *rx_pause = true;
+ if (flowctrl & FLOW_CTRL_TX)
+ *tx_pause = true;
+}
+EXPORT_SYMBOL(fman_get_pause_cfg);
+
+static void adjust_link_void(struct net_device *net_dev)
+{
+}
+
+static void adjust_link_dtsec(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_eth_data *eth_data = dev->platform_data;
+ struct mac_device *mac_dev = eth_data->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
+
+ return;
+ }
+
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+static void adjust_link_memac(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_eth_data *eth_data = dev->platform_data;
+ struct mac_device *mac_dev = eth_data->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev,
+ void (*adj_lnk)(struct net_device *))
+{
+ struct phy_device *phy_dev;
+ struct mac_priv_s *priv = mac_dev->priv;
+
+ phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
+ priv->phy_if);
+ if (!phy_dev) {
+ netdev_err(net_dev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= mac_dev->if_support;
+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
+ * as most of the PHY drivers do not enable them by default.
+ */
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
+}
+
+static int tgec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, adjust_link_void);
+}
+
+static int memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
+{
+ return init_phy(net_dev, mac_dev, &adjust_link_memac);
+}
+#else /* __rtems__ */
+static void dtsec_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+ dtsec_adjust_link(mac_dev->fman_mac, speed);
+}
+
+static void tgec_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+ /* VOID */
+}
+
+static void memac_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+ memac_adjust_link(mac_dev->fman_mac, speed);
+}
+#endif /* __rtems__ */
+
+static void setup_dtsec(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+ mac_dev->init_phy = dtsec_init_phy;
+#else /* __rtems__ */
+ mac_dev->adjust_link = dtsec_do_adjust_link;
+#endif /* __rtems__ */
+ mac_dev->init = dtsec_initialization;
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = dtsec_enable;
+ mac_dev->priv->disable = dtsec_disable;
+}
+
+static void setup_tgec(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+ mac_dev->init_phy = tgec_init_phy;
+#else /* __rtems__ */
+ mac_dev->adjust_link = tgec_do_adjust_link;
+#endif /* __rtems__ */
+ mac_dev->init = tgec_initialization;
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = tgec_enable;
+ mac_dev->priv->disable = tgec_disable;
+}
+
+static void setup_memac(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+ mac_dev->init_phy = memac_init_phy;
+#else /* __rtems__ */
+ mac_dev->adjust_link = memac_do_adjust_link;
+#endif /* __rtems__ */
+ mac_dev->init = memac_initialization;
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_multi = set_multi;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+
+ mac_dev->priv->enable = memac_enable;
+ mac_dev->priv->disable = memac_disable;
+}
+
+#define DTSEC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause \
+ | SUPPORTED_MII)
+
+#ifndef __rtems__
+static DEFINE_MUTEX(eth_lock);
+#endif /* __rtems__ */
+
+static const char phy_str[][11] = {
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii"
+};
+
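+/* Map a device tree "phy-connection-type" string to a phy_interface_t;
+ * unknown strings fall back to MII.
+ */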
+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
+ if (strcmp(str, phy_str[i]) == 0)
+ return (phy_interface_t)i;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static const u16 phy2speed[] = {
+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
+};
+
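+/* Create and register the "dpaa-ethernet" platform device that attaches the
+ * DPAA Ethernet driver to this MAC; on RTEMS the driver is probed directly.
+ */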
+static struct platform_device *dpaa_eth_add_device(int fman_id,
+ struct mac_device *mac_dev,
+ struct device_node *node)
+{
+ struct platform_device *pdev;
+ struct dpaa_eth_data data;
+ struct mac_priv_s *priv;
+#ifndef __rtems__
+ static int dpaa_eth_dev_cnt;
+#endif /* __rtems__ */
+ int ret;
+
+ priv = mac_dev->priv;
+
+ data.mac_dev = mac_dev;
+ data.mac_hw_id = priv->cell_index;
+ data.fman_hw_id = fman_id;
+ data.mac_node = node;
+
+#ifndef __rtems__
+ mutex_lock(&eth_lock);
+
+ pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto no_mem;
+ }
+
+ ret = platform_device_add_data(pdev, &data, sizeof(data));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ dpaa_eth_dev_cnt++;
+ mutex_unlock(&eth_lock);
+
+ return pdev;
+
+err:
+ platform_device_put(pdev);
+no_mem:
+ mutex_unlock(&eth_lock);
+
+ return ERR_PTR(ret);
+#else /* __rtems__ */
+ pdev = &mac_dev->pdev;
+ mac_dev->data = data;
+ pdev->platform_data = &mac_dev->data;
+ ret = dpaa_eth_priv_probe(pdev, mac_dev);
+ BSD_ASSERT(ret == 0);
+ return pdev;
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static const struct of_device_id mac_match[] = {
+ { .compatible = "fsl,fman-dtsec" },
+ { .compatible = "fsl,fman-xgec" },
+ { .compatible = "fsl,fman-memac" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+#endif /* __rtems__ */
+
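+/* Parse the MAC device tree node (registers, cell-index, MAC address, port
+ * handles, PHY connection), initialize the MAC and register the associated
+ * DPAA Ethernet device.
+ */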
+#ifndef __rtems__
+static int mac_probe(struct platform_device *_of_dev)
+#else /* __rtems__ */
+static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman *fman)
+#endif /* __rtems__ */
+{
+#ifdef __rtems__
+ struct fman_mac_softc *sc = device_get_softc(_dev);
+#endif /* __rtems__ */
+ int err, i, lenp;
+ struct device *dev;
+#ifndef __rtems__
+ struct device_node *mac_node, *dev_node, *tbi_node;
+#else /* __rtems__ */
+ struct device_node *mac_node;
+#endif /* __rtems__ */
+ struct mac_device *mac_dev;
+#ifndef __rtems__
+ struct platform_device *of_dev;
+#endif /* __rtems__ */
+ struct resource res;
+ struct mac_priv_s *priv;
+ const u8 *mac_addr;
+ const char *char_prop;
+ const u32 *u32_prop;
+ u8 fman_id;
+ const phandle *phandle_prop;
+
+ dev = &_of_dev->dev;
+ mac_node = dev->of_node;
+
+#ifndef __rtems__
+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
+ if (!mac_dev) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_kzalloc() = %d\n", err);
+ goto _return;
+ }
+#else /* __rtems__ */
+ mac_dev = &sc->mac_dev;
+#endif /* __rtems__ */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto _return;
+ }
+
+ /* Save private information */
+ mac_dev->priv = priv;
+ priv->dev = dev;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
+ setup_dtsec(mac_dev);
+ } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
+ setup_tgec(mac_dev);
+ } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ setup_memac(mac_dev);
+ } else {
+#ifndef __rtems__
+ dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
+ mac_node->full_name);
+#endif /* __rtems__ */
+ err = -EINVAL;
+ goto _return;
+ }
+
+ /* Register mac_dev */
+ dev_set_drvdata(dev, mac_dev);
+
+ INIT_LIST_HEAD(&priv->mc_addr_list);
+
+ /* Get the FM node */
+#ifndef __rtems__
+ dev_node = of_get_parent(mac_node);
+ if (!dev_node) {
+ dev_err(dev, "of_get_parent(%s) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ /* Get the FMan cell-index */
+ u32_prop = of_get_property(dev_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ WARN_ON(lenp != sizeof(u32));
+ fman_id = (u8)*u32_prop + 1; /* cell-index 0 => FMan id 1 */
+
+ priv->fman = fman_bind(&of_dev->dev);
+ if (!priv->fman) {
+ dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
+ err = -ENODEV;
+ goto _return_of_node_put;
+ }
+
+ of_node_put(dev_node);
+#else /* __rtems__ */
+ priv->fman = fman;
+ fman_id = (u8)device_get_unit(_dev);
+#endif /* __rtems__ */
+
+ /* Get the address of the memory mapped registers */
+ err = of_address_to_resource(mac_node, 0, &res);
+ if (err < 0) {
+ dev_err(dev, "of_address_to_resource(%s) = %d\n",
+ mac_node->full_name, err);
+ goto _return_dev_set_drvdata;
+ }
+
+#ifndef __rtems__
+ mac_dev->res = __devm_request_region(dev,
+ fman_get_mem_region(priv->fman),
+ res.start, res.end + 1 - res.start,
+ "mac");
+ if (!mac_dev->res) {
+		dev_err(dev, "__devm_request_region(mac) failed\n");
+ err = -EBUSY;
+ goto _return_dev_set_drvdata;
+ }
+
+ priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ mac_dev->res->end + 1 - mac_dev->res->start);
+ if (!priv->vaddr) {
+ dev_err(dev, "devm_ioremap() failed\n");
+ err = -EIO;
+ goto _return_dev_set_drvdata;
+ }
+#else /* __rtems__ */
+ priv->vaddr = devm_ioremap(dev, res.start, res.end + 1 - res.start);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+#define TBIPA_OFFSET 0x1c
+#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
+ tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (tbi_node) {
+ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
+
+ u32_prop = of_get_property(tbi_node, "reg", NULL);
+ if (u32_prop)
+ tbiaddr = *u32_prop;
+ out_be32(priv->vaddr + TBIPA_OFFSET, tbiaddr);
+ }
+#endif /* __rtems__ */
+
+ if (!of_device_is_available(mac_node)) {
+#ifndef __rtems__
+ devm_iounmap(dev, priv->vaddr);
+ __devm_release_region(dev, fman_get_mem_region(priv->fman),
+ res.start, res.end + 1 - res.start);
+ fman_unbind(priv->fman);
+ devm_kfree(dev, mac_dev);
+#endif /* __rtems__ */
+ dev_set_drvdata(dev, NULL);
+ return -ENODEV;
+ }
+
+ /* Get the cell-index */
+ u32_prop = of_get_property(mac_node, "cell-index", &lenp);
+ if (!u32_prop) {
+ dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ WARN_ON(lenp != sizeof(u32));
+ priv->cell_index = (u8)*u32_prop;
+
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(mac_node);
+ if (!mac_addr) {
+ dev_err(dev, "of_get_mac_address(%s) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+
+ /* Get the port handles */
+ phandle_prop = of_get_property(mac_node, "fsl,fman-ports", &lenp);
+ if (!phandle_prop) {
+ dev_err(dev, "of_get_property(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
+ err = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port));
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+#ifndef __rtems__
+ /* Find the port node */
+ dev_node = of_find_node_by_phandle(phandle_prop[i]);
+ if (!dev_node) {
+ dev_err(dev, "of_find_node_by_phandle() failed\n");
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+ if (!mac_dev->port[i]) {
+ dev_err(dev, "dev_get_drvdata(%s) failed\n",
+ dev_node->full_name);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ of_node_put(dev_node);
+#else /* __rtems__ */
+ int node;
+ struct fman_ivars *ivars;
+ device_t child;
+
+ node = fdt_node_offset_by_phandle(bsp_fdt_get(), phandle_prop[i]);
+ if (node < 0) {
+ goto _return_of_node_put;
+ }
+
+ ivars = kzalloc(sizeof(*ivars), GFP_KERNEL);
+ if (ivars == NULL) {
+ goto _return_of_node_put;
+ }
+
+ ivars->dn.offset = node;
+ ivars->of_dev.dev.of_node = &ivars->dn;
+ ivars->of_dev.dev.base = _of_dev->dev.base;
+ ivars->fman = fman;
+
+ child = device_add_child(_dev, "fman_port", -1);
+ if (child == NULL) {
+ kfree(ivars);
+ goto _return_of_node_put;
+ }
+
+ device_set_ivars(child, ivars);
+
+ err = device_probe_and_attach(child);
+ if (err != 0) {
+ kfree(ivars);
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port[i] = dev_get_drvdata(&ivars->of_dev.dev);
+#endif /* __rtems__ */
+ }
+
+ /* Get the PHY connection type */
+ char_prop = (const char *)of_get_property(mac_node,
+ "phy-connection-type", NULL);
+ if (!char_prop) {
+ dev_warn(dev,
+ "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+ mac_node->full_name);
+ priv->phy_if = PHY_INTERFACE_MODE_MII;
+ } else {
+ priv->phy_if = str2phy(char_prop);
+ }
+
+ priv->speed = phy2speed[priv->phy_if];
+ priv->max_speed = priv->speed;
+#ifndef __rtems__
+ mac_dev->if_support = DTSEC_SUPPORTED;
+ /* We don't support half-duplex in SGMII mode */
+ if (strstr(char_prop, "sgmii"))
+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_100baseT_Half);
+
+ /* Gigabit support (no half-duplex) */
+ if (priv->max_speed == 1000)
+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+ /* The 10G interface only supports one mode */
+ if (strstr(char_prop, "xgmii"))
+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
+#endif /* __rtems__ */
+
+ /* Get the rest of the PHY information */
+#ifndef __rtems__
+ priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+ if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_dev_set_drvdata;
+
+		priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
+					   GFP_KERNEL);
+		if (!priv->fixed_link) {
+			err = -ENOMEM;
+			goto _return_dev_set_drvdata;
+		}
+
+		priv->phy_node = of_node_get(mac_node);
+		phy = of_phy_find_device(priv->phy_node);
+		if (!phy) {
+			err = -EINVAL;
+			goto _return_dev_set_drvdata;
+		}
+
+ priv->fixed_link->link = phy->link;
+ priv->fixed_link->speed = phy->speed;
+ priv->fixed_link->duplex = phy->duplex;
+ priv->fixed_link->pause = phy->pause;
+ priv->fixed_link->asym_pause = phy->asym_pause;
+ }
+#endif /* __rtems__ */
+
+ err = mac_dev->init(mac_dev);
+ if (err < 0) {
+ dev_err(dev, "mac_dev->init() = %d\n", err);
+ of_node_put(priv->phy_node);
+ goto _return_dev_set_drvdata;
+ }
+
+ /* pause frame autonegotiation enabled */
+ mac_dev->autoneg_pause = true;
+
+	/* by initializing the values to false, force FMD to enable PAUSE frames
+ * on RX and TX
+ */
+ mac_dev->rx_pause_req = true;
+ mac_dev->tx_pause_req = true;
+ mac_dev->rx_pause_active = false;
+ mac_dev->tx_pause_active = false;
+ err = fman_set_mac_active_pause(mac_dev, true, true);
+ if (err < 0)
+ dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+
+ dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+ priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
+ if (IS_ERR(priv->eth_dev)) {
+ dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
+ priv->cell_index);
+ priv->eth_dev = NULL;
+ }
+
+ goto _return;
+
+_return_of_node_put:
+#ifndef __rtems__
+ of_node_put(dev_node);
+#endif /* __rtems__ */
+_return_dev_set_drvdata:
+ kfree(priv->fixed_link);
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+_return:
+ return err;
+}
+
+#ifndef __rtems__
+static struct platform_driver mac_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mac_match,
+ },
+ .probe = mac_probe,
+};
+
+builtin_platform_driver(mac_driver);
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+#include <rtems/bsd/local/miibus_if.h>
+
+static int
+fman_mac_dev_probe(device_t dev)
+{
+ struct fman_ivars *ivars = device_get_ivars(dev);
+ int err;
+
+ err = mac_probe(dev, &ivars->of_dev, ivars->fman);
+ if (err == 0) {
+ device_set_desc(dev, "FMan MAC");
+ return (BUS_PROBE_SPECIFIC);
+ } else {
+ return (ENXIO);
+ }
+}
+
+static device_method_t fman_mac_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, fman_mac_dev_probe),
+ DEVMETHOD(device_attach, fman_mac_dev_attach),
+ DEVMETHOD(device_detach, fman_mac_dev_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ /* MII Interface */
+ DEVMETHOD(miibus_readreg, fman_mac_miibus_read_reg),
+ DEVMETHOD(miibus_writereg, fman_mac_miibus_write_reg),
+ DEVMETHOD(miibus_statchg, fman_mac_miibus_statchg),
+
+ DEVMETHOD_END
+};
+
+driver_t fman_mac_driver = {
+ .name = "fman_mac",
+ .methods = fman_mac_methods,
+ .size = sizeof(struct fman_mac_softc)
+};
+
+static devclass_t fman_mac_devclass;
+
+DRIVER_MODULE(fman_mac, fman, fman_mac_driver, fman_mac_devclass, 0, 0);
+DRIVER_MODULE(miibus, fman_mac, miibus_driver, miibus_devclass, 0, 0);
+
+MODULE_DEPEND(fman_mac, ether, 1, 1, 1);
+MODULE_DEPEND(fman_mac, miibus, 1, 1, 1);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.h b/linux/drivers/net/ethernet/freescale/fman/mac.h
new file mode 100644
index 00000000..727320e8
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.h
@@ -0,0 +1,147 @@
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include <linux/list.h>
+#ifdef __rtems__
+#include <linux/netdevice.h>
+#endif /* __rtems__ */
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_mac.h"
+
+struct fman_mac;
+struct mac_priv_s;
+
+#ifdef __rtems__
+struct dpaa_eth_data {
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+ int mac_hw_id;
+ int fman_hw_id;
+};
+int dpaa_eth_priv_probe(struct platform_device *pdev,
+ struct mac_device *mac_dev);
+int dpa_eth_priv_start(struct net_device *net_dev);
+int dpa_eth_priv_stop(struct net_device *net_dev);
+#endif /* __rtems__ */
+struct mac_device {
+#ifndef __rtems__
+ struct resource *res;
+#endif /* __rtems__ */
+ u8 addr[ETH_ALEN];
+ struct fman_port *port[2];
+#ifndef __rtems__
+ u32 if_support;
+ struct phy_device *phy_dev;
+#endif /* __rtems__ */
+#ifdef __rtems__
+ struct platform_device pdev;
+ struct dpaa_eth_data data;
+ struct net_device net_dev;
+#endif /* __rtems__ */
+
+ bool autoneg_pause;
+ bool rx_pause_req;
+ bool tx_pause_req;
+ bool rx_pause_active;
+ bool tx_pause_active;
+ bool promisc;
+
+#ifndef __rtems__
+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+#else /* __rtems__ */
+ void (*adjust_link)(struct mac_device *mac_dev, u16 speed);
+#endif /* __rtems__ */
+ int (*init)(struct mac_device *mac_dev);
+ int (*start)(struct mac_device *mac_dev);
+ int (*stop)(struct mac_device *mac_dev);
+ int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
+ int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*set_multi)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
+ int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
+ int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
+ u16 pause_time, u16 thresh_time);
+ int (*set_exception)(struct fman_mac *mac_dev,
+ enum fman_mac_exceptions exception, bool enable);
+ int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+ int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+
+ struct fman_mac *fman_mac;
+ struct mac_priv_s *priv;
+};
+
+#ifndef __rtems__
+struct dpaa_eth_data {
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+ int mac_hw_id;
+ int fman_hw_id;
+};
+#endif /* __rtems__ */
+
+extern const char *mac_driver_description;
+
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Pause frame setting for RX
+ * @tx: Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
+
+/**
+ * fman_get_pause_cfg
+ * @mac_dev: A pointer to the MAC device
+ * @rx_pause: Return value for the RX PAUSE setting
+ * @tx_pause: Return value for the TX PAUSE setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings
+ *
+ * Return: none; the settings are returned via @rx_pause and @tx_pause.
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause);
+
+#endif /* __MAC_H */
diff --git a/linux/drivers/soc/fsl/qbman/bman-debugfs.c b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
new file mode 100644
index 00000000..09f5a283
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
@@ -0,0 +1,121 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/* Query Buffer Pool State */
+
+static int query_bp_state_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct bm_pool_state state;
+ int i, j;
+ u32 mask;
+
+ memset(&state, 0, sizeof(state));
+ ret = bman_query_pools(&state);
+ if (ret) {
+ seq_printf(file, "Error %d\n", ret);
+ return ret;
+ }
+
+ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
+ for (i = 0; i < 2; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ seq_printf(file,
+ " %-2u %-3s %-3s\n",
+ (i * 32) + j,
+ state.as.state.__state[i] & mask ? "no" : "yes",
+ state.ds.state.__state[i] & mask ? "yes" : "no");
+ mask >>= 1;
+ }
+ }
+
+ return 0;
+}
+
+static int query_bp_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_bp_state_show, NULL);
+}
+
+static const struct file_operations query_bp_state_fops = {
+ .owner = THIS_MODULE,
+ .open = query_bp_state_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static int __init bman_debugfs_init(void)
+{
+ int ret = 0;
+ struct dentry *d;
+
+ dfs_root = debugfs_create_dir("bman", NULL);
+ if (dfs_root == NULL) {
+ pr_err("Cannot create dir\n");
+ return -ENOMEM;
+ }
+
+ d = debugfs_create_file("query_bp_state",
+ S_IRUGO,
+ dfs_root,
+ NULL,
+ &query_bp_state_fops);
+ if (d == NULL) {
+ ret = -ENOMEM;
+ pr_err("Cannot create query_bp_state\n");
+ goto _return;
+ }
+
+ return 0;
+
+_return:
+ debugfs_remove_recursive(dfs_root);
+
+ return ret;
+}
+
+static void __exit bman_debugfs_exit(void)
+{
+ debugfs_remove_recursive(dfs_root);
+}
+
+module_init(bman_debugfs_init);
+module_exit(bman_debugfs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/linux/drivers/soc/fsl/qbman/bman.c b/linux/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 00000000..35459bc7
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,692 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/* Last updated for v00.79 of the BG */
+
+struct bman;
+
+/* Register offsets */
+#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* Pool State Change Notification */
+
+/* BMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
+
+union bman_ecir {
+ u32 ecir_raw;
+ struct {
+ u32 __reserved1:4;
+ u32 portal_num:4;
+ u32 __reserved2:12;
+ u32 numb:4;
+ u32 __reserved3:2;
+ u32 pid:6;
+ } __packed info;
+};
+
+union bman_eadr {
+ u32 eadr_raw;
+ struct {
+ u32 __reserved1:5;
+ u32 memid:3;
+ u32 __reserved2:14;
+ u32 eadr:10;
+ } __packed info;
+};
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
+ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
+ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+};
+#define BMAN_HWE_COUNT ARRAY_SIZE(bman_hwerr_txts)
+
+struct bman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct bman_error_info_mdata error_mdata[] = {
+ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+};
+#define BMAN_ERR_MDATA_COUNT ARRAY_SIZE(error_mdata)
+
+/* Add this in Kconfig */
+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+
+/**
+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define bm_err_isr_status_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_status)
+#define bm_err_isr_status_clear(bm, m) \
+ __bm_err_isr_write(bm, bm_isr_status, m)
+#define bm_err_isr_enable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_enable)
+#define bm_err_isr_enable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_enable, v)
+#define bm_err_isr_disable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_disable)
+#define bm_err_isr_disable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_disable, v)
+#define bm_err_isr_inhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_err_isr_uninhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 0)
+
+#ifndef __rtems__
+static u16 bman_pool_max;
+#else /* __rtems__ */
+/* FIXME */
+extern u16 bman_ip_rev;
+extern u16 bman_pool_max;
+#endif /* __rtems__ */
+
+/*
+ * TODO: unimplemented registers
+ *
+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ */
+
+/* Encapsulate "struct bman *" as a cast of the register space address. */
+
+static struct bman *bm_create(void *regs)
+{
+ return (struct bman *)regs;
+}
+
+static inline u32 __bm_in(struct bman *bm, u32 offset)
+{
+	return ioread32be((void *)bm + offset);
+}
+
+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+{
+	iowrite32be(val, (void *)bm + offset);
+}
+#define bm_in(reg) __bm_in(bm, REG_##reg)
+#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
+
+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+{
+ return __bm_in(bm, REG_ERR_ISR + (n << 2));
+}
+
+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+{
+ __bm_out(bm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_in(IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
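+/* Encode a threshold as an 8-bit mantissa plus a 4-bit exponent (the
+ * hardware interprets the result as mantissa << exponent), rounding up or
+ * down as requested while shifting the value into range.
+ */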
+static u32 __generate_thresh(u32 val, int roundup)
+{
+	u32 e = 0; /* exponent */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPA_ASSERT(e < 0x10);
+ return val | (e << 8);
+}
+
+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
+ u32 hwdet, u32 hwdxt)
+{
+ DPA_ASSERT(pool < bman_pool_max);
+
+ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
+ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
+ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
+ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+}
+
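+/* Program the base address and size of the FBPR private memory; the size is
+ * encoded in FBPR_AR as log2(size) - 1, optionally with the high-priority
+ * request bit set.
+ */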
+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPA_ASSERT(!(ba & (size - 1)));
+ bm_out(FBPR_BARE, upper_32_bits(ba));
+ bm_out(FBPR_BAR, lower_32_bits(ba));
+ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+}
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these. */
+static struct bman *bm;
+
+/* And this state belongs to 'bm' */
+#ifndef __rtems__
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
+#define fbpr_a ((uintptr_t)&fbpr[0])
+#define fbpr_sz sizeof(fbpr)
+#endif /* __rtems__ */
+
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bm)
+ return -ENODEV;
+ bm_set_pool(bm, bpid, thresholds[0], thresholds[1],
+ thresholds[2], thresholds[3]);
+ return 0;
+}
+EXPORT_SYMBOL(bm_pool_set);
+
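+/* Dump only the trailing EDATA words that carry capture data for the
+ * faulting memory; the first word printed is masked down to its valid bits.
+ */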
+static void log_edata_bits(u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	pr_warn("ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << (bit_count % 32));
+	}
+	j = 16 - i;
+	pr_warn("  0x%08x\n", bm_in(EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		pr_warn("  0x%08x\n", bm_in(EDATA(j)));
+}
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+ union bman_ecir ecir_val;
+ union bman_eadr eadr_val;
+
+ ecir_val.ecir_raw = bm_in(ECIR);
+ /* Is portal info valid */
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("ErrInt: SWP id %d, numb %d, pid %d\n",
+ ecir_val.info.portal_num, ecir_val.info.numb,
+ ecir_val.info.pid);
+ }
+	if (ecsr_val & (BM_EIRQ_SBEI | BM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = bm_in(EADR);
+ pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info.memid].txt,
+ error_mdata[eadr_val.info.memid].addr_mask
+ & eadr_val.info.eadr);
+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+ }
+}
+
+/* BMan interrupt handler */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+ ier_val = bm_err_isr_enable_read(bm);
+ isr_val = bm_err_isr_status_read(bm);
+ ecsr_val = bm_in(ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < BMAN_HWE_COUNT; i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ pr_warn("ErrInt: %s\n", bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(isr_mask, ecsr_val);
+ /* Re-arm error capture registers */
+ bm_out(ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
+ pr_devel("Un-enabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_err_isr_enable_write(bm, ier_val);
+ }
+ }
+ }
+ bm_err_isr_status_clear(bm, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return bm_in(POOL_CONTENT(bpid));
+}
+EXPORT_SYMBOL(bm_pool_free_buffers);
+
+#ifndef __rtems__
+static ssize_t show_fbpr_fpc(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
+};
+
+static ssize_t show_pool_count(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data;
+ int i;
+
+ if (kstrtoint(dev_attr->attr.name, 10, &i))
+ return -EINVAL;
+ data = bm_in(POOL_CONTENT(i));
+ return snprintf(buf, PAGE_SIZE, "%d\n", data);
+};
+
+static ssize_t show_err_isr(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
+};
+
+static ssize_t show_sbec(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+ return -EINVAL;
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+
+/* DEVICE_ATTR is not used here, as 64 of these would be required;
+ * initialize them when needed instead.
+ */
+static char *name_attrs_pool_count; /* "xx" + null-terminator */
+static struct device_attribute *dev_attr_buffer_pool_count;
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *bman_dev_attributes[] = {
+ &dev_attr_fbpr_fpc.attr,
+ &dev_attr_err_isr.attr,
+ NULL
+};
+
+static struct attribute *bman_dev_ecr_attributes[] = {
+ &dev_attr_sbec_0.attr,
+ &dev_attr_sbec_1.attr,
+ NULL
+};
+
+static struct attribute **bman_dev_pool_count_attributes;
+
+/* root level */
+static const struct attribute_group bman_dev_attr_grp = {
+ .name = NULL,
+ .attrs = bman_dev_attributes
+};
+static const struct attribute_group bman_dev_ecr_grp = {
+ .name = "error_capture",
+ .attrs = bman_dev_ecr_attributes
+};
+static struct attribute_group bman_dev_pool_countent_grp = {
+ .name = "pool_count",
+};
+
+static int of_fsl_bman_remove(struct platform_device *ofdev)
+{
+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+ return 0;
+}
+#endif /* __rtems__ */
+
+static int of_fsl_bman_probe(struct platform_device *ofdev)
+{
+ int ret, err_irq, i;
+ struct device *dev = &ofdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res;
+ u32 __iomem *regs;
+ u16 id;
+ u8 major, minor;
+
+ if (!of_device_is_available(node))
+ return -ENODEV;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Can't get %s property 'reg'\n", node->full_name);
+ return ret;
+ }
+ regs = devm_ioremap(dev, res.start, res.end - res.start + 1);
+ if (!regs)
+ return -ENXIO;
+
+ bm = bm_create(regs);
+
+ bm_get_version(bm, &id, &major, &minor);
+ dev_info(dev, "Bman ver:%04x,%02x,%02x\n", id, major, minor);
+ if ((major == 1) && (minor == 0))
+ bman_pool_max = 64;
+ else if ((major == 2) && (minor == 0))
+ bman_pool_max = 8;
+ else if ((major == 2) && (minor == 1))
+ bman_pool_max = 64;
+ else {
+ /* Fall back to the rev1.0 pool count, matching the warning below. */
+ bman_pool_max = 64;
+ dev_warn(dev, "unknown Bman version, default to rev1.0\n");
+ }
+#ifdef __rtems__
+ bman_ip_rev = (u16)((major << 8) | minor);
+#endif /* __rtems__ */
+
+ bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+
+ err_irq = of_irq_to_resource(node, 0, NULL);
+ if (err_irq == NO_IRQ) {
+ dev_info(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ node);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
+ /* Write-to-clear any stale bits (e.g. starvation asserted prior
+ * to resource allocation during driver init). */
+ bm_err_isr_status_clear(bm, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_err_isr_enable_write(bm, 0xffffffff);
+
+#ifndef __rtems__
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp);
+ if (ret)
+ goto done;
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp);
+ if (ret)
+ goto del_group_0;
+
+ /* On the error paths below, set ret so the probe reports failure
+ * rather than silently returning the stale value of 0. */
+ name_attrs_pool_count = devm_kmalloc(dev,
+ sizeof(char) * bman_pool_max * 3, GFP_KERNEL);
+ if (!name_attrs_pool_count) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ dev_attr_buffer_pool_count = devm_kmalloc(dev,
+ sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL);
+ if (!dev_attr_buffer_pool_count) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ bman_dev_pool_count_attributes = devm_kmalloc(dev,
+ sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL);
+ if (!bman_dev_pool_count_attributes) {
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ for (i = 0; i < bman_pool_max; i++) {
+ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
+ if (!ret) {
+ ret = -EINVAL;
+ goto del_group_1;
+ }
+ dev_attr_buffer_pool_count[i].attr.name =
+ (name_attrs_pool_count + i * 3);
+ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
+ dev_attr_buffer_pool_count[i].show = show_pool_count;
+ bman_dev_pool_count_attributes[i] =
+ &dev_attr_buffer_pool_count[i].attr;
+ }
+ bman_dev_pool_count_attributes[bman_pool_max] = NULL;
+
+ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
+
+ ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_countent_grp);
+ if (ret)
+ goto del_group_1;
+
+ goto done;
+
+del_group_1:
+ sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp);
+del_group_0:
+ sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp);
+done:
+ if (ret)
+ dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+#else /* __rtems__ */
+ (void)i;
+#endif /* __rtems__ */
+
+ return ret;
+}
+
+#ifndef __rtems__
+static const struct of_device_id of_fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver of_fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_fsl_bman_ids,
+ },
+ .probe = of_fsl_bman_probe,
+ .remove = of_fsl_bman_remove,
+};
+
+builtin_platform_driver(of_fsl_bman_driver);
+#else /* __rtems__ */
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <rtems.h>
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+
+static struct bm_portal_config bman_configs[NR_CPUS];
+
+u16 bman_ip_rev;
+
+u16 bman_pool_max;
+
+SYSINIT_REFERENCE(irqs);
+
+static void
+bman_sysinit(void)
+{
+ const char *fdt = bsp_fdt_get();
+ struct device_node dn;
+ struct platform_device ofdev = {
+ .dev = {
+ .of_node = &dn,
+ .base = (uintptr_t)&qoriq
+ }
+ };
+ const char *name;
+ int cpu_count = (int)rtems_get_processor_count();
+ int cpu;
+ int ret;
+ int node;
+ int parent;
+
+ qoriq_reset_qman_and_bman();
+ qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
+ sizeof(qoriq_bman_portal[0]));
+ qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
+ sizeof(qoriq_bman_portal[1]));
+
+ memset(&dn, 0, sizeof(dn));
+
+ name = "fsl,bman";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no bman in FDT");
+
+ dn.full_name = name;
+ dn.offset = node;
+ ret = of_fsl_bman_probe(&ofdev);
+ if (ret != 0)
+ panic("bman: probe failed");
+
+ name = "fsl,bman-portal";
+ node = fdt_node_offset_by_compatible(fdt, 0, name);
+ if (node < 0)
+ panic("bman: no portals in FDT");
+ parent = fdt_parent_offset(fdt, node);
+ if (parent < 0)
+ panic("bman: no parent of portals in FDT");
+ node = fdt_first_subnode(fdt, parent);
+
+ dn.full_name = name;
+ dn.offset = node;
+
+ for (cpu = 0; cpu < cpu_count; ++cpu) {
+ struct bm_portal_config *pcfg = &bman_configs[cpu];
+ struct bman_portal *portal;
+ struct resource res;
+
+ if (node < 0)
+ panic("bman: missing portal in FDT");
+
+ ret = of_address_to_resource(&dn, 0, &res);
+ if (ret != 0)
+ panic("bman: no portal CE address");
+ pcfg->addr_virt[0] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+ (uintptr_t)&qoriq_bman_portal[0][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+
+ ret = of_address_to_resource(&dn, 1, &res);
+ if (ret != 0)
+ panic("bman: no portal CI address");
+ pcfg->addr_virt[1] = (__iomem void *)
+ ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+ (uintptr_t)&qoriq_bman_portal[1][0]);
+ BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+ (uintptr_t)&qoriq_bman_portal[2][0]);
+
+ pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
+ if (pcfg->public_cfg.irq == NO_IRQ)
+ panic("bman: no portal interrupt");
+
+ pcfg->public_cfg.cpu = cpu;
+ bman_depletion_fill(&pcfg->public_cfg.mask);
+
+ portal = bman_create_affine_portal(pcfg);
+ if (portal == NULL)
+ panic("bman: cannot create portal");
+
+ bman_p_irqsource_add(portal, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+
+ node = fdt_next_subnode(fdt, node);
+ dn.offset = node;
+ }
+
+ bman_seed_bpid_range(0, bman_pool_max);
+}
+SYSINIT(bman_sysinit, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
+#endif /* __rtems__ */
diff --git a/linux/drivers/soc/fsl/qbman/bman.h b/linux/drivers/soc/fsl/qbman/bman.h
new file mode 100644
index 00000000..c9879386
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.h
@@ -0,0 +1,542 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+extern u16 bman_pool_max;
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) __raw_readl((bm)->addr_ci + (o))
+#define __bm_out(bm, o, val) __raw_writel((val), (bm)->addr_ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
+#define __bm_cl_in(bm, o) __raw_readl((bm)->addr_ce + (o))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->addr_ce + (o); \
+ __raw_writel((val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
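+/* Worked example (illustrative): with ringsize = 8, first = 6 and last = 2
+ * the ring has wrapped, so the distance is 8 + 2 - 6 = 4 entries; with
+ * first = 2 and last = 6 it is simply 6 - 2 = 4. */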
+
+/* Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
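+/* For example, bm_rcr_pvb decodes as: RCR production via in-band valid-bit. */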
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *addr_ce; /* cache-enabled */
+ void __iomem *addr_ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* --- RCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
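+/* Worked example for the two helpers above (illustrative): ring entries are
+ * 64 bytes, so an 8-entry ring spans 512 bytes. Incrementing past the last
+ * entry sets bit 0x200 (BM_RCR_SIZE << 6) in the pointer; RCR_CARRYCLEAR()
+ * clears that bit to wrap back to the ring base, and RCR_PTR2IDX() maps any
+ * entry pointer to an index in 0..7. */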
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'. */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case. */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ DPA_ASSERT(!rcr->busy);
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommited RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pci);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cci);
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
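+/* Typical producer sequence built from the RCR API above (an illustrative
+ * sketch, where portal and num are placeholders; the real fast path lives
+ * in bman_release()):
+ *
+ *   struct bm_rcr_entry *r = bm_rcr_start(portal);
+ *   if (r) {
+ *           // fill in r->bufs[], taking care not to touch the verb byte
+ *           bm_rcr_pvb_commit(portal, BM_RCR_VERB_CMD_BPID_SINGLE | num);
+ *   }
+ */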
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.addr_ce + BM_CL_CR;
+ mc->rr = portal->addr.addr_ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPA_ASSERT(mc->state == mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPA_ASSERT(mc->state == mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering... */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
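+/* Typical management-command round trip built from the MC API above (an
+ * illustrative sketch, portal/bpid/num being placeholders; see
+ * bman_acquire() for real usage):
+ *
+ *   struct bm_mc_command *cr = bm_mc_start(portal);
+ *   cr->acquire.bpid = bpid;
+ *   bm_mc_commit(portal, BM_MCC_VERB_CMD_ACQUIRE | num);
+ *   while (!(rr = bm_mc_result(portal)))
+ *           cpu_relax();
+ */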
+
+/* --- Portal interrupt register API --- */
+
+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
+{
+ return 0;
+}
+
+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
+{
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
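+/* Worked example (illustrative): bpid 35 lives in the second SCN register,
+ * SCN_REG(35) == BM_REG_SCN(1), at bit SCN_BIT(35) == 0x80000000 >> 3 ==
+ * 0x10000000. */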
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPA_ASSERT(bpid < bman_pool_max);
+
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ /* TBD: Should we do a few extra iterations in
+ * case some other blocks keep buffers 'on deck',
+ * which may also be problematic? */
+ stop = true;
+ } else
+ ++aq_count;
+ }
+ return 0;
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_api.c b/linux/drivers/soc/fsl/qbman/bman_api.c
new file mode 100644
index 00000000..cdfcebbd
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_api.c
@@ -0,0 +1,1123 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spinlock_t sharing_lock; /* only used if is_shared */
+#ifndef __rtems__
+ int is_shared;
+ struct bman_portal *sharing_redirect;
+#endif /* __rtems__ */
+#endif
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ /* 64-entry hash-table of pool objects that are tracking depletion
+ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
+ * we're not fussy about cache-misses and so forth - whereas the above
+ * members should all fit in one cacheline.
+ * BTW, with 64 entries in the hash table and 64 buffer pools to track,
+ * you'll never guess the hash-function ... */
+ struct bman_pool *cb[64];
+ char irqname[MAX_IRQNAME];
+ /* Track if the portal was alloced by the driver */
+ u8 alloced;
+};
+
+
+#ifdef FSL_DPA_PORTAL_SHARE
+/* For an explanation of the locking, redirection, or affine-portal logic,
+ * please consult the QMan driver for details. This is the same, only simpler
+ * (no fiddly QMan-specific bits.) */
+#ifndef __rtems__
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+ else \
+ local_irq_save(irqflags); \
+ } while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+ irqflags); \
+ else \
+ local_irq_restore(irqflags); \
+ } while (0)
+#else /* __rtems__ */
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
+#endif /* __rtems__ */
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+#ifndef __rtems__
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+static inline struct bman_portal *get_raw_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct bman_portal *get_affine_portal(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return p->sharing_redirect;
+#endif /* __rtems__ */
+ return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+static inline struct bman_portal *get_poll_portal(void)
+{
+ return this_cpu_ptr(&bman_affine_portal);
+}
+#define put_poll_portal()
+
+/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals. */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
+ struct bm_buffer *sp;
+ unsigned int sp_fill;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_t in_use;
+#endif
+};
+
+/* (De)Registration of depletion notification callbacks */
+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
+{
+ __maybe_unused unsigned long irqflags;
+
+ pool->portal = portal;
+ PORTAL_IRQ_LOCK(portal, irqflags);
+ pool->next = portal->cb[pool->params.bpid];
+ portal->cb[pool->params.bpid] = pool;
+ if (!pool->next)
+ /* First object for that bpid on this portal, enable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
+ PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+static void depletion_unlink(struct bman_pool *pool)
+{
+ struct bman_pool *it, *last = NULL;
+ struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(pool->portal, irqflags);
+ it = *base; /* <-- gotcha, don't do this prior to the irq_save */
+ while (it != pool) {
+ last = it;
+ it = it->next;
+ }
+ if (!last)
+ *base = pool->next;
+ else
+ last->next = pool->next;
+ if (!last && !pool->next) {
+ /* Last object for that bpid on this portal, disable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
+ /* And "forget" that we last saw this pool as depleted */
+ bman_depletion_unset(&pool->portal->pools[1],
+ pool->params.bpid);
+ }
+ PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
+}
+
+/* In the case that the application's core loop calls qman_poll() and
+ * bman_poll(), we ought to balance how often we incur the overheads of the
+ * slow-path poll. We'll use two decrementer sources. The idle decrementer
+ * constant is used when the last slow-poll detected no work to do, and the busy
+ * decrementer constant when the last slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ u32 clear = p->irq_sources;
+ u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
+
+ clear |= __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+
+struct bman_portal *bman_create_portal(
+ struct bman_portal *portal,
+ const struct bm_portal_config *config)
+{
+ struct bm_portal *__p;
+ const struct bman_depletion *pools = &config->public_cfg.mask;
+ int ret;
+ u8 bpid = 0;
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ if (!portal)
+ return portal;
+ portal->alloced = 1;
+ } else
+ portal->alloced = 0;
+
+ __p = &portal->p;
+
+ /* prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference... */
+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(__p)) {
+ pr_err("MC initialisation failed\n");
+ goto fail_mc;
+ }
+ if (bm_isr_init(__p)) {
+ pr_err("ISR initialisation failed\n");
+ goto fail_isr;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /* Default to all BPIDs disabled, we enable as required at
+ * run-time. */
+ bm_isr_bscn_mask(__p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ portal->rcri_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+ raw_spin_lock_init(&portal->sharing_lock);
+#ifndef __rtems__
+ portal->is_shared = config->public_cfg.is_shared;
+ portal->sharing_redirect = NULL;
+#endif /* __rtems__ */
+#endif
+ memset(&portal->cb, 0, sizeof(portal->cb));
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(__p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(__p, portal->irq_sources);
+ bm_isr_status_clear(__p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+#ifndef __rtems__
+ if ((config->public_cfg.cpu != -1) &&
+ irq_can_set_affinity(config->public_cfg.irq) &&
+ irq_set_affinity(config->public_cfg.irq,
+ cpumask_of(config->public_cfg.cpu))) {
+ pr_err("irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+#endif /* __rtems__ */
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(__p);
+ if (ret) {
+ pr_err("RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = config;
+
+ bm_isr_disable_write(__p, 0);
+ bm_isr_uninhibit(__p);
+ return portal;
+fail_rcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_isr_finish(__p);
+fail_isr:
+ bm_mc_finish(__p);
+fail_mc:
+ bm_rcr_finish(__p);
+fail_rcr:
+ if (portal->alloced)
+ kfree(portal);
+ return NULL;
+}
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config)
+{
+ struct bman_portal *portal;
+
+ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
+ portal = bman_create_portal(portal, config);
+#ifndef __rtems__
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+#endif /* __rtems__ */
+ return portal;
+}
+
+
+#ifndef __rtems__
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+ int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+ struct bman_portal *p = &per_cpu(bman_affine_portal, cpu);
+
+ BUG_ON(p->config);
+ BUG_ON(p->is_shared);
+ BUG_ON(!redirect->config->public_cfg.is_shared);
+ p->irq_sources = 0;
+ p->sharing_redirect = redirect;
+ put_affine_portal();
+ return p;
+#else
+ BUG();
+ return NULL;
+#endif
+}
+#endif /* __rtems__ */
+
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg = bm->config;
+
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->public_cfg.irq, bm);
+
+ kfree(bm->pools);
+ bm_isr_finish(&bm->p);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+ if (bm->alloced)
+ kfree(bm);
+}
+
+const struct bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_raw_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (bm->sharing_redirect) {
+ bm->sharing_redirect = NULL;
+ put_affine_portal();
+ return NULL;
+ }
+ bm->is_shared = 0;
+#endif /* __rtems__ */
+#endif
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+#ifndef __rtems__
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+ put_affine_portal();
+ return pcfg;
+}
+
+/* When release logic waits on available RCR space, we need a global waitqueue
+ * in the case of "affine" use (as the waits wake on different cpus which means
+ * different portals - so we can't wait on any per-portal waitqueue). */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ struct bman_depletion tmp;
+ u32 ret = is;
+
+ /* There is a gotcha to be aware of. If we do the query before clearing
+ * the status register, we may miss state changes that occur between the
+ * two. If we write to clear the status register before the query, the
+ * cache-enabled query command may overtake the status register write
+ * unless we use a heavyweight sync (which we don't want). Instead, we
+ * write-to-clear the status register then *read it back* before doing
+ * the query, hence the odd while loop with the 'is' accumulation. */
+ if (is & BM_PIRQ_BSCN) {
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ unsigned int i, j;
+ u32 __is;
+
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
+ is |= __is;
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ }
+ is &= ~BM_PIRQ_BSCN;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ tmp = mcr->query.ds.state;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ for (i = 0; i < 2; i++) {
+ int idx = i * 32;
+ /* tmp is a mask of currently-depleted pools.
+ * pools[0] is mask of those we care about.
+ * pools[1] is our previous view (we only want to
+ * be told about changes). */
+ tmp.__state[i] &= p->pools[0].__state[i];
+ if (tmp.__state[i] == p->pools[1].__state[i])
+ /* fast-path, nothing to see, move along */
+ continue;
+ for (j = 0; j <= 31; j++, idx++) {
+ struct bman_pool *pool = p->cb[idx];
+ int b4 = bman_depletion_get(&p->pools[1], idx);
+ int af = bman_depletion_get(&tmp, idx);
+
+ if (b4 == af)
+ continue;
+ while (pool) {
+ pool->params.cb(p, pool,
+ pool->params.cb_ctx, af);
+ pool = pool->next;
+ }
+ }
+ }
+ p->pools[1] = tmp;
+ }
+
+ if (is & BM_PIRQ_RCRI) {
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_rcr_cce_update(&p->p);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ /* If waiting for sync, we only cancel the interrupt threshold
+ * when the ring utilisation hits zero. */
+ if (p->rcri_owned) {
+ if (!bm_rcr_get_fill(&p->p)) {
+ p->rcri_owned = NULL;
+ bm_rcr_set_ithresh(&p->p, 0);
+ }
+ } else
+#endif
+ bm_rcr_set_ithresh(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPA_ASSERT(!is);
+ return ret;
+}
+
+const struct bman_portal_config *bman_get_portal_config(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ const struct bman_portal_config *ret = &p->config->public_cfg;
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_get_portal_config);
+
+u32 bman_irqsource_get(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_get);
+
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
+{
+ __maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect)
+ return -EINVAL;
+#endif /* __rtems__ */
+#endif
+ PORTAL_IRQ_LOCK(p, irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return 0;
+}
+EXPORT_SYMBOL(bman_p_irqsource_add);
+
+int bman_irqsource_add(__maybe_unused u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ int ret = bman_p_irqsource_add(p, bits);
+
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_add);
+
+int bman_irqsource_remove(u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ __maybe_unused unsigned long irqflags;
+ u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (p->sharing_redirect) {
+ put_affine_portal();
+ return -EINVAL;
+ }
+#endif /* __rtems__ */
+#endif
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert. */
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bits &= BM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ ier = bm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering. */
+ bm_isr_status_clear(&p->p, ~ier);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_irqsource_remove);
+
+#ifndef __rtems__
+const cpumask_t *bman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(bman_affine_cpus);
+#endif /* __rtems__ */
+
+u32 bman_poll_slow(void)
+{
+ struct bman_portal *p = get_poll_portal();
+ u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ ret = (u32)-1;
+ else
+#endif /* __rtems__ */
+#endif
+ {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+
+ ret = __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, ret);
+ }
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_poll_slow);
+
+/* Legacy wrapper */
+void bman_poll(void)
+{
+ struct bman_portal *p = get_poll_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+ if (unlikely(p->sharing_redirect))
+ goto done;
+#endif /* __rtems__ */
+#endif
+ if (!(p->slowpoll--)) {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active)
+ p->slowpoll = SLOW_POLL_BUSY;
+ else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+done:
+#endif /* __rtems__ */
+#endif
+ put_poll_portal();
+}
+EXPORT_SYMBOL(bman_poll);
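+/* Illustrative application core loop for the polling mode above (a sketch,
+ * not part of the driver; do_fast_path_work() is a placeholder):
+ *
+ *   for (;;) {
+ *           bman_poll();   // slow-path work at most every SLOW_POLL_* calls
+ *           do_fast_path_work();
+ *   }
+ */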
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ if (bman_alloc_bpid(&bpid))
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+#ifdef CONFIG_FSL_BMAN
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ if (bm_pool_set(bpid, params->thresholds))
+ goto err;
+ }
+#else
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ goto err;
+#endif
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->sp = NULL;
+ pool->sp_fill = 0;
+ pool->params = *params;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
+ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
+ GFP_KERNEL);
+ if (!pool->sp)
+ goto err;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
+ struct bman_portal *p = get_affine_portal();
+
+ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
+ pr_err("Depletion events disabled for bpid %d\n", bpid);
+ goto err;
+ }
+ depletion_link(p, pool);
+ put_affine_portal();
+ }
+ return pool;
+err:
+#ifdef CONFIG_FSL_BMAN
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ if (pool) {
+ kfree(pool->sp);
+ kfree(pool);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+#ifdef CONFIG_FSL_BMAN
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+#endif
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
+ depletion_unlink(pool);
+ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
+ if (pool->sp_fill)
+ pr_err("Stockpile not flushed, has %u in bpid %u.\n",
+ pool->sp_fill, pool->params.bpid);
+ kfree(pool->sp);
+ pool->sp = NULL;
+ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+EXPORT_SYMBOL(bman_get_params);
+
+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_rcr_is_empty(void)
+{
+ __maybe_unused unsigned long irqflags;
+ struct bman_portal *p = get_affine_portal();
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ update_rcr_ci(p, 0);
+ avail = bm_rcr_get_fill(&p->p);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return avail == 0;
+}
+EXPORT_SYMBOL(bman_rcr_is_empty);
+
+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
+#ifdef FSL_DPA_CAN_WAIT
+ __maybe_unused struct bman_pool *pool,
+#endif
+ __maybe_unused unsigned long *irqflags,
+ __maybe_unused u32 flags)
+{
+ struct bm_rcr_entry *r;
+ u8 avail;
+
+ *p = get_affine_portal();
+ PORTAL_IRQ_LOCK(*p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ if ((*p)->rcri_owned) {
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ return NULL;
+ }
+ (*p)->rcri_owned = pool;
+ }
+#endif
+ avail = bm_rcr_get_avail(&(*p)->p);
+ if (avail < 2)
+ update_rcr_ci(*p, avail);
+ r = bm_rcr_start(&(*p)->p);
+ if (unlikely(!r)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
+ (*p)->rcri_owned = NULL;
+#endif
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ }
+ return r;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
+
+ if (!rcr)
+ bm_rcr_set_ithresh(&(*p)->p, 1);
+ return rcr;
+}
+
+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr;
+#ifndef FSL_DPA_CAN_WAIT_SYNC
+ pool = NULL;
+#endif
+#ifndef __rtems__
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ return rcr;
+}
+#endif
+
+/* This overlay facilitates copying bufs into the ring without either (a)
+ * copying noise into the first byte (prematurely triggering the command) or
+ * (b) being very inefficient by copying small fields using
+ * read-modify-write. */
+struct overlay_bm_buffer {
+ u32 first;
+ u32 second;
+};
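+/* Judging from the masks used below (an inference, not a documented layout):
+ * the first word of an RCR entry holds the verb byte in bits 31-24, the bpid
+ * in bits 23-16 and high address bits in 15-0, so composing "first" as
+ * (src & 0x0000ffff) | ((bpid << 16) & 0x00ff0000) installs the bpid while
+ * leaving the verb byte zero until the commit writes it. */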
+
+static inline int __bman_release(struct bman_pool *pool,
+ const struct bm_buffer *bufs, u8 num, u32 flags)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ struct overlay_bm_buffer *o_dest;
+ struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
+ __maybe_unused unsigned long irqflags;
+ u32 i = num - 1;
+
+#ifdef FSL_DPA_CAN_WAIT
+ if (flags & BMAN_RELEASE_FLAG_WAIT)
+ r = wait_rel_start(&p, pool, &irqflags, flags);
+ else
+ r = try_rel_start(&p, pool, &irqflags, flags);
+#else
+ r = try_rel_start(&p, &irqflags, flags);
+#endif
+ if (!r)
+ return -EBUSY;
+ /* We can copy all but the first entry, as this can trigger badness
+ * with the valid-bit. Use the overlay to mask the verb byte. */
+ o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
+ o_dest->first = (o_src->first & 0x0000ffff) |
+ (((u32)pool->params.bpid << 16) & 0x00ff0000);
+ o_dest->second = o_src->second;
+ if (i)
+ copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ /* if we wish to sync we need to set the threshold after h/w sees the
+ * new ring entry. As we're mixing cache-enabled and cache-inhibited
+ * accesses, this requires a heavy-weight sync. */
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ hwsync();
+ bm_rcr_set_ithresh(&p->p, 1);
+ }
+#endif
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ wait_event_interruptible(affine_queue,
+ (p->rcri_owned != pool));
+ else
+#endif /* __rtems__ */
+ wait_event(affine_queue, (p->rcri_owned != pool));
+ }
+#endif
+ return 0;
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+ pr_crit("Parallel attempts to enter bman_released() detected.");
+ panic("only one instance of bman_released/acquired allowed");
+ }
+#endif
+ /* This needs some explanation. Adding the given buffers may take the
+ * stockpile over the threshold, but in fact the stockpile may already
+ * *be* over the threshold if a previous release-to-hw attempt had
+ * failed. So we have 3 cases to cover;
+ * 1. we add to the stockpile and don't hit the threshold,
+ * 2. we add to the stockpile, hit the threshold and release-to-hw,
+ * 3. we have to release-to-hw before adding to the stockpile
+ * (not enough room in the stockpile for case 2).
+ * Our constraints on thresholds guarantee that in case 3, there must be
+ * at least 8 bufs already in the stockpile, so all release-to-hw ops
+ * are for 8 bufs. Despite all this, the API must indicate whether the
+ * given buffers were taken off the caller's hands, irrespective of
+ * whether a release-to-hw was attempted. */
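+ /* Worked example (illustrative): if adding num = 8 buffers pushes
+ * sp_fill past BMAN_STOCKPILE_HIGH, the buffers are stockpiled first
+ * and one 8-buffer release-to-hw follows (case 2); if they would not
+ * fit at all (case 3), the 8-buffer release-to-hw runs before the add
+ * and the loop retries, the threshold constraints guaranteeing that
+ * those 8 bufs are already on hand. */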
+ while (num) {
+ /* Add buffers to stockpile if they fit */
+ if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
+ copy_words(pool->sp + pool->sp_fill, bufs,
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill += num;
+ num = 0; /* --> will return success no matter what */
+ }
+ /* Do hw op if hitting the high-water threshold */
+ if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
+ ret = __bman_release(pool,
+ pool->sp + (pool->sp_fill - 8), 8, flags);
+ if (ret) {
+ ret = (num ? ret : 0);
+ goto release_done;
+ }
+ pool->sp_fill -= 8;
+ }
+ }
+release_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_inc(&pool->in_use);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(bman_release);
+
+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
+ u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ int ret;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ copy_words(&bufs[0], &mcr->acquire.bufs[0],
+ num * sizeof(bufs[0]));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+ pr_crit("Parallel attempts to enter bman_acquire() detected.");
+ panic("only one instance of bman_released/acquired allowed");
+ }
+#endif
+ /* Only need a h/w op if we'll hit the low-water thresh */
+ if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
+ (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
+ /* refill stockpile with max amount, but if max amount
+ * isn't available, try amount the user wants */
+ int bufcount = 8;
+
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
+ if (ret < 0 && bufcount != num) {
+ bufcount = num;
+ /* Maybe buffer pool has less than 8 */
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill,