Diffstat (limited to 'linux/drivers/net/ethernet/freescale')
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c          2987
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h           440
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c   1491
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h    113
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c        710
-rw-r--r--   linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h      12
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman.c              1293
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman.h               169
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c         523
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_mac.h             6
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_memac.c          402
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_muram.h           44
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_port.c           338
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_port.h            89
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_sp.c               2
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/fman_tgec.c           116
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/mac.c                 241
-rw-r--r--   linux/drivers/net/ethernet/freescale/fman/mac.h                  26
18 files changed, 3952 insertions, 5050 deletions
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 73173b89..f8ed4516 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2,7 +2,7 @@
#include <rtems/bsd/local/opt_dpaa.h>
-/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,9 +36,9 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/kthread.h>
#include <linux/io.h>
#ifndef __rtems__
#include <linux/if_arp.h>
@@ -49,32 +49,50 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
+#include <linux/sort.h>
#endif /* __rtems__ */
#include <soc/fsl/bman.h>
+#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
-
#include "mac.h"
#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
* using trace events only need to #include <trace/events/sched.h>
*/
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"
-#define DPA_NAPI_WEIGHT 64
+static int debug = -1;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
+
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, 0444);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
+ FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+ FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
+ FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+ FM_FD_ERR_PRS_HDR_ERR)
-/* Valid checksum indication */
-#define DPA_CSUM_VALID 0xFFFF
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_FD_ERR_UNSUPPORTED_FORMAT | \
+ FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
-#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+ NETIF_MSG_IFDOWN)
#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
@@ -83,62 +101,1999 @@
* by FMan.
*/
+/* Size in bytes of the FQ taildrop threshold */
+#define DPAA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (risking
+ * the netdev watchdog to fire - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
+
+/* Largest value that the FQD's OAL field can hold */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Default alignment for start of data in an Rx FD */
+#define DPAA_FD_DATA_ALIGNMENT 16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+/* Values for the L4R field of the FM Parse Results */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+#define FSL_DPAA_BPID_INV 0xff
+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
+
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPAA_TIME_STAMP_SIZE 8
+#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
+
+#define DPAA_ETH_RX_QUEUES 128
+
+#define DPAA_ENQUEUE_RETRIES 100000
+
+enum port_type {RX, TX};
+
+struct fm_port_fqs {
+ struct dpaa_fq *tx_defq;
+ struct dpaa_fq *tx_errq;
+ struct dpaa_fq *rx_defq;
+ struct dpaa_fq *rx_errq;
+};
+
+/* All the dpa bps in use at any moment */
+static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
+
+/* The raw buffer size must be cacheline aligned */
#ifndef __rtems__
-static u8 debug = -1;
-module_param(debug, byte, S_IRUGO);
-MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+#define DPAA_BP_RAW_SIZE 4096
+#else /* __rtems__ */
+/*
+ * FIXME: Support multiple buffer pools.
+ */
+#define DPAA_BP_RAW_SIZE 2048
-/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
-static u16 tx_timeout = 1000;
-module_param(tx_timeout, ushort, S_IRUGO);
-MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+/*
+ * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
+ * frames overwrite this area if < 64 bytes.
+ */
+#define DPAA_OUT_OF_BAND_SIZE 64
+
+#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE)
+#endif /* __rtems__ */
+/* When using more than one buffer pool, the raw sizes are as follows:
+ * 1 bp: 4KB
+ * 2 bp: 2KB, 4KB
+ * 3 bp: 1KB, 2KB, 4KB
+ * 4 bp: 1KB, 2KB, 4KB, 8KB
+ */
+static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
+{
+ size_t res = DPAA_BP_RAW_SIZE / 4;
+ u8 i;
+
+ for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
+ res *= 2;
+ return res;
+}
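For reference, the values the helper above yields, restating the size table in the preceding comment (the loop clamps the starting exponent at 3, so a single-pool configuration still gets a full-size buffer); computed with the Linux DPAA_BP_RAW_SIZE of 4096:

/* bpool_buffer_raw_size(index, cnt), with DPAA_BP_RAW_SIZE = 4096:
 *   cnt = 1:  index 0           -> 4096
 *   cnt = 2:  index 0, 1        -> 2048, 4096
 *   cnt = 3:  index 0, 1, 2     -> 1024, 2048, 4096
 *   cnt = 4:  index 0, 1, 2, 3  -> 1024, 2048, 4096, 8192
 */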
+
+/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers, so we reserve some more space for start-of-buffer
+ * alignment.
+ */
+#ifndef __rtems__
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#else /* __rtems__ */
+#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET
#endif /* __rtems__ */
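A rough worked example of the usable Rx buffer size on the Linux side, assuming a 64-byte SMP_CACHE_BYTES and an skb_shared_info of roughly 320 bytes (both are configuration dependent, so treat the figures as illustrative):

/* dpaa_bp_size(4096) = SKB_WITH_OVERHEAD(4096 - 64)
 *                    = 4032 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *                   ~= 4032 - 320 = 3712 bytes available for frame data
 */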
-/* BM */
+#ifndef __rtems__
+static int dpaa_max_frm;
+#endif /* __rtems__ */
-#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+#ifndef __rtems__
+static int dpaa_rx_extra_headroom;
+#else /* __rtems__ */
+#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom()
+#endif /* __rtems__ */
-static u8 dpa_priv_common_bpid;
+#define dpaa_get_max_mtu() \
+ (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
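As a concrete example, with a FMan maximum frame length of 1522 bytes (a typical default; dpaa_max_frm itself is configuration dependent):

/* dpaa_get_max_mtu() = dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)
 *                    = 1522 - (18 + 4) = 1500
 */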
-static void _dpa_rx_error(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid)
+#ifndef __rtems__
+static int dpaa_netdev_init(struct net_device *net_dev,
+ const struct net_device_ops *dpaa_ops,
+ u16 tx_timeout)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_percpu_priv *percpu_priv;
+ const u8 *mac_addr;
+ int i, err;
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+ net_dev->netdev_ops = dpaa_ops;
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ net_dev->min_mtu = ETH_MIN_MTU;
+ net_dev->max_mtu = dpaa_get_max_mtu();
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* The kernel enables GSO automatically if we declare NETIF_F_SG.
+ * For conformity, we'll still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ /* we do not want shared skbs on TX */
+ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ net_dev->ethtool_ops = &dpaa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ /* start without the RUNNING flag, phylib controls it later */
+ netif_carrier_off(net_dev);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+#endif /* __rtems__ */
+
+static int dpaa_stop(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int i, err, error;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+ netif_tx_stop_all_queues(net_dev);
+#endif /* __rtems__ */
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ usleep_range(5000, 10000);
+
+ err = mac_dev->stop(mac_dev);
+ if (err < 0)
+ netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
+ err);
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ error = fman_port_disable(mac_dev->port[i]);
+ if (error)
+ err = error;
+ }
+
+#ifndef __rtems__
+ if (net_dev->phydev)
+ phy_disconnect(net_dev->phydev);
+ net_dev->phydev = NULL;
+#endif /* __rtems__ */
+
+ return err;
+}
+
+#ifndef __rtems__
+static void dpaa_tx_timeout(struct net_device *net_dev)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+static void dpaa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *s)
+{
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ u64 *netstats = (u64 *)s;
+ u64 *cpustats;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ /* add stats from all CPUs */
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
+{
+ struct platform_device *of_dev;
+ struct dpaa_eth_data *eth_data;
+ struct device *dpaa_dev, *dev;
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+
+ dpaa_dev = &pdev->dev;
+ eth_data = dpaa_dev->platform_data;
+ if (!eth_data)
+ return ERR_PTR(-ENODEV);
+
+ mac_node = eth_data->mac_node;
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (!of_dev) {
+ dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (!mac_dev) {
+ dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mac_dev;
+}
+
+static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpaa_priv *priv;
+ struct mac_device *mac_dev;
+ struct sockaddr old_addr;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+ return err;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+ err);
+ /* reverting to previous address */
+ eth_mac_addr(net_dev, &old_addr);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa_set_rx_mode(struct net_device *net_dev)
+{
+ const struct dpaa_priv *priv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+ priv->mac_dev->promisc);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ err);
+ }
+
+ err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (err < 0)
+ netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ err);
+}
+#endif /* __rtems__ */
+
+static struct dpaa_bp *dpaa_bpid2pool(int bpid)
+{
+ if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
+ return NULL;
+
+ return dpaa_bp_array[bpid];
+}
+
+/* checks if this bpool is already allocated */
+static bool dpaa_bpid2pool_use(int bpid)
+{
+ if (dpaa_bpid2pool(bpid)) {
+ atomic_inc(&dpaa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+/* called only once per bpid by dpaa_bp_alloc_pool() */
+static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
+{
+ dpaa_bp_array[bpid] = dpaa_bp;
+ atomic_set(&dpaa_bp->refs, 1);
+}
+
+static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
+{
+ int err;
+
+ if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
+ pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* If the pool is already specified, we only create one per bpid */
+ if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
+ dpaa_bpid2pool_use(dpaa_bp->bpid))
+ return 0;
+
+ if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
+ dpaa_bp->pool = bman_new_pool();
+ if (!dpaa_bp->pool) {
+ pr_err("%s: bman_new_pool() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
+ }
+
+ if (dpaa_bp->seed_cb) {
+ err = dpaa_bp->seed_cb(dpaa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
+
+ return 0;
+
+pool_seed_failed:
+ pr_err("%s: pool seeding failed\n", __func__);
+ bman_free_pool(dpaa_bp->pool);
+
+ return err;
+}
+
+/* remove and free all the buffers from the given buffer pool */
+static void dpaa_bp_drain(struct dpaa_bp *bp)
+{
+ u8 num = 8;
+ int ret;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have less than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ if (bp->free_buf_cb)
+ for (i = 0; i < num; i++)
+ bp->free_buf_cb(bp, &bmb[i]);
+ } while (ret > 0);
+}
+
+static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
+{
+ struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
+
+ /* the mapping between bpid and dpaa_bp is done very late in the
+ * allocation procedure; if something failed before the mapping, the bp
+ * was not configured, therefore we don't need the below instructions
+ */
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpaa_bp_drain(bp);
+
+ dpaa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+static void dpaa_bps_free(struct dpaa_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++)
+ dpaa_bp_free(priv->dpaa_bps[i]);
+}
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ3).
+ * - Rx Default and Tx queues go to WQ3 (no differentiation between
+ * Rx and Tx traffic).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ fq->wq = 2;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_TX:
+ fq->wq = 3;
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
+
+static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
+ u32 start, u32 count,
+ struct list_head *list,
+ enum dpaa_fq_type fq_type)
+{
+ struct dpaa_fq *dpaa_fq;
+ int i;
+
+ dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
+ GFP_KERNEL);
+ if (!dpaa_fq)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ dpaa_fq[i].fq_type = fq_type;
+ dpaa_fq[i].fqid = start ? start + i : 0;
+ list_add_tail(&dpaa_fq[i].list, list);
+ }
+
+ for (i = 0; i < count; i++)
+ dpaa_assign_wq(dpaa_fq + i);
+
+ return dpaa_fq;
+}
+
+static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs)
+{
+ struct dpaa_fq *dpaa_fq;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ goto fq_alloc_failed;
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpaa_fq_alloc() failed\n");
+ return -ENOMEM;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+static int dpaa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret;
+
+ ret = qman_alloc_pool(&pool);
+
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
+#ifndef __rtems__
+static void dpaa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+#endif /* __rtems__ */
+
+static void dpaa_eth_add_channel(u16 channel)
+{
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+#ifndef __rtems__
+ const cpumask_t *cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+ struct qman_portal *portal;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ }
+}
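For context, a minimal sketch of how the two channel helpers above are typically paired at probe time; the actual call site is not part of this hunk, so treat this as an assumption:

	/* Hypothetical probe-time pairing of dpaa_get_channel() and
	 * dpaa_eth_add_channel(): allocate one pool channel, then add it
	 * to the static-dequeue list of every affine portal.
	 */
	int channel = dpaa_get_channel();

	if (channel < 0)
		return channel;			/* -ENOMEM: no pool channel */

	priv->channel = (u16)channel;
	dpaa_eth_add_channel(priv->channel);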
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
+ struct dpaa_priv, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+#ifndef __rtems__
+ netif_tx_stop_all_queues(priv->net_dev);
+#endif /* __rtems__ */
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+#ifndef __rtems__
+ netif_tx_wake_all_queues(priv->net_dev);
+#endif /* __rtems__ */
+ }
+}
+
+static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
- /* limit common, possibly innocuous Rx FIFO Overflow errors'
- * interference with zero-loss convergence benchmark results.
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d allocating CGR ID\n",
+ __func__, err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+ /* Enable Congestion State Change Notifications and CS taildrop */
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
*/
- if (likely(fd->status & FM_FD_ERR_PHYSICAL))
- pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPAA_CS_THRESHOLD_10G;
else
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d creating CGR with ID %d\n",
+ __func__, err, priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ struct fman_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (u16)fman_port_get_qman_channel_id(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+static void dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
+{
#ifndef __rtems__
- if (net_ratelimit())
- netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_RX_ERRORS);
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ u16 portals[NR_CPUS];
+#else /* __rtems__ */
+ int egress_cnt = 0, conf_cnt = 0;
+#endif /* __rtems__ */
+ struct dpaa_fq *fq;
+
+#ifndef __rtems__
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+ "No Qman software (affine) channels found");
+#endif /* __rtems__ */
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_TX:
+ dpaa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TXQ_NUM)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ /* fall through */
+ case FQ_TYPE_TX_CONFIRM:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+#ifndef __rtems__
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ break;
+ }
+ }
+
+ /* Make sure all CPUs receive a corresponding Tx queue. */
+ while (egress_cnt < DPAA_ETH_TXQ_NUM) {
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TXQ_NUM)
+ break;
+ }
+ }
+}
+
+static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
+{
+ const struct dpaa_priv *priv;
+ struct qman_fq *confq = NULL;
+ struct qm_mcc_initfq initfq;
+#ifndef __rtems__
+ struct device *dev;
+#endif /* __rtems__ */
+ struct qman_fq *fq;
+ int queue_id;
+ int err;
+
+ priv = netdev_priv(dpaa_fq->net_dev);
+#ifndef __rtems__
+ dev = dpaa_fq->net_dev->dev.parent;
+#endif /* __rtems__ */
+
+ if (dpaa_fq->fqid == 0)
+ dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
+ if (err) {
+#ifndef __rtems__
+ dev_err(dev, "qman_create_fq() failed\n");
#else /* __rtems__ */
BSD_ASSERT(0);
#endif /* __rtems__ */
+ return err;
+ }
+ fq = &dpaa_fq->fq_base;
+
+ if (dpaa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
+ /* Note: we may get to keep an empty FQ in cache */
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+
+ /* FQ placement */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+
+ qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+#ifndef __rtems__
+ min(sizeof(struct sk_buff) +
+#else /* __rtems__ */
+ min(
+#endif /* __rtems__ */
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
+ qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
+ }
+
+ if (dpaa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
+ if (queue_id >= 0)
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
+ }
+ }
+
+ /* Put all the ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+#ifndef __rtems__
+ min(sizeof(struct sk_buff) +
+#else /* __rtems__ */
+ min(
+#endif /* __rtems__ */
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ qm_fqd_set_stashing(&initfq.fqd, 1, 2,
+ DIV_ROUND_UP(sizeof(struct qman_fq),
+ 64));
+ }
+
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (err < 0) {
+#ifndef __rtems__
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+#else /* __rtems__ */
+ BSD_ASSERT(0);
+#endif /* __rtems__ */
+ qman_destroy_fq(fq);
+ return err;
+ }
+ }
+
+ dpaa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+#ifndef __rtems__
+static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
+{
+#ifndef __rtems__
+ const struct dpaa_priv *priv;
+#endif /* __rtems__ */
+ struct dpaa_fq *dpaa_fq;
+ int err, error;
+
+ err = 0;
+
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+#ifndef __rtems__
+ priv = netdev_priv(dpaa_fq->net_dev);
+#endif /* __rtems__ */
+
+ if (dpaa_fq->init) {
+ err = qman_retire_fq(fq, NULL);
+ if (err < 0 && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+
+ error = qman_oos_fq(fq);
+ if (error < 0 && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), error);
+ if (err >= 0)
+ err = error;
+ }
+ }
+
+ qman_destroy_fq(fq);
+ list_del(&dpaa_fq->list);
+
+ return err;
+}
+
+static int dpaa_fq_free(struct device *dev, struct list_head *list)
+{
+ struct dpaa_fq *dpaa_fq, *tmp;
+ int err, error;
+
+ err = 0;
+ list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
+ error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
+ if (error < 0 && err >= 0)
+ err = error;
+ }
+
+ return err;
+}
+#endif /* __rtems__ */
+
+static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_params params;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+ params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_rx_params *rx_p;
+ struct fman_port_params params;
+ int i, err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ rx_p = &params.specific_params.rx_params;
+ rx_p->err_fqid = errq->fqid;
+ rx_p->dflt_fqid = defq->fqid;
+
+ count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
+ rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
+ for (i = 0; i < count; i++) {
+ rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
+ }
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
+{
+ struct fman_port *rxport = mac_dev->port[RX];
+ struct fman_port *txport = mac_dev->port[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+}
+
+static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
+ struct bm_buffer *bmb, int cnt)
+{
+ int err;
+
+ err = bman_release(dpaa_bp->pool, bmb, cnt);
+ /* Should never occur, address anyway to avoid leaking the buffers */
+ if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ while (cnt-- > 0)
+ dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
+
+ return cnt;
+}
+
+static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
+{
+ struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
+ struct dpaa_bp *dpaa_bp;
+ int i = 0, j;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ do {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ return;
+
+ j = 0;
+ do {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
+
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]) &&
+ sgt[i - 1].bpid == sgt[i].bpid);
+
+ dpaa_bman_release(dpaa_bp, bmb, j);
+ } while (!qm_sg_entry_is_final(&sgt[i - 1]));
+}
+
+static void dpaa_fd_release(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpaa_bp *dpaa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ bmb.data = 0;
+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return;
+
+ if (qm_fd_get_format(fd) == qm_fd_sg) {
+ vaddr = phys_to_virt(qm_fd_addr(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
+
+#ifndef __rtems__
+ dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
+ DMA_FROM_DEVICE);
+#endif /* __rtems__ */
+
+ dpaa_release_sgt_members(sgt);
+
+#ifndef __rtems__
+ addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->dev, addr)) {
+ dev_err(dpaa_bp->dev, "DMA mapping failed");
+ return;
+ }
+#else /* __rtems__ */
+ addr = (dma_addr_t)vaddr;
+#endif /* __rtems__ */
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ dpaa_bman_release(dpaa_bp, &bmb, 1);
+}
+
+static void count_ern(struct dpaa_percpu_priv *percpu_priv,
+ const union qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+#ifndef __rtems__
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results)
+{
+ struct fman_prs_result *parse_result;
+ u16 ethertype = ntohs(skb->protocol);
+ struct ipv6hdr *ipv6h = NULL;
+ struct iphdr *iph;
+ int retval = 0;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (struct fman_prs_result *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ WARN_ON(!iph);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ WARN_ON(!ipv6h);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+ parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+#endif /* __rtems__ */
+
+static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
+{
+#ifndef __rtems__
+ struct device *dev = dpaa_bp->dev;
+#endif /* __rtems__ */
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+#ifndef __rtems__
+ void *new_buf;
+#endif /* __rtems__ */
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+#ifndef __rtems__
+ new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
+ if (unlikely(!new_buf)) {
+ dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
+ dpaa_bp->raw_size);
+ goto release_previous_buffs;
+ }
+ new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
+
+ addr = dma_map_single(dev, new_buf,
+ dpaa_bp->size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dpaa_bp->dev, "DMA map failed");
+ goto release_previous_buffs;
+ }
+#else /* __rtems__ */
+ struct mbuf *m;
+
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (unlikely(m == NULL)) {
+ goto release_previous_buffs;
+ }
+
+ RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES,
+ DPAA_BP_RAW_SIZE);
+ *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) =
+ m;
+ addr = mtod(m, dma_addr_t);
+#endif /* __rtems__ */
+
+ bmb[i].data = 0;
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ return dpaa_bman_release(dpaa_bp, bmb, i);
+
+release_previous_buffs:
+#ifndef __rtems__
+ WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
+#endif /* __rtems__ */
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpaa_bp->config_count; j += 8)
+ *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
+{
+ struct dpaa_bp *dpaa_bp;
+ int *countptr;
+ int res, i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ dpaa_bp = priv->dpaa_bps[i];
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
+ }
+ return 0;
+}
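A hypothetical Rx-path caller of the refill helper above (the real caller is outside this hunk): top up the per-CPU pools before handing a frame to the stack, and drop the frame on memory exhaustion so the refill is retried on the next NAPI cycle:

	/* Assumed drop path, for illustration only */
	if (unlikely(dpaa_eth_refill_bpools(priv))) {
		percpu_priv->stats.rx_dropped++;
		dpaa_fd_release(net_dev, fd);
		return qman_cb_dqrr_consume;
	}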
+
+#ifndef __rtems__
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct sk_buff **skbh, *skb;
+ int nr_frags, i;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ qm_sg_entry_get_len(&sgt[0]), dma_dir);
+
+ /* remaining pages were mapped with skb_frag_dma_map() */
+ for (i = 1; i < nr_frags; i++) {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ qm_sg_entry_get_len(&sgt[i]), dma_dir);
+ }
+
+ /* Free the page frag that we allocated on Tx */
+ skb_free_frag(phys_to_virt(addr));
+ } else {
+ dma_unmap_single(dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ }
+
+ return skb;
+}
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_bp *dpaa_bp;
+ struct sk_buff *skb;
+ void *vaddr;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ goto free_buffer;
+
+ skb = build_skb(vaddr, dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ WARN_ONCE(1, "Build skb failure on Rx\n");
+ goto free_buffer;
+ }
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_fd_get_length(fd));
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+
+free_buffer:
+ skb_free_frag(vaddr);
+ return NULL;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct page *page, *head_page;
+ struct dpaa_bp *dpaa_bp;
+ void *vaddr, *sg_vaddr;
+ int frag_off, frag_len;
+ struct sk_buff *skb;
+ dma_addr_t sg_addr;
+ int page_offset;
+ unsigned int sz;
+ int *count_ptr;
+ int i;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+ SMP_CACHE_BYTES));
+
+ /* We may use multiple Rx pools */
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ goto free_buffers;
+
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (i == 0) {
+ sz = dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(sg_vaddr, sz);
+ if (WARN_ON(unlikely(!skb)))
+ goto free_buffers;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
+ */
+ frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
+ frag_len = qm_sg_entry_get_len(&sgt[i]);
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+ * bad page accounting and eventually with segfaults.
+ */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_off,
+ frag_len, dpaa_bp->size);
+ }
+ /* Update the pool count for the current {cpu x bpool} */
+ (*count_ptr)--;
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* free the SG table buffer */
+ skb_free_frag(vaddr);
+
+ return skb;
+
+free_buffers:
+ /* compensate sw bpool counter changes */
+ for (i--; i >= 0; i--) {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)++;
+ }
+ }
+ /* free all the SG entries */
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ skb_free_frag(sg_vaddr);
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ /* free the SGT fragment */
+ skb_free_frag(vaddr);
+
+ return NULL;
+}
+
+static int skb_to_contig_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *offset)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ enum dma_data_direction dma_dir;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ int err;
+
+ /* We are guaranteed to have at least tx_headroom bytes
+ * available, so just use that for offset.
+ */
+ fd->bpid = FSL_DPAA_BPID_INV;
+ buffer_start = skb->data - priv->tx_headroom;
+ dma_dir = DMA_TO_DEVICE;
+
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dev, skbh,
+ skb_tail_pointer(skb) - buffer_start, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+}
+
+static int skb_to_sg_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct qm_sg_entry *sgt;
+ struct sk_buff **skbh;
+ int i, j, err, sz;
+ void *buffer_start;
+ skb_frag_t *frag;
+ dma_addr_t addr;
+ size_t frag_len;
+ void *sgt_buf;
+
+ /* get a page frag to store the SGTable */
+ sz = SKB_DATA_ALIGN(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ sgt_buf = netdev_alloc_frag(sz);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
+ sz);
+ return -ENOMEM;
+ }
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
+ sgt[0].bpid = FSL_DPAA_BPID_INV;
+ sgt[0].offset = 0;
+ addr = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+ qm_sg_entry_set64(&sgt[0], addr);
+
+ /* populate the rest of SGT entries */
+ frag = &skb_shinfo(skb)->frags[0];
+ frag_len = frag->size;
+ for (i = 1; i <= nr_frags; i++, frag++) {
+ WARN_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dev, frag, 0,
+ frag_len, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ qm_sg_entry_set_len(&sgt[i], frag_len);
+ sgt[i].bpid = FSL_DPAA_BPID_INV;
+ sgt[i].offset = 0;
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[i], addr);
+ frag_len = frag->size;
+ }
+ qm_sg_entry_set_f(&sgt[i - 1], frag_len);
+
+ qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - priv->tx_headroom;
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = FSL_DPAA_BPID_INV;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
+sg0_map_failed:
+csum_failed:
+ skb_free_frag(sgt_buf);
+
+ return err;
+}
+
+static inline int dpaa_xmit(struct dpaa_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ int queue,
+ struct qm_fd *fd)
+{
+ struct qman_fq *egress_fq;
+ int err, i;
+
+ egress_fq = priv->egress_fqs[queue];
+ if (fd->bpid == FSL_DPAA_BPID_INV)
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
+
+ /* Trace this Tx fd */
+ trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
+ err = qman_enqueue(egress_fq, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += qm_fd_get_length(fd);
+
+ return 0;
+}
+
+static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ const int queue_mapping = skb_get_queue_mapping(skb);
+ bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
+ int offset = 0;
+ int err = 0;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+ qm_fd_clear_fd(&fd);
+
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+ *
+ * We've made sure skb is not shared in dev->priv_flags,
+ * we need to verify the skb head is not cloned
+ */
+ if (skb_cow_head(skb, priv->tx_headroom))
+ goto enomem;
+
+ WARN_ON(skb_is_nonlinear(skb));
+ }
+
+ /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
+ * make sure we don't feed FMan with more fragments than it supports.
+ */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+		/* If the egress skb contains more fragments than we support,
+ * we have no choice but to linearize it ourselves.
+ */
+ if (unlikely(nonlinear) && __skb_linearize(skb))
+ goto enomem;
+
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+ dpaa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+#endif /* __rtems__ */
+static void dpaa_rx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
#ifndef __rtems__
+ if (net_ratelimit())
+ netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
+
percpu_priv->stats.rx_errors++;
#endif /* __rtems__ */
- if (fd->status & FM_FD_ERR_DMA)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
percpu_priv->rx_errors.dme++;
- if (fd->status & FM_FD_ERR_PHYSICAL)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
percpu_priv->rx_errors.fpe++;
- if (fd->status & FM_FD_ERR_SIZE)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
percpu_priv->rx_errors.fse++;
- if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
percpu_priv->rx_errors.phe++;
- dpa_fd_release(net_dev, fd);
+ dpaa_fd_release(net_dev, fd);
}
-static void _dpa_tx_error(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
+static void dpaa_tx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{
@@ -147,7 +2102,7 @@ static void _dpa_tx_error(struct net_device *net_dev,
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
#else /* __rtems__ */
@@ -156,64 +2111,57 @@ static void _dpa_tx_error(struct net_device *net_dev,
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif /* __rtems__ */
- /* If we intended the buffers from this frame to go into the bpools
- * when the FMan transmit was done, we need to put it in manually.
- */
- if (fd->bpid != 0xff) {
- dpa_fd_release(net_dev, fd);
- return;
- }
-
#ifndef __rtems__
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
dev_kfree_skb(skb);
#else /* __rtems__ */
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
#ifndef __rtems__
static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
- struct dpa_napi_portal *np =
- container_of(napi, struct dpa_napi_portal, napi);
+ struct dpaa_napi_portal *np =
+ container_of(napi, struct dpaa_napi_portal, napi);
int cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
- int tmp;
+ napi_complete_done(napi, cleaned);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- napi_complete(napi);
- tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- DPA_ERR_ON(tmp);
+ } else if (np->down) {
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
return cleaned;
}
#endif /* __rtems__ */
-static void _dpa_tx_conf(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
+static void dpaa_tx_conf(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{
#ifndef __rtems__
struct sk_buff *skb;
- if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
}
percpu_priv->tx_confirm++;
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
- dev_kfree_skb(skb);
+ consume_skb(skb);
#else /* __rtems__ */
struct ifnet *ifp = net_dev->ifp;
@@ -221,159 +2169,366 @@ static void _dpa_tx_conf(struct net_device *net_dev,
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
-static enum qman_cb_dqrr_result
-priv_rx_error_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
+ struct qman_portal *portal)
{
+#ifndef __rtems__
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+ percpu_priv->np.p = portal;
+ napi_schedule(&percpu_priv->np.napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+#endif /* __rtems__ */
+ return 0;
+}
+
+static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- int *count_ptr;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = dpaa_fq->net_dev;
priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
+ if (dpaa_eth_refill_bpools(priv))
/* Unable to refill the buffer pool due to insufficient
* system memory. Just release the frame back into the pool,
* otherwise we'll soon end up with an empty buffer pool.
*/
- dpa_fd_release(net_dev, &dq->fd);
+ dpaa_fd_release(net_dev, &dq->fd);
else
- _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_rx_default_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+#ifdef __rtems__
+static struct mbuf *
+dpaa_bp_addr_to_mbuf(dma_addr_t addr)
+{
+ void *vaddr = phys_to_virt(addr);
+
+ return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET));
+}
+
+static struct mbuf *
+contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
+{
+ struct mbuf *m;
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+
+ m = dpaa_bp_addr_to_mbuf(addr);
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd);
+ m->m_data = mtod(m, char *) + fd_off;
+
+ return (m);
+}
+
+static void
+dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr)
+{
+ struct bm_buffer bmb;
+
+ bm_buffer_set64(&bmb, addr);
+
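+	/* busy-wait until BMan accepts the buffer back into the pool */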
+ while (bman_release(dpaa_bp->pool, &bmb, 1))
+ cpu_relax();
+
+ ++(*count_ptr);
+}
+
+static struct mbuf *
+sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd,
+ struct ifnet *ifp, int *count_ptr)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ int i;
+ int len;
+ struct mbuf *m;
+ struct mbuf *last;
+
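+	/* the S/G table lives inside the frame buffer itself, at the FD offset */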
+ sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
+ len = 0;
+
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) {
+ dma_addr_t sg_addr;
+ int sg_len;
+ struct mbuf *n;
+
+ BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i]));
+ BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ n = dpaa_bp_addr_to_mbuf(sg_addr);
+
+ sg_len = qm_sg_entry_get_len(&sgt[i]);
+ len += sg_len;
+
+ if (i == 0) {
+ m = n;
+ } else {
+ last->m_next = n;
+ }
+
+ n->m_len = sg_len;
+ n->m_data = mtod(n, char *) + sgt[i].offset;
+ last = n;
+
+ --(*count_ptr);
+
+ if (qm_sg_entry_is_final(&sgt[i])) {
+ break;
+ }
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = len;
+
+ dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr);
+
+ return (m);
+}
+
+static void
+dpaa_rx(struct net_device *net_dev, struct qman_portal *portal,
+ const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd, u32 fqid, int *count_ptr)
{
+ struct dpaa_bp *dpaa_bp;
+ enum qm_fd_format fd_format;
+ struct mbuf *m;
+ struct ifnet *ifp;
+
+ ifp = net_dev->ifp;
+
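+	/* count and drop frames that FMan flagged with Rx errors */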
+ if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ dpaa_fd_release(net_dev, fd);
+ return;
+ }
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ fd_format = qm_fd_get_format(fd);
+
+ if (likely(fd_format == qm_fd_contig)) {
+ m = contig_fd_to_mbuf(fd, ifp);
+ } else {
+ BSD_ASSERT(fd_format == qm_fd_sg);
+ m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr);
+ }
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ (*count_ptr)--;
+
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+ (*ifp->if_input)(ifp, m);
+}
+#endif /* __rtems__ */
+static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+#ifndef __rtems__
+ struct rtnl_link_stats64 *percpu_stats;
+#endif /* __rtems__ */
+ struct dpaa_percpu_priv *percpu_priv;
+#ifndef __rtems__
+ const struct qm_fd *fd = &dq->fd;
+ dma_addr_t addr = qm_fd_addr(fd);
+ enum qm_fd_format fd_format;
+#endif /* __rtems__ */
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+#ifndef __rtems__
+ u32 fd_status = fd->status;
+#endif /* __rtems__ */
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+#ifndef __rtems__
+ unsigned int skb_len;
+ struct sk_buff *skb;
+#endif /* __rtems__ */
int *count_ptr;
- struct dpa_bp *dpa_bp;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+#ifndef __rtems__
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+#endif /* __rtems__ */
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- dpa_bp = priv->dpa_bp;
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
#ifndef __rtems__
/* Trace the Rx fd */
- trace_dpa_rx_fd(net_dev, fq, &dq->fd);
+ trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */
- /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+#ifndef __rtems__
+ percpu_stats = &percpu_priv->stats;
+#endif /* __rtems__ */
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
return qman_cb_dqrr_stop;
- /* Vale of plenty: make sure we didn't run out of buffers */
-
- if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
+ /* Make sure we didn't run out of buffers */
+ if (unlikely(dpaa_eth_refill_bpools(priv))) {
#ifdef __rtems__
- {
struct ifnet *ifp = net_dev->ifp;
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
#endif /* __rtems__ */
- /* Unable to refill the buffer pool due to insufficient
- * system memory. Just release the frame back into the pool,
- * otherwise we'll soon end up with an empty buffer pool.
- */
- dpa_fd_release(net_dev, &dq->fd);
-#ifdef __rtems__
+ dpaa_fd_release(net_dev, &dq->fd);
+ return qman_cb_dqrr_consume;
}
-#endif /* __rtems__ */
+
+#ifndef __rtems__
+ if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ dpaa_fd_release(net_dev, fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
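+	/* unmap the buffer before the CPU touches its contents */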
+ dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
+
+ fd_format = qm_fd_get_format(fd);
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+
+ if (likely(fd_format == qm_fd_contig))
+ skb = contig_fd_to_skb(priv, fd);
+ dpa_fd_release(net_dev, &dq->fd);
else
- _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
- count_ptr);
+ skb = sg_fd_to_skb(priv, fd);
+ if (!skb)
+ return qman_cb_dqrr_consume;
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ skb_len = skb->len;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ return qman_cb_dqrr_consume;
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+#else /* __rtems__ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
+ count_ptr);
+#endif /* __rtems__ */
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_tx_conf_error_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
{
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-priv_tx_conf_default_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
{
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_priv *priv;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
#ifndef __rtems__
/* Trace the fd */
- trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
+ trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static void priv_ern(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_mr_entry *msg)
+static void egress_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const union qm_mr_entry *msg)
{
+ const struct qm_fd *fd = &msg->ern.fd;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
struct net_device *net_dev;
- const struct dpa_priv_s *priv;
#ifndef __rtems__
struct sk_buff *skb;
#else /* __rtems__ */
struct ifnet *ifp;
#endif /* __rtems__ */
- struct dpa_percpu_priv_s *percpu_priv;
- const struct qm_fd *fd = &msg->ern.fd;
- net_dev = ((struct dpa_fq *)fq)->net_dev;
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
#ifndef __rtems__
percpu_priv->stats.tx_dropped++;
@@ -384,97 +2539,115 @@ static void priv_ern(struct qman_portal *portal,
#endif /* __rtems__ */
count_ern(percpu_priv, msg);
- /* If we intended this buffer to go into the pool
- * when the FM was done, we need to put it in
- * manually.
- */
- if (msg->ern.fd.bpid != 0xff) {
- dpa_fd_release(net_dev, fd);
- return;
- }
-
#ifndef __rtems__
- skb = _dpa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd);
dev_kfree_skb_any(skb);
#else /* __rtems__ */
- _dpa_cleanup_tx_fd(ifp, fd);
+ dpaa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}
-static const struct dpa_fq_cbs_t private_fq_cbs = {
- .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
- .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
- .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
- .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
- .egress_ern = { .cb = { .ern = priv_ern } }
+static const struct dpaa_fq_cbs dpaa_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
+ .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = egress_ern } }
};
-static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
#ifndef __rtems__
- struct dpa_percpu_priv_s *percpu_priv;
- int i, j;
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
for_each_possible_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < qman_portal_max; j++)
- napi_enable(&percpu_priv->np[j].napi);
+ percpu_priv->np.down = 0;
+ napi_enable(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
-static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
#ifndef __rtems__
- struct dpa_percpu_priv_s *percpu_priv;
- int i, j;
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
for_each_possible_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < qman_portal_max; j++)
- napi_disable(&percpu_priv->np[j].napi);
+ percpu_priv->np.down = 1;
+ napi_disable(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
#ifndef __rtems__
-static int dpa_eth_priv_start(struct net_device *net_dev)
+static int dpaa_open(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_start(struct net_device *net_dev)
#endif /* __rtems__ */
{
- int err;
- struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int err, i;
priv = netdev_priv(net_dev);
-
+ mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- err = dpa_start(net_dev);
- if (err < 0)
- dpaa_eth_napi_disable(priv);
+#ifndef __rtems__
+ net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
+ if (!net_dev->phydev) {
+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
+ err = -ENODEV;
+ goto phy_init_failed;
+ }
+#endif /* __rtems__ */
+
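+	/* enable the Rx and Tx FMan ports before starting the MAC itself */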
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ err = fman_port_enable(mac_dev->port[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+#ifndef __rtems__
+ netif_tx_start_all_queues(net_dev);
+#endif /* __rtems__ */
+
+ return 0;
+
+mac_start_failed:
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+ fman_port_disable(mac_dev->port[i]);
+
+#ifndef __rtems__
+phy_init_failed:
+#endif /* __rtems__ */
+ dpaa_eth_napi_disable(priv);
return err;
}
#ifndef __rtems__
-static int dpa_eth_priv_stop(struct net_device *net_dev)
+static int dpaa_eth_stop(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_stop(struct net_device *net_dev)
#endif /* __rtems__ */
{
+ struct dpaa_priv *priv;
int err;
- struct dpa_priv_s *priv;
- err = dpa_stop(net_dev);
- /* Allow NAPI to consume any frame still in the Rx/TxConfirm
- * ingress queues. This is to avoid a race between the current
- * context and ksoftirqd which could leave NAPI disabled while
- * in fact there's still Rx traffic to be processed.
- */
- usleep_range(5000, 10000);
+ err = dpaa_stop(net_dev);
priv = netdev_priv(net_dev);
dpaa_eth_napi_disable(priv);
@@ -483,131 +2656,89 @@ int dpa_eth_priv_stop(struct net_device *net_dev)
}
#ifndef __rtems__
-static const struct net_device_ops dpa_private_ops = {
- .ndo_open = dpa_eth_priv_start,
- .ndo_start_xmit = dpa_tx,
- .ndo_stop = dpa_eth_priv_stop,
- .ndo_tx_timeout = dpa_timeout,
- .ndo_get_stats64 = dpa_get_stats64,
- .ndo_set_mac_address = dpa_set_mac_address,
+static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ if (!net_dev->phydev)
+ return -EINVAL;
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+}
+
+static const struct net_device_ops dpaa_ops = {
+ .ndo_open = dpaa_open,
+ .ndo_start_xmit = dpaa_start_xmit,
+ .ndo_stop = dpaa_eth_stop,
+ .ndo_tx_timeout = dpaa_tx_timeout,
+ .ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- .ndo_select_queue = dpa_select_queue,
-#endif
- .ndo_change_mtu = dpa_change_mtu,
- .ndo_set_rx_mode = dpa_set_rx_mode,
- .ndo_init = dpa_ndo_init,
- .ndo_set_features = dpa_set_features,
- .ndo_fix_features = dpa_fix_features,
+ .ndo_set_rx_mode = dpaa_set_rx_mode,
+ .ndo_do_ioctl = dpaa_ioctl,
};
-#endif /* __rtems__ */
-static int dpa_private_napi_add(struct net_device *net_dev)
+static int dpaa_napi_add(struct net_device *net_dev)
{
-#ifndef __rtems__
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
- int i, cpu;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
- qman_portal_max * sizeof(struct dpa_napi_portal),
- GFP_KERNEL);
-
- if (!percpu_priv->np)
- return -ENOMEM;
-
- for (i = 0; i < qman_portal_max; i++)
- netif_napi_add(net_dev, &percpu_priv->np[i].napi,
- dpaa_eth_poll, DPA_NAPI_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi,
+ dpaa_eth_poll, NAPI_POLL_WEIGHT);
}
-#endif /* __rtems__ */
return 0;
}
+#endif /* __rtems__ */
-void dpa_private_napi_del(struct net_device *net_dev)
+static void dpaa_napi_del(struct net_device *net_dev)
{
#ifndef __rtems__
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
- int i, cpu;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- if (percpu_priv->np) {
- for (i = 0; i < qman_portal_max; i++)
- netif_napi_del(&percpu_priv->np[i].napi);
-
- devm_kfree(net_dev->dev.parent, percpu_priv->np);
- }
+ netif_napi_del(&percpu_priv->np.napi);
}
#endif /* __rtems__ */
}
-static int dpa_private_netdev_init(struct net_device *net_dev)
+static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
+ struct bm_buffer *bmb)
{
- int i;
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct dpa_percpu_priv_s *percpu_priv;
-#ifndef __rtems__
- const u8 *mac_addr;
-#endif /* __rtems__ */
+ dma_addr_t addr = bm_buf_addr(bmb);
- /* Although we access another CPU's private data here
- * we do it at initialization so it is safe
- */
#ifndef __rtems__
- for_each_possible_cpu(i) {
-#else /* __rtems__ */
- for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
-#endif /* __rtems__ */
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->net_dev = net_dev;
- }
-
-#ifndef __rtems__
- net_dev->netdev_ops = &dpa_private_ops;
- mac_addr = priv->mac_dev->addr;
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
-
- net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX);
-
- /* Advertise S/G and HIGHDMA support for private interfaces */
- net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
- /* Recent kernels enable GSO automatically, if
- * we declare NETIF_F_SG. For conformity, we'll
- * still declare GSO explicitly.
- */
- net_dev->features |= NETIF_F_GSO;
-
- return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
+ skb_free_frag(phys_to_virt(addr));
#else /* __rtems__ */
- return 0;
+ BSD_ASSERT(0);
+ m_freem(dpaa_bp_addr_to_mbuf(addr));
#endif /* __rtems__ */
}
-static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
+/* Alloc the dpaa_bp struct and configure default values */
+static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
- struct dpa_bp *dpa_bp;
+ struct dpaa_bp *dpaa_bp;
- dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
- if (!dpa_bp)
+ dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
+ if (!dpaa_bp)
return ERR_PTR(-ENOMEM);
- dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
- dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+ dpaa_bp->bpid = FSL_DPAA_BPID_INV;
+ dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
+ dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
- dpa_bp->seed_cb = dpa_bp_priv_seed;
- dpa_bp->free_buf_cb = _dpa_bp_free_pf;
+ dpaa_bp->seed_cb = dpaa_bp_seed;
+ dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
- return dpa_bp;
+ return dpaa_bp;
}
/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
@@ -615,7 +2746,7 @@ static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
* this CGR to generate enqueue rejections to FMan in order to drop the frames
* before they reach our ingress queues and eat up memory.
*/
-static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
+static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
struct qm_mcc_initcgr initcgr;
u32 cs_th;
@@ -623,112 +2754,95 @@ static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
if (err < 0) {
- pr_err("Error %d allocating CGR ID\n", err);
+ if (netif_msg_drv(priv))
+ pr_err("Error %d allocating CGR ID\n", err);
goto out_error;
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
- initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
- /* This is actually a hack, because this CGR will be associated with
- * our affine SWP. However, we'll place our ingress FQs in it.
+ /* This CGR will be associated with the SWP affined to the current CPU.
+ * However, we'll place all our ingress FQs in it.
*/
err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
&initcgr);
if (err < 0) {
- pr_err("Error %d creating ingress CGR with ID %d\n", err,
- priv->ingress_cgr.cgrid);
+ if (netif_msg_drv(priv))
+ pr_err("Error %d creating ingress CGR with ID %d\n",
+ err, priv->ingress_cgr.cgrid);
qman_release_cgrid(priv->ingress_cgr.cgrid);
goto out_error;
}
- pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
- priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+ if (netif_msg_drv(priv))
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
- /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
- * range), but we have no common initialization path between the
- * different variants of the DPAA Eth driver, so we do it here rather
- * than modifying every other variant than "private Eth".
- */
priv->use_ingress_cgr = true;
out_error:
return err;
}
-static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- size_t count)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- int i;
-
- netif_dbg(priv, probe, net_dev,
- "Using private BM buffer pools\n");
-
- priv->bp_count = count;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = dpa_bp_alloc(&dpa_bp[i]);
- if (err < 0) {
- dpa_bp_free(priv);
- priv->dpa_bp = NULL;
- return err;
- }
+#ifndef __rtems__
+static const struct of_device_id dpaa_match[];
+#endif /* __rtems__ */
- priv->dpa_bp = &dpa_bp[i];
- }
+static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+{
+ u16 headroom;
+
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+	 * If either hash results or timestamp is selected, both will
+	 * be copied to/from the frame headroom, as TS is located between PR and
+	 * HR in the IC and the IC copy size has a granularity of 16 bytes
+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
+ *
+	 * Also make sure the headroom is a multiple of DPAA_FD_DATA_ALIGNMENT bytes
+ */
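+	/* For example (illustrative sizes): a 16 byte Tx private data area,
+	 * 32 byte parse results, an 8 byte timestamp and 8 byte hash results
+	 * add up to 64 bytes, which is already a multiple of the 16 byte
+	 * DPAA_FD_DATA_ALIGNMENT.
+	 */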
+ headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
+ DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- dpa_priv_common_bpid = priv->dpa_bp->bpid;
- return 0;
+ return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
+ DPAA_FD_DATA_ALIGNMENT) :
+ headroom;
}
#ifndef __rtems__
-static const struct of_device_id dpa_match[];
-
-static int
-dpaa_eth_priv_probe(struct platform_device *pdev)
+static int dpaa_eth_probe(struct platform_device *pdev)
#else /* __rtems__ */
int
dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
#endif /* __rtems__ */
{
- int err = 0, i, channel;
- struct device *dev;
- struct dpa_bp *dpa_bp;
- struct dpa_fq *dpa_fq, *tmp;
- size_t count = 1;
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
+ struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev = NULL;
- struct dpa_priv_s *priv = NULL;
- struct dpa_percpu_priv_s *percpu_priv;
+ struct dpaa_fq *dpaa_fq, *tmp;
+ struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
- struct dpa_buffer_layout_s *buf_layout = NULL;
#ifndef __rtems__
struct mac_device *mac_dev;
- struct task_struct *kth;
#endif /* __rtems__ */
+ int err = 0, i, channel;
+ struct device *dev;
dev = &pdev->dev;
- /* Get the buffer pool assigned to this interface;
- * run only once the default pool probing code
- */
- dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
- dpa_priv_bp_probe(dev);
- if (IS_ERR(dpa_bp))
- return PTR_ERR(dpa_bp);
-
#ifndef __rtems__
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
goto alloc_etherdev_mq_failed;
@@ -738,12 +2852,6 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
#endif /* __rtems__ */
-#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME
- snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",
- dpa_mac_fman_index_get(pdev),
- dpa_mac_hw_index_get(pdev));
-#endif
-
/* Do this here, so we can be verbose early */
#ifndef __rtems__
SET_NETDEV_DEV(net_dev, dev);
@@ -754,77 +2862,90 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
priv->net_dev = net_dev;
#ifndef __rtems__
- priv->msg_enable = netif_msg_init(debug, -1);
+ priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
- mac_dev = dpa_mac_dev_get(pdev);
- if (IS_ERR(mac_dev) || !mac_dev) {
+ mac_dev = dpaa_mac_dev_get(pdev);
+ if (IS_ERR(mac_dev)) {
+ dev_err(dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
goto mac_probe_failed;
}
-#endif /* __rtems__ */
- /* We have physical ports, so we need to establish
- * the buffer layout.
+ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
*/
- buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- GFP_KERNEL);
- if (!buf_layout)
- goto alloc_failed;
+ net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
- dpa_set_buffers_layout(mac_dev, buf_layout);
+ netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+ net_dev->mtu);
+#endif /* __rtems__ */
- /* For private ports, need to compute the size of the default
- * buffer pool, based on FMan port buffer layout;also update
- * the maximum buffer size for private ports if necessary
- */
- dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+ priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
+ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
- INIT_LIST_HEAD(&priv->dpa_fq_list);
+#ifndef __rtems__
+ /* device used for DMA mapping */
+ arch_setup_dma_ops(dev, 0, 0, NULL, false);
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+ goto dev_mask_failed;
+ }
+#endif /* __rtems__ */
- memset(&port_fqs, 0, sizeof(port_fqs));
+ /* bp init */
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ int err;
- err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
- if (!err)
- err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
- &port_fqs, true, TX);
+ dpaa_bps[i] = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bps[i]))
+ return PTR_ERR(dpaa_bps[i]);
+ /* the raw size of the buffers used for reception */
+ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
+ dpaa_bps[i]->dev = dev;
- if (err < 0)
- goto fq_probe_failed;
+ err = dpaa_bp_alloc_pool(dpaa_bps[i]);
+ if (err < 0) {
+ dpaa_bps_free(priv);
+ priv->dpaa_bps[i] = NULL;
+ goto bp_create_failed;
+ }
+ priv->dpaa_bps[i] = dpaa_bps[i];
+ }
- /* bp init */
+ INIT_LIST_HEAD(&priv->dpaa_fq_list);
- err = dpa_priv_bp_create(net_dev, dpa_bp, count);
+ memset(&port_fqs, 0, sizeof(port_fqs));
- if (err < 0)
- goto bp_create_failed;
+ err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
+ if (err < 0) {
+ dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
+ goto fq_probe_failed;
+ }
priv->mac_dev = mac_dev;
- channel = dpa_get_channel();
-
+ channel = dpaa_get_channel();
if (channel < 0) {
+ dev_err(dev, "dpaa_get_channel() failed\n");
err = channel;
goto get_channel_failed;
}
priv->channel = (u16)channel;
-#ifndef __rtems__
- /* Start a thread that will walk the cpus with affine portals
+	/* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
- kth = kthread_run(dpaa_eth_add_channel,
- (void *)(unsigned long)priv->channel,
- "dpaa_%p:%d", net_dev, priv->channel);
- if (!kth) {
- err = -ENOMEM;
- goto add_channel_failed;
- }
-#else /* __rtems__ */
- dpaa_eth_add_channel((void *)(unsigned long)priv->channel);
-#endif /* __rtems__ */
+ dpaa_eth_add_channel(priv->channel);
- dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]);
+ dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -836,29 +2957,28 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
dev_err(dev, "Error initializing CGR\n");
goto tx_cgr_init_failed;
}
- err = dpaa_eth_priv_ingress_cgr_init(priv);
+
+ err = dpaa_ingress_cgr_init(priv);
if (err < 0) {
dev_err(dev, "Error initializing ingress CGR\n");
goto rx_cgr_init_failed;
}
/* Add the FQs to the interface, and make them active */
- list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
- err = dpa_fq_init(dpa_fq, false);
+ list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
+ err = dpaa_fq_init(dpaa_fq, false);
if (err < 0)
goto fq_alloc_failed;
}
- priv->buf_layout = buf_layout;
- priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
- buf_layout, dev);
+ dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
-
if (!priv->percpu_priv) {
dev_err(dev, "devm_alloc_percpu() failed\n");
err = -ENOMEM;
@@ -873,31 +2993,32 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
+#ifndef __rtems__
/* Initialize NAPI */
- err = dpa_private_napi_add(net_dev);
-
+ err = dpaa_napi_add(net_dev);
if (err < 0)
goto napi_add_failed;
- err = dpa_private_netdev_init(net_dev);
-
+ err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
if (err < 0)
goto netdev_init_failed;
-#ifndef __rtems__
dpaa_eth_sysfs_init(&net_dev->dev);
- pr_info("Probed interface %s\n", net_dev->name);
+ netif_info(priv, probe, net_dev, "Probed interface %s\n",
+ net_dev->name);
#endif /* __rtems__ */
return 0;
+#ifndef __rtems__
netdev_init_failed:
napi_add_failed:
- dpa_private_napi_del(net_dev);
+#endif /* __rtems__ */
+ dpaa_napi_del(net_dev);
alloc_percpu_failed:
#ifndef __rtems__
- dpa_fq_free(dev, &priv->dpa_fq_list);
+ dpaa_fq_free(dev, &priv->dpaa_fq_list);
#endif /* __rtems__ */
fq_alloc_failed:
#ifndef __rtems__
@@ -910,79 +3031,111 @@ rx_cgr_init_failed:
qman_release_cgrid(priv->cgr_data.cgr.cgrid);
#endif /* __rtems__ */
tx_cgr_init_failed:
-#ifndef __rtems__
-add_channel_failed:
-#endif /* __rtems__ */
get_channel_failed:
- dpa_bp_free(priv);
+ dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
-alloc_failed:
#ifndef __rtems__
+dev_mask_failed:
mac_probe_failed:
#endif /* __rtems__ */
dev_set_drvdata(dev, NULL);
#ifndef __rtems__
free_netdev(net_dev);
alloc_etherdev_mq_failed:
- if (atomic_read(&dpa_bp->refs) == 0)
- devm_kfree(dev, dpa_bp);
+ for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
+ if (atomic_read(&dpaa_bps[i]->refs) == 0)
+ devm_kfree(dev, dpaa_bps[i]);
+ }
#else /* __rtems__ */
BSD_ASSERT(0);
#endif /* __rtems__ */
+ return err;
+}
+
+#ifndef __rtems__
+static int dpaa_remove(struct platform_device *pdev)
+{
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &pdev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
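+	/* undo the probe-time setup: FQs, CGRs, NAPI contexts and buffer pools */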
+ err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpaa_napi_del(net_dev);
+
+ dpaa_bps_free(priv);
+
+ free_netdev(net_dev);
return err;
}
+#endif /* __rtems__ */
#ifndef __rtems__
-static struct platform_device_id dpa_devtype[] = {
+static struct platform_device_id dpaa_devtype[] = {
{
.name = "dpaa-ethernet",
.driver_data = 0,
}, {
}
};
-MODULE_DEVICE_TABLE(platform, dpa_devtype);
+MODULE_DEVICE_TABLE(platform, dpaa_devtype);
-static struct platform_driver dpa_driver = {
+static struct platform_driver dpaa_driver = {
.driver = {
.name = KBUILD_MODNAME,
},
- .id_table = dpa_devtype,
- .probe = dpaa_eth_priv_probe,
- .remove = dpa_remove
+ .id_table = dpaa_devtype,
+ .probe = dpaa_eth_probe,
+ .remove = dpaa_remove
};
-static int __init dpa_load(void)
+static int __init dpaa_load(void)
{
int err;
- pr_info(DPA_DESCRIPTION "\n");
+ pr_debug("FSL DPAA Ethernet driver\n");
- /* initialise dpaa_eth mirror values */
- dpa_rx_extra_headroom = fman_get_rx_extra_headroom();
- dpa_max_frm = fman_get_max_frm();
+ /* initialize dpaa_eth mirror values */
+ dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
+ dpaa_max_frm = fman_get_max_frm();
- err = platform_driver_register(&dpa_driver);
+ err = platform_driver_register(&dpaa_driver);
if (err < 0)
pr_err("Error, platform_driver_register() = %d\n", err);
return err;
}
-module_init(dpa_load);
+module_init(dpaa_load);
-static void __exit dpa_unload(void)
+static void __exit dpaa_unload(void)
{
- platform_driver_unregister(&dpa_driver);
+ platform_driver_unregister(&dpaa_driver);
- /* Only one channel is used and needs to be relased after all
+ /* Only one channel is used and needs to be released after all
* interfaces are removed
*/
- dpa_release_channel();
+ dpaa_release_channel();
}
-module_exit(dpa_unload);
+module_exit(dpaa_unload);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
-MODULE_DESCRIPTION(DPA_DESCRIPTION);
+MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 11b11e65..8a2b1189 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,4 +1,4 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,131 +28,36 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __DPA_H
-#define __DPA_H
+#ifndef __DPAA_H
+#define __DPAA_H
#include <linux/netdevice.h>
#include <soc/fsl/qman.h>
+#include <soc/fsl/bman.h>
#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"
-#ifndef __rtems__
-extern int dpa_rx_extra_headroom;
-extern int dpa_max_frm;
-
-#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-#define dpa_get_max_frm() dpa_max_frm
-#else /* __rtems__ */
-#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom
-#define dpa_get_max_frm fman_get_max_frm
-#endif /* __rtems__ */
-
-#define dpa_get_max_mtu() \
- (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
-
-/* Simple enum of FQ types - used for array indexing */
-enum port_type {RX, TX};
-
-struct dpa_buffer_layout_s {
- u16 priv_data_size;
- bool parse_results;
- bool time_stamp;
- bool hash_results;
- u16 data_align;
-};
-
-#define DPA_ERR_ON(cond)
-
-#define DPA_TX_PRIV_DATA_SIZE 16
-#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
-#define DPA_TIME_STAMP_SIZE 8
-#define DPA_HASH_RESULTS_SIZE 8
-#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
- dpa_get_rx_extra_headroom())
-
-#define FM_FD_STAT_RX_ERRORS \
- (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
- FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
- FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
- FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
- FM_FD_ERR_PRS_HDR_ERR)
+#define DPAA_ETH_TXQ_NUM NR_CPUS
-#define FM_FD_STAT_TX_ERRORS \
- (FM_FD_ERR_UNSUPPORTED_FORMAT | \
- FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
-
-/* The raw buffer size must be cacheline aligned.
- * Normally we use 2K buffers.
- */
-#define DPA_BP_RAW_SIZE 2048
-
-/* This is what FMan is ever allowed to use.
- * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers (can we?), so we reserve some more space
- * for start-of-buffer alignment.
- */
#ifndef __rtems__
-#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
- SMP_CACHE_BYTES)
+#define DPAA_BPS_NUM 3 /* number of bpools per interface */
#else /* __rtems__ */
-/*
- * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
- * frames overwrite this area if < 64 bytes.
- */
-#define DPA_OUT_OF_BAND_SIZE 64
-#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)
-#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET
+#define DPAA_BPS_NUM 1
#endif /* __rtems__ */
-/* We must ensure that skb_shinfo is always cacheline-aligned. */
-#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
-
-/* Largest value that the FQD's OAL field can hold.
- * This is DPAA-1.x specific.
- */
-#define FSL_QMAN_MAX_OAL 127
-
-/* Default alignment for start of data in an Rx FD */
-#define DPA_FD_DATA_ALIGNMENT 16
-
-/* Values for the L3R field of the FM Parse Results
- */
-/* L3 Type field: First IP Present IPv4 */
-#define FM_L3_PARSE_RESULT_IPV4 0x8000
-/* L3 Type field: First IP Present IPv6 */
-#define FM_L3_PARSE_RESULT_IPV6 0x4000
-
-/* Values for the L4R field of the FM Parse Results
- * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
- */
-/* L4 Type field: UDP */
-#define FM_L4_PARSE_RESULT_UDP 0x40
-/* L4 Type field: TCP */
-#define FM_L4_PARSE_RESULT_TCP 0x20
-
-/* number of Tx queues to FMan */
-#define DPAA_ETH_TX_QUEUES NR_CPUS
-
-#define DPAA_ETH_RX_QUEUES 128
-
-#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
-#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
/* More detailed FQ types - used for fine-grained WQ assignments */
-enum dpa_fq_type {
+enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
FQ_TYPE_RX_ERROR, /* Rx Error FQs */
- FQ_TYPE_RX_PCD, /* User-defined PCDs */
FQ_TYPE_TX, /* "Real" Tx FQs */
FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
};
-struct dpa_fq {
+struct dpaa_fq {
struct qman_fq fq_base;
struct list_head list;
struct net_device *net_dev;
@@ -161,10 +66,10 @@ struct dpa_fq {
u32 flags;
u16 channel;
u8 wq;
- enum dpa_fq_type fq_type;
+ enum dpaa_fq_type fq_type;
};
-struct dpa_fq_cbs_t {
+struct dpaa_fq_cbs {
struct qman_fq rx_defq;
struct qman_fq tx_defq;
struct qman_fq rx_errq;
@@ -172,45 +77,30 @@ struct dpa_fq_cbs_t {
struct qman_fq egress_ern;
};
-struct fqid_cell {
- u32 start;
- u32 count;
-};
-
-struct dpa_bp {
- struct bman_pool *pool;
- u8 bpid;
-#ifndef __rtems__
+struct dpaa_bp {
+ /* device used in the DMA mapping operations */
struct device *dev;
-#endif /* __rtems__ */
- /* the buffer pools used for the private ports are initialized
- * with config_count buffers for each CPU; at runtime the
- * number of buffers per CPU is constantly brought back to this
- * level
- */
- int config_count;
+	/* current number of buffers in the buffer pool allotted to each CPU */
+ int __percpu *percpu_count;
+ /* all buffers allocated for this pool have this raw size */
+ size_t raw_size;
+ /* all buffers in this pool have this same usable size */
size_t size;
- bool seed_pool;
- /* physical address of the contiguous memory used by the pool to store
- * the buffers
- */
- dma_addr_t paddr;
- /* virtual address of the contiguous memory used by the pool to store
- * the buffers
+ /* the buffer pools are initialized with config_count buffers for each
+ * CPU; at runtime the number of buffers per CPU is constantly brought
+ * back to this level
*/
- void __iomem *vaddr;
- /* current number of buffers in the bpool alloted to this CPU */
- int __percpu *percpu_count;
+ u16 config_count;
+ u8 bpid;
+ struct bman_pool *pool;
+ /* bpool can be seeded before use by this cb */
+ int (*seed_cb)(struct dpaa_bp *);
+ /* bpool can be emptied before freeing by this cb */
+ void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
atomic_t refs;
- /* some bpools need to be seeded before use by this cb */
- int (*seed_cb)(struct dpa_bp *);
- /* some bpools need to be emptied before freeing; this cb is used
- * for freeing of individual buffers taken from the pool
- */
- void (*free_buf_cb)(void *addr);
};
-struct dpa_rx_errors {
+struct dpaa_rx_errors {
u64 dme; /* DMA Error */
u64 fpe; /* Frame Physical Error */
u64 fse; /* Frame Size Error */
@@ -218,7 +108,7 @@ struct dpa_rx_errors {
};
/* Counters for QMan ERN frames - one counter per rejection code */
-struct dpa_ern_cnt {
+struct dpaa_ern_cnt {
u64 cg_tdrop; /* Congestion group taildrop */
u64 wred; /* WRED congestion */
u64 err_cond; /* Error condition */
@@ -229,16 +119,17 @@ struct dpa_ern_cnt {
u64 orp_zero; /* ORP disabled */
};
-struct dpa_napi_portal {
+struct dpaa_napi_portal {
#ifndef __rtems__
struct napi_struct napi;
#endif /* __rtems__ */
struct qman_portal *p;
+ bool down;
};
-struct dpa_percpu_priv_s {
+struct dpaa_percpu_priv {
struct net_device *net_dev;
- struct dpa_napi_portal *np;
+ struct dpaa_napi_portal np;
u64 in_interrupt;
u64 tx_confirm;
/* fragmented (non-linear) skbuffs received from the stack */
@@ -246,26 +137,28 @@ struct dpa_percpu_priv_s {
#ifndef __rtems__
struct rtnl_link_stats64 stats;
#endif /* __rtems__ */
- struct dpa_rx_errors rx_errors;
- struct dpa_ern_cnt ern_cnt;
+ struct dpaa_rx_errors rx_errors;
+ struct dpaa_ern_cnt ern_cnt;
};
-struct dpa_priv_s {
- struct dpa_percpu_priv_s __percpu *percpu_priv;
- struct dpa_bp *dpa_bp;
+struct dpaa_buffer_layout {
+ u16 priv_data_size;
+};
+
+struct dpaa_priv {
+ struct dpaa_percpu_priv __percpu *percpu_priv;
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
u16 tx_headroom;
struct net_device *net_dev;
struct mac_device *mac_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
- struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
-
- size_t bp_count;
+ struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
- u16 channel; /* "fsl,qman-channel-id" */
- struct list_head dpa_fq_list;
+ u16 channel;
+ struct list_head dpaa_fq_list;
#ifndef __rtems__
u32 msg_enable; /* net_device message level */
@@ -289,231 +182,28 @@ struct dpa_priv_s {
bool use_ingress_cgr;
struct qman_cgr ingress_cgr;
- struct dpa_buffer_layout_s *buf_layout;
+ struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
};
-struct fm_port_fqs {
- struct dpa_fq *tx_defq;
- struct dpa_fq *tx_errq;
- struct dpa_fq *rx_defq;
- struct dpa_fq *rx_errq;
-};
+/* from dpaa_ethtool.c */
+extern const struct ethtool_ops dpaa_ethtool_ops;
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
-int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
-void _dpa_rx(struct net_device *net_dev,
- struct qman_portal *portal,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid,
- int *count_ptr);
-#ifndef __rtems__
-int dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
-struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- const struct qm_fd *fd);
-
-/* Turn on HW checksum computation for this outgoing frame.
- * If the current protocol is not something we support in this regard
- * (or if the stack has already computed the SW checksum), we do nothing.
- *
- * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- * otherwise.
- *
- * Note that this function may modify the fd->cmd field and the skb data buffer
- * (the Parse Results area).
- */
-int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,
- struct qm_fd *fd, char *parse_results);
-#else /* __rtems__ */
-void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
-#endif /* __rtems__ */
-
-static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
- struct qman_portal *portal)
-{
-#ifndef __rtems__
- /* In case of threaded ISR for RT enable kernel,
- * in_irq() does not return appropriate value, so use
- * in_serving_softirq to distinguish softirq or irq context.
- */
- if (unlikely(in_irq() || !in_serving_softirq())) {
- /* Disable QMan IRQ and invoke NAPI */
- int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
-
- if (likely(!ret)) {
- const struct qman_portal_config *pc =
- qman_p_get_portal_config(portal);
- struct dpa_napi_portal *np =
- &percpu_priv->np[pc->channel];
-
- np->p = portal;
- napi_schedule(&np->napi);
- percpu_priv->in_interrupt++;
- return 1;
- }
- }
-#else /* __rtems__ */
- /* FIXME */
-#endif /* __rtems__ */
- return 0;
-}
-
-static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd)
-{
- return fd->length20;
-}
-
-static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd)
-{
- return fd->offset;
-}
-
-#ifndef __rtems__
-/* Verifies if the skb length is below the interface MTU */
-static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
-{
- if (unlikely(skb->len > mtu))
- if ((skb->protocol != htons(ETH_P_8021Q)) ||
- (skb->len > mtu + 4))
- return -1;
-
- return 0;
-}
-#endif /* __rtems__ */
-
-static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)
-{
- u16 headroom;
- /* The frame headroom must accommodate:
- * - the driver private data area
- * - parse results, hash results, timestamp if selected
- * If either hash results or time stamp are selected, both will
- * be copied to/from the frame headroom, as TS is located between PR and
- * HR in the IC and IC copy size has a granularity of 16bytes
- * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
- *
- * Also make sure the headroom is a multiple of data_align bytes
- */
- headroom = (u16)(bl->priv_data_size +
- (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
- (bl->hash_results || bl->time_stamp ?
- DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));
-
- return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
-}
-
-#ifndef __rtems__
+/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
-
-void dpa_private_napi_del(struct net_device *net_dev);
+#ifdef __rtems__
+#include <sys/mbuf.h>
+
+#define DPAA_ENQUEUE_RETRIES 100000
+#define DPAA_SGT_MAX_ENTRIES 16
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+#define FM_L4_PARSE_RESULT_UDP 0x40
+#define FM_L4_PARSE_RESULT_TCP 0x20
+#define FSL_DPAA_BPID_INV 0xff
+
+void dpaa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
#endif /* __rtems__ */
-
-static inline void clear_fd(struct qm_fd *fd)
-{
- fd->opaque_addr = 0;
- fd->opaque = 0;
- fd->cmd = 0;
-}
-
-static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
- struct qman_fq *tx_fq)
-{
- int i;
-
- for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
- if (priv->egress_fqs[i] == tx_fq)
- return i;
-
- return -EINVAL;
-}
-
-#ifndef __rtems__
-static inline int dpa_xmit(struct dpa_priv_s *priv,
- struct rtnl_link_stats64 *percpu_stats,
- int queue,
- struct qm_fd *fd)
-{
- int err, i;
- struct qman_fq *egress_fq;
-
- egress_fq = priv->egress_fqs[queue];
- if (fd->bpid == 0xff)
- fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
-
- /* Trace this Tx fd */
- trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
-
- for (i = 0; i < 100000; i++) {
- err = qman_enqueue(egress_fq, fd, 0);
- if (err != -EBUSY)
- break;
- }
-
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- percpu_stats->tx_fifo_errors++;
- return err;
- }
-
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpa_fd_length(fd);
-
- return 0;
-}
-#endif /* __rtems__ */
-
-/* Use multiple WQs for FQ assignment:
- * - Tx Confirmation queues go to WQ1.
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- * Rx and Tx traffic).
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- * to be scheduled, in case there are many more FQs in WQ3).
- * This ensures that Tx-confirmed buffers are timely released. In particular,
- * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
- * are greatly outnumbered by other FQs in the system, while
- * dequeue scheduling is round-robin.
- */
-static inline void _dpa_assign_wq(struct dpa_fq *fq)
-{
- switch (fq->fq_type) {
- case FQ_TYPE_TX_CONFIRM:
- case FQ_TYPE_TX_CONF_MQ:
- fq->wq = 1;
- break;
- case FQ_TYPE_RX_DEFAULT:
- case FQ_TYPE_TX:
- fq->wq = 3;
- break;
- case FQ_TYPE_RX_ERROR:
- case FQ_TYPE_TX_ERROR:
- fq->wq = 2;
- break;
- default:
- WARN(1, "Invalid FQ type %d for FQID %d!\n",
- fq->fq_type, fq->fqid);
- }
-}
-
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-/* Use in lieu of skb_get_queue_mapping() */
-#define dpa_get_queue_mapping(skb) \
- raw_smp_processor_id()
-#else
-/* Use the queue selected by XPS */
-#define dpa_get_queue_mapping(skb) \
- skb_get_queue_mapping(skb)
-#endif
-
-static inline void _dpa_bp_free_pf(void *addr)
-{
-#ifndef __rtems__
- put_page(virt_to_head_page(addr));
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
-}
-
-#endif /* __DPA_H */
+#endif /* __DPAA_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
deleted file mode 100644
index 9a4a2184..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
+++ /dev/null
@@ -1,1491 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_net.h>
-#include <linux/etherdevice.h>
-#include <linux/kthread.h>
-#include <linux/percpu.h>
-#ifndef __rtems__
-#include <linux/highmem.h>
-#include <linux/sort.h>
-#endif /* __rtems__ */
-#include <soc/fsl/qman.h>
-#ifndef __rtems__
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/if_vlan.h>
-#endif /* __rtems__ */
-#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-#include "mac.h"
-
-/* Size in bytes of the FQ taildrop threshold */
-#define DPA_FQ_TD 0x200000
-
-#define DPAA_CS_THRESHOLD_1G 0x06000000
-/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
- * The size in bytes of the egress Congestion State notification threshold on
- * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
- * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
- * and the larger the frame size, the more acute the problem.
- * So we have to find a balance between these factors:
- *	- avoiding the device staying congested for a prolonged time (risking
- *	  that the netdev watchdog fires - see also the tx_timeout module param);
- * - affecting performance of protocols such as TCP, which otherwise
- * behave well under the congestion notification mechanism;
- * - preventing the Tx cores from tightly-looping (as if the congestion
- * threshold was too low to be effective);
- * - running out of memory if the CS threshold is set too high.
- */
-
-#define DPAA_CS_THRESHOLD_10G 0x10000000
-/* The size in bytes of the egress Congestion State notification threshold on
- * 10G ports, range 0x1000 .. 0x10000000
- */
-
-static struct dpa_bp *dpa_bp_array[64];
-
-#ifndef __rtems__
-int dpa_max_frm;
-
-int dpa_rx_extra_headroom;
-#endif /* __rtems__ */
-
-static const struct fqid_cell tx_confirm_fqids[] = {
- {0, DPAA_ETH_TX_QUEUES}
-};
-
-static const struct fqid_cell default_fqids[][3] = {
- [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
- [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
-};
-
-#ifndef __rtems__
-int dpa_netdev_init(struct net_device *net_dev,
- const u8 *mac_addr,
- u16 tx_timeout)
-{
- int err;
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct device *dev = net_dev->dev.parent;
-
- net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- /* we do not want shared skbs on TX */
- net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-
- net_dev->features |= net_dev->hw_features;
- net_dev->vlan_features = net_dev->features;
-
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-
- net_dev->ethtool_ops = &dpa_ethtool_ops;
-
- net_dev->needed_headroom = priv->tx_headroom;
- net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-
- /* start without the RUNNING flag, phylib controls it later */
- netif_carrier_off(net_dev);
-
- err = register_netdev(net_dev);
- if (err < 0) {
- dev_err(dev, "register_netdev() = %d\n", err);
- return err;
- }
-
- return 0;
-}
-#endif /* __rtems__ */
-
-int dpa_start(struct net_device *net_dev)
-{
- int err, i;
- struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
-#ifndef __rtems__
- err = mac_dev->init_phy(net_dev, priv->mac_dev);
- if (err < 0) {
- netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err);
- return err;
- }
-#endif /* __rtems__ */
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
- err = fman_port_enable(mac_dev->port[i]);
- if (err)
- goto mac_start_failed;
- }
-
- err = priv->mac_dev->start(mac_dev);
- if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
- goto mac_start_failed;
- }
-
-#ifndef __rtems__
- netif_tx_start_all_queues(net_dev);
-#endif /* __rtems__ */
-
- return 0;
-
-mac_start_failed:
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
- fman_port_disable(mac_dev->port[i]);
-
- return err;
-}
-
-int dpa_stop(struct net_device *net_dev)
-{
- int i, err, error;
- struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
-#ifndef __rtems__
- netif_tx_stop_all_queues(net_dev);
-#endif /* __rtems__ */
- /* Allow the Fman (Tx) port to process in-flight frames before we
- * try switching it off.
- */
- usleep_range(5000, 10000);
-
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
- error = fman_port_disable(mac_dev->port[i]);
- if (error)
- err = error;
- }
-
-#ifndef __rtems__
- if (mac_dev->phy_dev)
- phy_disconnect(mac_dev->phy_dev);
- mac_dev->phy_dev = NULL;
-#endif /* __rtems__ */
-
- return err;
-}
-
-#ifndef __rtems__
-void dpa_timeout(struct net_device *net_dev)
-{
- const struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
-
- priv = netdev_priv(net_dev);
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-
- netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
- jiffies_to_msecs(jiffies - net_dev->trans_start));
-
- percpu_priv->stats.tx_errors++;
-}
-
-/* Calculates the statistics for the given device by adding the statistics
- * collected by each CPU.
- */
-struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- u64 *cpustats;
- u64 *netstats = (u64 *)stats;
- int i, j;
- struct dpa_percpu_priv_s *percpu_priv;
- int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-
- for_each_possible_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-
- cpustats = (u64 *)&percpu_priv->stats;
-
- for (j = 0; j < numstats; j++)
- netstats[j] += cpustats[j];
- }
-
- return stats;
-}
-#endif /* __rtems__ */
-
-int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- const int max_mtu = dpa_get_max_mtu();
-
- /* Make sure we don't exceed the Ethernet controller's MAXFRM */
- if (new_mtu < 68 || new_mtu > max_mtu) {
- netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
- new_mtu, 68, max_mtu);
- return -EINVAL;
- }
-#ifndef __rtems__
- net_dev->mtu = new_mtu;
-#endif /* __rtems__ */
-
- return 0;
-}
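
The bounds check above is easier to read with a worked example. This is only a sketch, under the assumption that dpa_get_max_mtu() (defined elsewhere in the driver) derives the maximum L3 payload by subtracting the L2 overhead from the FMan MAXFRM setting:

	/* Assumed: MAXFRM = 1522, L2 overhead = 14 (header) + 4 (FCS) + 4 (VLAN),
	 * so dpa_get_max_mtu() would report 1522 - 22 = 1500.
	 */
	dpa_change_mtu(net_dev, 1500);	/* accepted */
	dpa_change_mtu(net_dev, 1501);	/* rejected with -EINVAL */
	dpa_change_mtu(net_dev, 60);	/* rejected: below the 68-byte minimum */
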
-
-#ifndef __rtems__
-/* .ndo_init callback */
-int dpa_ndo_init(struct net_device *net_dev)
-{
- /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
- * we choose conservatively and let the user explicitly set a higher
- * MTU via ifconfig. Otherwise, the user may end up with different MTUs
- * in the same LAN.
- * If on the other hand fsl_fm_max_frm has been chosen below 1500,
- * start with the maximum allowed.
- */
- int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
-
- netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
- init_mtu);
- net_dev->mtu = init_mtu;
-
- return 0;
-}
-
-int dpa_set_features(struct net_device *dev, netdev_features_t features)
-{
- /* Not much to do here for now */
- dev->features = features;
- return 0;
-}
-
-netdev_features_t dpa_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t unsupported_features = 0;
-
- /* In theory we should never be requested to enable features that
- * we didn't set in netdev->features and netdev->hw_features at probe
- * time, but double check just to be on the safe side.
- * We don't support enabling Rx csum through ethtool yet
- */
- unsupported_features |= NETIF_F_RXCSUM;
-
- features &= ~unsupported_features;
-
- return features;
-}
-
-int dpa_remove(struct platform_device *pdev)
-{
- int err;
- struct device *dev;
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
-
- dev = &pdev->dev;
- net_dev = dev_get_drvdata(dev);
-
- priv = netdev_priv(net_dev);
-
- dpaa_eth_sysfs_remove(dev);
-
- dev_set_drvdata(dev, NULL);
- unregister_netdev(net_dev);
-
- err = dpa_fq_free(dev, &priv->dpa_fq_list);
-
- qman_delete_cgr_safe(&priv->ingress_cgr);
- qman_release_cgrid(priv->ingress_cgr.cgrid);
- qman_delete_cgr_safe(&priv->cgr_data.cgr);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-
- dpa_private_napi_del(net_dev);
-
- dpa_bp_free(priv);
-
- if (priv->buf_layout)
- devm_kfree(dev, priv->buf_layout);
-
- free_netdev(net_dev);
-
- return err;
-}
-
-struct mac_device *dpa_mac_dev_get(struct platform_device *pdev)
-{
- struct device *dpa_dev, *dev;
- struct device_node *mac_node;
- struct platform_device *of_dev;
- struct mac_device *mac_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
- if (!eth_data)
- return ERR_PTR(-ENODEV);
-
- mac_node = eth_data->mac_node;
-
- of_dev = of_find_device_by_node(mac_node);
- if (!of_dev) {
- dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
- mac_node->full_name);
- of_node_put(mac_node);
- return ERR_PTR(-EINVAL);
- }
- of_node_put(mac_node);
-
- dev = &of_dev->dev;
-
- mac_dev = dev_get_drvdata(dev);
- if (!mac_dev) {
- dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
- dev_name(dev));
- return ERR_PTR(-EINVAL);
- }
-
- return mac_dev;
-}
-
-int dpa_mac_hw_index_get(struct platform_device *pdev)
-{
- struct device *dpa_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
-
- return eth_data->mac_hw_id;
-}
-
-int dpa_mac_fman_index_get(struct platform_device *pdev)
-{
- struct device *dpa_dev;
- struct dpaa_eth_data *eth_data;
-
- dpa_dev = &pdev->dev;
- eth_data = dpa_dev->platform_data;
-
- return eth_data->fman_hw_id;
-}
-
-int dpa_set_mac_address(struct net_device *net_dev, void *addr)
-{
- const struct dpa_priv_s *priv;
- int err;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
-
- err = eth_mac_addr(net_dev, addr);
- if (err < 0) {
- netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
- return err;
- }
-
- mac_dev = priv->mac_dev;
-
- err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
- if (err < 0) {
- netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
- err);
- return err;
- }
-
- return 0;
-}
-
-void dpa_set_rx_mode(struct net_device *net_dev)
-{
- int err;
- const struct dpa_priv_s *priv;
-
- priv = netdev_priv(net_dev);
-
- if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
- priv->mac_dev->promisc = !priv->mac_dev->promisc;
- err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
- priv->mac_dev->promisc);
- if (err < 0)
- netif_err(priv, drv, net_dev,
- "mac_dev->set_promisc() = %d\n",
- err);
- }
-
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
- if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
- err);
-}
-#endif /* __rtems__ */
-
-void dpa_set_buffers_layout(struct mac_device *mac_dev,
- struct dpa_buffer_layout_s *layout)
-{
- /* Rx */
- layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;
- layout[RX].parse_results = true;
- layout[RX].hash_results = true;
- layout[RX].data_align = DPA_FD_DATA_ALIGNMENT;
-
- /* Tx */
- layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- layout[TX].parse_results = true;
- layout[TX].hash_results = true;
- layout[TX].data_align = DPA_FD_DATA_ALIGNMENT;
-}
-
-int dpa_bp_alloc(struct dpa_bp *dpa_bp)
-{
- int err;
- struct bman_pool_params bp_params;
-#ifndef __rtems__
- struct platform_device *pdev;
-#endif /* __rtems__ */
-
- if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
- pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
- return -EINVAL;
- }
-
- memset(&bp_params, 0, sizeof(struct bman_pool_params));
-
- /* If the pool is already specified, we only create one per bpid */
- if (dpa_bpid2pool_use(dpa_bp->bpid))
- return 0;
-
- if (dpa_bp->bpid == 0)
- bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
- else
- bp_params.bpid = dpa_bp->bpid;
-
- dpa_bp->pool = bman_new_pool(&bp_params);
- if (!dpa_bp->pool) {
- pr_err("bman_new_pool() failed\n");
- return -ENODEV;
- }
-
- dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid;
-
-#ifndef __rtems__
- pdev = platform_device_register_simple("DPAA_bpool",
- dpa_bp->bpid, NULL, 0);
- if (IS_ERR(pdev)) {
- err = PTR_ERR(pdev);
- goto pdev_register_failed;
- }
-
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
- if (err)
- goto pdev_mask_failed;
-
- dpa_bp->dev = &pdev->dev;
-#endif /* __rtems__ */
-
- if (dpa_bp->seed_cb) {
- err = dpa_bp->seed_cb(dpa_bp);
- if (err)
- goto pool_seed_failed;
- }
-
- dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
-
- return 0;
-
-pool_seed_failed:
-#ifndef __rtems__
-pdev_mask_failed:
- platform_device_unregister(pdev);
-pdev_register_failed:
-#endif /* __rtems__ */
- bman_free_pool(dpa_bp->pool);
-
- return err;
-}
-
-void dpa_bp_drain(struct dpa_bp *bp)
-{
- int ret;
- u8 num = 8;
-
- do {
- struct bm_buffer bmb[8];
- int i;
-
- ret = bman_acquire(bp->pool, bmb, num, 0);
- if (ret < 0) {
- if (num == 8) {
- /* we have less than 8 buffers left;
- * drain them one by one
- */
- num = 1;
- ret = 1;
- continue;
- } else {
- /* Pool is fully drained */
- break;
- }
- }
-
- for (i = 0; i < num; i++) {
- dma_addr_t addr = bm_buf_addr(&bmb[i]);
-
-#ifndef __rtems__
- dma_unmap_single(bp->dev, addr, bp->size,
- DMA_BIDIRECTIONAL);
-#endif /* __rtems__ */
-
- bp->free_buf_cb(phys_to_virt(addr));
- }
- } while (ret > 0);
-}
-
-static void _dpa_bp_free(struct dpa_bp *dpa_bp)
-{
- struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
-
- /* the mapping between bpid and dpa_bp is done very late in the
- * allocation procedure; if something failed before the mapping, the bp
- * was not configured, therefore we don't need the below instructions
- */
- if (!bp)
- return;
-
- if (!atomic_dec_and_test(&bp->refs))
- return;
-
- if (bp->free_buf_cb)
- dpa_bp_drain(bp);
-
- dpa_bp_array[bp->bpid] = NULL;
- bman_free_pool(bp->pool);
-
-#ifndef __rtems__
- if (bp->dev)
- platform_device_unregister(to_platform_device(bp->dev));
-#endif /* __rtems__ */
-}
-
-void dpa_bp_free(struct dpa_priv_s *priv)
-{
- int i;
-
- for (i = 0; i < priv->bp_count; i++)
- _dpa_bp_free(&priv->dpa_bp[i]);
-}
-
-struct dpa_bp *dpa_bpid2pool(int bpid)
-{
- return dpa_bp_array[bpid];
-}
-
-void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
-{
- dpa_bp_array[bpid] = dpa_bp;
- atomic_set(&dpa_bp->refs, 1);
-}
-
-bool dpa_bpid2pool_use(int bpid)
-{
- if (dpa_bpid2pool(bpid)) {
- atomic_inc(&dpa_bp_array[bpid]->refs);
- return true;
- }
-
- return false;
-}
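
Taken together, dpa_bpid2pool(), dpa_bpid2pool_map() and dpa_bpid2pool_use() form a refcounted bpid-to-pool registry. A minimal lifecycle sketch (hypothetical caller; the real registration is done by dpa_bp_alloc() above):

	if (!dpa_bpid2pool_use(bpid)) {
		/* First user of this bpid: create the pool and register it. */
		/* ... bman_new_pool(), optional seeding ... */
		dpa_bpid2pool_map(bpid, dpa_bp);	/* sets refs to 1 */
	}
	/* Every later dpa_bpid2pool_use(bpid) only bumps refs; _dpa_bp_free()
	 * drops one reference and drains/frees the pool when the last
	 * reference goes away.
	 */
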
-
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- return dpa_get_queue_mapping(skb);
-}
-#endif
-
-struct dpa_fq *dpa_fq_alloc(struct device *dev,
- const struct fqid_cell *fqids,
- struct list_head *list,
- enum dpa_fq_type fq_type)
-{
- int i;
- struct dpa_fq *dpa_fq;
-
- dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
- if (!dpa_fq)
- return NULL;
-
- for (i = 0; i < fqids->count; i++) {
- dpa_fq[i].fq_type = fq_type;
- dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
- list_add_tail(&dpa_fq[i].list, list);
- }
-
- for (i = 0; i < fqids->count; i++)
- _dpa_assign_wq(dpa_fq + i);
-
- return dpa_fq;
-}
-
-int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- struct fm_port_fqs *port_fqs,
- bool alloc_tx_conf_fqs,
- enum port_type ptype)
-{
- const struct fqid_cell *fqids;
- struct dpa_fq *dpa_fq;
- int num_ranges;
- int i;
-
- if (ptype == TX && alloc_tx_conf_fqs) {
- if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
- FQ_TYPE_TX_CONF_MQ))
- goto fq_alloc_failed;
- }
-
- fqids = default_fqids[ptype];
- num_ranges = 3;
-
- for (i = 0; i < num_ranges; i++) {
- switch (i) {
- case 0:
- /* The first queue is the error queue */
- if (fqids[i].count != 1)
- goto invalid_error_queue;
-
- dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
- ptype == RX ?
- FQ_TYPE_RX_ERROR :
- FQ_TYPE_TX_ERROR);
- if (!dpa_fq)
- goto fq_alloc_failed;
-
- if (ptype == RX)
- port_fqs->rx_errq = &dpa_fq[0];
- else
- port_fqs->tx_errq = &dpa_fq[0];
- break;
- case 1:
- /* the second queue is the default queue */
- if (fqids[i].count != 1)
- goto invalid_default_queue;
-
- dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
- ptype == RX ?
- FQ_TYPE_RX_DEFAULT :
- FQ_TYPE_TX_CONFIRM);
- if (!dpa_fq)
- goto fq_alloc_failed;
-
- if (ptype == RX)
- port_fqs->rx_defq = &dpa_fq[0];
- else
- port_fqs->tx_defq = &dpa_fq[0];
- break;
- default:
- /* all subsequent queues are Tx */
- if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX))
- goto fq_alloc_failed;
- break;
- }
- }
-
- return 0;
-
-fq_alloc_failed:
- dev_err(dev, "dpa_fq_alloc() failed\n");
- return -ENOMEM;
-
-invalid_default_queue:
-invalid_error_queue:
- dev_err(dev, "Too many default or error queues\n");
- return -EINVAL;
-}
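
A minimal usage sketch for the Tx side (hypothetical probe-time caller; dev and the locals are assumptions), showing what ends up on the FQ list given the tx_confirm_fqids/default_fqids tables above:

	struct fm_port_fqs port_fqs = { 0 };
	LIST_HEAD(fq_list);
	int err;

	err = dpa_fq_probe_mac(dev, &fq_list, &port_fqs, true, TX);
	/* On success the list holds, in order:
	 *  - DPAA_ETH_TX_QUEUES FQ_TYPE_TX_CONF_MQ entries (tx_confirm_fqids),
	 *  - one FQ_TYPE_TX_ERROR   (recorded in port_fqs.tx_errq),
	 *  - one FQ_TYPE_TX_CONFIRM (recorded in port_fqs.tx_defq),
	 *  - DPAA_ETH_TX_QUEUES FQ_TYPE_TX entries.
	 * An fqid of 0 in the tables means "use a dynamic FQID later".
	 */
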
-
-static u32 rx_pool_channel;
-static DEFINE_SPINLOCK(rx_pool_channel_init);
-
-int dpa_get_channel(void)
-{
- spin_lock(&rx_pool_channel_init);
- if (!rx_pool_channel) {
- u32 pool;
- int ret = qman_alloc_pool(&pool);
-
- if (!ret)
- rx_pool_channel = pool;
- }
- spin_unlock(&rx_pool_channel_init);
- if (!rx_pool_channel)
- return -ENOMEM;
- return rx_pool_channel;
-}
-
-void dpa_release_channel(void)
-{
- qman_release_pool(rx_pool_channel);
-}
-
-int dpaa_eth_add_channel(void *__arg)
-{
-#ifndef __rtems__
- const cpumask_t *cpus = qman_affine_cpus();
-#endif /* __rtems__ */
- u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
- int cpu;
- struct qman_portal *portal;
-
-#ifndef __rtems__
- for_each_cpu(cpu, cpus) {
-#else /* __rtems__ */
- for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
-#endif /* __rtems__ */
-
- portal = (struct qman_portal *)qman_get_affine_portal(cpu);
- qman_p_static_dequeue_add(portal, pool);
- }
- return 0;
-}
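
How these two helpers combine at bring-up is only implied here; a minimal sketch, assuming a probe context that keeps the channel in priv->channel for the ingress FQs:

	int channel = dpa_get_channel();	/* shared pool channel, lazily allocated */

	if (channel < 0)
		return channel;
	priv->channel = (u16)channel;
	/* Let every affine QMan portal dequeue from that pool channel, so
	 * ingress FQs placed on priv->channel can be serviced on any core.
	 */
	dpaa_eth_add_channel((void *)(unsigned long)channel);
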
-
-/* Congestion group state change notification callback.
- * Stops the device's egress queues while they are congested and
- * wakes them upon exiting congested state.
- * Also updates some CGR-related stats.
- */
-static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
- int congested)
-{
- struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
- struct dpa_priv_s, cgr_data.cgr);
-
- if (congested) {
- priv->cgr_data.congestion_start_jiffies = jiffies;
-#ifndef __rtems__
- netif_tx_stop_all_queues(priv->net_dev);
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- priv->cgr_data.cgr_congested_count++;
- } else {
- priv->cgr_data.congested_jiffies +=
- (jiffies - priv->cgr_data.congestion_start_jiffies);
-#ifndef __rtems__
- netif_tx_wake_all_queues(priv->net_dev);
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- }
-}
-
-int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
-{
- struct qm_mcc_initcgr initcgr;
- u32 cs_th;
- int err;
-
- err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
- if (err < 0) {
- pr_err("Error %d allocating CGR ID\n", err);
- goto out_error;
- }
- priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
-
- /* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
- initcgr.cgr.cscn_en = QM_CGR_EN;
-
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
- */
-#ifndef __rtems__
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
- cs_th = DPAA_CS_THRESHOLD_10G;
- else
- cs_th = DPAA_CS_THRESHOLD_1G;
-#else /* __rtems__ */
- /* FIXME */
- cs_th = DPAA_CS_THRESHOLD_1G;
-#endif /* __rtems__ */
- qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
- initcgr.cgr.cstd_en = QM_CGR_EN;
-
- err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
- &initcgr);
- if (err < 0) {
- pr_err("Error %d creating CGR with ID %d\n", err,
- priv->cgr_data.cgr.cgrid);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- goto out_error;
- }
- pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
- priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
- priv->cgr_data.cgr.chan);
-
-out_error:
- return err;
-}
-
-static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
- struct dpa_fq *fq,
- const struct qman_fq *template)
-{
- fq->fq_base = *template;
- fq->net_dev = priv->net_dev;
-
- fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- fq->channel = priv->channel;
-}
-
-static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
- struct dpa_fq *fq,
- struct fman_port *port,
- const struct qman_fq *template)
-{
- fq->fq_base = *template;
- fq->net_dev = priv->net_dev;
-
- if (port) {
- fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- fq->channel = (u16)fman_port_get_qman_channel_id(port);
- } else {
- fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
- }
-}
-
-void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- struct fman_port *tx_port)
-{
- struct dpa_fq *fq;
-#ifndef __rtems__
- u16 portals[NR_CPUS];
- int cpu, num_portals = 0;
- const cpumask_t *affine_cpus = qman_affine_cpus();
-#endif /* __rtems__ */
- int egress_cnt = 0, conf_cnt = 0;
-
-#ifndef __rtems__
- for_each_cpu(cpu, affine_cpus)
- portals[num_portals++] = qman_affine_channel(cpu);
- if (num_portals == 0)
- dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
-#else /* __rtems__ */
- /* FIXME */
-#endif /* __rtems__ */
-
- /* Initialize each FQ in the list */
- list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- switch (fq->fq_type) {
- case FQ_TYPE_RX_DEFAULT:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- break;
- case FQ_TYPE_RX_ERROR:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
- break;
- case FQ_TYPE_TX:
- dpa_setup_egress(priv, fq, tx_port,
- &fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TX_QUEUES)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- break;
- case FQ_TYPE_TX_CONFIRM:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- break;
- case FQ_TYPE_TX_CONF_MQ:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- break;
- case FQ_TYPE_TX_ERROR:
- DPA_ERR_ON(!priv->mac_dev);
- dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
- break;
- default:
-#ifndef __rtems__
- dev_warn(priv->net_dev->dev.parent,
- "Unknown FQ type detected!\n");
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- break;
- }
- }
-
- /* The number of Tx queues may be smaller than the number of cores, if
- * the Tx queue range is specified in the device tree instead of being
- * dynamically allocated.
- * Make sure all CPUs receive a corresponding Tx queue.
- */
- while (egress_cnt < DPAA_ETH_TX_QUEUES) {
- list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TX_QUEUES)
- break;
- }
- }
-}
-
-int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
-{
- int err;
- const struct dpa_priv_s *priv;
-#ifndef __rtems__
- struct device *dev;
-#endif /* __rtems__ */
- struct qman_fq *fq;
- struct qm_mcc_initfq initfq;
- struct qman_fq *confq = NULL;
- int queue_id;
-
- priv = netdev_priv(dpa_fq->net_dev);
-#ifndef __rtems__
- dev = dpa_fq->net_dev->dev.parent;
-#endif /* __rtems__ */
-
- if (dpa_fq->fqid == 0)
- dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-
- dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
-
- err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- if (err) {
-#ifndef __rtems__
- dev_err(dev, "qman_create_fq() failed\n");
-#else /* __rtems__ */
- BSD_ASSERT(0);
-#endif /* __rtems__ */
- return err;
- }
- fq = &dpa_fq->fq_base;
-
- if (dpa_fq->init) {
- memset(&initfq, 0, sizeof(initfq));
-
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- /* Note: we may get to keep an empty FQ in cache */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
-
- /* Try to reduce the number of portal interrupts for
- * Tx Confirmation FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
-
- /* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-
- initfq.fqd.dest.channel = dpa_fq->channel;
- initfq.fqd.dest.wq = dpa_fq->wq;
-
- /* Put all egress queues in a congestion group of their own.
- * Sensu stricto, the Tx confirmation queues are Rx FQs,
- * rather than Tx - but they nonetheless account for the
- * memory footprint on behalf of egress traffic. We therefore
- * place them in the netdev's CGR, along with the Tx FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX ||
- dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
- dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
- /* Set a fixed overhead accounting, in an attempt to
- * reduce the impact of fixed-size skb shells and the
- * driver's needed headroom on system memory. This is
- * especially the case when the egress traffic is
- * composed of small datagrams.
- * Unfortunately, QMan's OAL value is capped to an
- * insufficient value, but even that is better than
- * no overhead accounting at all.
- */
- initfq.we_mask |= QM_INITFQ_WE_OAC;
- initfq.fqd.oac_init.oac = QM_OAC_CG;
-#ifndef __rtems__
- initfq.fqd.oac_init.oal =
- (signed char)(min(sizeof(struct sk_buff) +
- priv->tx_headroom,
- (size_t)FSL_QMAN_MAX_OAL));
-#else /* __rtems__ */
- /* FIXME */
- initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
-#endif /* __rtems__ */
- }
-
- if (td_enable) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
- qm_fqd_taildrop_set(&initfq.fqd.td,
- DPA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
- }
-
- /* Configure the Tx confirmation queue, now that we know
- * which Tx queue it pairs with.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX) {
- queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
- if (queue_id >= 0)
- confq = priv->conf_fqs[queue_id];
- if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
- * A2V=1 (contextA A2 field is valid)
- * A0V=1 (contextA A0 field is valid)
- * B0V=1 (contextB field is valid)
- * ContextA A2: EBD=1 (deallocate buffers inside FMan)
- * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
- */
- initfq.fqd.context_a.hi = 0x1e000000;
- initfq.fqd.context_a.lo = 0x80000000;
- }
- }
-
- /* Put all *private* ingress queues in our "ingress CGR". */
- if (priv->use_ingress_cgr &&
- (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
- dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
- /* Set a fixed overhead accounting, just like for the
- * egress CGR.
- */
- initfq.we_mask |= QM_INITFQ_WE_OAC;
- initfq.fqd.oac_init.oac = QM_OAC_CG;
-#ifndef __rtems__
- initfq.fqd.oac_init.oal =
- (signed char)(min(sizeof(struct sk_buff) +
- priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-#else /* __rtems__ */
- /* FIXME */
- initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
-#endif /* __rtems__ */
- }
-
- /* Initialization common to all ingress queues */
- if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
- initfq.fqd.context_a.stashing.exclusive =
- QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
- QM_STASHING_EXCL_ANNOTATION;
- initfq.fqd.context_a.stashing.data_cl = 2;
- initfq.fqd.context_a.stashing.annotation_cl = 1;
- initfq.fqd.context_a.stashing.context_cl =
- DIV_ROUND_UP(sizeof(struct qman_fq), 64);
- }
-
- err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- if (err < 0) {
-#ifndef __rtems__
- dev_err(dev, "qman_init_fq(%u) = %d\n",
- qman_fq_fqid(fq), err);
-#endif /* __rtems__ */
- qman_destroy_fq(fq, 0);
- return err;
- }
- }
-
- dpa_fq->fqid = qman_fq_fqid(fq);
-
- return 0;
-}
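
For reference, a sketch of how the per-FQ initialization is typically driven once dpa_fq_setup() has filled in channels and callbacks (hypothetical caller, error unwinding omitted):

	struct dpa_fq *dpa_fq;
	int err;

	list_for_each_entry(dpa_fq, &priv->dpa_fq_list, list) {
		/* Taildrop stays off here; dpa_fq_init() schedules each FQ
		 * via qman_init_fq(QMAN_INITFQ_FLAG_SCHED) as it goes.
		 */
		err = dpa_fq_init(dpa_fq, false);
		if (err < 0)
			break;	/* a real caller would unwind with dpa_fq_free() */
	}
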
-
-#ifndef __rtems__
-static int _dpa_fq_free(struct device *dev, struct qman_fq *fq)
-{
- int err, error;
- struct dpa_fq *dpa_fq;
- const struct dpa_priv_s *priv;
-
- err = 0;
-
- dpa_fq = container_of(fq, struct dpa_fq, fq_base);
- priv = netdev_priv(dpa_fq->net_dev);
-
- if (dpa_fq->init) {
- err = qman_retire_fq(fq, NULL);
- if (err < 0 && netif_msg_drv(priv))
- dev_err(dev, "qman_retire_fq(%u) = %d\n",
- qman_fq_fqid(fq), err);
-
- error = qman_oos_fq(fq);
- if (error < 0 && netif_msg_drv(priv)) {
- dev_err(dev, "qman_oos_fq(%u) = %d\n",
- qman_fq_fqid(fq), error);
- if (err >= 0)
- err = error;
- }
- }
-
- qman_destroy_fq(fq, 0);
- list_del(&dpa_fq->list);
-
- return err;
-}
-
-int dpa_fq_free(struct device *dev, struct list_head *list)
-{
- int err, error;
- struct dpa_fq *dpa_fq, *tmp;
-
- err = 0;
- list_for_each_entry_safe(dpa_fq, tmp, list, list) {
- error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
- if (error < 0 && err >= 0)
- err = error;
- }
-
- return err;
-}
-#endif /* __rtems__ */
-
-static void
-dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq,
- struct dpa_fq *defq,
- struct dpa_buffer_layout_s *buf_layout)
-{
- struct fman_port_params params;
- struct fman_buffer_prefix_content buf_prefix_content;
- int err;
-
- memset(&params, 0, sizeof(params));
- memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
-
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = buf_layout->parse_results;
- buf_prefix_content.pass_hash_result = buf_layout->hash_results;
- buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
- buf_prefix_content.data_align = buf_layout->data_align;
-
- params.specific_params.non_rx_params.err_fqid = errq->fqid;
- params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
-
- err = fman_port_config(port, &params);
- if (err)
- pr_info("fman_port_config failed\n");
-
- err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
- pr_info("fman_port_cfg_buf_prefix_content failed\n");
-
- err = fman_port_init(port);
- if (err)
- pr_err("fm_port_init failed\n");
-}
-
-static void
-dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp,
- size_t count, struct dpa_fq *errq, struct dpa_fq *defq,
- struct dpa_buffer_layout_s *buf_layout)
-{
- struct fman_port_params params;
- struct fman_buffer_prefix_content buf_prefix_content;
- struct fman_port_rx_params *rx_p;
- int i, err;
-
- memset(&params, 0, sizeof(params));
- memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
-
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = buf_layout->parse_results;
- buf_prefix_content.pass_hash_result = buf_layout->hash_results;
- buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
- buf_prefix_content.data_align = buf_layout->data_align;
-
- rx_p = &params.specific_params.rx_params;
- rx_p->err_fqid = errq->fqid;
- rx_p->dflt_fqid = defq->fqid;
-
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size;
- }
-
- err = fman_port_config(port, &params);
- if (err)
- pr_info("fman_port_config failed\n");
-
- err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
- pr_info("fman_port_cfg_buf_prefix_content failed\n");
-
- err = fman_port_init(port);
- if (err)
- pr_err("fm_port_init failed\n");
-}
-
-void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpa_bp *bp, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpa_buffer_layout_s *buf_layout,
- struct device *dev)
-{
- struct fman_port *rxport = mac_dev->port[RX];
- struct fman_port *txport = mac_dev->port[TX];
-
- dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- port_fqs->tx_defq, &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
-}
-
-void dpa_release_sgt(struct qm_sg_entry *sgt)
-{
- struct dpa_bp *dpa_bp;
- struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
- u8 i = 0, j;
-
- memset(bmb, 0, sizeof(bmb));
-
- do {
- dpa_bp = dpa_bpid2pool(sgt[i].bpid);
- DPA_ERR_ON(!dpa_bp);
-
- j = 0;
- do {
- DPA_ERR_ON(sgt[i].extension);
-
- bmb[j].hi = sgt[i].addr_hi;
- bmb[j].lo = be32_to_cpu(sgt[i].addr_lo);
-
- j++; i++;
- } while (j < ARRAY_SIZE(bmb) &&
- !sgt[i - 1].final &&
- sgt[i - 1].bpid == sgt[i].bpid);
-
- while (bman_release(dpa_bp->pool, bmb, j, 0))
- cpu_relax();
- } while (!sgt[i - 1].final);
-}
-
-void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
-{
- struct qm_sg_entry *sgt;
- struct dpa_bp *dpa_bp;
- struct bm_buffer bmb;
- dma_addr_t addr;
- void *vaddr;
-
- memset(&bmb, 0, sizeof(bmb));
- bm_buffer_set64(&bmb, fd->addr);
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
- DPA_ERR_ON(!dpa_bp);
-
- if (fd->format == qm_fd_sg) {
- vaddr = phys_to_virt(fd->addr);
- sgt = vaddr + dpa_fd_offset(fd);
-
-#ifndef __rtems__
- dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
- DMA_BIDIRECTIONAL);
-#endif /* __rtems__ */
-
- dpa_release_sgt(sgt);
-
-#ifndef __rtems__
- addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dpa_bp->dev, addr)) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- return;
- }
-#else /* __rtems__ */
- addr = (dma_addr_t)vaddr;
-#endif /* __rtems__ */
- bm_buffer_set64(&bmb, addr);
- }
-
- while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- cpu_relax();
-}
-
-void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_mr_entry *msg)
-{
- switch (msg->ern.rc & QM_MR_RC_MASK) {
- case QM_MR_RC_CGR_TAILDROP:
- percpu_priv->ern_cnt.cg_tdrop++;
- break;
- case QM_MR_RC_WRED:
- percpu_priv->ern_cnt.wred++;
- break;
- case QM_MR_RC_ERROR:
- percpu_priv->ern_cnt.err_cond++;
- break;
- case QM_MR_RC_ORPWINDOW_EARLY:
- percpu_priv->ern_cnt.early_window++;
- break;
- case QM_MR_RC_ORPWINDOW_LATE:
- percpu_priv->ern_cnt.late_window++;
- break;
- case QM_MR_RC_FQ_TAILDROP:
- percpu_priv->ern_cnt.fq_tdrop++;
- break;
- case QM_MR_RC_ORPWINDOW_RETIRED:
- percpu_priv->ern_cnt.fq_retired++;
- break;
- case QM_MR_RC_ORP_ZERO:
- percpu_priv->ern_cnt.orp_zero++;
- break;
- }
-}
-
-#ifndef __rtems__
-/* Turn on HW checksum computation for this outgoing frame.
- * If the current protocol is not something we support in this regard
- * (or if the stack has already computed the SW checksum), we do nothing.
- *
- * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- * otherwise.
- *
- * Note that this function may modify the fd->cmd field and the skb data buffer
- * (the Parse Results area).
- */
-int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- struct sk_buff *skb,
- struct qm_fd *fd,
- char *parse_results)
-{
- struct fman_prs_result *parse_result;
- struct iphdr *iph;
- struct ipv6hdr *ipv6h = NULL;
- u8 l4_proto;
- u16 ethertype = ntohs(skb->protocol);
- int retval = 0;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- /* Note: L3 csum seems to be already computed in sw, but we can't choose
- * L4 alone from the FM configuration anyway.
- */
-
- /* Fill in some fields of the Parse Results array, so the FMan
- * can find them as if they came from the FMan Parser.
- */
- parse_result = (struct fman_prs_result *)parse_results;
-
- /* If we're dealing with VLAN, get the real Ethernet type */
- if (ethertype == ETH_P_8021Q) {
- /* We can't always assume the MAC header is set correctly
- * by the stack, so reset to beginning of skb->data
- */
- skb_reset_mac_header(skb);
- ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- }
-
- /* Fill in the relevant L3 parse result fields
- * and read the L4 protocol type
- */
- switch (ethertype) {
- case ETH_P_IP:
- parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
- iph = ip_hdr(skb);
- DPA_ERR_ON(!iph);
- l4_proto = iph->protocol;
- break;
- case ETH_P_IPV6:
- parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
- ipv6h = ipv6_hdr(skb);
- DPA_ERR_ON(!ipv6h);
- l4_proto = ipv6h->nexthdr;
- break;
- default:
- /* We shouldn't even be here */
- if (net_ratelimit())
- netif_alert(priv, tx_err, priv->net_dev,
- "Can't compute HW csum for L3 proto 0x%x\n",
- ntohs(skb->protocol));
- retval = -EIO;
- goto return_error;
- }
-
- /* Fill in the relevant L4 parse result fields */
- switch (l4_proto) {
- case IPPROTO_UDP:
- parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
- break;
- case IPPROTO_TCP:
- parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
- break;
- default:
- /* This can as well be a BUG() */
- if (net_ratelimit())
- netif_alert(priv, tx_err, priv->net_dev,
- "Can't compute HW csum for L4 proto 0x%x\n",
- l4_proto);
- retval = -EIO;
- goto return_error;
- }
-
- /* At index 0 is IPOffset_1 as defined in the Parse Results */
- parse_result->ip_off[0] = (u8)skb_network_offset(skb);
- parse_result->l4_off = (u8)skb_transport_offset(skb);
-
- /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
-
- /* On P1023 and similar platforms fd->cmd interpretation could
- * be disabled by setting CONTEXT_A bit ICMD; currently this bit
- * is not set so we do not need to check; in the future, if/when
- * using context_a we need to check this bit
- */
-
-return_error:
- return retval;
-}
-#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
deleted file mode 100644
index 954de393..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DPAA_ETH_COMMON_H
-#define __DPAA_ETH_COMMON_H
-
-#include <linux/etherdevice.h>
-#include <soc/fsl/bman.h>
-#include <linux/of_platform.h>
-
-#include "dpaa_eth.h"
-
-#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
-
-/* used in napi related functions */
-extern u16 qman_portal_max;
-
-/* from dpa_ethtool.c */
-extern const struct ethtool_ops dpa_ethtool_ops;
-
-int dpa_netdev_init(struct net_device *net_dev,
- const u8 *mac_addr,
- u16 tx_timeout);
-int dpa_start(struct net_device *net_dev);
-int dpa_stop(struct net_device *net_dev);
-void dpa_timeout(struct net_device *net_dev);
-struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats);
-int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
-int dpa_ndo_init(struct net_device *net_dev);
-#ifndef __rtems__
-int dpa_set_features(struct net_device *dev, netdev_features_t features);
-netdev_features_t dpa_fix_features(struct net_device *dev,
- netdev_features_t features);
-#endif /* __rtems__ */
-int dpa_remove(struct platform_device *pdev);
-struct mac_device *dpa_mac_dev_get(struct platform_device *pdev);
-int dpa_mac_hw_index_get(struct platform_device *pdev);
-int dpa_mac_fman_index_get(struct platform_device *pdev);
-int dpa_set_mac_address(struct net_device *net_dev, void *addr);
-void dpa_set_rx_mode(struct net_device *net_dev);
-void dpa_set_buffers_layout(struct mac_device *mac_dev,
- struct dpa_buffer_layout_s *layout);
-int dpa_bp_alloc(struct dpa_bp *dpa_bp);
-void dpa_bp_free(struct dpa_priv_s *priv);
-struct dpa_bp *dpa_bpid2pool(int bpid);
-void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
-bool dpa_bpid2pool_use(int bpid);
-void dpa_bp_drain(struct dpa_bp *bp);
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
-#endif
-struct dpa_fq *dpa_fq_alloc(struct device *dev,
- const struct fqid_cell *fqids,
- struct list_head *list,
- enum dpa_fq_type fq_type);
-int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- struct fm_port_fqs *port_fqs,
- bool tx_conf_fqs_per_core,
- enum port_type ptype);
-int dpa_get_channel(void);
-void dpa_release_channel(void);
-int dpaa_eth_add_channel(void *__arg);
-int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
-void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- struct fman_port *tx_port);
-int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
-int dpa_fq_free(struct device *dev, struct list_head *list);
-void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpa_bp *bp, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpa_buffer_layout_s *buf_layout,
- struct device *dev);
-void dpa_release_sgt(struct qm_sg_entry *sgt);
-void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
-void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_mr_entry *msg);
-#ifndef __rtems__
-int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- struct sk_buff *skb,
- struct qm_fd *fd,
- char *parse_results);
-#endif /* __rtems__ */
-#endif /* __DPAA_ETH_COMMON_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
deleted file mode 100644
index 2d0903e3..00000000
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
+++ /dev/null
@@ -1,710 +0,0 @@
-#include <machine/rtems-bsd-kernel-space.h>
-
-#include <rtems/bsd/local/opt_dpaa.h>
-
-/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/highmem.h>
-#include <soc/fsl/bman.h>
-
-#include "dpaa_eth.h"
-#include "dpaa_eth_common.h"
-
-/* Convenience macros for storing/retrieving the skb back-pointers.
- *
- * NB: @off is an offset from a (struct sk_buff **) pointer!
- */
-#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
- { \
- skbh = (struct sk_buff **)addr; \
- *(skbh + (off)) = skb; \
- }
-#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
- { \
- skbh = (struct sk_buff **)addr; \
- skb = *(skbh + (off)); \
- }
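
The off argument is subtle: it counts in units of (struct sk_buff *), not bytes, and this file uses two different conventions. A summary sketch of the layouts involved:

	/* Rx buffer, as built in _dpa_bp_add_8_bufs():
	 *
	 *   [ skb ptr ][ cache-aligned data area handed to FMan ... ]
	 *        ^ written with DPA_WRITE_SKB_PTR(skb, skbh, buf, -1),
	 *          read back in contig_fd_to_skb()/sg_fd_to_skb() with off = -1.
	 *
	 * Tx frame, as built in skb_to_contig_fd():
	 *
	 *   [ skb ptr, rest of tx_headroom ][ skb->data ... ]
	 *        ^ written with DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0),
	 *          read back in _dpa_cleanup_tx_fd() with off = 0.
	 */
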
-
-/* DMA map and add a page frag back into the bpool.
- * @vaddr fragment must have been allocated with netdev_alloc_frag(),
- * specifically for fitting into @dpa_bp.
- */
-static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
- int *count_ptr)
-{
- struct bm_buffer bmb;
- dma_addr_t addr;
-
- addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- return;
- }
-
- bm_buffer_set64(&bmb, addr);
-
- while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- cpu_relax();
-
- (*count_ptr)++;
-}
-
-static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
-{
- struct bm_buffer bmb[8];
- void *new_buf;
- dma_addr_t addr;
- u8 i;
- struct device *dev = dpa_bp->dev;
- struct sk_buff *skb, **skbh;
-
- memset(bmb, 0, sizeof(bmb));
-
- for (i = 0; i < 8; i++) {
- /* We'll prepend the skb back-pointer; can't use the DPA
- * priv space, because FMan will overwrite it (from offset 0)
- * if it ends up being the second, third, etc. fragment
- * in a S/G frame.
- *
- * We only need enough space to store a pointer, but allocate
- * an entire cacheline for performance reasons.
- */
- new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
- if (unlikely(!new_buf))
- goto netdev_alloc_failed;
- new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
-
- skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_buf));
- goto build_skb_failed;
- }
- DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
-
- addr = dma_map_single(dev, new_buf,
- dpa_bp->size, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto dma_map_failed;
-
- bm_buffer_set64(&bmb[i], addr);
- }
-
-release_bufs:
- /* Release the buffers. In case bman is busy, keep trying
- * until successful. bman_release() is guaranteed to succeed
- * in a reasonable amount of time
- */
- while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
- cpu_relax();
- return i;
-
-dma_map_failed:
- kfree_skb(skb);
-
-build_skb_failed:
-netdev_alloc_failed:
- net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
- WARN_ONCE(1, "Memory allocation failure on Rx\n");
-
- bm_buffer_set64(&bmb[i], 0);
- /* Avoid releasing a completely null buffer; bman_release() requires
- * at least one buffer.
- */
- if (likely(i))
- goto release_bufs;
-
- return 0;
-}
-
-/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
-static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
-{
- int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
- *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
-}
-
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
-{
- int i;
-
- /* Give each CPU an allotment of "config_count" buffers */
- for_each_possible_cpu(i) {
- int j;
-
- /* Although we access another CPU's counters here
- * we do it at boot time so it is safe
- */
- for (j = 0; j < dpa_bp->config_count; j += 8)
- dpa_bp_add_8_bufs(dpa_bp, i);
- }
- return 0;
-}
-
-/* Add buffers/(pages) for Rx processing whenever bpool count falls below
- * REFILL_THRESHOLD.
- */
-int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
-{
- int count = *countptr;
- int new_bufs;
-
- if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
- do {
- new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
- if (unlikely(!new_bufs)) {
- /* Avoid looping forever if we've temporarily
- * run out of memory. We'll try again at the
- * next NAPI cycle.
- */
- break;
- }
- count += new_bufs;
- } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
-
- *countptr = count;
- if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
- return -ENOMEM;
- }
-
- return 0;
-}
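
A minimal sketch of the expected call site (hypothetical Rx/NAPI context), using the per-CPU buffer counter the pool already carries:

	int *countptr = this_cpu_ptr(dpa_bp->percpu_count);
	int err;

	/* Top the pool back up to FSL_DPAA_ETH_MAX_BUF_COUNT once the count
	 * has dropped below FSL_DPAA_ETH_REFILL_THRESHOLD; -ENOMEM only means
	 * "could not reach the target now, try again on the next poll".
	 */
	err = dpaa_eth_refill_bpools(dpa_bp, countptr);
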
-
-/* Cleanup function for outgoing frame descriptors that were built on Tx path,
- * either contiguous frames or scatter/gather ones.
- * Skb freeing is not handled here.
- *
- * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
- *
- * Return the skb backpointer, since for S/G frames the buffer containing it
- * gets freed here.
- */
-struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- const struct qm_fd *fd)
-{
- const struct qm_sg_entry *sgt;
- int i;
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- dma_addr_t addr = qm_fd_addr(fd);
- struct sk_buff **skbh;
- struct sk_buff *skb = NULL;
- const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- int nr_frags;
-
-
- /* retrieve skb back pointer */
- DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
-
- if (unlikely(fd->format == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
-
- /* The sgt buffer has been allocated with netdev_alloc_frag(),
- * it's from lowmem.
- */
- sgt = phys_to_virt(addr + dpa_fd_offset(fd));
-
- /* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
- sgt[0].length, dma_dir);
-
- /* remaining pages were mapped with dma_map_page() */
- for (i = 1; i < nr_frags; i++) {
- DPA_ERR_ON(sgt[i].extension);
-
- dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
- sgt[i].length, dma_dir);
- }
-
- /* Free the page frag that we allocated on Tx */
- put_page(virt_to_head_page(sgt));
- } else {
- dma_unmap_single(dpa_bp->dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
- }
-
- return skb;
-}
-
-/* Build a linear skb around the received buffer.
- * We are guaranteed there is enough room at the end of the data buffer to
- * accommodate the shared info area of the skb.
- */
-static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
- const struct qm_fd *fd)
-{
- struct sk_buff *skb = NULL, **skbh;
- ssize_t fd_off = dpa_fd_offset(fd);
- dma_addr_t addr = qm_fd_addr(fd);
- void *vaddr;
-
- vaddr = phys_to_virt(addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
- /* Retrieve the skb and adjust data and tail pointers, to make sure
- * forwarded skbs will have enough space on Tx if extra headers
- * are added.
- */
- DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
-
- DPA_ERR_ON(fd_off != priv->rx_headroom);
- skb_reserve(skb, fd_off);
- skb_put(skb, dpa_fd_length(fd));
-
- skb->ip_summed = CHECKSUM_NONE;
-
- return skb;
-}
-
-/* Build an skb with the data of the first S/G entry in the linear portion and
- * the rest of the frame as skb fragments.
- *
- * The page fragment holding the S/G Table is recycled here.
- */
-static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
- const struct qm_fd *fd,
- int *count_ptr)
-{
- const struct qm_sg_entry *sgt;
- dma_addr_t addr = qm_fd_addr(fd);
- ssize_t fd_off = dpa_fd_offset(fd);
- dma_addr_t sg_addr;
- void *vaddr, *sg_vaddr;
- struct dpa_bp *dpa_bp;
- struct page *page, *head_page;
- int frag_offset, frag_len;
- int page_offset;
- int i;
- struct sk_buff *skb = NULL, *skb_tmp, **skbh;
-
- vaddr = phys_to_virt(addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
- dpa_bp = priv->dpa_bp;
- /* Iterate through the SGT entries and add data buffers to the skb */
- sgt = vaddr + fd_off;
- for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- /* Extension bit is not supported */
- DPA_ERR_ON(sgt[i].extension);
-
- /* We use a single global Rx pool */
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));
-
- sg_addr = qm_sg_addr(&sgt[i]);
- sg_vaddr = phys_to_virt(sg_addr);
- DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
-
- dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
- if (i == 0) {
- DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
- DPA_ERR_ON(skb->head != sg_vaddr);
-
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Make sure forwarded skbs will have enough space
- * on Tx, if extra headers are added.
- */
- DPA_ERR_ON(fd_off != priv->rx_headroom);
- skb_reserve(skb, fd_off);
- skb_put(skb, sgt[i].length);
- } else {
- /* Not the first S/G entry; all data from buffer will
- * be added in an skb fragment; fragment index is offset
- * by one since first S/G entry was incorporated in the
- * linear part of the skb.
- *
- * Caution: 'page' may be a tail page.
- */
- DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
- page = virt_to_page(sg_vaddr);
- head_page = virt_to_head_page(sg_vaddr);
-
- /* Free (only) the skbuff shell because its data buffer
- * is already a frag in the main skb.
- */
- get_page(head_page);
- dev_kfree_skb(skb_tmp);
-
- /* Compute offset in (possibly tail) page */
- page_offset = ((unsigned long)sg_vaddr &
- (PAGE_SIZE - 1)) +
- (page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
- */
- frag_offset = sgt[i].offset + page_offset;
- frag_len = sgt[i].length;
- /* skb_add_rx_frag() does no checking on the page; if
- * we pass it a tail page, we'll end up with
-			 * bad page accounting and eventually with segfaults.
- */
- skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
- frag_len, dpa_bp->size);
- }
- /* Update the pool count for the current {cpu x bpool} */
- (*count_ptr)--;
-
- if (sgt[i].final)
- break;
- }
- WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
-
- /* recycle the SGT fragment */
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
- dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
- return skb;
-}
-
-void _dpa_rx(struct net_device *net_dev,
- struct qman_portal *portal,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid,
- int *count_ptr)
-{
- struct dpa_bp *dpa_bp;
- struct sk_buff *skb;
- dma_addr_t addr = qm_fd_addr(fd);
- u32 fd_status = fd->status;
- unsigned int skb_len;
- struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
-
- if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
- if (net_ratelimit())
- netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd_status & FM_FD_STAT_RX_ERRORS);
-
- percpu_stats->rx_errors++;
- goto _release_frame;
- }
-
- dpa_bp = priv->dpa_bp;
- DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-
- /* prefetch the first 64 bytes of the frame or the SGT start */
- dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
- prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
-
- /* The only FD types that we may receive are contig and S/G */
- DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
-
- if (likely(fd->format == qm_fd_contig))
- skb = contig_fd_to_skb(priv, fd);
- else
- skb = sg_fd_to_skb(priv, fd, count_ptr);
-
- /* Account for either the contig buffer or the SGT buffer (depending on
- * which case we were in) having been removed from the pool.
- */
- (*count_ptr)--;
- skb->protocol = eth_type_trans(skb, net_dev);
-
- /* IP Reassembled frames are allowed to be larger than MTU */
- if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
- !(fd_status & FM_FD_IPR))) {
- percpu_stats->rx_dropped++;
- goto drop_bad_frame;
- }
-
- skb_len = skb->len;
-
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- goto packet_dropped;
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += skb_len;
-
-packet_dropped:
- return;
-
-drop_bad_frame:
- dev_kfree_skb(skb);
- return;
-
-_release_frame:
- dpa_fd_release(net_dev, fd);
-}
-
-static int skb_to_contig_fd(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd,
- int *count_ptr, int *offset)
-{
- struct sk_buff **skbh;
- dma_addr_t addr;
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- struct net_device *net_dev = priv->net_dev;
- int err;
- enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
-
- {
- /* We are guaranteed to have at least tx_headroom bytes
- * available, so just use that for offset.
- */
- fd->bpid = 0xff;
- buffer_start = skb->data - priv->tx_headroom;
- fd->offset = priv->tx_headroom;
- dma_dir = DMA_TO_DEVICE;
-
- DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
- }
-
- /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- * need to write into the skb.
- */
- err = dpa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
- err);
- return err;
- }
-
- /* Fill in the rest of the FD fields */
- fd->format = qm_fd_contig;
- fd->length20 = skb->len;
- fd->cmd |= FM_FD_CMD_FCO;
-
- /* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dpa_bp->dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
- return -EINVAL;
- }
- fd->addr_hi = (u8)upper_32_bits(addr);
- fd->addr_lo = lower_32_bits(addr);
-
- return 0;
-}
-
-static int skb_to_sg_fd(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd)
-{
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- dma_addr_t addr;
- struct sk_buff **skbh;
- struct net_device *net_dev = priv->net_dev;
- int err;
-
- struct qm_sg_entry *sgt;
- void *sgt_buf;
- void *buffer_start;
- skb_frag_t *frag;
- int i, j;
- const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- const int nr_frags = skb_shinfo(skb)->nr_frags;
-
- fd->format = qm_fd_sg;
-
- /* get a page frag to store the SGTable */
- sgt_buf = netdev_alloc_frag(priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags));
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed\n");
- return -ENOMEM;
- }
-
- /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- * need to write into the skb.
- */
- err = dpa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (net_ratelimit())
- netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
- err);
- goto csum_failed;
- }
-
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
- sgt[0].bpid = 0xff;
- sgt[0].offset = 0;
- sgt[0].length = cpu_to_be32(skb_headlen(skb));
- sgt[0].extension = 0;
- sgt[0].final = 0;
- addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sg0_map_failed;
- }
- sgt[0].addr_hi = (u8)upper_32_bits(addr);
- sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
-
- /* populate the rest of SGT entries */
- for (i = 1; i <= nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i - 1];
- sgt[i].bpid = 0xff;
- sgt[i].offset = 0;
- sgt[i].length = cpu_to_be32(frag->size);
- sgt[i].extension = 0;
- sgt[i].final = 0;
-
- DPA_ERR_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
- dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sg_map_failed;
- }
-
- /* keep the offset in the address */
- sgt[i].addr_hi = (u8)upper_32_bits(addr);
- sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
- }
- sgt[i - 1].final = 1;
-
- fd->length20 = skb->len;
- fd->offset = priv->tx_headroom;
-
- /* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
-
- addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- err = -EINVAL;
- goto sgt_map_failed;
- }
-
- fd->bpid = 0xff;
- fd->cmd |= FM_FD_CMD_FCO;
- fd->addr_hi = (u8)upper_32_bits(addr);
- fd->addr_lo = lower_32_bits(addr);
-
- return 0;
-
-sgt_map_failed:
-sg_map_failed:
- for (j = 0; j < i; j++)
- dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
- cpu_to_be32(sgt[j].length), dma_dir);
-sg0_map_failed:
-csum_failed:
- put_page(virt_to_head_page(sgt_buf));
-
- return err;
-}
-
-int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
-{
- struct dpa_priv_s *priv;
- struct qm_fd fd;
- struct dpa_percpu_priv_s *percpu_priv;
- struct rtnl_link_stats64 *percpu_stats;
- int err = 0;
- const int queue_mapping = dpa_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
- int *countptr, offset = 0;
-
- priv = netdev_priv(net_dev);
- /* Non-migratable context, safe to use raw_cpu_ptr */
- percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- percpu_stats = &percpu_priv->stats;
- countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-
- clear_fd(&fd);
-
- if (!nonlinear) {
- /* We're going to store the skb backpointer at the beginning
- * of the data buffer, so we need a privately owned skb
- *
- * We've made sure skb is not shared in dev->priv_flags,
- * we need to verify the skb head is not cloned
- */
- if (skb_cow_head(skb, priv->tx_headroom))
- goto enomem;
-
- BUG_ON(skb_is_nonlinear(skb));
- }
-
- /* MAX_SKB_FRAGS is equal or larger than our DPA_SGT_MAX_ENTRIES;
- * make sure we don't feed FMan with more fragments than it supports.
- * Btw, we're using the first sgt entry to store the linear part of
- * the skb, so we're one extra frag short.
- */
- if (nonlinear &&
- likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
- /* Just create a S/G fd based on the skb */
- err = skb_to_sg_fd(priv, skb, &fd);
- percpu_priv->tx_frag_skbuffs++;
- } else {
- /* If the egress skb contains more fragments than we support
- * we have no choice but to linearize it ourselves.
- */
- if (unlikely(nonlinear) && __skb_linearize(skb))
- goto enomem;
-
- /* Finally, create a contig FD from this skb */
- err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
- }
- if (unlikely(err < 0))
- goto skb_to_fd_failed;
-
- if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
- return NETDEV_TX_OK;
-
- /* dpa_xmit failed */
- if (fd.bpid != 0xff) {
- (*countptr)--;
- dpa_fd_release(net_dev, &fd);
- percpu_stats->tx_errors++;
- return NETDEV_TX_OK;
- }
- _dpa_cleanup_tx_fd(priv, &fd);
-skb_to_fd_failed:
-enomem:
- percpu_stats->tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
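
The dpa_tx() path removed above chooses the frame-descriptor format from the skb layout: a linear skb becomes a contiguous FD, a fragmented skb whose fragment count still fits the S/G table becomes an S/G FD (the first SGT entry is reserved for the linear part, hence the strict comparison), and anything larger is linearized first. A minimal standalone sketch of just that decision, with MAX_SG_ENTRIES used as an assumed placeholder for the driver's DPA_SGT_MAX_ENTRIES:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SG_ENTRIES 16   /* placeholder for DPA_SGT_MAX_ENTRIES */

enum fd_format { FD_CONTIG, FD_SG, FD_LINEARIZE_THEN_CONTIG };

/* S/G only when the skb is nonlinear and its fragments fit the SGT;
 * the strict '<' leaves the first SGT entry for the linear part.
 */
static enum fd_format pick_fd_format(bool nonlinear, int nr_frags)
{
    if (nonlinear && nr_frags < MAX_SG_ENTRIES)
        return FD_SG;
    return nonlinear ? FD_LINEARIZE_THEN_CONTIG : FD_CONTIG;
}

int main(void)
{
    printf("%d %d %d\n", pick_fd_format(false, 0),
           pick_fd_format(true, 3), pick_fd_format(true, 40)); /* 0 1 2 */
    return 0;
}
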
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 46eca272..ee6dfc9a 100644
--- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -80,9 +80,9 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
TP_fast_assign(
__entry->fqid = fq->fqid;
__entry->fd_addr = qm_fd_addr_get64(fd);
- __entry->fd_format = fd->format;
- __entry->fd_offset = dpa_fd_offset(fd);
- __entry->fd_length = dpa_fd_length(fd);
+ __entry->fd_format = qm_fd_get_format(fd);
+ __entry->fd_offset = qm_fd_get_offset(fd);
+ __entry->fd_length = qm_fd_get_length(fd);
__entry->fd_status = fd->status;
__assign_str(name, netdev->name);
),
@@ -99,7 +99,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
*/
/* Tx (egress) fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
@@ -109,7 +109,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
);
/* Rx fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
@@ -119,7 +119,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
);
/* Tx confirmation fd */
-DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd,
TP_PROTO(struct net_device *netdev,
struct qman_fq *fq,
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.c b/linux/drivers/net/ethernet/freescale/fman/fman.c
index 5119b400..c0f26b31 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.c
@@ -16,7 +16,7 @@
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
-// *
+ *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -38,8 +38,8 @@
#include "fman.h"
#include "fman_muram.h"
-#include <asm/mpc85xx.h>
+#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -48,12 +48,12 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <linux/libfdt_env.h>
#ifdef __rtems__
#include <bsp/fdt.h>
#include <bsp/qoriq.h>
#endif /* __rtems__ */
-
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
#define MAX_NUM_OF_MACS 10
@@ -88,31 +88,11 @@
#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
#define EX_DMA_SINGLE_PORT_ECC 0x00008000
-#define DFLT_EXCEPTIONS \
- ((EX_DMA_BUS_ERROR) | \
- (EX_DMA_READ_ECC) | \
- (EX_DMA_SYSTEM_WRITE_ECC) | \
- (EX_DMA_FM_WRITE_ECC) | \
- (EX_FPM_STALL_ON_TASKS) | \
- (EX_FPM_SINGLE_ECC) | \
- (EX_FPM_DOUBLE_ECC) | \
- (EX_QMI_DEQ_FROM_UNKNOWN_PORTID) | \
- (EX_BMI_LIST_RAM_ECC) | \
- (EX_BMI_STORAGE_PROFILE_ECC) | \
- (EX_BMI_STATISTICS_RAM_ECC) | \
- (EX_MURAM_ECC) | \
- (EX_BMI_DISPATCH_RAM_ECC) | \
- (EX_QMI_DOUBLE_ECC) | \
- (EX_QMI_SINGLE_ECC))
-
/* DMA defines */
/* masks */
-#define DMA_MODE_AID_OR 0x20000000
-#define DMA_MODE_SBER 0x10000000
#define DMA_MODE_BER 0x00200000
#define DMA_MODE_ECC 0x00000020
#define DMA_MODE_SECURE_PROT 0x00000800
-#define DMA_MODE_EMER_READ 0x00080000
#define DMA_MODE_AXI_DBG_MASK 0x0F000000
#define DMA_TRANSFER_PORTID_MASK 0xFF000000
@@ -130,7 +110,6 @@
#define DMA_MODE_CEN_SHIFT 13
#define DMA_MODE_CEN_MASK 0x00000007
#define DMA_MODE_DBG_SHIFT 7
-#define DMA_MODE_EMER_LVL_SHIFT 6
#define DMA_MODE_AID_MODE_SHIFT 4
#define DMA_THRESH_COMMQ_SHIFT 24
@@ -160,8 +139,6 @@
#define FPM_RAM_MURAM_ECC 0x00008000
#define FPM_RAM_IRAM_ECC 0x00004000
-#define FPM_RAM_MURAM_TEST_ECC 0x20000000
-#define FPM_RAM_IRAM_TEST_ECC 0x10000000
#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
#define FPM_RAM_IRAM_ECC_EN 0x40000000
@@ -247,8 +224,6 @@
#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
#define QMI_INTR_EN_SINGLE_ECC 0x80000000
-#define QMI_TAPC_TAP 22
-
#define QMI_GS_HALT_NOT_BUSY 0x00000002
/* IRAM defines */
@@ -266,7 +241,6 @@
#define DEFAULT_DMA_DBG_CNT_MODE 0
#define DEFAULT_DMA_SOS_EMERGENCY 0
#define DEFAULT_DMA_WATCHDOG 0
-#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
#define DEFAULT_DISP_LIMIT 0
#define DEFAULT_PRS_DISP_TH 16
#define DEFAULT_PLCR_DISP_TH 16
@@ -509,13 +483,9 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
-struct fman_rg {
- struct fman_fpm_regs __iomem *fpm_rg;
- struct fman_dma_regs __iomem *dma_rg;
- struct fman_bmi_regs __iomem *bmi_rg;
- struct fman_qmi_regs __iomem *qmi_rg;
-};
-
+/* Structure that holds current FMan state.
+ * Used for saving run time information.
+ */
struct fman_state_struct {
u8 fm_id;
u16 fm_clk_freq;
@@ -527,7 +497,6 @@ struct fman_state_struct {
u32 accumulated_fifo_size;
u8 accumulated_num_of_open_dmas;
u8 accumulated_num_of_deq_tnums;
- bool low_end_restriction;
u32 exceptions;
u32 extra_fifo_pool_size;
u8 extra_tasks_pool_size;
@@ -558,6 +527,7 @@ struct fman_state_struct {
struct resource *res;
};
+/* Structure that holds FMan initial configuration */
struct fman_cfg {
u8 disp_limit_tsh;
u8 prs_disp_tsh;
@@ -570,7 +540,6 @@ struct fman_cfg {
u8 fm_ctl2_disp_tsh;
int dma_cache_override;
enum fman_dma_aid_mode dma_aid_mode;
- bool dma_aid_override;
u32 dma_axi_dbg_num_of_beats;
u32 dma_cam_num_of_entries;
u32 dma_watchdog;
@@ -582,31 +551,18 @@ struct fman_cfg {
u32 dma_read_buf_tsh_clr_emer;
u32 dma_sos_emergency;
int dma_dbg_cnt_mode;
- bool dma_stop_on_bus_error;
- bool dma_en_emergency;
- u32 dma_emergency_bus_select;
- int dma_emergency_level;
- bool dma_en_emergency_smoother;
- u32 dma_emergency_switch_counter;
- bool halt_on_external_activ;
- bool halt_on_unrecov_ecc_err;
int catastrophic_err;
int dma_err;
- bool en_muram_test_mode;
- bool en_iram_test_mode;
- bool external_ecc_rams_enable;
- u16 tnum_aging_period;
u32 exceptions;
u16 clk_freq;
- bool pedantic_dma;
u32 cam_base_addr;
u32 fifo_base_addr;
u32 total_fifo_size;
u32 total_num_of_tasks;
- bool qmi_deq_option_support;
u32 qmi_def_tnums_thresh;
};
+/* Structure that holds information received from device tree */
struct fman_dts_params {
void __iomem *base_addr; /* FMan virtual address */
#ifndef __rtems__
@@ -621,10 +577,36 @@ struct fman_dts_params {
u32 qman_channel_base; /* QMan channels base */
u32 num_of_qman_channels; /* Number of QMan channels */
- phys_addr_t muram_phy_base_addr; /* MURAM physical address */
- resource_size_t muram_size; /* MURAM size */
+ struct resource muram_res; /* MURAM resource */
};
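
The device-tree parameters now carry the MURAM range as a struct resource instead of a separate base and size; later code derives the size with resource_size(). A small standalone model of that helper, with a simplified resource type and arbitrary example addresses, assuming the usual inclusive-end convention:

#include <stdint.h>
#include <stdio.h>

struct res { uint64_t start, end; };   /* simplified stand-in for struct resource */

/* Inclusive [start, end] range, so size = end - start + 1 */
static uint64_t res_size(const struct res *r)
{
    return r->end - r->start + 1;
}

int main(void)
{
    struct res muram = { .start = 0xffe000000ULL, .end = 0xffe05ffffULL };
    printf("MURAM size: 0x%llx\n", (unsigned long long)res_size(&muram)); /* 0x60000 */
    return 0;
}
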
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine, will be called upon an exception
+ * passing the exception identification.
+ *
+ * Return: irq status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine, will be called upon bus error,
+ * passing parameters describing the errors and the owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
struct fman {
struct device *dev;
void __iomem *base_addr;
@@ -643,12 +625,11 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
- int fifo_offset;
+ unsigned long fifo_offset;
size_t fifo_size;
- bool reset_on_init;
u32 liodn_base[64];
u32 liodn_offset[64];
@@ -656,38 +637,47 @@ struct fman {
struct fman_dts_params dts_params;
};
-static void fman_exceptions(struct fman *fman, enum fman_exceptions exception)
+static irqreturn_t fman_exceptions(struct fman *fman,
+ enum fman_exceptions exception)
{
- pr_debug("FMan[%d] exception %d\n",
- fman->state->fm_id, exception);
+ dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+ __func__, fman->state->fm_id, exception);
+
+ return IRQ_HANDLED;
}
-static void fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
- u64 __maybe_unused addr, u8 __maybe_unused tnum,
- u16 __maybe_unused liodn)
+static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr,
+ u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
{
- pr_debug("FMan[%d] bus error: port_id[%d]\n",
- fman->state->fm_id, port_id);
+ dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+ __func__, fman->state->fm_id, port_id);
+
+ return IRQ_HANDLED;
}
-static inline void call_mac_isr(struct fman *fman, u8 id)
+static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
{
- if (fman->intr_mng[id].isr_cb)
+ if (fman->intr_mng[id].isr_cb) {
fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
}
static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
{
u8 sw_port_id = 0;
- if (hw_port_id >= BASE_TX_PORTID) {
+ if (hw_port_id >= BASE_TX_PORTID)
sw_port_id = hw_port_id - BASE_TX_PORTID;
- } else if (hw_port_id >= BASE_RX_PORTID) {
+ else if (hw_port_id >= BASE_RX_PORTID)
sw_port_id = hw_port_id - BASE_RX_PORTID;
- } else {
+ else
sw_port_id = 0;
- WARN_ON(false);
- }
return sw_port_id;
}
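
hw_port_id_to_sw_port_id() above maps a hardware port number to a per-type index by subtracting the base of its range, checking the Tx range first, then Rx, and falling back to 0. The same mapping as a standalone sketch; BASE_RX_PORT and BASE_TX_PORT are assumed placeholder values, not the driver's actual BASE_RX_PORTID/BASE_TX_PORTID:

#include <stdint.h>
#include <stdio.h>

#define BASE_RX_PORT 0x08   /* assumed placeholder values */
#define BASE_TX_PORT 0x28

static uint8_t hw_to_sw_port(uint8_t hw)
{
    if (hw >= BASE_TX_PORT)
        return hw - BASE_TX_PORT;   /* Tx range */
    if (hw >= BASE_RX_PORT)
        return hw - BASE_RX_PORT;   /* Rx range */
    return 0;                       /* out of range */
}

int main(void)
{
    printf("%u %u %u\n", hw_to_sw_port(0x2a),
           hw_to_sw_port(0x09), hw_to_sw_port(0x01)); /* 2 1 0 */
    return 0;
}
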
@@ -697,26 +687,26 @@ static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
{
u32 tmp = 0;
- tmp = (u32)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
+ tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
- tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
+ tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
/* order restoration */
if (port_id % 2)
- tmp |= (FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
else
- tmp |= (FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
iowrite32be(tmp, &fpm_rg->fmfp_prc);
}
-static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
+static void set_port_liodn(struct fman *fman, u8 port_id,
u32 liodn_base, u32 liodn_ofst)
{
u32 tmp;
/* set LIODN base for this port */
- tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
+ tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
tmp &= ~DMA_LIODN_BASE_MASK;
tmp |= liodn_base;
@@ -724,8 +714,8 @@ static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
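
set_port_liodn() packs two ports' LIODN bases into each fmdmplr register: odd port ids land in the low field, even ids in the field shifted by DMA_LIODN_SHIFT, with a read-modify-write so the neighbour port's field is preserved. A minimal sketch of that packing on a plain 32-bit word, using assumed placeholder values for the mask and shift:

#include <stdint.h>
#include <stdio.h>

#define LIODN_MASK  0x00000fffu   /* assumed field width */
#define LIODN_SHIFT 16            /* assumed position of the even-port field */

/* One register holds the LIODN base of ports (2n) and (2n + 1). */
static uint32_t set_liodn_base(uint32_t reg, unsigned int port, uint32_t base)
{
    if (port % 2) {
        reg &= ~LIODN_MASK;
        reg |= base & LIODN_MASK;
    } else {
        reg &= ~(LIODN_MASK << LIODN_SHIFT);
        reg |= (base & LIODN_MASK) << LIODN_SHIFT;
    }
    return reg;
}

int main(void)
{
    uint32_t reg = 0;

    reg = set_liodn_base(reg, 2, 0x123);   /* even port -> high field */
    reg = set_liodn_base(reg, 3, 0x456);   /* odd port  -> low field  */
    printf("0x%08x\n", reg);               /* 0x01230456 */
    return 0;
}
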
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -758,24 +748,14 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
cfg->dma_err = DEFAULT_DMA_ERR;
- cfg->halt_on_external_activ = false;
- cfg->halt_on_unrecov_ecc_err = false;
- cfg->en_iram_test_mode = false;
- cfg->en_muram_test_mode = false;
- cfg->external_ecc_rams_enable = false;
- cfg->dma_aid_override = false;
cfg->dma_aid_mode = DEFAULT_AID_MODE;
cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
- cfg->dma_en_emergency = false;
cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
- cfg->dma_en_emergency_smoother = false;
- cfg->dma_emergency_switch_counter =
- DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
@@ -785,11 +765,6 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
-
- cfg->pedantic_dma = false;
- cfg->tnum_aging_period = 0;
- cfg->dma_stop_on_bus_error = false;
- cfg->qmi_deq_option_support = false;
}
static int dma_init(struct fman *fman)
@@ -808,37 +783,23 @@ static int dma_init(struct fman *fman)
/* configure mode register */
tmp_reg = 0;
tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
- if (cfg->dma_aid_override)
- tmp_reg |= DMA_MODE_AID_OR;
if (cfg->exceptions & EX_DMA_BUS_ERROR)
tmp_reg |= DMA_MODE_BER;
if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
(cfg->exceptions & EX_DMA_READ_ECC) |
(cfg->exceptions & EX_DMA_FM_WRITE_ECC))
tmp_reg |= DMA_MODE_ECC;
- if (cfg->dma_stop_on_bus_error)
- tmp_reg |= DMA_MODE_SBER;
if (cfg->dma_axi_dbg_num_of_beats)
tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
((cfg->dma_axi_dbg_num_of_beats - 1)
<< DMA_MODE_AXI_DBG_SHIFT));
- if (cfg->dma_en_emergency) {
- tmp_reg |= cfg->dma_emergency_bus_select;
- tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
- if (cfg->dma_en_emergency_smoother)
- iowrite32be(cfg->dma_emergency_switch_counter,
- &dma_rg->fmdmemsr);
- }
tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
tmp_reg |= DMA_MODE_SECURE_PROT;
tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
- if (cfg->pedantic_dma)
- tmp_reg |= DMA_MODE_EMER_READ;
-
iowrite32be(tmp_reg, &dma_rg->fmdmmr);
/* configure thresholds register */
@@ -874,7 +835,8 @@ static int dma_init(struct fman *fman)
(u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
@@ -888,22 +850,24 @@ static int dma_init(struct fman *fman)
fman->cam_offset = fman_muram_alloc(fman->muram,
fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
if (fman->cfg->dma_cam_num_of_entries % 8 ||
fman->cfg->dma_cam_num_of_entries > 32) {
- pr_err("wrong dma_cam_num_of_entries\n");
+ dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+ __func__);
return -EINVAL;
}
cam_base_addr = (u32 __iomem *)
fman_muram_offset_to_vbase(fman->muram,
fman->cam_offset);
- out_be32(cam_base_addr,
- ~((1 << (32 - fman->cfg->dma_cam_num_of_entries)) -
- 1));
+ iowrite32be(~((1 <<
+ (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+ cam_base_addr);
}
fman->cfg->cam_base_addr = fman->cam_offset;
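
The CAM initialisation above writes ~((1 << (32 - n)) - 1), a 32-bit word with the top n bits set, one bit per enabled DMA CAM entry (the surrounding checks require n to be a multiple of 8 and at most 32). A tiny standalone check of that expression:

#include <stdint.h>
#include <stdio.h>

/* Top-n-bits mask used to enable n DMA CAM entries (8 <= n <= 32). */
static uint32_t cam_enable_mask(unsigned int n)
{
    return ~((1u << (32 - n)) - 1u);
}

int main(void)
{
    printf("0x%08x 0x%08x 0x%08x\n",
           cam_enable_mask(8),    /* 0xff000000 */
           cam_enable_mask(16),   /* 0xffff0000 */
           cam_enable_mask(32));  /* 0xffffffff */
    return 0;
}
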
@@ -948,10 +912,10 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
- if (!cfg->halt_on_external_activ)
- tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
- if (!cfg->halt_on_unrecov_ecc_err)
- tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ /* FMan is not halted upon external halt activation */
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+ /* FMan is not halted upon an unrecoverable ECC error */
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
/* clear all fmCtls event registers */
@@ -964,17 +928,7 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
*/
/* event bits */
tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
- /* Rams enable not effected by RCR bit,
- * but by a COP configuration
- */
- if (cfg->external_ecc_rams_enable)
- tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
-
- /* enable test mode */
- if (cfg->en_muram_test_mode)
- tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
- if (cfg->en_iram_test_mode)
- tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
+
iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
tmp_reg = 0;
@@ -1031,8 +985,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
struct fman_cfg *cfg)
{
u32 tmp_reg;
- u16 period_in_fm_clocks;
- u8 remainder;
/* Init QMI Registers */
@@ -1048,22 +1000,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
/* enable events */
iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
- if (cfg->tnum_aging_period) {
- /* tnum_aging_period is in units of usec, clk_freq in Mhz */
- period_in_fm_clocks = (u16)
- (cfg->tnum_aging_period * cfg->clk_freq);
- /* period_in_fm_clocks must be a 64 multiple */
- remainder = (u8)(period_in_fm_clocks % 64);
- if (remainder) {
- tmp_reg = (u32)((period_in_fm_clocks / 64) + 1);
- } else {
- tmp_reg = (u32)(period_in_fm_clocks / 64);
- if (!tmp_reg)
- tmp_reg = 1;
- }
- tmp_reg <<= QMI_TAPC_TAP;
- iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
- }
tmp_reg = 0;
/* Clear interrupt events */
iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
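
The dropped tnum-aging block converted a period given in microseconds into FMan clocks (clk_freq is in MHz, so clocks = usec * MHz) and then rounded up to 64-clock units, never letting the register field reach zero. A standalone restatement of that arithmetic, kept here only as documentation of the removed behaviour:

#include <stdint.h>
#include <stdio.h>

/* period_usec * clk_mhz = period in FMan clocks; the register field
 * counts 64-clock units, rounded up and clamped to at least 1.
 */
static uint32_t tnum_aging_units(uint16_t period_usec, uint16_t clk_mhz)
{
    uint32_t clocks = (uint32_t)period_usec * clk_mhz;
    uint32_t units = (clocks + 63) / 64;

    return units ? units : 1;
}

int main(void)
{
    printf("%u %u\n", tnum_aging_units(10, 700), tnum_aging_units(0, 700)); /* 110 1 */
    return 0;
}
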
@@ -1073,163 +1009,163 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
-static int enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
+static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
/* Enable all modules */
- /* clear&enable global counters - calculate reg and save for later,
+ /* clear&enable global counters - calculate reg and save for later,
* because it's the same reg for QMI enable
*/
cfg_reg = QMI_CFG_EN_COUNTERS;
- if (cfg->qmi_deq_option_support)
- cfg_reg |= (u32)(((cfg->qmi_def_tnums_thresh) << 8) |
- cfg->qmi_def_tnums_thresh);
- iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
+ /* Set enqueue and dequeue thresholds */
+ cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+
+ iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
- &fman_rg->qmi_rg->fmqm_gc);
+ &fman->qmi_regs->fmqm_gc);
return 0;
}
-static int set_exception(struct fman_rg *fman_rg,
+static int set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 tmp;
switch (exception) {
case FMAN_EX_DMA_BUS_ERROR:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_BER;
else
tmp &= ~DMA_MODE_BER;
/* disable bus error */
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_DMA_READ_ECC:
case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
case FMAN_EX_DMA_FM_WRITE_ECC:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_ECC;
else
tmp &= ~DMA_MODE_ECC;
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_FPM_STALL_ON_TASKS:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_STALL_EN;
else
tmp &= ~FPM_EV_MASK_STALL_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_QMI_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
if (enable)
tmp |= QMI_INTR_EN_SINGLE_ECC;
else
tmp &= ~QMI_INTR_EN_SINGLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
break;
case FMAN_EX_QMI_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
else
tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
else
tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_BMI_LIST_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STATISTICS_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_DISPATCH_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_IRAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_IRAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
case FMAN_EX_MURAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_MURAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
default:
return -EINVAL;
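
Every branch of set_exception() above follows the same read-modify-write shape: read the block's enable register, set or clear one mask bit, write the word back. A generic sketch of that pattern over a plain memory word; the real code goes through ioread32be()/iowrite32be() on big-endian device registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Read-modify-write of a single enable bit in a shadow register word. */
static void rmw_enable_bit(uint32_t *reg, uint32_t bit, bool enable)
{
    uint32_t tmp = *reg;   /* ioread32be() in the driver */

    if (enable)
        tmp |= bit;
    else
        tmp &= ~bit;
    *reg = tmp;            /* iowrite32be() in the driver */
}

int main(void)
{
    uint32_t fake_ier = 0;

    rmw_enable_bit(&fake_ier, 1u << 5, true);
    rmw_enable_bit(&fake_ier, 1u << 7, true);
    rmw_enable_bit(&fake_ier, 1u << 5, false);
    printf("0x%08x\n", fake_ier);   /* 0x00000080 */
    return 0;
}
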
@@ -1346,10 +1282,11 @@ static void free_init_resources(struct fman *fman)
fman->fifo_size);
}
-static void bmi_err_event(struct fman *fman)
+static irqreturn_t bmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&bmi_rg->fmbm_ievr);
mask = ioread32be(&bmi_rg->fmbm_ier);
@@ -1362,19 +1299,22 @@ static void bmi_err_event(struct fman *fman)
iowrite32be(event, &bmi_rg->fmbm_ievr);
if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+
+ return ret;
}
-static void qmi_err_event(struct fman *fman)
+static irqreturn_t qmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_eie);
mask = ioread32be(&qmi_rg->fmqm_eien);
@@ -1388,17 +1328,21 @@ static void qmi_err_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_eie);
if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
- fman->exception_cb(fman, FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+ ret = fman->exception_cb(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+
+ return ret;
}
-static void dma_err_event(struct fman *fman)
+static irqreturn_t dma_err_event(struct fman *fman)
{
u32 status, mask, com_id;
u8 tnum, port_id, relative_port_id;
u16 liodn;
struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ irqreturn_t ret = IRQ_NONE;
status = ioread32be(&dma_rg->fmdmsr);
mask = ioread32be(&dma_rg->fmdmmr);
@@ -1431,22 +1375,26 @@ static void dma_err_event(struct fman *fman)
tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
DMA_TRANSFER_TNUM_SHIFT);
liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
- fman->bus_error_cb(fman, relative_port_id, addr, tnum, liodn);
+ ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+ liodn);
}
if (status & DMA_STATUS_FM_SPDAT_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
if (status & DMA_STATUS_READ_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
if (status & DMA_STATUS_FM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+
+ return ret;
}
-static void fpm_err_event(struct fman *fman)
+static irqreturn_t fpm_err_event(struct fman *fman)
{
u32 event;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fmfp_ee);
/* clear the all occurred events */
@@ -1454,18 +1402,21 @@ static void fpm_err_event(struct fman *fman)
if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
(event & FPM_EV_MASK_DOUBLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
if ((event & FPM_EV_MASK_SINGLE_ECC) &&
(event & FPM_EV_MASK_SINGLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+
+ return ret;
}
-static void muram_err_intr(struct fman *fman)
+static irqreturn_t muram_err_intr(struct fman *fman)
{
u32 event, mask;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fm_rcr);
mask = ioread32be(&fpm_rg->fm_rie);
@@ -1474,13 +1425,16 @@ static void muram_err_intr(struct fman *fman)
iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
- fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+
+ return ret;
}
-static void qmi_event(struct fman *fman)
+static irqreturn_t qmi_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_ie);
mask = ioread32be(&qmi_rg->fmqm_ien);
@@ -1493,7 +1447,9 @@ static void qmi_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_ie);
if (event & QMI_INTR_EN_SINGLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+
+ return ret;
}
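
The *_err_event() and qmi_event() helpers now return an irqreturn_t, initialised to IRQ_NONE and overwritten with the callback's result for each pending condition, so the caller can report whether anything was actually handled. A minimal model of that aggregation with plain enums standing in for the kernel's irqreturn_t values:

#include <stdio.h>

enum irqret { IRQ_NONE_, IRQ_HANDLED_ };   /* stand-ins for irqreturn_t values */

static enum irqret handle_event(unsigned int event_bit)
{
    (void)event_bit;
    return IRQ_HANDLED_;   /* pretend the callback handled it */
}

/* Walk the pending bits; the result stays IRQ_NONE_ only if nothing fired. */
static enum irqret err_event(unsigned int pending)
{
    enum irqret ret = IRQ_NONE_;
    unsigned int bit;

    for (bit = 0; bit < 32; bit++)
        if (pending & (1u << bit))
            ret = handle_event(bit);
    return ret;
}

int main(void)
{
    printf("%d %d\n", err_event(0), err_event(0x12));   /* 0 1 */
    return 0;
}
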
static void enable_time_stamp(struct fman *fman)
@@ -1534,23 +1490,29 @@ static void enable_time_stamp(struct fman *fman)
static int clear_iram(struct fman *fman)
{
struct fman_iram_regs __iomem *iram;
- int i;
+ int i, count;
- iram = (struct fman_iram_regs __iomem *)(fman->base_addr + IMEM_OFFSET);
+ iram = fman->base_addr + IMEM_OFFSET;
/* Enable the auto-increment */
- out_be32(&iram->iadd, IRAM_IADD_AIE);
- while (in_be32(&iram->iadd) != IRAM_IADD_AIE)
- ;
+ iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+ if (count == 0)
+ return -EBUSY;
for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
- out_be32(&iram->idata, 0xffffffff);
+ iowrite32be(0xffffffff, &iram->idata);
- out_be32(&iram->iadd, fman->state->fm_iram_size - 4);
- /* Memory barrier */
- mb();
- while (in_be32(&iram->idata) != 0xffffffff)
- ;
+ iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+ if (count == 0)
+ return -EBUSY;
return 0;
}
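
clear_iram() replaces the old unbounded while() spins with a bounded poll: write, then re-read up to 100 times with a 1 microsecond delay, failing with -EBUSY if the expected value never shows up. The same shape, sketched over an ordinary variable instead of a device register:

#include <stdint.h>
#include <stdio.h>

#define EBUSY_ERR 16   /* stand-in for -EBUSY's magnitude */

static volatile uint32_t fake_reg;

static void delay_1us(void) { /* udelay(1) in the driver; no-op here */ }

/* Poll until *reg == expected or the retry budget runs out. */
static int poll_until(volatile uint32_t *reg, uint32_t expected)
{
    int count = 100;

    do {
        delay_1us();
    } while (*reg != expected && --count);

    return count ? 0 : -EBUSY_ERR;
}

int main(void)
{
    fake_reg = 0xffffffff;
    printf("%d %d\n", poll_until(&fake_reg, 0xffffffff),
           poll_until(&fake_reg, 0x1));   /* 0 -16 */
    return 0;
}
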
@@ -1623,9 +1585,10 @@ static int get_module_event(enum fman_event_modules module, u8 mod_id,
switch (module) {
case FMAN_MOD_MAC:
- event = (intr_type == FMAN_INTR_TYPE_ERR) ?
- (FMAN_EV_ERR_MAC0 + mod_id) :
- (FMAN_EV_MAC0 + mod_id);
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_ERR_MAC0 + mod_id;
+ else
+ event = FMAN_EV_MAC0 + mod_id;
break;
case FMAN_MOD_FMAN_CTRL:
if (intr_type == FMAN_INTR_TYPE_ERR)
@@ -1667,14 +1630,15 @@ static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
if ((fman->state->accumulated_fifo_size + fifo) >
(fman->state->total_fifo_size -
fman->state->extra_fifo_pool_size)) {
- pr_err("Requested fifo size and extra size exceed total FIFO size.\n");
+ dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+ __func__);
return -EAGAIN;
}
/* Read, modify and write to HW */
- tmp = (u32)((fifo / FMAN_BMI_FIFO_UNITS - 1) |
- ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
- BMI_EXTRA_FIFO_SIZE_SHIFT));
+ tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT);
iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
/* update accumulated */
@@ -1693,14 +1657,14 @@ static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
if (extra_tasks)
fman->state->extra_tasks_pool_size =
- (u8)max(fman->state->extra_tasks_pool_size, extra_tasks);
+ max(fman->state->extra_tasks_pool_size, extra_tasks);
/* check that there are enough uncommitted tasks */
if ((fman->state->accumulated_num_of_tasks + tasks) >
(fman->state->total_num_of_tasks -
fman->state->extra_tasks_pool_size)) {
- pr_err("Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
}
/* update accumulated */
@@ -1759,8 +1723,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
if ((fman->state->rev_info.major < 6) &&
(fman->state->accumulated_num_of_open_dmas - current_val +
open_dmas > fman->state->max_num_of_open_dmas)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
} else if ((fman->state->rev_info.major >= 6) &&
!((fman->state->rev_info.major == 6) &&
@@ -1768,8 +1732,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
(fman->state->accumulated_num_of_open_dmas -
current_val + open_dmas >
fman->state->dma_thresh_max_commq + 1)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
- fman->state->fm_id,
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ __func__, fman->state->fm_id,
fman->state->dma_thresh_max_commq + 1);
return -EAGAIN;
}
@@ -1820,8 +1784,9 @@ static int fman_config(struct fman *fman)
goto err_fm_drv;
/* Initialize MURAM block */
- fman->muram = fman_muram_init(fman->dts_params.muram_phy_base_addr,
- fman->dts_params.muram_size);
+ fman->muram =
+ fman_muram_init(fman->dts_params.muram_res.start,
+ resource_size(&fman->dts_params.muram_res));
if (!fman->muram)
goto err_fm_soc_specific;
@@ -1836,24 +1801,31 @@ static int fman_config(struct fman *fman)
#endif /* __rtems__ */
fman->exception_cb = fman_exceptions;
fman->bus_error_cb = fman_bus_error;
- fman->fpm_regs =
- (struct fman_fpm_regs __iomem *)(base_addr + FPM_OFFSET);
- fman->bmi_regs =
- (struct fman_bmi_regs __iomem *)(base_addr + BMI_OFFSET);
- fman->qmi_regs =
- (struct fman_qmi_regs __iomem *)(base_addr + QMI_OFFSET);
- fman->dma_regs =
- (struct fman_dma_regs __iomem *)(base_addr + DMA_OFFSET);
+ fman->fpm_regs = base_addr + FPM_OFFSET;
+ fman->bmi_regs = base_addr + BMI_OFFSET;
+ fman->qmi_regs = base_addr + QMI_OFFSET;
+ fman->dma_regs = base_addr + DMA_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
fman_defconfig(fman->cfg);
- fman->cfg->qmi_deq_option_support = true;
-
fman->state->extra_fifo_pool_size = 0;
- fman->state->exceptions = DFLT_EXCEPTIONS;
- fman->reset_on_init = true;
+ fman->state->exceptions = (EX_DMA_BUS_ERROR |
+ EX_DMA_READ_ECC |
+ EX_DMA_SYSTEM_WRITE_ECC |
+ EX_DMA_FM_WRITE_ECC |
+ EX_FPM_STALL_ON_TASKS |
+ EX_FPM_SINGLE_ECC |
+ EX_FPM_DOUBLE_ECC |
+ EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+ EX_BMI_LIST_RAM_ECC |
+ EX_BMI_STORAGE_PROFILE_ECC |
+ EX_BMI_STATISTICS_RAM_ECC |
+ EX_MURAM_ECC |
+ EX_BMI_DISPATCH_RAM_ECC |
+ EX_QMI_DOUBLE_ECC |
+ EX_QMI_SINGLE_ECC);
/* Read FMan revision for future use*/
fman_get_revision(fman, &fman->state->rev_info);
@@ -1912,20 +1884,108 @@ err_fm_state:
return -EINVAL;
}
+#ifndef __rtems__
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+#ifdef CONFIG_PPC
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+ __func__, guts_node->full_name);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+#endif
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+#ifdef CONFIG_PPC
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+ err = -EBUSY;
+ goto _return;
+ }
+#ifdef CONFIG_PPC
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+
+ goto _return;
+
+#ifdef CONFIG_PPC
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+#endif
+ }
+_return:
+ return err;
+}
+#endif /* __rtems__ */
+
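
For FManV3 on PPC, the new fman_reset() works around erratum A007273 by temporarily clearing this FMan's MAC-disable bits in the guts devdisr2 register, issuing the FPM reset, and then restoring the saved value. Just the bit bookkeeping, as a standalone sketch: the two masks are copied from the hunk above, while the register content and fman id are illustrative fakes:

#include <stdint.h>
#include <stdio.h>

#define FMAN1_ALL_MACS_MASK 0xFCC00000u
#define FMAN2_ALL_MACS_MASK 0x000FCC00u

int main(void)
{
    unsigned int fman_id = 0;            /* dts_params.id in the driver */
    uint32_t devdisr2 = 0xFCCFCC00u;     /* illustrative current value */
    uint32_t saved = devdisr2;
    uint32_t mask = fman_id == 0 ? FMAN1_ALL_MACS_MASK : FMAN2_ALL_MACS_MASK;

    devdisr2 &= ~mask;                   /* enable this FMan's MACs */
    printf("during reset: 0x%08x\n", devdisr2);   /* 0x000fcc00 */

    /* ... FPM_RSTC_FM_RESET would be issued and polled here ... */

    devdisr2 = saved;                    /* restore on the way out */
    printf("after reset:  0x%08x\n", devdisr2);
    return 0;
}
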
static int fman_init(struct fman *fman)
{
struct fman_cfg *cfg = NULL;
- struct fman_rg fman_rg;
- int err = 0, i;
+ int err = 0, i, count;
if (is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
cfg = fman->cfg;
@@ -1948,8 +2008,8 @@ static int fman_init(struct fman *fman)
u32 liodn_base;
fman->liodn_offset[i] =
- ioread32be(&fman_rg.bmi_rg->fmbm_spliodn[i - 1]);
- liodn_base = ioread32be(&fman_rg.dma_rg->fmdmplr[i / 2]);
+ ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
liodn_base &= DMA_LIODN_BASE_MASK;
@@ -1961,23 +2021,21 @@ static int fman_init(struct fman *fman)
fman->liodn_base[i] = liodn_base;
}
- /* Reset the FM if required. */
- if (fman->reset_on_init) {
- if (fman->state->rev_info.major >= 6) {
- /* Errata A007273 */
- pr_debug("FManV3 reset is not supported!\n");
- } else {
- out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
- /* Memory barrier */
- mb();
- usleep_range(100, 300);
- }
+ err = fman_reset(fman);
+ if (err)
+ return err;
- if (!!(ioread32be(&fman_rg.qmi_rg->fmqm_gs) &
- QMI_GS_HALT_NOT_BUSY)) {
- resume(fman->fpm_regs);
- usleep_range(100, 300);
- }
+ if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+ resume(fman->fpm_regs);
+ /* Wait until QMI is not in halt not busy state */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+ QMI_GS_HALT_NOT_BUSY) && --count);
+ if (count == 0)
+ dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+ __func__);
}
if (clear_iram(fman) != 0)
@@ -2000,9 +2058,10 @@ static int fman_init(struct fman *fman)
/* allocate MURAM for FIFO according to total size */
fman->fifo_offset = fman_muram_alloc(fman->muram,
fman->state->total_fifo_size);
- if (IS_ERR_VALUE(fman->cam_offset)) {
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
free_init_resources(fman);
- pr_err("MURAM alloc for BMI FIFO failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+ __func__);
return -ENOMEM;
}
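
Worth noting: besides the message cleanup, this hunk fixes the error check after the FIFO MURAM allocation, which previously tested cam_offset instead of the freshly assigned fifo_offset. The corrected pattern, reduced to plain C; the error-range test mimics the kernel-style IS_ERR_VALUE() convention and the allocator is a toy, both assumptions for illustration only:

#include <stdio.h>

#define ALLOC_FAILED ((unsigned long)-12)   /* assumed sentinel, akin to -ENOMEM */

static unsigned long fake_muram_alloc(unsigned long size)
{
    return size ? 0x1000 : ALLOC_FAILED;    /* toy allocator */
}

static int is_err_value(unsigned long v)
{
    return v >= (unsigned long)-4095;       /* kernel-style error range */
}

int main(void)
{
    unsigned long cam_offset = fake_muram_alloc(64);
    unsigned long fifo_offset = fake_muram_alloc(0);   /* fails */

    /* check the value just assigned, not its neighbour */
    if (is_err_value(fifo_offset))
        printf("FIFO alloc failed (cam_offset=0x%lx was fine)\n", cam_offset);
    return 0;
}
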
@@ -2017,7 +2076,7 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
- err = enable(&fman_rg, cfg);
+ err = enable(fman, cfg);
if (err != 0)
return err;
@@ -2033,16 +2092,10 @@ static int fman_set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 bit_mask = 0;
- struct fman_rg fman_rg;
if (!is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -2050,13 +2103,27 @@ static int fman_set_exception(struct fman *fman,
else
fman->state->exceptions &= ~bit_mask;
} else {
- pr_err("Undefined exception\n");
+ dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+ __func__, exception);
return -EINVAL;
}
- return set_exception(&fman_rg, exception, enable);
+ return set_exception(fman, exception, enable);
}
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @f_isr: The interrupt service routine.
+ * @h_src_arg: Argument to be passed to f_isr.
+ *
+ * Used to register an event handler to be processed by FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
void fman_register_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type,
void (*isr_cb)(void *src_arg), void *src_arg)
@@ -2064,47 +2131,61 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
/* register in local FM structure */
fman->intr_mng[event].isr_cb = isr_cb;
fman->intr_mng[event].src_handle = src_arg;
}
-
+EXPORT_SYMBOL(fman_register_intr);
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @mod: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister an event handler to be processed by FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type)
{
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
fman->intr_mng[event].isr_cb = NULL;
fman->intr_mng[event].src_handle = NULL;
}
+EXPORT_SYMBOL(fman_unregister_intr);
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_port_params(struct fman *fman,
struct fman_port_init_params *port_params)
{
int err;
- unsigned long int_flags;
+ unsigned long flags;
u8 port_id = port_params->port_id, mac_id;
- struct fman_rg fman_rg;
-
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
- spin_lock_irqsave(&fman->spinlock, int_flags);
+ spin_lock_irqsave(&fman->spinlock, flags);
err = set_num_of_tasks(fman, port_params->port_id,
&port_params->num_of_tasks,
&port_params->num_of_extra_tasks);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
/* TX Ports */
if (port_params->port_type != FMAN_PORT_TYPE_RX) {
@@ -2113,7 +2194,7 @@ int fman_set_port_params(struct fman *fman,
/* update qmi ENQ/DEQ threshold */
fman->state->accumulated_num_of_deq_tnums +=
port_params->deq_pipeline_depth;
- enq_th = (ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
/* if enq_th is too big, we reduce it to the max value
* that is still 0
@@ -2124,13 +2205,13 @@ int fman_set_port_params(struct fman *fman,
fman->state->qmi_max_num_of_tnums -
fman->state->accumulated_num_of_deq_tnums - 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_ENQ_MASK;
reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
- deq_th = ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_DEQ_MASK;
/* if deq_th is too small, we enlarge it to the min
* value that is still 0.
@@ -2139,59 +2220,70 @@ int fman_set_port_params(struct fman *fman,
*/
if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
(deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
- deq_th =
- fman->state->accumulated_num_of_deq_tnums + 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_DEQ_MASK;
reg |= deq_th;
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
}
err = set_size_of_fifo(fman, port_params->port_id,
&port_params->size_of_fifo,
&port_params->extra_size_of_fifo);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
err = set_num_of_open_dmas(fman, port_params->port_id,
&port_params->num_of_open_dmas,
&port_params->num_of_extra_open_dmas);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
- set_port_liodn(&fman_rg, port_id, fman->liodn_base[port_id],
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
fman->liodn_offset[port_id]);
if (fman->state->rev_info.major < 6)
- set_port_order_restoration(fman_rg.fpm_rg, port_id);
+ set_port_order_restoration(fman->fpm_regs, port_id);
mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
fman->state->port_mfl[mac_id] = port_params->max_frame_length;
} else {
- pr_warn("Port max_frame_length is smaller than MAC current MTU\n");
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return -EINVAL;
+ dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+ __func__, port_id, mac_id);
+ err = -EINVAL;
+ goto return_err;
}
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ spin_unlock_irqrestore(&fman->spinlock, flags);
return 0;
+
+return_err:
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+ return err;
}
+EXPORT_SYMBOL(fman_set_port_params);
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_reset_mac(struct fman *fman, u8 mac_id)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u32 msk, timeout = 100;
if (fman->state->rev_info.major >= 6) {
- pr_warn("FMan MAC reset no available for FMan V3!\n");
+ dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
+ __func__);
return -EINVAL;
}
@@ -2228,7 +2320,8 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
msk = FPM_RSTC_MAC9_RESET;
break;
default:
- pr_warn("Illegal MAC Id\n");
+ dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+ __func__, mac_id);
return -EINVAL;
}
@@ -2242,125 +2335,73 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
return 0;
}
+EXPORT_SYMBOL(fman_reset_mac);
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
{
/* if port is already initialized, check that MaxFrameLength is smaller
* or equal to the port's max
*/
if ((!fman->state->port_mfl[mac_id]) ||
- (fman->state->port_mfl[mac_id] &&
- (mfl <= fman->state->port_mfl[mac_id]))) {
+ (mfl <= fman->state->port_mfl[mac_id])) {
fman->state->mac_mfl[mac_id] = mfl;
} else {
- pr_warn("MAC max_frame_length is larger than Port max_frame_length\n");
+ dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+ __func__);
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL(fman_set_mac_max_frame);
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
u16 fman_get_clock_freq(struct fman *fman)
{
return fman->state->fm_clk_freq;
}
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
u32 fman_get_bmi_max_fifo_size(struct fman *fman)
{
return fman->state->bmi_max_fifo_size;
}
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
-static void fman_event_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return;
-
- fpm_rg = fman->fpm_regs;
-
- /* normal interrupts */
- pending = ioread32be(&fpm_rg->fm_npi);
- if (!pending)
- return;
-
- if (pending & INTR_EN_QMI)
- qmi_event(fman);
-
- /* MAC interrupts */
- if (pending & INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_MAC0 + 0);
- if (pending & INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_MAC0 + 1);
- if (pending & INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_MAC0 + 2);
- if (pending & INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_MAC0 + 3);
- if (pending & INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_MAC0 + 4);
- if (pending & INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_MAC0 + 5);
- if (pending & INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_MAC0 + 6);
- if (pending & INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_MAC0 + 7);
- if (pending & INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_MAC0 + 8);
- if (pending & INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_MAC0 + 9);
-}
-
-static int fman_error_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return -EINVAL;
-
- fpm_rg = fman->fpm_regs;
-
- /* error interrupts */
- pending = ioread32be(&fpm_rg->fm_epi);
- if (!pending)
- return -EINVAL;
-
- if (pending & ERR_INTR_EN_BMI)
- bmi_err_event(fman);
- if (pending & ERR_INTR_EN_QMI)
- qmi_err_event(fman);
- if (pending & ERR_INTR_EN_FPM)
- fpm_err_event(fman);
- if (pending & ERR_INTR_EN_DMA)
- dma_err_event(fman);
- if (pending & ERR_INTR_EN_MURAM)
- muram_err_intr(fman);
-
- /* MAC error interrupts */
- if (pending & ERR_INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
- if (pending & ERR_INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
- if (pending & ERR_INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
- if (pending & ERR_INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
- if (pending & ERR_INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
- if (pending & ERR_INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
- if (pending & ERR_INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
- if (pending & ERR_INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
- if (pending & ERR_INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
- if (pending & ERR_INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
-
- return 0;
-}
-
+/**
+ * fman_get_revision
+ * @fman: Pointer to the FMan module
+ * @rev_info: A structure of revision information parameters.
+ *
+ * Returns the FM revision
+ *
+ * Allowed only following fman_init().
+ */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
{
u32 tmp;
@@ -2370,7 +2411,17 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
FPM_REV1_MAJOR_SHIFT);
rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
}
+EXPORT_SYMBOL(fman_get_revision);
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
{
int i;
@@ -2396,11 +2447,21 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
return fman->state->qman_channel_base + i;
}
+EXPORT_SYMBOL(fman_get_qman_channel_id);
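An illustrative consumer (hypothetical caller; tx_port_hw_id stands for the port's hardware id obtained from the port driver): the returned value is normally used as the dedicated QMan channel for the port's egress frame queues.

	u32 channel;

	channel = fman_get_qman_channel_id(fman, tx_port_hw_id);
	/* typically stored in per-port private data and later used as
	 * the destination channel when initializing egress frame queues
	 */
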
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
struct resource *fman_get_mem_region(struct fman *fman)
{
return fman->state->res;
}
+EXPORT_SYMBOL(fman_get_mem_region);
/* Bootargs defines */
/* Extra headroom for RX buffers - Default, min and max */
@@ -2422,7 +2483,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
* particular forwarding scenarios that add extra headers to the
* forwarded frame.
*/
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
module_param(fsl_fm_rx_extra_headroom, int, 0);
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
@@ -2436,13 +2497,18 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* fm_set_max_frm() callback.
*/
#ifndef __rtems__
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
#else /* __rtems__ */
-int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
#endif /* __rtems__ */
module_param(fsl_fm_max_frm, int, 0);
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
u16 fman_get_max_frm(void)
{
static bool fm_check_mfl;
@@ -2464,6 +2530,11 @@ u16 fman_get_max_frm(void)
}
EXPORT_SYMBOL(fman_get_max_frm);
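As an illustration of how the module parameter propagates to an Ethernet driver (assumed consumer code; VLAN_ETH_HLEN and ETH_FCS_LEN are the usual kernel header constants, not values defined in this patch):

	/* largest L3 MTU that still fits a VLAN-tagged frame plus FCS */
	u16 max_mtu = fman_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN);
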
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
int fman_get_rx_extra_headroom(void)
{
static bool fm_check_rx_extra_headroom;
@@ -2479,7 +2550,7 @@ int fman_get_rx_extra_headroom(void)
fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
}
- fsl_fm_rx_extra_headroom = true;
+ fm_check_rx_extra_headroom = true;
fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
}
@@ -2487,32 +2558,202 @@ int fman_get_rx_extra_headroom(void)
}
EXPORT_SYMBOL(fman_get_rx_extra_headroom);
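A sketch of the typical consumer (assumed, not part of the patch; priv_data_size is a placeholder for the driver's per-buffer private area): the Ethernet driver folds the extra headroom into its Rx buffer layout and keeps the total 16-byte aligned, matching what the helper itself enforces.

	u16 headroom = fman_get_rx_extra_headroom() + priv_data_size;

	headroom = ALIGN(headroom, 16);	/* keep the DMA-friendly alignment */
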
+/**
+ * fman_bind
+ * @fm_dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
struct fman *fman_bind(struct device *fm_dev)
{
return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
}
+EXPORT_SYMBOL(fman_bind);
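A minimal sketch of how a port or MAC driver attaches to its parent FMan (assumed probe-side code; fm_dev would be the parent OF device referenced by the port node):

	struct fman *fman;

	fman = fman_bind(fm_dev);
	if (!fman)
		return -ENODEV;
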
-void fman_unbind(struct fman *fman)
+static irqreturn_t fman_err_irq(int irq, void *handle)
{
- put_device(fman->dev);
-}
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
-struct device *fman_get_device(struct fman *fman)
-{
- return fman->dev;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & ERR_INTR_EN_BMI) {
+ single_ret = bmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_QMI) {
+ single_ret = qmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_FPM) {
+ single_ret = fpm_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_DMA) {
+ single_ret = dma_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MURAM) {
+ single_ret = muram_err_intr(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
-static irqreturn_t fman_irq(int irq, void *fman)
+static irqreturn_t fman_irq(int irq, void *handle)
{
- fman_event_isr(fman);
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
- return IRQ_HANDLED;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & INTR_EN_QMI) {
+ single_ret = qmi_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
#ifndef __rtems__
static const struct of_device_id fman_muram_match[] = {
{
- .compatible = "fsl,fman-muram"},
+ .compatible = "fsl,fman-muram"},
{}
};
MODULE_DEVICE_TABLE(of, fman_muram_match);
@@ -2528,8 +2769,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
const char *fdt = bsp_fdt_get();
struct device_node *fm_node;
#endif /* __rtems__ */
- const u32 *u32_prop;
- int lenp, err, irq;
+ u32 val, range[2];
+ int err, irq;
#ifndef __rtems__
struct clk *clk;
u32 clk_rate;
@@ -2545,22 +2786,20 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fm_node = of_node_get(of_dev->dev.of_node);
- u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, cell-index) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32(fm_node, "cell-index", &val);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32)))
- goto fman_node_put;
-
- fman->dts_params.id = (u8)*u32_prop;
+ fman->dts_params.id = (u8)val;
#ifndef __rtems__
/* Get the FM interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_err("Can't get FMan IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
+ __func__);
goto fman_node_put;
}
irq = res->start;
@@ -2568,7 +2807,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM error interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
if (!res) {
- pr_err("Can't get FMan Error IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
+ __func__);
goto fman_node_put;
}
fman->dts_params.err_irq = res->start;
@@ -2576,12 +2816,14 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- pr_err("Can't get FMan memory resouce\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
+ __func__);
goto fman_node_put;
}
phys_base_addr = res->start;
- mem_size = res->end + 1 - res->start;
+ mem_size = resource_size(res);
+
#else /* __rtems__ */
irq = of_irq_to_resource(fm_node, 0, NULL);
fman->dts_params.err_irq = of_irq_to_resource(fm_node, 1, NULL);
@@ -2590,57 +2832,54 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
#endif /* __rtems__ */
#ifndef __rtems__
- clk = of_clk_get_by_name(fm_node, NULL);
+ clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
- pr_err("Failed to get FM%d clock structure\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
- pr_err("Failed to determine FM%d clock rate\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
/* Rounding to MHz */
- fman->dts_params.clk_freq = (u16)((clk_rate + 500000) / 1000000);
+ fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
#else /* __rtems__ */
/* FIXME */
fman->dts_params.clk_freq = 733;
#endif /* __rtems__ */
- u32_prop = (const u32 *)of_get_property(fm_node,
- "fsl,qman-channel-range",
- &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, fsl,qman-channel-range) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+ &range[0], 2);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32) * 2))
- goto fman_node_put;
- fman->dts_params.qman_channel_base = u32_prop[0];
- fman->dts_params.num_of_qman_channels = u32_prop[1];
+ fman->dts_params.qman_channel_base = range[0];
+ fman->dts_params.num_of_qman_channels = range[1];
/* Get the MURAM base address and size */
#ifndef __rtems__
/* FIXME */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
- pr_err("could not find MURAM node\n");
+ dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ __func__);
goto fman_node_put;
}
- err = of_address_to_resource(muram_node, 0, res);
+ err = of_address_to_resource(muram_node, 0,
+ &fman->dts_params.muram_res);
if (err) {
of_node_put(muram_node);
- pr_err("of_address_to_resource() = %d\n", err);
+ dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ __func__, err);
goto fman_node_put;
}
-
- fman->dts_params.muram_phy_base_addr = res->start;
- fman->dts_params.muram_size = res->end + 1 - res->start;
#else /* __rtems__ */
{
int node = fdt_node_offset_by_compatible(fdt,
@@ -2655,48 +2894,55 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
pr_err("could not find MURAM node\n");
goto fman_node_put;
}
- fman->dts_params.muram_phy_base_addr = phys_base_addr +
- res.start;
- fman->dts_params.muram_size = res.end - res.start;
+ fman->dts_params.muram_res.start = phys_base_addr + res.start;
+ fman->dts_params.muram_res.end = phys_base_addr + res.end - 1;
}
#endif /* __rtems__ */
- {
- /* In B4 rev 2.0 (and above) the MURAM size is 512KB.
- * Check the SVR and update MURAM size if required.
- */
- u32 svr;
-
- svr = mfspr(SPRN_SVR);
-
- if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) >= 2))
- fman->dts_params.muram_size = 0x80000;
- }
-
#ifndef __rtems__
of_node_put(muram_node);
#endif /* __rtems__ */
- of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq,
- IRQF_NO_SUSPEND, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n", irq, err);
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
goto fman_free;
}
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+ fman_err_irq, IRQF_SHARED,
+ "fman-err", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, fman->dts_params.err_irq, err);
+ goto fman_free;
+ }
+ }
+
#ifndef __rtems__
fman->dts_params.res =
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
- pr_err("request_mem_region() failed\n");
+ dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+ __func__);
goto fman_free;
}
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (fman->dts_params.base_addr == 0) {
- pr_err("devm_ioremap() failed\n");
+ if (!fman->dts_params.base_addr) {
+ dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+ goto fman_free;
+ }
+
+ fman->dev = &of_dev->dev;
+
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
goto fman_free;
}
#endif /* __rtems__ */
@@ -2710,14 +2956,6 @@ fman_free:
return NULL;
}
-static irqreturn_t fman_err_irq(int irq, void *fman)
-{
- if (fman_error_isr(fman) == 0)
- return IRQ_HANDLED;
-
- return IRQ_NONE;
-}
-
static int fman_probe(struct platform_device *of_dev)
{
struct fman *fman;
@@ -2730,26 +2968,14 @@ static int fman_probe(struct platform_device *of_dev)
if (!fman)
return -EIO;
- if (fman->dts_params.err_irq != 0) {
- err = devm_request_irq(dev, fman->dts_params.err_irq,
- fman_err_irq,
- IRQF_SHARED | IRQF_NO_SUSPEND,
- "fman-err", fman);
- if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n",
- fman->dts_params.err_irq, err);
- return -EINVAL;
- }
- }
-
err = fman_config(fman);
if (err) {
- pr_err("FMan config failed\n");
+ dev_err(dev, "%s: FMan config failed\n", __func__);
return -EINVAL;
}
if (fman_init(fman) != 0) {
- pr_err("FMan init failed\n");
+ dev_err(dev, "%s: FMan init failed\n", __func__);
return -EINVAL;
}
@@ -2775,9 +3001,7 @@ static int fman_probe(struct platform_device *of_dev)
dev_set_drvdata(dev, fman);
- fman->dev = dev;
-
- pr_debug("FM%d probed\n", fman->dts_params.id);
+ dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
return 0;
}
@@ -2785,21 +3009,42 @@ static int fman_probe(struct platform_device *of_dev)
#ifndef __rtems__
static const struct of_device_id fman_match[] = {
{
- .compatible = "fsl,fman"},
+ .compatible = "fsl,fman"},
{}
};
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
static struct platform_driver fman_driver = {
.driver = {
- .name = "fsl-fman",
- .of_match_table = fman_match,
- },
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
.probe = fman_probe,
};
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+ platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
#else /* __rtems__ */
#include <sys/cdefs.h>
#include <sys/param.h>
@@ -2807,7 +3052,7 @@ builtin_platform_driver(fman_driver);
#include <sys/bus.h>
#include <sys/kernel.h>
-void
+int
fman_reset(struct fman *fman)
{
@@ -2825,6 +3070,8 @@ fman_reset(struct fman *fman)
QMI_GS_HALT_NOT_BUSY)) {
usleep_range(100, 300);
}
+
+ return (0);
}
struct fman_softc {
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.h b/linux/drivers/net/ethernet/freescale/fman/fman.h
index 291d263a..4af36c66 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.h
@@ -58,7 +58,6 @@
/* TX Port: Length Error */
#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
-
/* Rx FIFO overflow, FCS error, code error, running disparity error
* (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
* PHY error control character detected.
@@ -167,8 +166,8 @@ struct fman_prs_result {
u8 ip_off[2]; /* IP offset */
u8 gre_off; /* GRE offset */
u8 l4_off; /* Layer 4 offset */
- u8 nxthdr_off; /** Parser end point */
-} __attribute__((__packed__));
+ u8 nxthdr_off; /* Parser end point */
+};
/* A structure for defining buffer prefix area content. */
struct fman_buffer_prefix_content {
@@ -237,29 +236,6 @@ struct fman_buf_pool_depletion {
bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
};
-/** fman_exceptions_cb
- * fman - Pointer to FMan
- * exception - The exception.
- *
- * Exceptions user callback routine, will be called upon an exception
- * passing the exception identification.
- */
-typedef void (fman_exceptions_cb)(struct fman *fman,
- enum fman_exceptions exception);
-
-/** fman_bus_error_cb
- * fman - Pointer to FMan
- * port_id - Port id
- * addr - Address that caused the error
- * tnum - Owner of error
- * liodn - Logical IO device number
- *
- * Bus error user callback routine, will be called upon bus error,
- * passing parameters describing the errors and the owner.
- */
-typedef void (fman_bus_error_cb)(struct fman *fman, u8 port_id, u64 addr,
- u8 tnum, u16 liodn);
-
/* Enum for inter-module interrupts registration */
enum fman_event_modules {
FMAN_MOD_MAC = 0, /* MAC event */
@@ -325,176 +301,37 @@ struct fman_port_init_params {
/* LIODN base for this port, to be used together with LIODN offset. */
};
-struct fman;
-
-/**
- * fman_get_revision
- * @fman - Pointer to the FMan module
- * @rev_info - A structure of revision information parameters.
- *
- * Returns the FM revision
- *
- * Allowed only following fman_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
-/**
- * fman_register_intr
- * @fman: A Pointer to FMan device
- * @mod: Calling module
- * @mod_id: Module id (if more than 1 exists, '0' if not)
- * @intr_type: Interrupt type (error/normal) selection.
- * @f_isr: The interrupt service routine.
- * @h_src_arg: Argument to be passed to f_isr.
- *
- * Used to register an event handler to be processed by FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
u8 mod_id, enum fman_intr_type intr_type,
void (*f_isr)(void *h_src_arg), void *h_src_arg);
-/**
- * fman_unregister_intr
- * @fman: A Pointer to FMan device
- * @mod: Calling module
- * @mod_id: Module id (if more than 1 exists, '0' if not)
- * @intr_type: Interrupt type (error/normal) selection.
- *
- * Used to unregister an event handler to be processed by FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
u8 mod_id, enum fman_intr_type intr_type);
-/**
- * fman_set_port_params
- * @fman: A Pointer to FMan device
- * @port_params: Port parameters
- *
- * Used by FMan Port to pass parameters to the FMan
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_port_params(struct fman *fman,
struct fman_port_init_params *port_params);
-/**
- * fman_reset_mac
- * @fman: A Pointer to FMan device
- * @mac_id: MAC id to be reset
- *
- * Reset a specific MAC
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_reset_mac(struct fman *fman, u8 mac_id);
-/**
- * fman_get_clock_freq
- * @fman: A Pointer to FMan device
- *
- * Get FMan clock frequency
- *
- * Return: FMan clock frequency
- */
-
u16 fman_get_clock_freq(struct fman *fman);
-/**
- * fman_get_bmi_max_fifo_size
- * @fman: A Pointer to FMan device
- *
- * Get FMan maximum FIFO size
- *
- * Return: FMan Maximum FIFO size
- */
u32 fman_get_bmi_max_fifo_size(struct fman *fman);
-/**
- * fman_set_mac_max_frame
- * @fman: A Pointer to FMan device
- * @mac_id: MAC id
- * @mfl: Maximum frame length
- *
- * Set maximum frame length of specific MAC in FMan driver
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
-/**
- * fman_get_qman_channel_id
- * @fman: A Pointer to FMan device
- * @port_id: Port id
- *
- * Get QMan channel ID associated to the Port id
- *
- * Return: QMan channel ID
- */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
-/**
- * fman_get_mem_region
- * @fman: A Pointer to FMan device
- *
- * Get FMan memory region
- *
- * Return: A structure with FMan memory region information
- */
struct resource *fman_get_mem_region(struct fman *fman);
-/**
- * fman_get_max_frm
- *
- * Return: Max frame length configured in the FM driver
- */
u16 fman_get_max_frm(void);
-/**
- * fman_get_rx_extra_headroom
- *
- * Return: Extra headroom size configured in the FM driver
- */
int fman_get_rx_extra_headroom(void);
-/**
- * fman_bind
- * @dev: FMan OF device pointer
- *
- * Bind to a specific FMan device.
- *
- * Allowed only after the port was created.
- *
- * Return: A pointer to the FMan device
- */
struct fman *fman_bind(struct device *dev);
-
-/**
- * fman_unbind
- * @fman: Pointer to the FMan device
- *
- * Un-bind from a specific FMan device.
- *
- * Allowed only after the port was created.
- */
-void fman_unbind(struct fman *fman);
-
-/**
- * fman_get_device
- * @fman: A pointer to the FMan device.
- *
- * Get the FMan device pointer
- *
- * Return: Pointer to FMan device.
- */
-struct device *fman_get_device(struct fman *fman);
#ifdef __rtems__
-void fman_reset(struct fman *fman);
+int fman_reset(struct fman *fman);
#endif /* __rtems__ */
#endif /* __FM_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 5be951b8..3bc70a43 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -36,8 +36,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "crc_mac_addr_ext.h"
-
#include "fman_dtsec.h"
#include "fman.h"
@@ -46,30 +44,23 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/of_mdio.h>
+#include <linux/mii.h>
-/* MII Management Command Register */
-#define MIIMCOM_READ_CYCLE 0x00000001
-
-/* MII Management Address Register */
-#define MIIMADD_PHY_ADDR_SHIFT 8
-
-/* MII Management Indicator Register */
-#define MIIMIND_BUSY 0x00000001
+/* TBI register addresses */
+#define MII_TBICON 0x11
-/* PHY Control Register */
-#define PHY_CR_PHY_RESET 0x8000
-#define PHY_CR_SPEED0 0x2000
-#define PHY_CR_ANE 0x1000
-#define PHY_CR_RESET_AN 0x0200
-#define PHY_CR_FULLDUPLEX 0x0100
-#define PHY_CR_SPEED1 0x0040
+/* TBICON register bit fields */
+#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
+#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
+#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
+#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
+#define TBICON_CLK_SELECT 0x0020 /* Clock select */
+#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
-#define PHY_TBICON_SRESET 0x8000
-#define PHY_TBICON_CLK_SEL 0x0020
-#define PHY_TBIANA_SGMII 0x4001
-#define PHY_TBIANA_1000X 0x01a0
-
-#define DTSEC_TO_MII_OFFSET 0x1000
+#define TBIANA_SGMII 0x4001
+#define TBIANA_1000X 0x01a0
/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
@@ -116,9 +107,7 @@
/* Defaults */
#define DEFAULT_HALFDUP_RETRANSMIT 0xf
#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
-#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
#define DEFAULT_TX_PAUSE_TIME 0xf000
-#define DEFAULT_TBIPA 5
#define DEFAULT_RX_PREPEND 0
#define DEFAULT_PREAMBLE_LEN 7
#define DEFAULT_TX_PAUSE_TIME_EXTD 0
@@ -127,22 +116,6 @@
#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
#define DEFAULT_BACK_TO_BACK_IPG 0x60
#define DEFAULT_MAXIMUM_FRAME 0x600
-#define DEFAULT_TBI_PHY_ADDR 5
-
-#define DTSEC_DEFAULT_EXCEPTIONS \
- ((u32)((DTSEC_IMASK_BREN) |\
- (DTSEC_IMASK_RXCEN) |\
- (DTSEC_IMASK_BTEN) |\
- (DTSEC_IMASK_TXCEN) |\
- (DTSEC_IMASK_TXEEN) |\
- (DTSEC_IMASK_ABRTEN) |\
- (DTSEC_IMASK_LCEN) |\
- (DTSEC_IMASK_CRLEN) |\
- (DTSEC_IMASK_XFUNEN) |\
- (DTSEC_IMASK_IFERREN) |\
- (DTSEC_IMASK_MAGEN) |\
- (DTSEC_IMASK_TDPEEN) |\
- (DTSEC_IMASK_RDPEEN)))
/* register related defines (bits, field offsets..) */
#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
@@ -154,24 +127,17 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
-#define DTSEC_TCTRL_THDF 0x00000800
-#define DTSEC_TCTRL_TTSE 0x00000040
#define DTSEC_TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
-#define RCTRL_CFA 0x00008000
#define RCTRL_GHTX 0x00000400
-#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
-#define RCTRL_BC_REJ 0x00000010
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
#define RCTRL_UPROM 0x00000001
-#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)
#define MACCFG1_SOFT_RESET 0x80000000
-#define MACCFG1_LOOPBACK 0x00000100
#define MACCFG1_RX_FLOW 0x00000020
#define MACCFG1_TX_FLOW 0x00000010
#define MACCFG1_TX_EN 0x00000001
@@ -179,11 +145,7 @@
#define MACCFG2_NIBBLE_MODE 0x00000100
#define MACCFG2_BYTE_MODE 0x00000200
-#define MACCFG2_PRE_AM_RX_EN 0x00000080
-#define MACCFG2_PRE_AM_TX_EN 0x00000040
-#define MACCFG2_LENGTH_CHECK 0x00000010
#define MACCFG2_PAD_CRC_EN 0x00000004
-#define MACCFG2_CRC_EN 0x00000002
#define MACCFG2_FULL_DUPLEX 0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
@@ -197,13 +159,8 @@
#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
-#define HAFDUP_ALT_BEB 0x00080000
-#define HAFDUP_BP_NO_BACKOFF 0x00040000
-#define HAFDUP_NO_BACKOFF 0x00020000
#define HAFDUP_EXCESS_DEFER 0x00010000
#define HAFDUP_COLLISION_WINDOW 0x000003ff
-#define HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK 0x00f00000
-#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
@@ -215,7 +172,6 @@
#define MAX_PACKET_ALIGNMENT 31
#define MAX_INTER_PACKET_GAP 0x7f
-#define MAX_INTER_PALTERNATE_BEB 0x0f
#define MAX_RETRANSMISSION 0x0f
#define MAX_COLLISION_WINDOW 0x03ff
@@ -224,20 +180,6 @@
/* Extended Hash table size (32 bits*16 regs) */
#define EXTENDED_HASH_TABLE_SIZE 512
-/* maximum number of phys */
-#define MAX_PHYS 32
-
-/* MII Configuration Control Memory Map Registers */
-struct dtsec_mii_regs {
- u32 reserved1[72];
- u32 miimcfg; /* MII Mgmt:configuration */
- u32 miimcom; /* MII Mgmt:command */
- u32 miimadd; /* MII Mgmt:address */
- u32 miimcon; /* MII Mgmt:control 3 */
- u32 miimstat; /* MII Mgmt:status */
- u32 miimind; /* MII Mgmt:indicators */
-};
-
/* dTSEC Memory Map registers */
struct dtsec_regs {
/* dTSEC General Control and Status Registers */
@@ -340,43 +282,13 @@ struct dtsec_regs {
* standard 512-bit slot time window. If collisions are detected after this
* byte, the late collision event is asserted and transmission of current
* frame is aborted.
- * rx_drop_bcast:
- * Discard broadcast frames. If set, all broadcast frames will be discarded
- * by dTSEC.
- * rx_short_frm:
- * Accept short frames. If set, dTSEC will accept frames of length 14-63 bytes.
- * rx_len_check:
- * Length check for received frames. If set, the MAC checks the frame's length
- * field on receive to ensure it matches the actual data field length.
- * This only works for received frames with length field less than 1500.
- * No check is performed for larger frames.
* tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
* appends a CRC to every frame regardless of padding requirement.
- * tx_crc:
- * Transmission CRC enable. If set, the MAC appends a CRC to all frames.
- * If frames presented to the MAC have a valid length and contain a valid CRC,
- * tx_crc should be reset. This field is ignored if tx_pad_crc is set.
- * rx_ctrl_acc:
- * Control frame accept. If set, this overrides 802.3 standard control frame
- * behavior, and all Ethernet frames that have an ethertype of 0x8808 are
- * treated as normal Ethernet frames and passed up to the packet interface on
- * a DA match. Received pause control frames are passed to the packet
- * interface only if Rx flow control is also disabled.
- * See dtsec_accept_rx_pause_frames() function.
* tx_pause_time:
* Transmit pause time value. This pause value is used as part of the pause
* frame to be sent when a transmit pause frame is initiated.
* If set to 0 this disables transmission of pause frames.
- * rx_preamble:
- * Receive preamble enable. If set, the MAC recovers the received Ethernet
- * 7-byte preamble and passes it to the packet interface at the start of each
- * received frame.
- * This field should be reset for internal MAC loop-back mode.
- * tx_preamble:
- * User defined preamble enable for transmitted frames.
- * If set, a user-defined preamble must passed to the MAC and it is
- * transmitted instead of the standard preamble.
* preamble_len:
* Length, in bytes, of the preamble field preceding each Ethernet
* start-of-frame delimiter byte. The default value of 0x7 should be used in
@@ -393,36 +305,14 @@ struct dtsec_regs {
* obtained by calling set_dflts().
*/
struct dtsec_cfg {
- bool halfdup_on;
- bool halfdup_alt_backoff_en;
- bool halfdup_excess_defer;
- bool halfdup_no_backoff;
- bool halfdup_bp_no_backoff;
- u32 halfdup_alt_backoff_val;
u16 halfdup_retransmit;
u16 halfdup_coll_window;
- bool rx_drop_bcast;
- bool rx_short_frm;
- bool rx_len_check;
bool tx_pad_crc;
- bool tx_crc;
- bool rx_ctrl_acc;
u16 tx_pause_time;
- u16 tbipa;
bool ptp_tsu_en;
bool ptp_exception_en;
- bool rx_preamble;
- bool tx_preamble;
u32 preamble_len;
u32 rx_prepend;
- bool loopback;
- bool rx_time_stamp_en;
- bool tx_time_stamp_en;
- bool rx_flow;
- bool tx_flow;
- bool rx_group_hash_exd;
- bool rx_promisc;
- u8 tbi_phy_addr;
u16 tx_pause_time_extd;
u16 maximum_frame;
u32 non_back_to_back_ipg1;
@@ -434,10 +324,6 @@ struct dtsec_cfg {
struct fman_mac {
/* pointer to dTSEC memory mapped registers */
struct dtsec_regs __iomem *regs;
- /* pointer to dTSEC MII memory mapped registers */
- struct dtsec_mii_regs __iomem *mii_regs;
- /* MII management clock */
- u16 mii_mgmt_clk;
/* MAC address of device */
u64 addr;
/* Ethernet physical interface */
@@ -453,169 +339,38 @@ struct fman_mac {
/* pointer to driver's individual address hash table */
struct eth_hash_t *unicast_addr_hash;
u8 mac_id;
- u8 tbi_phy_addr;
u32 exceptions;
bool ptp_tsu_enabled;
- bool en_tsu_err_exeption;
+ bool en_tsu_err_exception;
struct dtsec_cfg *dtsec_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
+ struct phy_device *tbiphy;
};
-static u32 calc_mii_mgmt_clk(struct fman_mac *dtsec)
-{
- u16 fm_clk_freq, dtsec_freq;
- u32 mgmt_clk;
-
- fm_clk_freq = fman_get_clock_freq(dtsec->fm);
- if (fm_clk_freq == 0) {
- pr_err("Can't get clock for MAC!\n");
- return 0;
- }
-
- dtsec_freq = (u16)(fm_clk_freq >> 1);
-
- if (dtsec_freq < 80)
- mgmt_clk = 1;
- else if (dtsec_freq < 120)
- mgmt_clk = 2;
- else if (dtsec_freq < 160)
- mgmt_clk = 3;
- else if (dtsec_freq < 200)
- mgmt_clk = 4;
- else if (dtsec_freq < 280)
- mgmt_clk = 5;
- else if (dtsec_freq < 400)
- mgmt_clk = 6;
- else
- mgmt_clk = 7;
-
- return mgmt_clk;
-}
-
-static int mii_write_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 data)
-{
- struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
- u32 tmp;
- int count;
-
- /* Setup the MII Mgmt clock speed */
- iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
-
- /* Stop the MII management read cycle */
- iowrite32be(0, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- /* Setting up MII Management Address Register */
- tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
- iowrite32be(tmp, &regs->miimadd);
-
- /* Setting up MII Management Control Register with data */
- iowrite32be((u32)data, &regs->miimcon);
- /* Dummy read to make sure MIIMCON is written */
- tmp = ioread32be(&regs->miimcon);
-
- /* Wait until MII management write is complete */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int mii_read_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 *data)
-{
- struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
- u32 tmp;
- int count;
-
- /* Setup the MII Mgmt clock speed */
- iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
-
- /* Setting up the MII Management Address Register */
- tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
- iowrite32be(tmp, &regs->miimadd);
-
- /* Perform an MII management read cycle */
- iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- /* Wait until MII management write is complete */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
-
- if (count == 0)
- return -EBUSY;
-
- /* Read MII management status */
- *data = (u16)ioread32be(&regs->miimstat);
-
- iowrite32be(0, &regs->miimcom);
- /* Dummy read to make sure MIIMCOM is written */
- tmp = ioread32be(&regs->miimcom);
-
- if (*data == 0xffff) {
- pr_warn("Read wrong data(0xffff):phy_addr 0x%x,reg 0x%x",
- addr, reg);
- return -ENXIO;
- }
-
- return 0;
-}
-
static void set_dflts(struct dtsec_cfg *cfg)
{
- cfg->halfdup_on = false;
cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
- cfg->halfdup_excess_defer = true;
- cfg->halfdup_no_backoff = false;
- cfg->halfdup_bp_no_backoff = false;
- cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
- cfg->halfdup_alt_backoff_en = false;
- cfg->rx_drop_bcast = false;
- cfg->rx_short_frm = true;
- cfg->rx_len_check = false;
cfg->tx_pad_crc = true;
- cfg->tx_crc = false;
- cfg->rx_ctrl_acc = false;
cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
/* PHY address 0 is reserved (DPAA RM) */
- cfg->tbipa = DEFAULT_TBIPA;
cfg->rx_prepend = DEFAULT_RX_PREPEND;
cfg->ptp_tsu_en = true;
cfg->ptp_exception_en = true;
cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
- cfg->rx_preamble = false;
- cfg->tx_preamble = false;
- cfg->loopback = false;
- cfg->rx_time_stamp_en = false;
- cfg->tx_time_stamp_en = false;
- cfg->rx_flow = true;
- cfg->tx_flow = true;
- cfg->rx_group_hash_exd = false;
cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
- cfg->rx_promisc = false;
cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
- cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
}
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
phy_interface_t iface, u16 iface_speed, u8 *macaddr,
- u32 exception_mask)
+ u32 exception_mask, u8 tbi_addr)
{
bool is_rgmii, is_sgmii, is_qsgmii;
int i;
@@ -659,14 +414,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(tmp, &regs->ecntrl);
tmp = 0;
- if (cfg->halfdup_on)
- tmp |= DTSEC_TCTRL_THDF;
- if (cfg->tx_time_stamp_en)
- tmp |= DTSEC_TCTRL_TTSE;
-
- iowrite32be(tmp, &regs->tctrl);
-
- tmp = 0;
if (cfg->tx_pause_time)
tmp |= cfg->tx_pause_time;
@@ -676,18 +423,8 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp = 0;
tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
- if (cfg->rx_ctrl_acc)
- tmp |= RCTRL_CFA;
- if (cfg->rx_group_hash_exd)
- tmp |= RCTRL_GHTX;
- if (cfg->rx_time_stamp_en)
- tmp |= RCTRL_RTSE;
- if (cfg->rx_drop_bcast)
- tmp |= RCTRL_BC_REJ;
- if (cfg->rx_short_frm)
- tmp |= RCTRL_RSF;
- if (cfg->rx_promisc)
- tmp |= RCTRL_PROM;
+ /* Accept short frames */
+ tmp |= RCTRL_RSF;
iowrite32be(tmp, &regs->rctrl);
@@ -695,7 +432,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
* Done also in cases where TBI is not selected to avoid conflict with
* the external PHY's Physical address
*/
- iowrite32be(cfg->tbipa, &regs->tbipa);
+ iowrite32be(tbi_addr, &regs->tbipa);
iowrite32be(0, &regs->tmr_ctrl);
@@ -712,12 +449,8 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
}
tmp = 0;
- if (cfg->loopback)
- tmp |= MACCFG1_LOOPBACK;
- if (cfg->rx_flow)
- tmp |= MACCFG1_RX_FLOW;
- if (cfg->tx_flow)
- tmp |= MACCFG1_TX_FLOW;
+ tmp |= MACCFG1_RX_FLOW;
+ tmp |= MACCFG1_TX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
tmp = 0;
@@ -729,18 +462,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
MACCFG2_PREAMBLE_LENGTH_MASK;
- if (cfg->rx_preamble)
- tmp |= MACCFG2_PRE_AM_RX_EN;
- if (cfg->tx_preamble)
- tmp |= MACCFG2_PRE_AM_TX_EN;
- if (cfg->rx_len_check)
- tmp |= MACCFG2_LENGTH_CHECK;
if (cfg->tx_pad_crc)
tmp |= MACCFG2_PAD_CRC_EN;
- if (cfg->tx_crc)
- tmp |= MACCFG2_CRC_EN;
- if (!cfg->halfdup_on)
- tmp |= MACCFG2_FULL_DUPLEX;
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
iowrite32be(tmp, &regs->maccfg2);
tmp = (((cfg->non_back_to_back_ipg1 <<
@@ -755,19 +480,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(tmp, &regs->ipgifg);
tmp = 0;
-
- if (cfg->halfdup_alt_backoff_en) {
- tmp = HAFDUP_ALT_BEB;
- tmp |= (cfg->halfdup_alt_backoff_val <<
- HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT) &
- HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK;
- }
- if (cfg->halfdup_bp_no_backoff)
- tmp |= HAFDUP_BP_NO_BACKOFF;
- if (cfg->halfdup_no_backoff)
- tmp |= HAFDUP_NO_BACKOFF;
- if (cfg->halfdup_excess_defer)
- tmp |= HAFDUP_EXCESS_DEFER;
+ tmp |= HAFDUP_EXCESS_DEFER;
tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
& HAFDUP_RETRANSMISSION_MAX);
tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
@@ -843,35 +556,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
pr_err("Ethernet MAC Must have a valid MAC Address\n");
return -EINVAL;
}
- if (dtsec->max_speed >= SPEED_1000 &&
- dtsec->dtsec_drv_param->halfdup_on) {
- pr_err("Ethernet MAC 1G can't work in half duplex\n");
- return -EINVAL;
- }
-
- /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 Errata workaround */
- if (dtsec->dtsec_drv_param->rx_preamble) {
- pr_err("preamble_rx_en\n");
- return -EINVAL;
- }
-
- if (((dtsec->dtsec_drv_param)->tx_preamble ||
- (dtsec->dtsec_drv_param)->rx_preamble) &&
- ((dtsec->dtsec_drv_param)->preamble_len != 0x7)) {
- pr_err("Preamble length should be 0x7 bytes\n");
- return -EINVAL;
- }
- if ((dtsec->dtsec_drv_param)->halfdup_on &&
- (dtsec->dtsec_drv_param->tx_time_stamp_en ||
- dtsec->dtsec_drv_param->rx_time_stamp_en)) {
- pr_err("1588 timeStamp disabled in half duplex mode\n");
- return -EINVAL;
- }
- if ((dtsec->dtsec_drv_param)->rx_flow &&
- (dtsec->dtsec_drv_param)->rx_ctrl_acc) {
- pr_err("Receive control frame can not be accepted\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -888,12 +572,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
MAX_INTER_PACKET_GAP);
return -EINVAL;
}
- if ((dtsec->dtsec_drv_param)->halfdup_alt_backoff_val >
- MAX_INTER_PALTERNATE_BEB) {
- pr_err("alternateBackoffVal can't be greater than %d\n",
- MAX_INTER_PALTERNATE_BEB);
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
MAX_RETRANSMISSION) {
pr_err("maxRetransmission can't be greater than %d\n",
@@ -909,10 +587,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
* using the MII Management Interface
*/
}
- if (dtsec->dtsec_drv_param->tbipa > MAX_PHYS) {
- pr_err("PHY address (should be 0-%d)\n", MAX_PHYS);
- return -ERANGE;
- }
if (!dtsec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -922,12 +596,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (dtsec->dtsec_drv_param->rx_len_check) {
- pr_warn("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -998,18 +666,6 @@ static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
return false;
}
-static u32 get_mac_addr_hash_code(u64 eth_addr)
-{
- u32 crc;
-
- /* CRC calculation */
- GET_MAC_ADDR_CRC(eth_addr, crc);
-
- crc = bitrev32(crc);
-
- return crc;
-}
-
static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1059,10 +715,10 @@ static void dtsec_isr(void *handle)
* This is a read only register
* b. Read and save the value of TPKT
*/
- tpkt1 = in_be32(&regs->tpkt);
+ tpkt1 = ioread32be(&regs->tpkt);
/* c. Read the register at dTSEC address offset 0x32C */
- tmp_reg1 = in_be32(&regs->reserved02c0[27]);
+ tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
/* d. Compare bits [9:15] to bits [25:31] of the
* register at address offset 0x32C.
@@ -1083,8 +739,8 @@ static void dtsec_isr(void *handle)
/* e. Read and save TPKT again and read the register
* at dTSEC address offset 0x32C again
*/
- tpkt2 = in_be32(&regs->tpkt);
- tmp_reg2 = in_be32(&regs->reserved02c0[27]);
+ tpkt2 = ioread32be(&regs->tpkt);
+ tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
/* f. Compare the value of TPKT saved in step b to
* value read in step e. Also compare bits [9:15] of
@@ -1100,21 +756,22 @@ static void dtsec_isr(void *handle)
/* a.Write a 1 to RCTRL[GRS] */
- out_be32(&regs->rctrl,
- in_be32(&regs->rctrl) | RCTRL_GRS);
+ iowrite32be(ioread32be(&regs->rctrl) |
+ RCTRL_GRS, &regs->rctrl);
/* b.Wait until IEVENT[GRSC]=1, or at least
* 100 us has elapsed.
*/
for (i = 0; i < 100; i++) {
- if (in_be32(&regs->ievent) &
+ if (ioread32be(&regs->ievent) &
DTSEC_IMASK_GRSCEN)
break;
udelay(1);
}
- if (in_be32(&regs->ievent) & DTSEC_IMASK_GRSCEN)
- out_be32(&regs->ievent,
- DTSEC_IMASK_GRSCEN);
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ iowrite32be(DTSEC_IMASK_GRSCEN,
+ &regs->ievent);
else
pr_debug("Rx lockup due to Tx lockup\n");
@@ -1279,15 +936,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
- if (dtsec->fm_rev_info.major == 2)
- if (0 < pause_time && pause_time <= 320) {
+ if (pause_time) {
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
pr_warn("pause-time: %d illegal.Should be > 320\n",
pause_time);
return -EINVAL;
}
- if (pause_time) {
ptv = ioread32be(&regs->ptv);
ptv &= PTV_PTE_MASK;
ptv |= pause_time & PTV_PT_MASK;
@@ -1341,7 +997,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
struct eth_hash_entry *hash_entry;
u64 addr;
s32 bucket;
- u32 crc;
+ u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
if (!is_init_done(dtsec->dtsec_drv_param))
@@ -1357,7 +1013,8 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
pr_err("Could not compute hash bucket\n");
return -EINVAL;
}
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* considering the 9 highest order bits in crc H[8:0]:
*if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
@@ -1407,7 +1064,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
struct eth_hash_entry *hash_entry = NULL;
u64 addr;
s32 bucket;
- u32 crc;
+ u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
if (!is_init_done(dtsec->dtsec_drv_param))
@@ -1423,7 +1080,8 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
pr_err("Could not compute hash bucket\n");
return -EINVAL;
}
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
if (ghtx) {
bucket = (s32)((crc >> 23) & 0x1ff);
@@ -1532,22 +1190,17 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
- int err;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- err = mii_read_reg(dtsec, dtsec->tbi_phy_addr, 0, &tmp_reg16);
- if (err) {
- pr_err("Autonegotiation restart failed\n");
- return err;
- }
+ tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
- tmp_reg16 &= ~(PHY_CR_SPEED0 | PHY_CR_SPEED1);
- tmp_reg16 |=
- (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
+ tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
- mii_write_reg(dtsec, dtsec->tbi_phy_addr, 0, tmp_reg16);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
return 0;
}
@@ -1598,12 +1251,12 @@ int dtsec_set_exception(struct fman_mac *dtsec,
switch (exception) {
case FM_MAC_EX_1G_1588_TS_RX_ERR:
if (enable) {
- dtsec->en_tsu_err_exeption = true;
+ dtsec->en_tsu_err_exception = true;
iowrite32be(ioread32be(&regs->tmr_pemask) |
TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
} else {
- dtsec->en_tsu_err_exeption = false;
+ dtsec->en_tsu_err_exception = false;
iowrite32be(ioread32be(&regs->tmr_pemask) &
~TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
@@ -1644,7 +1297,8 @@ int dtsec_init(struct fman_mac *dtsec)
MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions);
+ dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->tbiphy->mdio.addr);
if (err) {
free_init_resources(dtsec);
pr_err("DTSEC version doesn't support this i/f mode\n");
@@ -1655,30 +1309,26 @@ int dtsec_init(struct fman_mac *dtsec)
u16 tmp_reg16;
/* Configure the TBI PHY Control Register */
- tmp_reg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
- tmp_reg16);
+ tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
- tmp_reg16 = PHY_TBICON_CLK_SEL;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
- tmp_reg16);
+ tmp_reg16 = TBICON_CLK_SELECT;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
- tmp_reg16 =
- (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX |
- PHY_CR_SPEED1);
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+ tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
if (dtsec->basex_if)
- tmp_reg16 = PHY_TBIANA_1000X;
+ tmp_reg16 = TBIANA_1000X;
else
- tmp_reg16 = PHY_TBIANA_SGMII;
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 4, tmp_reg16);
+ tmp_reg16 = TBIANA_SGMII;
+ phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
- tmp_reg16 =
- (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX |
- PHY_CR_SPEED1);
+ tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
- mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
}
/* Max Frame Length */
@@ -1752,34 +1402,53 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = (struct dtsec_regs __iomem *)(base_addr);
- dtsec->mii_regs = (struct dtsec_mii_regs __iomem *)
- (base_addr + DTSEC_TO_MII_OFFSET);
+ dtsec->regs = base_addr;
dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
dtsec->max_speed = params->max_speed;
dtsec->phy_if = params->phy_if;
dtsec->mac_id = params->mac_id;
- dtsec->exceptions = DTSEC_DEFAULT_EXCEPTIONS;
+ dtsec->exceptions = (DTSEC_IMASK_BREN |
+ DTSEC_IMASK_RXCEN |
+ DTSEC_IMASK_BTEN |
+ DTSEC_IMASK_TXCEN |
+ DTSEC_IMASK_TXEEN |
+ DTSEC_IMASK_ABRTEN |
+ DTSEC_IMASK_LCEN |
+ DTSEC_IMASK_CRLEN |
+ DTSEC_IMASK_XFUNEN |
+ DTSEC_IMASK_IFERREN |
+ DTSEC_IMASK_MAGEN |
+ DTSEC_IMASK_TDPEEN |
+ DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
dtsec->dev_id = params->dev_id;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
- dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
- dtsec->tbi_phy_addr = dtsec->dtsec_drv_param->tbi_phy_addr;
+ dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- dtsec->mii_mgmt_clk = calc_mii_mgmt_clk(dtsec);
- if (dtsec->mii_mgmt_clk == 0) {
- pr_err("Can't calculate MII management clock\n");
- goto err_dtsec;
+
+ if (!params->internal_phy_node) {
+ pr_err("TBI PHY node is not available\n");
+ goto err_dtsec_drv_param;
+ }
+
+ dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ if (!dtsec->tbiphy) {
+ pr_err("of_phy_find_device (TBI PHY) failed\n");
+ goto err_dtsec_drv_param;
}
+ put_device(&dtsec->tbiphy->mdio.dev);
+
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
return dtsec;
+err_dtsec_drv_param:
+ kfree(dtsec_drv_param);
err_dtsec:
kfree(dtsec);
return NULL;
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
index 7a5e752e..dd6d0526 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@ struct fman_mac_params {
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- /* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
- * mdio-irq, or for polling
- */
void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
@@ -204,6 +200,8 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
+ /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
+ struct device_node *internal_phy_node;
};
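A sketch of how the new internal_phy_node field might be filled in by the MAC glue code (assumed; mac_node and the "tbi-handle"/"pcsphy-handle" phandle names follow the upstream device tree bindings and should be checked against the actual platform DTS):

	struct fman_mac_params params = {};

	/* dTSEC uses a TBI PHY node, mEMAC a PCS PHY node */
	params.internal_phy_node = of_parse_phandle(mac_node,
						    "tbi-handle", 0);
	if (!params.internal_phy_node)
		params.internal_phy_node = of_parse_phandle(mac_node,
							    "pcsphy-handle", 0);
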
struct eth_hash_t {
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
index 5730194a..bc4dfb18 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -42,59 +42,55 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+
+/* PCS registers */
+#define MDIO_SGMII_CR 0x00
+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
+#define MDIO_SGMII_LINK_TMR_L 0x12
+#define MDIO_SGMII_LINK_TMR_H 0x13
+#define MDIO_SGMII_IF_MODE 0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_AN_EN 0x1000
+#define SGMII_CR_RESTART_AN 0x0200
+#define SGMII_CR_FD 0x0100
+#define SGMII_CR_SPEED_SEL1_1G 0x0040
+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
+ SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII Device Ability for SGMII defines */
+#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
+#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
+
+/* Link timer define */
+#define LINK_TMR_L 0xa120
+#define LINK_TMR_H 0x0007
+#define LINK_TMR_L_BASEX 0xaf08
+#define LINK_TMR_H_BASEX 0x002f
+
+/* SGMII IF Mode defines */
+#define IF_MODE_USE_SGMII_AN 0x0002
+#define IF_MODE_SGMII_EN 0x0001
+#define IF_MODE_SGMII_SPEED_100M 0x0004
+#define IF_MODE_SGMII_SPEED_1G 0x0008
+#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
-/* MII Management Registers */
-#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
-#define MDIO_CFG_HOLD_MASK 0x0000001c
-#define MDIO_CFG_ENC45 0x00000040
-#define MDIO_CFG_BSY 0x00000001
-
-#define MDIO_CTL_PHY_ADDR_SHIFT 5
-
-#define MDIO_DATA_BSY 0x80000000
-
-/* Internal PHY access */
-#define PHY_MDIO_ADDR 0
-
-/* Internal PHY Registers - SGMII */
-#define PHY_SGMII_CR_RESET_AN 0x0200
-#define PHY_SGMII_CR_AN_ENABLE 0x1000
-#define PHY_SGMII_CR_DEF_VAL 0x1140
-#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
-#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
-#define PHY_SGMII_IF_MODE_DUPLEX_FULL 0x0000
-#define PHY_SGMII_IF_MODE_DUPLEX_HALF 0x0010
-#define PHY_SGMII_IF_MODE_SPEED_GB 0x0008
-#define PHY_SGMII_IF_MODE_SPEED_100M 0x0004
-#define PHY_SGMII_IF_MODE_SPEED_10M 0x0000
-#define PHY_SGMII_IF_MODE_AN 0x0002
-#define PHY_SGMII_IF_MODE_SGMII 0x0001
-#define PHY_SGMII_IF_MODE_1000X 0x0000
-
-/* Offset from the MEM map to the MDIO mem map */
-#define MEMAC_TO_MII_OFFSET 0x030
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7
/* Control and Configuration Register (COMMAND_CONFIG) */
-#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
-#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
-#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
-#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
-#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
-#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
-#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
-#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
@@ -171,10 +167,6 @@ do { \
#define DEFAULT_FRAME_LENGTH 0x600
#define DEFAULT_TX_IPG_LENGTH 12
-#define MEMAC_DEFAULT_EXCEPTIONS \
- ((u32)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | \
- MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
-
#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
@@ -187,14 +179,6 @@ struct mac_addr {
u32 mac_addr_u;
};
-/* MII Configuration Control Memory Map Registers */
-struct memac_mii_regs {
- u32 mdio_cfg; /* 0x030 */
- u32 mdio_ctrl; /* 0x034 */
- u32 mdio_data; /* 0x038 */
- u32 mdio_addr; /* 0x03c */
-};
-
/* memory map */
struct memac_regs {
u32 res0000[2]; /* General Control and Status */
@@ -340,25 +324,8 @@ struct memac_regs {
struct memac_cfg {
bool reset_on_init;
- bool rx_error_discard;
bool pause_ignore;
- bool pause_forward_enable;
- bool no_length_check_enable;
- bool cmd_frame_enable;
- bool send_idle_enable;
- bool wan_mode_enable;
bool promiscuous_mode_enable;
- bool tx_addr_ins_enable;
- bool loopback_enable;
- bool lgth_check_nostdr;
- bool time_stamp_enable;
- bool pad_enable;
- bool phy_tx_ena_on;
- bool rx_sfd_any;
- bool rx_pbl_fwd;
- bool tx_pbl_fwd;
- bool debug_mode;
- bool wake_on_lan;
struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
@@ -368,8 +335,6 @@ struct memac_cfg {
struct fman_mac {
/* Pointer to MAC memory mapped registers */
struct memac_regs __iomem *regs;
- /* Pointer to MII memory mapped registers */
- struct memac_mii_regs __iomem *mii_regs;
/* MAC address of device */
u64 addr;
/* Ethernet physical interface */
@@ -382,133 +347,15 @@ struct fman_mac {
struct eth_hash_t *multicast_addr_hash;
/* Pointer to driver's individual address hash table */
struct eth_hash_t *unicast_addr_hash;
- bool debug_mode;
u8 mac_id;
u32 exceptions;
struct memac_cfg *memac_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
+ struct phy_device *pcsphy;
};
-static int write_phy_reg_10g(struct memac_mii_regs __iomem *mii_regs,
- u8 phy_addr, u8 reg, u16 data)
-{
- u32 tmp_reg;
- int count;
-
- tmp_reg = ioread32be(&mii_regs->mdio_cfg);
- /* Leave only MDIO_CLK_DIV bits set on */
- tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
- /* Set maximum MDIO_HOLD value to allow phy to see
- * change of data signal
- */
- tmp_reg |= MDIO_CFG_HOLD_MASK;
- /* Add 10G interface mode */
- tmp_reg |= MDIO_CFG_ENC45;
- iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Specify phy and register to be accessed */
- iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
- iowrite32be(reg, &mii_regs->mdio_addr);
-
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Write data */
- iowrite32be(data, &mii_regs->mdio_data);
-
- /* Wait for write transaction end */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
- --count);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int write_phy_reg_1g(struct memac_mii_regs __iomem *mii_regs,
- u8 phy_addr, u8 reg, u16 data)
-{
- u32 tmp_reg;
- int count;
-
- /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
- tmp_reg = ioread32be(&mii_regs->mdio_cfg);
- tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
- iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- /* Write transaction */
- tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
- tmp_reg |= reg;
- iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
-
- /* Wait for command completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
-
- if (count == 0)
- return -EBUSY;
-
- iowrite32be(data, &mii_regs->mdio_data);
-
- /* Wait for write transaction to end */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
- --count);
-
- if (count == 0)
- return -EBUSY;
-
- return 0;
-}
-
-static int mii_write_phy_reg(struct fman_mac *memac, u8 phy_addr, u8 reg,
- u16 data)
-{
- int err = 0;
- /* Figure out interface type - 10G vs 1G.
- * In 10G interface both phy_addr and devAddr present.
- */
- if (memac->max_speed == SPEED_10000)
- err = write_phy_reg_10g(memac->mii_regs, phy_addr, reg, data);
- else
- err = write_phy_reg_1g(memac->mii_regs, phy_addr, reg, data);
-
- return err;
-}
-
static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
u8 paddr_num)
{
@@ -571,30 +418,15 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
/* Config */
tmp = 0;
- if (cfg->wan_mode_enable)
- tmp |= CMD_CFG_WAN_MODE;
if (cfg->promiscuous_mode_enable)
tmp |= CMD_CFG_PROMIS_EN;
- if (cfg->pause_forward_enable)
- tmp |= CMD_CFG_PAUSE_FWD;
if (cfg->pause_ignore)
tmp |= CMD_CFG_PAUSE_IGNORE;
- if (cfg->tx_addr_ins_enable)
- tmp |= CMD_CFG_TX_ADDR_INS;
- if (cfg->loopback_enable)
- tmp |= CMD_CFG_LOOPBACK_EN;
- if (cfg->cmd_frame_enable)
- tmp |= CMD_CFG_CNT_FRM_EN;
- if (cfg->send_idle_enable)
- tmp |= CMD_CFG_SEND_IDLE;
- if (cfg->no_length_check_enable)
- tmp |= CMD_CFG_NO_LEN_CHK;
- if (cfg->rx_sfd_any)
- tmp |= CMD_CFG_SFD_ANY;
- if (cfg->pad_enable)
- tmp |= CMD_CFG_TX_PAD_EN;
- if (cfg->wake_on_lan)
- tmp |= CMD_CFG_MG;
+
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ /* Enable padding of frames in transmit direction */
+ tmp |= CMD_CFG_TX_PAD_EN;
tmp |= CMD_CFG_CRC_FWD;
@@ -615,7 +447,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
break;
default:
tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII && !cfg->loopback_enable)
+ if (phy_if == PHY_INTERFACE_MODE_RGMII)
tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
}
iowrite32be(tmp, &regs->if_mode);
@@ -646,28 +478,11 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
static void set_dflts(struct memac_cfg *cfg)
{
cfg->reset_on_init = false;
- cfg->wan_mode_enable = false;
cfg->promiscuous_mode_enable = false;
- cfg->pause_forward_enable = false;
cfg->pause_ignore = false;
- cfg->tx_addr_ins_enable = false;
- cfg->loopback_enable = false;
- cfg->cmd_frame_enable = false;
- cfg->rx_error_discard = false;
- cfg->send_idle_enable = false;
- cfg->no_length_check_enable = true;
- cfg->lgth_check_nostdr = false;
- cfg->time_stamp_enable = false;
cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
- cfg->pad_enable = true;
- cfg->phy_tx_ena_on = false;
- cfg->rx_sfd_any = false;
- cfg->rx_pbl_fwd = false;
- cfg->tx_pbl_fwd = false;
- cfg->debug_mode = false;
- cfg->wake_on_lan = false;
}
static u32 get_mac_addr_hash_code(u64 eth_addr)
@@ -692,49 +507,42 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
return xor_val;
}
-static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
+static void setup_sgmii_internal_phy(struct fman_mac *memac,
struct fixed_phy_status *fixed_link)
{
- u16 tmp_reg16, speed;
+ u16 tmp_reg16;
- /* In case the higher MACs are used (i.e. the MACs that should
- * support 10G), speed=10000 is provided for SGMII ports.
- * Temporary modify enet mode to 1G one, so MII functions can
- * work correctly.
- */
- speed = memac->max_speed;
- memac->max_speed = SPEED_1000;
+ if (WARN_ON(!memac->pcsphy))
+ return;
/* SGMII mode */
- tmp_reg16 = PHY_SGMII_IF_MODE_SGMII;
+ tmp_reg16 = IF_MODE_SGMII_EN;
if (!fixed_link)
/* AN enable */
- tmp_reg16 |= PHY_SGMII_IF_MODE_AN;
+ tmp_reg16 |= IF_MODE_USE_SGMII_AN;
else {
#ifndef __rtems__
switch (fixed_link->speed) {
case 10:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_10M;
+ /* For 10M: IF_MODE[SPEED_10M] = 0 */
break;
case 100:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_100M;
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
break;
case 1000: /* fallthrough */
default:
- tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_GB;
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
break;
}
- if (fixed_link->duplex)
- tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_FULL;
- else
- tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_HALF;
+ if (!fixed_link->duplex)
+ tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
#endif /* __rtems__ */
}
- mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+ phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
/* Device ability according to SGMII specification */
- tmp_reg16 = PHY_SGMII_DEV_ABILITY_SGMII;
- mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
/* Adjust link timer for SGMII -
* According to Cisco SGMII specification the timer should be 1.6 ms.
@@ -748,40 +556,25 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
 * Since the 1G SGMII link_timer value would be too short for 2.5G SGMII,
 * we always set up the 2.5G SGMII value here.
*/
- mii_write_phy_reg(memac, phy_addr, 0x13, 0x0007);
- mii_write_phy_reg(memac, phy_addr, 0x12, 0xa120);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
if (!fixed_link)
/* Restart AN */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
else
/* AN disabled */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL & ~PHY_SGMII_CR_AN_ENABLE;
- mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
-
- /* Restore original speed */
- memac->max_speed = speed;
+ tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
}
-static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
{
- u16 tmp_reg16, speed;
-
- /* In case the higher MACs are used (i.e. the MACs that
- * should support 10G), speed=10000 is provided for SGMII ports.
- * Temporary modify enet mode to 1G one, so MII functions can
- * work correctly.
- */
- speed = memac->max_speed;
- memac->max_speed = SPEED_1000;
-
- /* 1000BaseX mode */
- tmp_reg16 = PHY_SGMII_IF_MODE_1000X;
- mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+ u16 tmp_reg16;
/* AN Device capability */
- tmp_reg16 = PHY_SGMII_DEV_ABILITY_1000X;
- mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
/* Adjust link timer for SGMII -
* For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
@@ -795,15 +588,12 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
 * Since the 1G SGMII link_timer value would be too short for 2.5G SGMII,
 * we always set up the 2.5G SGMII value here.
*/
- mii_write_phy_reg(memac, phy_addr, 0x13, 0x002f);
- mii_write_phy_reg(memac, phy_addr, 0x12, 0xaf08);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
/* Restart AN */
- tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
- mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
-
- /* Restore original speed */
- memac->max_speed = speed;
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
}
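
The link-timer writes above are easier to follow with the arithmetic spelled out. Below is a small, stand-alone sketch (not driver code) that reproduces the register pairs; the 3.2 ns tick (312.5 MHz Serdes clock) and the 16-bit H/L split are assumptions inferred from the 0x0007/0xa120 and 0x002f/0xaf08 values and the 1.6 ms / 10 ms figures quoted in the comments.

#include <stdint.h>
#include <stdio.h>

/* Split a tick count into the assumed H/L register pair
 * (H = upper bits, L = lower 16 bits).
 */
static void link_timer_regs(uint64_t interval_ns, uint64_t tick_tenths_ns,
			    uint16_t *tmr_h, uint16_t *tmr_l)
{
	uint64_t ticks = (interval_ns * 10) / tick_tenths_ns;

	*tmr_h = (uint16_t)(ticks >> 16);
	*tmr_l = (uint16_t)(ticks & 0xffff);
}

int main(void)
{
	uint16_t h, l;

	/* SGMII: 1.6 ms at a 3.2 ns tick -> 500000 ticks = 0x0007a120 */
	link_timer_regs(1600000, 32, &h, &l);
	printf("SGMII:     H=0x%04x L=0x%04x\n", (unsigned int)h, (unsigned int)l);

	/* 1000BaseX: 10 ms at a 3.2 ns tick -> 3125000 ticks = 0x002faf08 */
	link_timer_regs(10000000, 32, &h, &l);
	printf("1000BaseX: H=0x%04x L=0x%04x\n", (unsigned int)h, (unsigned int)l);

	return 0;
}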
static int check_init_parameters(struct fman_mac *memac)
@@ -821,12 +611,6 @@ static int check_init_parameters(struct fman_mac *memac)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (!memac->memac_drv_param->no_length_check_enable) {
- pr_err("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -1216,7 +1000,7 @@ int memac_set_exception(struct fman_mac *memac,
int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
- u8 i, phy_addr;
+ u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
struct fixed_phy_status *fixed_link;
@@ -1262,33 +1046,35 @@ int memac_init(struct fman_mac *memac)
/* MAC strips CRC from received frames - this workaround
* should decrease the likelihood of bug appearance
*/
- reg32 = in_be32(&memac->regs->command_config);
+ reg32 = ioread32be(&memac->regs->command_config);
reg32 &= ~CMD_CFG_CRC_FWD;
- out_be32(&memac->regs->command_config, reg32);
+ iowrite32be(reg32, &memac->regs->command_config);
}
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
/* Configure internal SGMII PHY */
if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac, PHY_MDIO_ADDR);
+ setup_sgmii_internal_phy_base_x(memac);
else
- setup_sgmii_internal_phy(memac, PHY_MDIO_ADDR,
- fixed_link);
+ setup_sgmii_internal_phy(memac, fixed_link);
} else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
/* Configure 4 internal SGMII PHYs */
for (i = 0; i < 4; i++) {
+ u8 qsmgii_phy_addr, phy_addr;
/* QSGMII PHY address occupies 3 upper bits of 5-bit
* phy_address; the lower 2 bits are used to extend
* register address space and access each one of 4
* ports inside QSGMII.
*/
- phy_addr = (u8)((PHY_MDIO_ADDR << 2) | i);
+ phy_addr = memac->pcsphy->mdio.addr;
+ qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
+ memac->pcsphy->mdio.addr = qsmgii_phy_addr;
if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac,
- phy_addr);
+ setup_sgmii_internal_phy_base_x(memac);
else
- setup_sgmii_internal_phy(memac, phy_addr,
- fixed_link);
+ setup_sgmii_internal_phy(memac, fixed_link);
+
+ memac->pcsphy->mdio.addr = phy_addr;
}
}
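
The loop above reuses the single PCS PHY device for all four QSGMII ports by temporarily rewriting its MDIO address. A tiny stand-alone sketch of the address layout the comment describes (the helper name is invented for illustration; only the 3-bit/2-bit split comes from the comment):

#include <stdint.h>
#include <stdio.h>

/* Upper 3 bits: QSGMII PCS PHY address; lower 2 bits: port inside QSGMII. */
static uint8_t qsgmii_port_addr(uint8_t pcs_addr, unsigned int port)
{
	return (uint8_t)(((pcs_addr & 0x7) << 2) | (port & 0x3));
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("PCS addr 0x%x, port %u -> MDIO address 0x%02x\n",
		       1, i, (unsigned int)qsgmii_port_addr(1, i));
	return 0;
}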
@@ -1330,6 +1116,9 @@ int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
+ if (memac->pcsphy)
+ put_device(&memac->pcsphy->mdio.dev);
+
kfree(memac->memac_drv_param);
kfree(memac);
@@ -1362,13 +1151,12 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
memac->addr = ENET_ADDR_TO_UINT64(params->addr);
- memac->regs = (struct memac_regs __iomem *)(base_addr);
- memac->mii_regs = (struct memac_mii_regs __iomem *)
- (base_addr + MEMAC_TO_MII_OFFSET);
+ memac->regs = base_addr;
memac->max_speed = params->max_speed;
memac->phy_if = params->phy_if;
memac->mac_id = params->mac_id;
- memac->exceptions = MEMAC_DEFAULT_EXCEPTIONS;
+ memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
+ MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
memac->dev_id = params->dev_id;
@@ -1378,5 +1166,21 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ if (!params->internal_phy_node) {
+ pr_err("PCS PHY node is not available\n");
+ memac_free(memac);
+ return NULL;
+ }
+
+ memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ if (!memac->pcsphy) {
+ pr_err("of_phy_find_device (PCS PHY) failed\n");
+ memac_free(memac);
+ return NULL;
+ }
+ }
+
return memac;
}
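
memac_config() now resolves params->internal_phy_node with of_phy_find_device(), and memac_free() balances the reference that lookup takes with put_device(). The sketch below isolates that acquire/release pairing on the Linux side; the helper names are invented, and the "pcsphy-handle" property matches what mac.c parses further down in this patch.

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Illustrative only: look up the PCS PHY behind a phandle and later drop
 * the device reference that of_phy_find_device() took.
 */
static struct phy_device *pcs_phy_get(struct device_node *mac_node)
{
	struct device_node *pcs_node;
	struct phy_device *pcsphy;

	pcs_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
	if (!pcs_node)
		return NULL;

	pcsphy = of_phy_find_device(pcs_node);	/* gets a device reference */
	of_node_put(pcs_node);

	return pcsphy;
}

static void pcs_phy_put(struct phy_device *pcsphy)
{
	if (pcsphy)
		put_device(&pcsphy->mdio.dev);	/* mirrors memac_free() */
}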
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
index c715795b..453bf849 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -39,52 +39,14 @@
/* Structure for FM MURAM information */
struct muram_info;
-/**
- * fman_muram_init
- * @base: Pointer to base of memory mapped FM-MURAM.
- * @size: Size of the FM-MURAM partition.
- *
- * Creates partition in the MURAM.
- * The routine returns a pointer to the MURAM partition.
- * This pointer must be passed as to all other FM-MURAM function calls.
- * No actual initialization or configuration of FM_MURAM hardware is done by
- * this routine.
- *
- * Return: pointer to FM-MURAM object, or NULL for Failure.
- */
struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
-/**
- * fman_muram_offset_to_vbase
- * @muram: FM-MURAM module pointer.
- * @offset: the offset of the memory block
- *
- * Gives the address of the memory region from specific offset
- *
- * Return: The address of the memory block
- */
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-/**
- * fman_muram_alloc
- * @muram: FM-MURAM module pointer.
- * @size: Size of the memory to be allocated.
- *
- * Allocate some memory from FM-MURAM partition.
- *
- * Return: address of the allocated memory; NULL otherwise.
- */
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-/**
- * fman_muram_free_mem
- * muram: FM-MURAM module pointer.
- * offset: offset of the memory region to be freed.
- * size: size of the memory to be freed.
- *
- * Free an allocated memory from FM-MURAM partition.
- */
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size);
#endif /* __FM_MURAM_EXT */
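
With the prototype change above, fman_muram_alloc() hands back a MURAM offset as an unsigned long and fman_muram_free_mem() takes that same offset. A hedged usage sketch against these prototypes; the IS_ERR_VALUE() failure check and the 256-byte size are assumptions for illustration, not taken from this header.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

#include "fman_muram.h"

/* Illustrative only: allocate a block in MURAM, translate its offset to a
 * CPU-visible address, then free it with the same offset.
 */
static int muram_example(phys_addr_t muram_base, size_t muram_size)
{
	struct muram_info *muram;
	unsigned long offset;
	void *vaddr;

	muram = fman_muram_init(muram_base, muram_size);
	if (!muram)
		return -ENOMEM;

	offset = fman_muram_alloc(muram, 256);
	if (IS_ERR_VALUE(offset))	/* assumed error convention */
		return -ENOMEM;

	vaddr = (void *)fman_muram_offset_to_vbase(muram, offset);
	(void)vaddr;			/* ... use the block ... */

	fman_muram_free_mem(muram, offset, 256);
	return 0;
}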
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.c b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
index e42ac1c1..7adba4f3 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -40,13 +40,14 @@
#include "fman.h"
#include "fman_sp.h"
-#include <asm/mpc85xx.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/libfdt_env.h>
/* Queue ID */
#define DFLT_FQ_ID 0x00FFFFFF
@@ -107,14 +108,10 @@
#define BMI_EBD_EN 0x80000000
#define BMI_PORT_CFG_EN 0x80000000
-#define BMI_PORT_CFG_FDOVR 0x02000000
#define BMI_PORT_STATUS_BSY 0x80000000
#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
-#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
-#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
-#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
@@ -165,16 +162,12 @@
#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
-#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE
-
#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
-#define BMI_PORT_RFNE_FRWD_RPD 0x40000000
-
#define RX_ERRS_TO_ENQ \
(FM_PORT_FRM_ERR_DMA | \
FM_PORT_FRM_ERR_PHYSICAL | \
@@ -190,12 +183,10 @@
/* NIA defines */
#define NIA_ORDER_RESTOR 0x00800000
-#define NIA_ENG_FM_CTL 0x00000000
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
-#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -401,8 +392,6 @@ struct fman_port_cfg {
u8 cheksum_last_bytes_ignore;
u8 rx_cut_end_bytes;
struct fman_buf_pool_depletion buf_pool_depletion;
- bool discard_override;
- bool en_buf_pool_depletion;
struct fman_ext_pools ext_buf_pools;
u32 tx_fifo_min_level;
u32 tx_fifo_low_comf_level;
@@ -413,32 +402,17 @@ struct fman_port_cfg {
struct fman_sp_int_context_data_copy int_context;
u32 discard_mask;
u32 err_mask;
- bool forward_reuse_int_context;
struct fman_buffer_prefix_content buffer_prefix_content;
bool dont_release_buf;
- bool set_num_of_tasks;
- bool set_num_of_open_dmas;
- bool set_size_of_fifo;
- bool bcb_workaround;
u8 rx_fd_bits;
u32 tx_fifo_deq_pipeline_depth;
- bool errata_A006675;
bool errata_A006320;
bool excessive_threshold_register;
- bool fmbm_rebm_has_sgd;
bool fmbm_tfne_has_features;
- bool qmi_deq_options_support;
enum fman_port_dma_swap dma_swap_data;
- bool dma_ic_stash_on;
- bool dma_header_stash_on;
- bool dma_sg_stash_on;
- bool dma_write_optimize;
enum fman_port_color color;
- bool sync_req;
-
- bool no_scatter_gather;
};
struct fman_port_rx_pools_params {
@@ -458,6 +432,7 @@ struct fman_port_dts_params {
struct fman_port {
void *fm;
+ struct device *dev;
struct fman_rev_info rev_info;
u8 port_id;
enum fman_port_type port_type;
@@ -493,22 +468,10 @@ static int init_bmi_rx(struct fman_port *port)
struct fman_port_cfg *cfg = port->cfg;
u32 tmp;
- /* Rx Configuration register */
- tmp = 0;
- if (cfg->discard_override)
- tmp |= BMI_PORT_CFG_FDOVR;
- iowrite32be(tmp, &regs->fmbm_rcfg);
-
/* DMA attributes */
tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
- if (cfg->dma_ic_stash_on)
- tmp |= BMI_DMA_ATTR_IC_STASH_ON;
- if (cfg->dma_header_stash_on)
- tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
- if (cfg->dma_sg_stash_on)
- tmp |= BMI_DMA_ATTR_SG_STASH_ON;
- if (cfg->dma_write_optimize)
- tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+ /* Enable write optimization */
+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
iowrite32be(tmp, &regs->fmbm_rda);
/* Rx FIFO parameters */
@@ -548,27 +511,21 @@ static int init_bmi_rx(struct fman_port *port)
tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
BMI_EXT_BUF_MARG_START_SHIFT;
tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
- if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
- tmp |= BMI_SG_DISABLE;
iowrite32be(tmp, &regs->fmbm_rebm);
/* Frame attributes */
tmp = BMI_CMD_RX_MR_DEF;
tmp |= BMI_CMD_ATTR_ORDER;
tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
- if (cfg->sync_req)
- tmp |= BMI_CMD_ATTR_SYNC;
+ /* Synchronization request */
+ tmp |= BMI_CMD_ATTR_SYNC;
iowrite32be(tmp, &regs->fmbm_rfca);
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
- if (cfg->errata_A006675)
- tmp |= NIA_ENG_FM_CTL |
- NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
- else
- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
iowrite32be(tmp, &regs->fmbm_rfne);
/* Enqueue NIA */
@@ -597,12 +554,6 @@ static int init_bmi_tx(struct fman_port *port)
/* DMA attributes */
tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
- if (cfg->dma_ic_stash_on)
- tmp |= BMI_DMA_ATTR_IC_STASH_ON;
- if (cfg->dma_header_stash_on)
- tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
- if (cfg->dma_sg_stash_on)
- tmp |= BMI_DMA_ATTR_SG_STASH_ON;
iowrite32be(tmp, &regs->fmbm_tda);
/* Tx FIFO parameters */
@@ -698,20 +649,19 @@ static int init_qmi(struct fman_port *port)
return -EINVAL;
}
- if (cfg->qmi_deq_options_support) {
- switch (cfg->deq_prefetch_option) {
- case FMAN_PORT_DEQ_NO_PREFETCH:
- break;
- case FMAN_PORT_DEQ_PART_PREFETCH:
- tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
- break;
- case FMAN_PORT_DEQ_FULL_PREFETCH:
- tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
- break;
- default:
- return -EINVAL;
- }
+ switch (cfg->deq_prefetch_option) {
+ case FMAN_PORT_DEQ_NO_PREFETCH:
+ break;
+ case FMAN_PORT_DEQ_PART_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+ break;
+ case FMAN_PORT_DEQ_FULL_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+ break;
+ default:
+ return -EINVAL;
}
+
tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
tmp |= cfg->deq_byte_cnt;
iowrite32be(tmp, &regs->fmqm_pndc);
@@ -883,11 +833,11 @@ static int verify_size_of_fifo(struct fman_port *port)
/* Verify the size */
if (port->fifo_bufs.num < min_fifo_size_required)
- pr_debug("FIFO size should be enlarged to %d bytes\n",
- min_fifo_size_required);
+ dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
+ __func__, min_fifo_size_required);
else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
- pr_debug("For b2b processing,FIFO may be enlarged to %d bytes\n",
- opt_fifo_size_for_b2b);
+ dev_dbg(port->dev, "%s: For b2b processing, FIFO may be enlarged to %d bytes\n",
+ __func__, opt_fifo_size_for_b2b);
return 0;
}
@@ -961,7 +911,7 @@ static int set_ext_buffer_pools(struct fman_port *port)
err = set_bpools(port, &bpools);
if (err != 0) {
- pr_err("FMan port: set_bpools\n");
+ dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
return -EINVAL;
}
@@ -976,8 +926,6 @@ static int init_low_level_driver(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
- if (cfg->forward_reuse_int_context)
- cfg->rx_fd_bits = (u8)(BMI_PORT_RFNE_FRWD_RPD >> 24);
break;
default:
break;
@@ -990,7 +938,8 @@ static int init_low_level_driver(struct fman_port *port)
port->cfg->int_buf_start_margin = port->internal_buf_offset;
if (init(port) != 0) {
- pr_err("fman_port_init\n");
+ dev_err(port->dev, "%s: fman port initialization failed\n",
+ __func__);
return -ENODEV;
}
@@ -1004,9 +953,9 @@ static int init_low_level_driver(struct fman_port *port)
* Otherwise, if fmbm_tcfqid is 0 the FM will release
* buffers to BM regardless of fmbm_tfene
*/
- out_be32(&port->bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
- out_be32(&port->bmi_regs->tx.fmbm_tfene,
- NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
+ iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &port->bmi_regs->tx.fmbm_tfene);
}
}
@@ -1038,7 +987,7 @@ static int fill_soc_specific_params(struct fman_port *port)
break;
default:
- pr_err("Unsupported FMan version\n");
+ dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
return -EINVAL;
}
@@ -1218,14 +1167,12 @@ static void set_dflt_cfg(struct fman_port *port,
struct fman_port_cfg *cfg = port->cfg;
cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
- cfg->dma_write_optimize = true;
cfg->color = FMAN_PORT_COLOR_GREEN;
cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
cfg->tx_fifo_low_comf_level = (5 * 1024);
cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
- cfg->sync_req = true;
cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
cfg->tx_fifo_deq_pipeline_depth =
BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
@@ -1242,14 +1189,10 @@ static void set_dflt_cfg(struct fman_port *port,
cfg->errata_A006320 = true;
/* Excessive Threshold register - exists for pre-FMv3 chips only */
- if (port->rev_info.major < 6) {
+ if (port->rev_info.major < 6)
cfg->excessive_threshold_register = true;
- } else {
- cfg->fmbm_rebm_has_sgd = true;
+ else
cfg->fmbm_tfne_has_features = true;
- }
-
- cfg->qmi_deq_options_support = true;
cfg->buffer_prefix_content.data_align =
DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
@@ -1267,15 +1210,6 @@ static void set_rx_dflt_cfg(struct fman_port *port,
port_params->specific_params.rx_params.err_fqid;
port->cfg->dflt_fqid =
port_params->specific_params.rx_params.dflt_fqid;
-
- /* Set BCB workaround on Rx ports, only for B4860 rev1 */
- if (port->rev_info.major >= 6) {
- unsigned int svr;
-
- svr = mfspr(SPRN_SVR);
- if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) == 1))
- port->cfg->bcb_workaround = true;
- }
}
static void set_tx_dflt_cfg(struct fman_port *port,
@@ -1295,6 +1229,20 @@ static void set_tx_dflt_cfg(struct fman_port *port,
port->cfg->deq_high_priority = true;
}
+/**
+ * fman_port_config
+ * @port: Pointer to the port structure
+ * @params: Pointer to data structure of parameters
+ *
+ * Creates a descriptor for the FM PORT module.
+ * The port descriptor must be passed as the first parameter to all other
+ * FM PORT function calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_config(struct fman_port *port, struct fman_port_params *params)
{
void __iomem *base_addr = port->dts_params.base_addr;
@@ -1330,10 +1278,8 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Continue with other parameters */
/* set memory map pointers */
- port->bmi_regs = (union fman_port_bmi_regs __iomem *)
- (base_addr + BMI_PORT_REGS_OFFSET);
- port->qmi_regs = (struct fman_port_qmi_regs __iomem *)
- (base_addr + QMI_PORT_REGS_OFFSET);
+ port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
+ port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1377,7 +1323,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
u32 reg;
reg = 0x00001013;
- out_be32(&port->bmi_regs->tx.fmbm_tfp, reg);
+ iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
}
}
@@ -1391,6 +1337,14 @@ err_params:
}
EXPORT_SYMBOL(fman_port_config);
+/**
+ * fman_port_init
+ * @port: A pointer to an FM Port module.
+ *
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_init(struct fman_port *port)
{
struct fman_port_cfg *cfg;
@@ -1408,14 +1362,6 @@ int fman_port_init(struct fman_port *port)
if (err)
return err;
- /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 Errata workaround */
- if (port->rev_info.major >= 6 && (port->cfg->bcb_workaround) &&
- ((port->port_type == FMAN_PORT_TYPE_RX) &&
- (port->port_speed == 1000))) {
- port->cfg->discard_mask |= FM_PORT_FRM_ERR_PHYSICAL;
- port->fifo_bufs.num += 4 * 1024;
- }
-
cfg = port->cfg;
if (port->port_type == FMAN_PORT_TYPE_RX) {
@@ -1430,10 +1376,10 @@ int fman_port_init(struct fman_port *port)
if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
cfg->buf_margins.end_margins >
port->rx_pools_params.largest_buf_size) {
- pr_err("buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
- cfg->buf_margins.start_margins,
- cfg->buf_margins.end_margins,
- port->rx_pools_params.largest_buf_size);
+ dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+ __func__, cfg->buf_margins.start_margins,
+ cfg->buf_margins.end_margins,
+ port->rx_pools_params.largest_buf_size);
return -EINVAL;
}
}
@@ -1473,6 +1419,31 @@ int fman_port_init(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_init);
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port: A pointer to an FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
+ * the structure of the buffer.
+ * Out parameter: start margin - offset of the data from the
+ * start of the external buffer.
+ *
+ * Defines the structure, size and content of the application buffer.
+ * The FM writes a prefix at the start of each buffer: it reserves the first
+ * 'priv_data_size' bytes and then, depending on 'pass_prs_result' and
+ * 'pass_time_stamp', copies the parse result and the timestamp, followed by
+ * the packet itself (in this order), into the application buffer at the
+ * resulting offset. On Tx ports, if 'pass_prs_result' is set, the
+ * application is expected to fill in the parse result at its offset in the
+ * prefix.
+ * Calling this routine changes the buffer margins definitions in the
+ * internal driver database from the default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
+ * May be used for all ports.
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
struct fman_buffer_prefix_content *
buffer_prefix_content)
@@ -1494,9 +1465,24 @@ int fman_port_cfg_buf_prefix_content(struct fman_port *port,
}
EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
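
To make the prefix layout described above concrete, here is a stand-alone sketch of how the data offset inside the external buffer grows as prefix fields are enabled. The 32-byte parse-result area, the 8-byte timestamp and the power-of-two alignment are illustrative assumptions, not values read out of this driver.

#include <stdio.h>
#include <stddef.h>

/* Illustrative prefix layout: private data, then (optionally) parse
 * results and a timestamp, then the frame data aligned to 'data_align'
 * (assumed to be a power of two).
 */
static size_t data_offset(size_t priv_data_size, int pass_prs_result,
			  int pass_time_stamp, size_t data_align)
{
	size_t off = priv_data_size;

	if (pass_prs_result)
		off += 32;	/* assumed parse-result area size */
	if (pass_time_stamp)
		off += 8;	/* assumed timestamp size */

	/* round the start of the frame data up to the buffer alignment */
	return (off + data_align - 1) & ~(data_align - 1);
}

int main(void)
{
	printf("priv=16, prs, ts   -> data starts at %zu\n",
	       data_offset(16, 1, 1, 16));
	printf("priv=16, no extras -> data starts at %zu\n",
	       data_offset(16, 0, 0, 16));
	return 0;
}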
+/**
+ * fman_port_disable
+ * @port: A pointer to an FM Port module.
+ *
+ * Gracefully disable an FM port. The port will not start new tasks, and the
+ * disable completes once all tasks associated with the port have terminated.
+ *
+ * This is a blocking routine; it returns only after the port is gracefully
+ * stopped, i.e. the port will not accept new frames, but it will finish all
+ * frames or tasks that were already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_disable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+ u32 tmp;
bool rx_port, failure = false;
int count;
@@ -1553,16 +1539,27 @@ int fman_port_disable(struct fman_port *port)
}
if (failure)
- pr_debug("FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
- port->port_id);
+ dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+ __func__, port->port_id);
return 0;
}
EXPORT_SYMBOL(fman_port_disable);
+/**
+ * fman_port_enable
+ * @port: A pointer to an FM Port module.
+ *
+ * A runtime routine provided to allow disabling and re-enabling of the port.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_port_enable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, tmp;
+ u32 __iomem *bmi_cfg_reg;
+ u32 tmp;
bool rx_port;
if (!is_init_done(port->cfg))
@@ -1595,12 +1592,30 @@ int fman_port_enable(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_enable);
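
Taken together, the ordering rules in the kernel-doc above (configure first, optional tweaks such as the buffer prefix before init, enable only after init) amount to the call sequence sketched below; allocation of the port and population of the parameter structures are assumed to happen elsewhere (e.g. after fman_port_bind()).

#include "fman_port.h"

/* Illustrative only: the config -> tweak -> init -> enable ordering that
 * the kernel-doc above requires.
 */
static int bring_up_port(struct fman_port *port,
			 struct fman_port_params *params,
			 struct fman_buffer_prefix_content *prefix)
{
	int err;

	err = fman_port_config(port, params);
	if (err)
		return err;

	/* Allowed only after fman_port_config() and before fman_port_init() */
	err = fman_port_cfg_buf_prefix_content(port, prefix);
	if (err)
		return err;

	err = fman_port_init(port);
	if (err)
		return err;

	/* Allowed only following fman_port_init() */
	return fman_port_enable(port);
}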
+/**
+ * fman_port_bind
+ * @dev: FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
struct fman_port *fman_port_bind(struct device *dev)
{
return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
}
EXPORT_SYMBOL(fman_port_bind);
+/**
+ * fman_port_get_qman_channel_id
+ * @port: Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
u32 fman_port_get_qman_channel_id(struct fman_port *port)
{
return port->dts_params.qman_channel_id;
@@ -1624,7 +1639,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
#ifndef __rtems__
struct resource *dev_res;
#endif /* __rtems__ */
- const u32 *u32_prop;
+ u32 val;
int err = 0, lenp;
enum fman_port_type port_type;
u16 port_speed;
@@ -1634,13 +1649,15 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
if (!port)
return -ENOMEM;
+ port->dev = &of_dev->dev;
+
port_node = of_node_get(of_dev->dev.of_node);
/* Get the FM node */
#ifndef __rtems__
fm_node = of_get_parent(port_node);
if (!fm_node) {
- pr_err("of_get_parent() failed\n");
+ dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
err = -ENODEV;
goto return_err;
}
@@ -1653,28 +1670,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
}
#endif /* __rtems__ */
- u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, cell-index) failed\n",
- port_node->full_name);
- err = -EINVAL;
- goto return_err;
- }
- if (WARN_ON(lenp != sizeof(u32))) {
+ err = of_property_read_u32(port_node, "cell-index", &val);
+ if (err) {
+ dev_err(port->dev, "%s: reading cell-index for %s failed\n",
+ __func__, port_node->full_name);
err = -EINVAL;
goto return_err;
}
- port_id = (u8)*u32_prop;
-
+ port_id = (u8)val;
port->dts_params.id = port_id;
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port",
- &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1687,9 +1696,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port", &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1700,7 +1707,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
port_type = FMAN_PORT_TYPE_RX;
} else {
- pr_err("Illegal port type\n");
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
goto return_err;
}
@@ -1713,7 +1720,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
qman_channel_id = fman_get_qman_channel_id(fman, port_id);
if (qman_channel_id == 0) {
- pr_err("incorrect qman-channel-id\n");
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
err = -EINVAL;
goto return_err;
}
@@ -1722,7 +1730,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
err = of_address_to_resource(port_node, 0, &res);
if (err < 0) {
- pr_err("of_address_to_resource() failed\n");
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
err = -ENOMEM;
goto return_err;
}
@@ -1732,21 +1741,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
of_node_put(port_node);
#ifndef __rtems__
- dev_res = __devm_request_region(fman_get_device(fman), &res,
- res.start, (res.end + 1 - res.start),
- "fman-port");
+ dev_res = __devm_request_region(port->dev, &res, res.start,
+ resource_size(&res), "fman-port");
if (!dev_res) {
- pr_err("__devm_request_region() failed\n");
+ dev_err(port->dev, "%s: __devm_request_region() failed\n",
+ __func__);
err = -EINVAL;
goto free_port;
}
#endif /* __rtems__ */
- port->dts_params.base_addr = devm_ioremap(fman_get_device(fman),
- res.start,
- (res.end + 1 - res.start));
- if (port->dts_params.base_addr == 0)
- pr_err("devm_ioremap() failed\n");
+ port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
+ resource_size(&res));
+ if (!port->dts_params.base_addr)
+ dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
dev_set_drvdata(&of_dev->dev, port);
@@ -1774,14 +1782,34 @@ MODULE_DEVICE_TABLE(of, fman_port_match);
static struct platform_driver fman_port_driver = {
.driver = {
- .name = "fsl-fman-port",
- .of_match_table = fman_port_match,
- },
+ .name = "fsl-fman-port",
+ .of_match_table = fman_port_match,
+ },
.probe = fman_port_probe,
};
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_port_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+ platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
#else /* __rtems__ */
#include <sys/cdefs.h>
#include <sys/param.h>
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.h b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
index 56c1d023..8ba90173 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -132,109 +132,20 @@ struct fman_port_params {
/* Additional parameters depending on port type. */
};
-/**
- * fman_port_config
- * @port: Pointer to the port structure
- * @params: Pointer to data structure of parameters
- *
- * Creates a descriptor for the FM PORT module.
- * The routine returns a pointer to the FM PORT object.
- * This descriptor must be passed as first parameter to all other FM PORT
- * function calls.
- * No actual initialization or configuration of FM hardware is done by this
- * routine.
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_config(struct fman_port *port, struct fman_port_params *params);
-/**
- * fman_port_init
- * port: A pointer to a FM Port module.
- * Initializes the FM PORT module by defining the software structure and
- * configuring the hardware registers.
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_init(struct fman_port *port);
-/**
- * fman_port_cfg_buf_prefix_content
- * @port A pointer to a FM Port module.
- * @buffer_prefix_content A structure of parameters describing
- * the structure of the buffer.
- * Out parameter:
- * Start margin - offset of data from
- * start of external buffer.
- * Defines the structure, size and content of the application buffer.
- * The prefix, in Tx ports, if 'pass_prs_result', the application should set
- * a value to their offsets in the prefix of the FM will save the first
- * 'priv_data_size', than, depending on 'pass_prs_result' and
- * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
- * (in this order), to the application buffer, and to offset.
- * Calling this routine changes the buffer margins definitions in the internal
- * driver data base from its default configuration:
- * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
- * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
- * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
- * May be used for all ports
- *
- * Allowed only following fman_port_config() and before fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
struct fman_buffer_prefix_content
*buffer_prefix_content);
-/**
- * fman_port_disable
- * port: A pointer to a FM Port module.
- *
- * Gracefully disable an FM port. The port will not start new tasks after all
- * tasks associated with the port are terminated.
- *
- * This is a blocking routine, it returns after port is gracefully stopped,
- * i.e. the port will not except new frames, but it will finish all frames
- * or tasks which were already began.
- * Allowed only following fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_disable(struct fman_port *port);
-/**
- * fman_port_enable
- * port: A pointer to a FM Port module.
- *
- * A runtime routine provided to allow disable/enable of port.
- *
- * Allowed only following fman_port_init().
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_port_enable(struct fman_port *port);
-/**
- * fman_port_get_qman_channel_id
- * port: Pointer to the FMan port devuce
- *
- * Get the QMan channel ID for the specific port
- *
- * Return: QMan channel ID
- */
u32 fman_port_get_qman_channel_id(struct fman_port *port);
-/**
- * fman_port_bind
- * dev: FMan Port OF device pointer
- *
- * Bind to a specific FMan Port.
- *
- * Allowed only after the port was created.
- *
- * Return: A pointer to the FMan port device.
- */
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
index 2fcfa6c0..a059cb21 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -84,6 +84,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
}
}
}
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
int_context_data_copy,
@@ -168,4 +169,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
return 0;
}
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 5b22a044..0b5e971e 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -36,31 +36,22 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "crc_mac_addr_ext.h"
-
#include "fman_tgec.h"
#include "fman.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
+#include <linux/crc32.h>
/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
-#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
-#define CMD_CFG_SEND_IDLE 0x00010000
-#define CMD_CFG_RX_ER_DISC 0x00004000
-#define CMD_CFG_CMD_FRM_EN 0x00002000
-#define CMD_CFG_LOOPBACK_EN 0x00000400
-#define CMD_CFG_TX_ADDR_INS 0x00000200
#define CMD_CFG_PAUSE_IGNORE 0x00000100
-#define CMD_CFG_PAUSE_FWD 0x00000080
#define CMF_CFG_CRC_FWD 0x00000040
#define CMD_CFG_PROMIS_EN 0x00000010
-#define CMD_CFG_WAN_MODE 0x00000008
#define CMD_CFG_RX_EN 0x00000002
#define CMD_CFG_TX_EN 0x00000001
@@ -92,23 +83,6 @@
#define DEFAULT_MAX_FRAME_LENGTH 0x600
#define DEFAULT_PAUSE_QUANT 0xf000
-#define TGEC_DEFAULT_EXCEPTIONS \
- ((u32)((TGEC_IMASK_MDIO_SCAN_EVENT) |\
- (TGEC_IMASK_REM_FAULT) |\
- (TGEC_IMASK_LOC_FAULT) |\
- (TGEC_IMASK_TX_ECC_ER) |\
- (TGEC_IMASK_TX_FIFO_UNFL) |\
- (TGEC_IMASK_TX_FIFO_OVFL) |\
- (TGEC_IMASK_TX_ER) |\
- (TGEC_IMASK_RX_FIFO_OVFL) |\
- (TGEC_IMASK_RX_ECC_ER) |\
- (TGEC_IMASK_RX_JAB_FRM) |\
- (TGEC_IMASK_RX_OVRSZ_FRM) |\
- (TGEC_IMASK_RX_RUNT_FRM) |\
- (TGEC_IMASK_RX_FRAG_FRM) |\
- (TGEC_IMASK_RX_CRC_ER) |\
- (TGEC_IMASK_RX_ALIGN_ER)))
-
/* number of pattern match registers (entries) */
#define TGEC_NUM_OF_PADDRS 1
@@ -222,17 +196,8 @@ struct tgec_regs {
};
struct tgec_cfg {
- bool rx_error_discard;
bool pause_ignore;
- bool pause_forward_enable;
- bool no_length_check_enable;
- bool cmd_frame_enable;
- bool send_idle_enable;
- bool wan_mode_enable;
bool promiscuous_mode_enable;
- bool tx_addr_ins_enable;
- bool loopback_enable;
- bool time_stamp_enable;
u16 max_frame_length;
u16 pause_quant;
u32 tx_ipg_length;
@@ -270,17 +235,8 @@ static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
static void set_dflts(struct tgec_cfg *cfg)
{
- cfg->wan_mode_enable = false;
cfg->promiscuous_mode_enable = false;
- cfg->pause_forward_enable = false;
cfg->pause_ignore = false;
- cfg->tx_addr_ins_enable = false;
- cfg->loopback_enable = false;
- cfg->cmd_frame_enable = false;
- cfg->rx_error_discard = false;
- cfg->send_idle_enable = false;
- cfg->no_length_check_enable = true;
- cfg->time_stamp_enable = false;
cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
cfg->pause_quant = DEFAULT_PAUSE_QUANT;
@@ -293,28 +249,12 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
/* Config */
tmp = CMF_CFG_CRC_FWD;
- if (cfg->wan_mode_enable)
- tmp |= CMD_CFG_WAN_MODE;
if (cfg->promiscuous_mode_enable)
tmp |= CMD_CFG_PROMIS_EN;
- if (cfg->pause_forward_enable)
- tmp |= CMD_CFG_PAUSE_FWD;
if (cfg->pause_ignore)
tmp |= CMD_CFG_PAUSE_IGNORE;
- if (cfg->tx_addr_ins_enable)
- tmp |= CMD_CFG_TX_ADDR_INS;
- if (cfg->loopback_enable)
- tmp |= CMD_CFG_LOOPBACK_EN;
- if (cfg->cmd_frame_enable)
- tmp |= CMD_CFG_CMD_FRM_EN;
- if (cfg->rx_error_discard)
- tmp |= CMD_CFG_RX_ER_DISC;
- if (cfg->send_idle_enable)
- tmp |= CMD_CFG_SEND_IDLE;
- if (cfg->no_length_check_enable)
- tmp |= CMD_CFG_NO_LEN_CHK;
- if (cfg->time_stamp_enable)
- tmp |= CMD_CFG_EN_TIMESTAMP;
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
iowrite32be(tmp, &regs->command_config);
/* Max Frame Length */
@@ -348,12 +288,6 @@ static int check_init_parameters(struct fman_mac *tgec)
return -EINVAL;
}
- /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
- if (!tgec->cfg->no_length_check_enable) {
- pr_warn("Length Check!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -421,18 +355,6 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
return bit_mask;
}
-static u32 get_mac_addr_hash_code(u64 eth_addr)
-{
- u32 crc;
-
- /* CRC calculation */
- GET_MAC_ADDR_CRC(eth_addr, crc);
-
- crc = bitrev32(crc);
-
- return crc;
-}
-
static void tgec_err_exception(void *handle)
{
struct fman_mac *tgec = (struct fman_mac *)handle;
@@ -613,7 +535,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
- u32 crc, hash;
+ u32 crc = 0xFFFFFFFF, hash;
u64 addr;
if (!is_init_done(tgec->cfg))
@@ -627,8 +549,8 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return -EINVAL;
}
/* CRC calculation */
- crc = get_mac_addr_hash_code(addr);
-
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* Take 9 MSB bits */
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
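
The multicast hash above is the CRC-32 of the 6-byte address (seeded with 0xFFFFFFFF), bit-reversed, with the top 9 bits used as the bucket index. The stand-alone sketch below reproduces that; the open-coded CRC mimics the kernel's crc32_le() (reflected polynomial 0xEDB88320, no final inversion), and the shift of 23 with a 0x1ff mask is an assumption based on the "take 9 MSB bits" comment rather than a value copied from the header.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Reflected CRC-32, seeded like the driver's crc32_le(0xFFFFFFFF, ...) call */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

static uint32_t bitrev32_sketch(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t crc = crc32_le_sketch(0xFFFFFFFFu, mcast, sizeof(mcast));
	uint32_t hash;

	crc = bitrev32_sketch(crc);
	/* Assumed: shift of 23 and 9-bit mask, i.e. the 9 MSBs of the CRC */
	hash = (crc >> 23) & 0x1ff;

	printf("hash bucket = 0x%03x\n", hash);
	return 0;
}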
@@ -651,7 +573,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
struct list_head *pos;
- u32 crc, hash;
+ u32 crc = 0xFFFFFFFF, hash;
u64 addr;
if (!is_init_done(tgec->cfg))
@@ -660,7 +582,8 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
addr = ((*(u64 *)eth_addr) >> 16);
/* CRC calculation */
- crc = get_mac_addr_hash_code(addr);
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
/* Take 9 MSB bits */
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
@@ -803,9 +726,6 @@ int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
- if (tgec->cfg)
- tgec->cfg = NULL;
-
kfree(tgec->cfg);
kfree(tgec);
@@ -836,11 +756,25 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = (struct tgec_regs __iomem *)(base_addr);
+ tgec->regs = base_addr;
tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
- tgec->exceptions = TGEC_DEFAULT_EXCEPTIONS;
+ tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT |
+ TGEC_IMASK_TX_ECC_ER |
+ TGEC_IMASK_TX_FIFO_UNFL |
+ TGEC_IMASK_TX_FIFO_OVFL |
+ TGEC_IMASK_TX_ER |
+ TGEC_IMASK_RX_FIFO_OVFL |
+ TGEC_IMASK_RX_ECC_ER |
+ TGEC_IMASK_RX_JAB_FRM |
+ TGEC_IMASK_RX_OVRSZ_FRM |
+ TGEC_IMASK_RX_RUNT_FRM |
+ TGEC_IMASK_RX_FRAG_FRM |
+ TGEC_IMASK_RX_CRC_ER |
+ TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
tgec->dev_id = params->dev_id;
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.c b/linux/drivers/net/ethernet/freescale/fman/mac.c
index 4b26211e..f04ae704 100644
--- a/linux/drivers/net/ethernet/freescale/fman/mac.c
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.c
@@ -37,6 +37,7 @@
#ifdef __rtems__
#include <sys/types.h>
+#include <sys/socket.h>
#include <net/if_dl.h>
#include <bsp/fdt.h>
#include "../../../../../../rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h"
@@ -52,6 +53,7 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
+#include <linux/libfdt_env.h>
#include "mac.h"
#include "fman_mac.h"
@@ -59,13 +61,8 @@
#include "fman_tgec.h"
#include "fman_memac.h"
-#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
-
MODULE_LICENSE("Dual BSD/GPL");
-
-MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
-
-MODULE_DESCRIPTION(MAC_DESCRIPTION);
+MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
struct device *dev;
@@ -74,6 +71,11 @@ struct mac_priv_s {
phy_interface_t phy_if;
struct fman *fman;
struct device_node *phy_node;
+ struct device_node *internal_phy_node;
+#ifdef __rtems__
+ struct device_node phy_node_storage;
+ struct device_node internal_phy_node_storage;
+#endif /* __rtems__ */
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
@@ -90,15 +92,15 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *_mac_dev, enum fman_mac_exceptions ex)
+static void mac_exception(void *handle, enum fman_mac_exceptions ex)
{
struct mac_device *mac_dev;
struct mac_priv_s *priv;
- mac_dev = (struct mac_device *)_mac_dev;
+ mac_dev = handle;
priv = mac_dev->priv;
- if (FM_MAC_EX_10G_RX_FIFO_OVFL == ex) {
+ if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
@@ -118,7 +120,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
#ifndef __rtems__
params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start, 0x2000);
+ devm_ioremap(priv->dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
#else /* __rtems__ */
params->base_addr = priv->vaddr;
#endif /* __rtems__ */
@@ -131,6 +134,7 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
params->exception_cb = mac_exception;
params->event_cb = mac_exception;
params->dev_id = mac_dev;
+ params->internal_phy_node = priv->internal_phy_node;
}
static int tgec_initialization(struct mac_device *mac_dev)
@@ -362,9 +366,19 @@ static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
return 0;
}
-/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Pause frame setting for RX
+ * @tx: Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Avoid redundant calls to FMD, if the MAC driver already contains the desired
* active PAUSE settings. Otherwise, the new active settings should be reflected
* in FMan.
+ *
+ * Return: 0 on success; Error code otherwise.
*/
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
{
@@ -392,8 +406,16 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
EXPORT_SYMBOL(fman_set_mac_active_pause);
#ifndef __rtems__
-/* Determine the MAC RX/TX PAUSE frames settings based on PHY
+/**
+ * fman_get_pause_cfg
+ * @mac_dev: A pointer to the MAC device
+ * @rx_pause: Return value for the RX pause setting
+ * @tx_pause: Return value for the TX pause setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings based on PHY
 * autonegotiation or values set by ethtool.
+ *
+ * Return: None.
*/
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause)
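
These two helpers are meant to be used back to back from the Ethernet driver's adjust_link path: fman_get_pause_cfg() derives the settings from the PHY/ethtool state and fman_set_mac_active_pause() applies them, skipping the FMD call when nothing changed. A hedged sketch of that flow; the function and variable names outside the two helpers are placeholders.

#include <linux/netdevice.h>

#include "mac.h"

/* Illustrative only: derive and apply the PAUSE settings after a link
 * change, mirroring how the Ethernet driver is expected to use these
 * helpers.
 */
static void apply_pause_settings(struct net_device *net_dev,
				 struct mac_device *mac_dev)
{
	bool rx_pause, tx_pause;
	int err;

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
}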
@@ -495,9 +517,9 @@ static void adjust_link_memac(struct net_device *net_dev)
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
-static int init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev,
- void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev,
+ void (*adj_lnk)(struct net_device *))
{
struct phy_device *phy_dev;
struct mac_priv_s *priv = mac_dev->priv;
@@ -506,7 +528,7 @@ static int init_phy(struct net_device *net_dev,
priv->phy_if);
if (!phy_dev) {
netdev_err(net_dev, "Could not connect to PHY\n");
- return -ENODEV;
+ return NULL;
}
/* Remove any features not supported by the controller */
@@ -519,23 +541,23 @@ static int init_phy(struct net_device *net_dev,
mac_dev->phy_dev = phy_dev;
- return 0;
+ return phy_dev;
}
-static int dtsec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
}
-static int tgec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, adjust_link_void);
}
-static int memac_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_memac);
}
@@ -639,31 +661,6 @@ static void setup_memac(struct mac_device *mac_dev)
static DEFINE_MUTEX(eth_lock);
#endif /* __rtems__ */
-static const char phy_str[][11] = {
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(phy_str); i++)
- if (strcmp(str, phy_str[i]) == 0)
- return (phy_interface_t)i;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_MII] = SPEED_100,
[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
@@ -675,6 +672,7 @@ static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
};
@@ -754,13 +752,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
#ifdef __rtems__
struct fman_mac_softc *sc = device_get_softc(_dev);
#endif /* __rtems__ */
- int err, i, lenp;
+ int err, i, nph;
struct device *dev;
-#ifndef __rtems__
- struct device_node *mac_node, *dev_node, *tbi_node;
-#else /* __rtems__ */
- struct device_node *mac_node;
-#endif /* __rtems__ */
+ struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
#ifndef __rtems__
struct platform_device *of_dev;
@@ -768,10 +762,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
struct resource res;
struct mac_priv_s *priv;
const u8 *mac_addr;
- const char *char_prop;
- const u32 *u32_prop;
+ u32 val;
u8 fman_id;
- const phandle *phandle_prop;
+ int phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -798,10 +791,26 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
setup_dtsec(mac_dev);
+#ifndef __rtems__
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "tbi-handle", 0);
+#else /* __rtems__ */
+ priv->internal_phy_node = of_parse_phandle(
+ &priv->internal_phy_node_storage, mac_node, "tbi-handle",
+ 0);
+#endif /* __rtems__ */
} else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
setup_tgec(mac_dev);
} else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
setup_memac(mac_dev);
+#ifndef __rtems__
+ priv->internal_phy_node = of_parse_phandle(mac_node,
+ "pcsphy-handle", 0);
+#else /* __rtems__ */
+ priv->internal_phy_node = of_parse_phandle(
+ &priv->internal_phy_node_storage, mac_node, "pcsphy-handle",
+ 0);
+#endif /* __rtems__ */
} else {
#ifndef __rtems__
dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
@@ -835,15 +844,15 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the FMan cell-index */
- u32_prop = of_get_property(dev_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(dev_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
dev_node->full_name);
err = -EINVAL;
goto _return_of_node_put;
}
- WARN_ON(lenp != sizeof(u32));
- fman_id = (u8)*u32_prop + 1; /* cell-index 0 => FMan id 1 */
+ /* cell-index 0 => FMan id 1 */
+ fman_id = (u8)(val + 1);
priv->fman = fman_bind(&of_dev->dev);
if (!priv->fman) {
@@ -888,26 +897,11 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
priv->vaddr = devm_ioremap(dev, res.start, res.end + 1 - res.start);
#endif /* __rtems__ */
-#ifndef __rtems__
-#define TBIPA_OFFSET 0x1c
-#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
- tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
- if (tbi_node) {
- u32 tbiaddr = TBIPA_DEFAULT_ADDR;
-
- u32_prop = of_get_property(tbi_node, "reg", NULL);
- if (u32_prop)
- tbiaddr = *u32_prop;
- out_be32(priv->vaddr + TBIPA_OFFSET, tbiaddr);
- }
-#endif /* __rtems__ */
-
if (!of_device_is_available(mac_node)) {
#ifndef __rtems__
devm_iounmap(dev, priv->vaddr);
__devm_release_region(dev, fman_get_mem_region(priv->fman),
res.start, res.end + 1 - res.start);
- fman_unbind(priv->fman);
devm_kfree(dev, mac_dev);
#endif /* __rtems__ */
dev_set_drvdata(dev, NULL);
@@ -915,15 +909,14 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the cell-index */
- u32_prop = of_get_property(mac_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(mac_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
mac_node->full_name);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
- WARN_ON(lenp != sizeof(u32));
- priv->cell_index = (u8)*u32_prop;
+ priv->cell_index = (u8)val;
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
@@ -936,25 +929,43 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
/* Get the port handles */
- phandle_prop = of_get_property(mac_node, "fsl,fman-ports", &lenp);
- if (!phandle_prop) {
- dev_err(dev, "of_get_property(%s, fsl,fman-ports) failed\n",
+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
+ err = nph;
+ goto _return_dev_set_drvdata;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port)) {
+ dev_err(dev, "Unsupported number of fsl,fman-ports phandles in MAC node %s\n",
mac_node->full_name);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
- BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port));
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
-#ifndef __rtems__
+#ifdef __rtems__
+ struct fman_ivars *ivars;
+ device_t child;
+
+ ivars = &mac_dev->ivars[i];
+#endif /* __rtems__ */
/* Find the port node */
- dev_node = of_find_node_by_phandle(phandle_prop[i]);
+#ifndef __rtems__
+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
+#else /* __rtems__ */
+ dev_node = of_parse_phandle(&ivars->dn, mac_node,
+ "fsl,fman-ports", i);
+#endif /* __rtems__ */
if (!dev_node) {
- dev_err(dev, "of_find_node_by_phandle() failed\n");
+ dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
+ mac_node->full_name);
err = -EINVAL;
goto _return_of_node_put;
}
+#ifndef __rtems__
of_dev = of_find_device_by_node(dev_node);
if (!of_dev) {
dev_err(dev, "of_find_device_by_node(%s) failed\n",
@@ -972,22 +983,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
of_node_put(dev_node);
#else /* __rtems__ */
- int node;
- struct fman_ivars *ivars;
- device_t child;
-
- node = fdt_node_offset_by_phandle(bsp_fdt_get(), phandle_prop[i]);
- if (node < 0) {
- goto _return_of_node_put;
- }
-
- ivars = kzalloc(sizeof(*ivars), GFP_KERNEL);
- if (ivars == NULL) {
- goto _return_of_node_put;
- }
-
- ivars->dn.offset = node;
- ivars->of_dev.dev.of_node = &ivars->dn;
+ ivars->of_dev.dev.of_node = dev_node;
ivars->of_dev.dev.base = _of_dev->dev.base;
ivars->fman = fman;
@@ -1010,23 +1006,20 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
}
/* Get the PHY connection type */
- char_prop = (const char *)of_get_property(mac_node,
- "phy-connection-type", NULL);
- if (!char_prop) {
+ phy_if = of_get_phy_mode(mac_node);
+ if (phy_if < 0) {
dev_warn(dev,
- "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+ "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
mac_node->full_name);
- priv->phy_if = PHY_INTERFACE_MODE_MII;
- } else {
- priv->phy_if = str2phy(char_prop);
+ phy_if = PHY_INTERFACE_MODE_SGMII;
}
+ priv->phy_if = phy_if;
priv->speed = phy2speed[priv->phy_if];
priv->max_speed = priv->speed;
-#ifndef __rtems__
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
- if (strstr(char_prop, "sgmii"))
+ if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Half);
@@ -1035,9 +1028,8 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
- if (strstr(char_prop, "xgmii"))
+ if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
mac_dev->if_support = SUPPORTED_10000baseT_Full;
-#endif /* __rtems__ */
/* Get the rest of the PHY information */
#ifndef __rtems__
@@ -1051,20 +1043,30 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
GFP_KERNEL);
- if (!priv->fixed_link)
+ if (!priv->fixed_link) {
+ err = -ENOMEM;
goto _return_dev_set_drvdata;
+ }
priv->phy_node = of_node_get(mac_node);
phy = of_phy_find_device(priv->phy_node);
- if (!phy)
+ if (!phy) {
+ err = -EINVAL;
goto _return_dev_set_drvdata;
+ }
priv->fixed_link->link = phy->link;
priv->fixed_link->speed = phy->speed;
priv->fixed_link->duplex = phy->duplex;
priv->fixed_link->pause = phy->pause;
priv->fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
}
+#else /* __rtems__ */
+ priv->phy_node = of_parse_phandle(&priv->phy_node_storage, mac_node,
+ "phy-handle", 0);
+ mac_dev->phy_dev = of_phy_find_device(priv->phy_node);
#endif /* __rtems__ */
err = mac_dev->init(mac_dev);
@@ -1077,7 +1079,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
/* pause frame autonegotiation enabled */
mac_dev->autoneg_pause = true;
- /* by intializing the values to false, force FMD to enable PAUSE frames
+ /* By initializing the values to false, force FMD to enable PAUSE frames
* on RX and TX
*/
mac_dev->rx_pause_req = true;
@@ -1107,7 +1109,6 @@ _return_of_node_put:
#endif /* __rtems__ */
_return_dev_set_drvdata:
kfree(priv->fixed_link);
- kfree(priv);
dev_set_drvdata(dev, NULL);
_return:
return err;
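The probe rework above replaces raw of_get_property() pointer handling with the typed OF accessors. For reference, a minimal, self-contained sketch of that pattern using the same property names; example_read_fman_refs() is an illustrative helper, not code from this patch:

/* Sketch only: read a u32 property and walk a phandle list without
 * touching raw property pointers or lengths.
 */
#include <linux/errno.h>
#include <linux/of.h>

static int example_read_fman_refs(struct device_node *mac_node)
{
        struct device_node *port_node;
        u32 cell_index;
        int nph, i, err;

        /* of_property_read_u32() checks presence and size of the property */
        err = of_property_read_u32(mac_node, "cell-index", &cell_index);
        if (err)
                return err;

        /* Count the phandle list up front so malformed nodes fail early */
        nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
        if (nph < 0)
                return nph;

        for (i = 0; i < nph; i++) {
                port_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
                if (!port_node)
                        return -EINVAL;

                /* ... look up and bind the port device here ... */

                /* of_parse_phandle() takes a reference on the node */
                of_node_put(port_node);
        }

        return 0;
}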
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.h b/linux/drivers/net/ethernet/freescale/fman/mac.h
index 727320e8..066072ab 100644
--- a/linux/drivers/net/ethernet/freescale/fman/mac.h
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.h
@@ -65,11 +65,10 @@ struct mac_device {
#endif /* __rtems__ */
u8 addr[ETH_ALEN];
struct fman_port *port[2];
-#ifndef __rtems__
u32 if_support;
struct phy_device *phy_dev;
-#endif /* __rtems__ */
#ifdef __rtems__
+ struct fman_ivars ivars[2];
struct platform_device pdev;
struct dpaa_eth_data data;
struct net_device net_dev;
@@ -83,7 +82,8 @@ struct mac_device {
bool promisc;
#ifndef __rtems__
- int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+ struct phy_device *(*init_phy)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
#else /* __rtems__ */
void (*adjust_link)(struct mac_device *mac_dev, u16 speed);
#endif /* __rtems__ */
@@ -119,28 +119,8 @@ struct dpaa_eth_data {
extern const char *mac_driver_description;
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Return: 0 on success; Error code otherwise.
- */
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx: Return value for RX setting
- * @tx: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings
- *
- * Return: Pointer to FMan device.
- */
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
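The two pause helpers whose kernel-doc moves from mac.h into mac.c are intended to be used as a pair: fman_get_pause_cfg() resolves the autonegotiated or ethtool-requested settings, and fman_set_mac_active_pause() pushes them to FMan only when they differ from the MAC's current state. A minimal usage sketch; example_apply_pause() is an illustrative wrapper, not part of this patch:

/* Sketch only; assumes "mac.h" and <linux/netdevice.h>. */
static void example_apply_pause(struct net_device *net_dev,
                                struct mac_device *mac_dev)
{
        bool rx_pause, tx_pause;
        int err;

        /* Derive RX/TX pause from PHY autonegotiation or ethtool overrides */
        fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);

        /* No-op if the MAC already carries the requested settings */
        err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
        if (err < 0)
                netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
}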