Diffstat (limited to 'linux/drivers/net/ethernet/freescale/dpaa')
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2987
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 440
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c | 1491
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h | 113
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c | 710
-rw-r--r-- | linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 12
6 files changed, 2641 insertions, 3112 deletions
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 73173b89..f8ed4516 100644 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2,7 +2,7 @@ #include <rtems/bsd/local/opt_dpaa.h> -/* Copyright 2008 - 2015 Freescale Semiconductor Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -36,9 +36,9 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/kthread.h> #include <linux/io.h> #ifndef __rtems__ #include <linux/if_arp.h> @@ -49,32 +49,50 @@ #include <linux/udp.h> #include <linux/tcp.h> #include <linux/net.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/highmem.h> #include <linux/percpu.h> #include <linux/dma-mapping.h> +#include <linux/sort.h> #endif /* __rtems__ */ #include <soc/fsl/bman.h> +#include <soc/fsl/qman.h> #include "fman.h" #include "fman_port.h" - #include "mac.h" #include "dpaa_eth.h" -#include "dpaa_eth_common.h" -/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files * using trace events only need to #include <trace/events/sched.h> */ #define CREATE_TRACE_POINTS #include "dpaa_eth_trace.h" -#define DPA_NAPI_WEIGHT 64 +static int debug = -1; +module_param(debug, int, 0444); +MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)"); + +static u16 tx_timeout = 1000; +module_param(tx_timeout, ushort, 0444); +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); + +#define FM_FD_STAT_RX_ERRORS \ + (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \ + FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \ + FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \ + FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \ + FM_FD_ERR_PRS_HDR_ERR) -/* Valid checksum indication */ -#define DPA_CSUM_VALID 0xFFFF +#define FM_FD_STAT_TX_ERRORS \ + (FM_FD_ERR_UNSUPPORTED_FORMAT | \ + FM_FD_ERR_LENGTH | FM_FD_ERR_DMA) -#define DPA_DESCRIPTION "FSL DPAA Ethernet driver" +#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | NETIF_MSG_IFUP | \ + NETIF_MSG_IFDOWN) #define DPAA_INGRESS_CS_THRESHOLD 0x10000000 /* Ingress congestion threshold on FMan ports @@ -83,62 +101,1999 @@ * by FMan. */ +/* Size in bytes of the FQ taildrop threshold */ +#define DPAA_FQ_TD 0x200000 + +#define DPAA_CS_THRESHOLD_1G 0x06000000 +/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000 + * The size in bytes of the egress Congestion State notification threshold on + * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a + * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), + * and the larger the frame size, the more acute the problem. 
+ * So we have to find a balance between these factors: + * - avoiding the device staying congested for a prolonged time (risking + * the netdev watchdog to fire - see also the tx_timeout module param); + * - affecting performance of protocols such as TCP, which otherwise + * behave well under the congestion notification mechanism; + * - preventing the Tx cores from tightly-looping (as if the congestion + * threshold was too low to be effective); + * - running out of memory if the CS threshold is set too high. + */ + +#define DPAA_CS_THRESHOLD_10G 0x10000000 +/* The size in bytes of the egress Congestion State notification threshold on + * 10G ports, range 0x1000 .. 0x10000000 + */ + +/* Largest value that the FQD's OAL field can hold */ +#define FSL_QMAN_MAX_OAL 127 + +/* Default alignment for start of data in an Rx FD */ +#define DPAA_FD_DATA_ALIGNMENT 16 + +/* Values for the L3R field of the FM Parse Results + */ +/* L3 Type field: First IP Present IPv4 */ +#define FM_L3_PARSE_RESULT_IPV4 0x8000 +/* L3 Type field: First IP Present IPv6 */ +#define FM_L3_PARSE_RESULT_IPV6 0x4000 +/* Values for the L4R field of the FM Parse Results */ +/* L4 Type field: UDP */ +#define FM_L4_PARSE_RESULT_UDP 0x40 +/* L4 Type field: TCP */ +#define FM_L4_PARSE_RESULT_TCP 0x20 + +#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ +#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ + +#define FSL_DPAA_BPID_INV 0xff +#define FSL_DPAA_ETH_MAX_BUF_COUNT 128 +#define FSL_DPAA_ETH_REFILL_THRESHOLD 80 + +#define DPAA_TX_PRIV_DATA_SIZE 16 +#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) +#define DPAA_TIME_STAMP_SIZE 8 +#define DPAA_HASH_RESULTS_SIZE 8 +#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ + dpaa_rx_extra_headroom) + +#define DPAA_ETH_RX_QUEUES 128 + +#define DPAA_ENQUEUE_RETRIES 100000 + +enum port_type {RX, TX}; + +struct fm_port_fqs { + struct dpaa_fq *tx_defq; + struct dpaa_fq *tx_errq; + struct dpaa_fq *rx_defq; + struct dpaa_fq *rx_errq; +}; + +/* All the dpa bps in use at any moment */ +static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; + +/* The raw buffer size must be cacheline aligned */ #ifndef __rtems__ -static u8 debug = -1; -module_param(debug, byte, S_IRUGO); -MODULE_PARM_DESC(debug, "Module/Driver verbosity level"); +#define DPAA_BP_RAW_SIZE 4096 +#else /* __rtems__ */ +/* + * FIXME: Support multiple buffer pools. + */ +#define DPAA_BP_RAW_SIZE 2048 -/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */ -static u16 tx_timeout = 1000; -module_param(tx_timeout, ushort, S_IRUGO); -MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); +/* + * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive + * frames overwrite this area if < 64 bytes. + */ +#define DPAA_OUT_OF_BAND_SIZE 64 + +#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE) +#endif /* __rtems__ */ +/* When using more than one buffer pool, the raw sizes are as follows: + * 1 bp: 4KB + * 2 bp: 2KB, 4KB + * 3 bp: 1KB, 2KB, 4KB + * 4 bp: 1KB, 2KB, 4KB, 8KB + */ +static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt) +{ + size_t res = DPAA_BP_RAW_SIZE / 4; + u8 i; + + for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++) + res *= 2; + return res; +} + +/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that, + * via SKB_WITH_OVERHEAD(). 
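The bpool_buffer_raw_size() helper above produces exactly the ladder listed in its comment. A minimal stand-alone sketch (plain C, using the non-RTEMS DPAA_BP_RAW_SIZE of 4096; the main() harness is only for illustration) that prints the table:

#include <stdio.h>
#include <stddef.h>

#define DPAA_BP_RAW_SIZE 4096	/* non-RTEMS raw size from this patch */

/* Same doubling scheme as the driver's bpool_buffer_raw_size(). */
static size_t bpool_buffer_raw_size(unsigned int index, unsigned int cnt)
{
	size_t res = DPAA_BP_RAW_SIZE / 4;
	unsigned int i;

	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
		res *= 2;
	return res;
}

int main(void)
{
	unsigned int cnt, index;

	for (cnt = 1; cnt <= 4; cnt++) {
		printf("%u bp:", cnt);
		for (index = 0; index < cnt; index++)
			printf(" %zuKB", bpool_buffer_raw_size(index, cnt) / 1024);
		printf("\n");
	}
	/* prints: 1 bp: 4KB;  2 bp: 2KB 4KB;  3 bp: 1KB 2KB 4KB;  4 bp: 1KB 2KB 4KB 8KB */
	return 0;
}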
We can't rely on netdev_alloc_frag() giving us + * half-page-aligned buffers, so we reserve some more space for start-of-buffer + * alignment. + */ +#ifndef __rtems__ +#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES) +#else /* __rtems__ */ +#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET #endif /* __rtems__ */ -/* BM */ +#ifndef __rtems__ +static int dpaa_max_frm; +#endif /* __rtems__ */ -#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8) +#ifndef __rtems__ +static int dpaa_rx_extra_headroom; +#else /* __rtems__ */ +#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom() +#endif /* __rtems__ */ -static u8 dpa_priv_common_bpid; +#define dpaa_get_max_mtu() \ + (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)) -static void _dpa_rx_error(struct net_device *net_dev, - const struct dpa_priv_s *priv, - struct dpa_percpu_priv_s *percpu_priv, - const struct qm_fd *fd, - u32 fqid) +#ifndef __rtems__ +static int dpaa_netdev_init(struct net_device *net_dev, + const struct net_device_ops *dpaa_ops, + u16 tx_timeout) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct dpaa_percpu_priv *percpu_priv; + const u8 *mac_addr; + int i, err; + + /* Although we access another CPU's private data here + * we do it at initialization so it is safe + */ + for_each_possible_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + percpu_priv->net_dev = net_dev; + } + + net_dev->netdev_ops = dpaa_ops; + mac_addr = priv->mac_dev->addr; + + net_dev->mem_start = priv->mac_dev->res->start; + net_dev->mem_end = priv->mac_dev->res->end; + + net_dev->min_mtu = ETH_MIN_MTU; + net_dev->max_mtu = dpaa_get_max_mtu(); + + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_LLTX); + + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; + /* The kernels enables GSO automatically, if we declare NETIF_F_SG. + * For conformity, we'll still declare GSO explicitly. + */ + net_dev->features |= NETIF_F_GSO; + + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + /* we do not want shared skbs on TX */ + net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + + net_dev->features |= net_dev->hw_features; + net_dev->vlan_features = net_dev->features; + + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + + net_dev->ethtool_ops = &dpaa_ethtool_ops; + + net_dev->needed_headroom = priv->tx_headroom; + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); + + /* start without the RUNNING flag, phylib controls it later */ + netif_carrier_off(net_dev); + + err = register_netdev(net_dev); + if (err < 0) { + dev_err(dev, "register_netdev() = %d\n", err); + return err; + } + + return 0; +} +#endif /* __rtems__ */ + +static int dpaa_stop(struct net_device *net_dev) +{ + struct mac_device *mac_dev; + struct dpaa_priv *priv; + int i, err, error; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + +#ifndef __rtems__ + netif_tx_stop_all_queues(net_dev); +#endif /* __rtems__ */ + /* Allow the Fman (Tx) port to process in-flight frames before we + * try switching it off. 
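dpaa_get_max_mtu() derives the largest configurable MTU from the FMan maximum frame length (dpaa_max_frm, read from the FMan driver at probe time) minus the VLAN Ethernet header and the FCS. A quick worked example, assuming a 1522-byte maximum frame length (that value is an assumption; it is configured at the FMan level):

    max_mtu = dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)
            = 1522 - (18 + 4)
            = 1500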
+ */ + usleep_range(5000, 10000); + + err = mac_dev->stop(mac_dev); + if (err < 0) + netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n", + err); + + for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + error = fman_port_disable(mac_dev->port[i]); + if (error) + err = error; + } + +#ifndef __rtems__ + if (net_dev->phydev) + phy_disconnect(net_dev->phydev); + net_dev->phydev = NULL; +#endif /* __rtems__ */ + + return err; +} + +#ifndef __rtems__ +static void dpaa_tx_timeout(struct net_device *net_dev) +{ + struct dpaa_percpu_priv *percpu_priv; + const struct dpaa_priv *priv; + + priv = netdev_priv(net_dev); + percpu_priv = this_cpu_ptr(priv->percpu_priv); + + netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", + jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); + + percpu_priv->stats.tx_errors++; +} + +/* Calculates the statistics for the given device by adding the statistics + * collected by each CPU. + */ +static void dpaa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *s) +{ + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); + struct dpaa_priv *priv = netdev_priv(net_dev); + struct dpaa_percpu_priv *percpu_priv; + u64 *netstats = (u64 *)s; + u64 *cpustats; + int i, j; + + for_each_possible_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + + cpustats = (u64 *)&percpu_priv->stats; + + /* add stats from all CPUs */ + for (j = 0; j < numstats; j++) + netstats[j] += cpustats[j]; + } +} + +static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) +{ + struct platform_device *of_dev; + struct dpaa_eth_data *eth_data; + struct device *dpaa_dev, *dev; + struct device_node *mac_node; + struct mac_device *mac_dev; + + dpaa_dev = &pdev->dev; + eth_data = dpaa_dev->platform_data; + if (!eth_data) + return ERR_PTR(-ENODEV); + + mac_node = eth_data->mac_node; + + of_dev = of_find_device_by_node(mac_node); + if (!of_dev) { + dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n", + mac_node->full_name); + of_node_put(mac_node); + return ERR_PTR(-EINVAL); + } + of_node_put(mac_node); + + dev = &of_dev->dev; + + mac_dev = dev_get_drvdata(dev); + if (!mac_dev) { + dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n", + dev_name(dev)); + return ERR_PTR(-EINVAL); + } + + return mac_dev; +} + +static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) +{ + const struct dpaa_priv *priv; + struct mac_device *mac_dev; + struct sockaddr old_addr; + int err; + + priv = netdev_priv(net_dev); + + memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); + + err = eth_mac_addr(net_dev, addr); + if (err < 0) { + netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); + return err; + } + + mac_dev = priv->mac_dev; + + err = mac_dev->change_addr(mac_dev->fman_mac, + (enet_addr_t *)net_dev->dev_addr); + if (err < 0) { + netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", + err); + /* reverting to previous address */ + eth_mac_addr(net_dev, &old_addr); + + return err; + } + + return 0; +} + +static void dpaa_set_rx_mode(struct net_device *net_dev) +{ + const struct dpaa_priv *priv; + int err; + + priv = netdev_priv(net_dev); + + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { + priv->mac_dev->promisc = !priv->mac_dev->promisc; + err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, + priv->mac_dev->promisc); + if (err < 0) + netif_err(priv, drv, net_dev, + "mac_dev->set_promisc() = %d\n", + err); + } + + err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); + if (err < 0) + 
netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", + err); +} +#endif /* __rtems__ */ + +static struct dpaa_bp *dpaa_bpid2pool(int bpid) +{ + if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS)) + return NULL; + + return dpaa_bp_array[bpid]; +} + +/* checks if this bpool is already allocated */ +static bool dpaa_bpid2pool_use(int bpid) +{ + if (dpaa_bpid2pool(bpid)) { + atomic_inc(&dpaa_bp_array[bpid]->refs); + return true; + } + + return false; +} + +/* called only once per bpid by dpaa_bp_alloc_pool() */ +static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) +{ + dpaa_bp_array[bpid] = dpaa_bp; + atomic_set(&dpaa_bp->refs, 1); +} + +static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) +{ + int err; + + if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { + pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n", + __func__); + return -EINVAL; + } + + /* If the pool is already specified, we only create one per bpid */ + if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && + dpaa_bpid2pool_use(dpaa_bp->bpid)) + return 0; + + if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { + dpaa_bp->pool = bman_new_pool(); + if (!dpaa_bp->pool) { + pr_err("%s: bman_new_pool() failed\n", + __func__); + return -ENODEV; + } + + dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); + } + + if (dpaa_bp->seed_cb) { + err = dpaa_bp->seed_cb(dpaa_bp); + if (err) + goto pool_seed_failed; + } + + dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); + + return 0; + +pool_seed_failed: + pr_err("%s: pool seeding failed\n", __func__); + bman_free_pool(dpaa_bp->pool); + + return err; +} + +/* remove and free all the buffers from the given buffer pool */ +static void dpaa_bp_drain(struct dpaa_bp *bp) +{ + u8 num = 8; + int ret; + + do { + struct bm_buffer bmb[8]; + int i; + + ret = bman_acquire(bp->pool, bmb, num); + if (ret < 0) { + if (num == 8) { + /* we have less than 8 buffers left; + * drain them one by one + */ + num = 1; + ret = 1; + continue; + } else { + /* Pool is fully drained */ + break; + } + } + + if (bp->free_buf_cb) + for (i = 0; i < num; i++) + bp->free_buf_cb(bp, &bmb[i]); + } while (ret > 0); +} + +static void dpaa_bp_free(struct dpaa_bp *dpaa_bp) +{ + struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); + + /* the mapping between bpid and dpaa_bp is done very late in the + * allocation procedure; if something failed before the mapping, the bp + * was not configured, therefore we don't need the below instructions + */ + if (!bp) + return; + + if (!atomic_dec_and_test(&bp->refs)) + return; + + if (bp->free_buf_cb) + dpaa_bp_drain(bp); + + dpaa_bp_array[bp->bpid] = NULL; + bman_free_pool(bp->pool); +} + +static void dpaa_bps_free(struct dpaa_priv *priv) +{ + int i; + + for (i = 0; i < DPAA_BPS_NUM; i++) + dpaa_bp_free(priv->dpaa_bps[i]); +} + +/* Use multiple WQs for FQ assignment: + * - Tx Confirmation queues go to WQ1. + * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance + * to be scheduled, in case there are many more FQs in WQ3). + * - Rx Default and Tx queues go to WQ3 (no differentiation between + * Rx and Tx traffic). + * This ensures that Tx-confirmed buffers are timely released. In particular, + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they + * are greatly outnumbered by other FQs in the system, while + * dequeue scheduling is round-robin. 
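The work-queue mapping implemented by dpaa_assign_wq() right below, summarized in table form (lower WQ numbers get preferential scheduling, as the comment above describes):

    FQ type                                  work queue
    FQ_TYPE_TX_CONFIRM, FQ_TYPE_TX_CONF_MQ   WQ1
    FQ_TYPE_RX_ERROR,   FQ_TYPE_TX_ERROR     WQ2
    FQ_TYPE_RX_DEFAULT, FQ_TYPE_TX           WQ3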
+ */ +static inline void dpaa_assign_wq(struct dpaa_fq *fq) +{ + switch (fq->fq_type) { + case FQ_TYPE_TX_CONFIRM: + case FQ_TYPE_TX_CONF_MQ: + fq->wq = 1; + break; + case FQ_TYPE_RX_ERROR: + case FQ_TYPE_TX_ERROR: + fq->wq = 2; + break; + case FQ_TYPE_RX_DEFAULT: + case FQ_TYPE_TX: + fq->wq = 3; + break; + default: + WARN(1, "Invalid FQ type %d for FQID %d!\n", + fq->fq_type, fq->fqid); + } +} + +static struct dpaa_fq *dpaa_fq_alloc(struct device *dev, + u32 start, u32 count, + struct list_head *list, + enum dpaa_fq_type fq_type) +{ + struct dpaa_fq *dpaa_fq; + int i; + + dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count, + GFP_KERNEL); + if (!dpaa_fq) + return NULL; + + for (i = 0; i < count; i++) { + dpaa_fq[i].fq_type = fq_type; + dpaa_fq[i].fqid = start ? start + i : 0; + list_add_tail(&dpaa_fq[i].list, list); + } + + for (i = 0; i < count; i++) + dpaa_assign_wq(dpaa_fq + i); + + return dpaa_fq; +} + +static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, + struct fm_port_fqs *port_fqs) +{ + struct dpaa_fq *dpaa_fq; + + dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR); + if (!dpaa_fq) + goto fq_alloc_failed; + + port_fqs->rx_errq = &dpaa_fq[0]; + + dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT); + if (!dpaa_fq) + goto fq_alloc_failed; + + port_fqs->rx_defq = &dpaa_fq[0]; + + if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) + goto fq_alloc_failed; + + dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); + if (!dpaa_fq) + goto fq_alloc_failed; + + port_fqs->tx_errq = &dpaa_fq[0]; + + dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM); + if (!dpaa_fq) + goto fq_alloc_failed; + + port_fqs->tx_defq = &dpaa_fq[0]; + + if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) + goto fq_alloc_failed; + + return 0; + +fq_alloc_failed: + dev_err(dev, "dpaa_fq_alloc() failed\n"); + return -ENOMEM; +} + +static u32 rx_pool_channel; +static DEFINE_SPINLOCK(rx_pool_channel_init); + +static int dpaa_get_channel(void) +{ + spin_lock(&rx_pool_channel_init); + if (!rx_pool_channel) { + u32 pool; + int ret; + + ret = qman_alloc_pool(&pool); + + if (!ret) + rx_pool_channel = pool; + } + spin_unlock(&rx_pool_channel_init); + if (!rx_pool_channel) + return -ENOMEM; + return rx_pool_channel; +} + +#ifndef __rtems__ +static void dpaa_release_channel(void) +{ + qman_release_pool(rx_pool_channel); +} +#endif /* __rtems__ */ + +static void dpaa_eth_add_channel(u16 channel) +{ + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); +#ifndef __rtems__ + const cpumask_t *cpus = qman_affine_cpus(); +#endif /* __rtems__ */ + struct qman_portal *portal; + int cpu; + + for_each_cpu(cpu, cpus) { + portal = qman_get_affine_portal(cpu); + qman_p_static_dequeue_add(portal, pool); + } +} + +/* Congestion group state change notification callback. + * Stops the device's egress queues while they are congested and + * wakes them upon exiting congested state. + * Also updates some CGR-related stats. 
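dpaa_eth_cgr_init() below picks one of the two egress Congestion State thresholds defined earlier in this file, based on the MAC's maximum speed. For a sense of the magnitudes involved (straight conversions of the hexadecimal byte counts):

    DPAA_CS_THRESHOLD_1G      = 0x06000000 bytes =  96 MiB
    DPAA_CS_THRESHOLD_10G     = 0x10000000 bytes = 256 MiB
    DPAA_INGRESS_CS_THRESHOLD = 0x10000000 bytes = 256 MiB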
+ */ +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, + int congested) +{ + struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr, + struct dpaa_priv, cgr_data.cgr); + + if (congested) { + priv->cgr_data.congestion_start_jiffies = jiffies; +#ifndef __rtems__ + netif_tx_stop_all_queues(priv->net_dev); +#endif /* __rtems__ */ + priv->cgr_data.cgr_congested_count++; + } else { + priv->cgr_data.congested_jiffies += + (jiffies - priv->cgr_data.congestion_start_jiffies); +#ifndef __rtems__ + netif_tx_wake_all_queues(priv->net_dev); +#endif /* __rtems__ */ + } +} + +static int dpaa_eth_cgr_init(struct dpaa_priv *priv) { - /* limit common, possibly innocuous Rx FIFO Overflow errors' - * interference with zero-loss convergence benchmark results. + struct qm_mcc_initcgr initcgr; + u32 cs_th; + int err; + + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); + if (err < 0) { + if (netif_msg_drv(priv)) + pr_err("%s: Error %d allocating CGR ID\n", + __func__, err); + goto out_error; + } + priv->cgr_data.cgr.cb = dpaa_eth_cgscn; + + /* Enable Congestion State Change Notifications and CS taildrop */ + memset(&initcgr, 0, sizeof(initcgr)); + initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES); + initcgr.cgr.cscn_en = QM_CGR_EN; + + /* Set different thresholds based on the MAC speed. + * This may turn suboptimal if the MAC is reconfigured at a speed + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. + * In such cases, we ought to reconfigure the threshold, too. */ - if (likely(fd->status & FM_FD_ERR_PHYSICAL)) - pr_warn_once("non-zero error counters in fman statistics (sysfs)\n"); + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) + cs_th = DPAA_CS_THRESHOLD_10G; else + cs_th = DPAA_CS_THRESHOLD_1G; + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); + + initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); + initcgr.cgr.cstd_en = QM_CGR_EN; + + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, + &initcgr); + if (err < 0) { + if (netif_msg_drv(priv)) + pr_err("%s: Error %d creating CGR with ID %d\n", + __func__, err, priv->cgr_data.cgr.cgrid); + qman_release_cgrid(priv->cgr_data.cgr.cgrid); + goto out_error; + } + if (netif_msg_drv(priv)) + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, + priv->cgr_data.cgr.chan); + +out_error: + return err; +} + +static inline void dpaa_setup_ingress(const struct dpaa_priv *priv, + struct dpaa_fq *fq, + const struct qman_fq *template) +{ + fq->fq_base = *template; + fq->net_dev = priv->net_dev; + + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; + fq->channel = priv->channel; +} + +static inline void dpaa_setup_egress(const struct dpaa_priv *priv, + struct dpaa_fq *fq, + struct fman_port *port, + const struct qman_fq *template) +{ + fq->fq_base = *template; + fq->net_dev = priv->net_dev; + + if (port) { + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; + fq->channel = (u16)fman_port_get_qman_channel_id(port); + } else { + fq->flags = QMAN_FQ_FLAG_NO_MODIFY; + } +} + +static void dpaa_fq_setup(struct dpaa_priv *priv, + const struct dpaa_fq_cbs *fq_cbs, + struct fman_port *tx_port) +{ #ifndef __rtems__ - if (net_ratelimit()) - netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", - fd->status & FM_FD_STAT_RX_ERRORS); + int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu; + const cpumask_t *affine_cpus = qman_affine_cpus(); + u16 portals[NR_CPUS]; +#else /* __rtems__ */ + int egress_cnt = 0, conf_cnt = 0; +#endif 
/* __rtems__ */ + struct dpaa_fq *fq; + +#ifndef __rtems__ + for_each_cpu(cpu, affine_cpus) + portals[num_portals++] = qman_affine_channel(cpu); + if (num_portals == 0) + dev_err(priv->net_dev->dev.parent, + "No Qman software (affine) channels found"); +#endif /* __rtems__ */ + + /* Initialize each FQ in the list */ + list_for_each_entry(fq, &priv->dpaa_fq_list, list) { + switch (fq->fq_type) { + case FQ_TYPE_RX_DEFAULT: + dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); + break; + case FQ_TYPE_RX_ERROR: + dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); + break; + case FQ_TYPE_TX: + dpaa_setup_egress(priv, fq, tx_port, + &fq_cbs->egress_ern); + /* If we have more Tx queues than the number of cores, + * just ignore the extra ones. + */ + if (egress_cnt < DPAA_ETH_TXQ_NUM) + priv->egress_fqs[egress_cnt++] = &fq->fq_base; + break; + case FQ_TYPE_TX_CONF_MQ: + priv->conf_fqs[conf_cnt++] = &fq->fq_base; + /* fall through */ + case FQ_TYPE_TX_CONFIRM: + dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); + break; + case FQ_TYPE_TX_ERROR: + dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); + break; + default: +#ifndef __rtems__ + dev_warn(priv->net_dev->dev.parent, + "Unknown FQ type detected!\n"); +#else /* __rtems__ */ + BSD_ASSERT(0); +#endif /* __rtems__ */ + break; + } + } + + /* Make sure all CPUs receive a corresponding Tx queue. */ + while (egress_cnt < DPAA_ETH_TXQ_NUM) { + list_for_each_entry(fq, &priv->dpaa_fq_list, list) { + if (fq->fq_type != FQ_TYPE_TX) + continue; + priv->egress_fqs[egress_cnt++] = &fq->fq_base; + if (egress_cnt == DPAA_ETH_TXQ_NUM) + break; + } + } +} + +static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, + struct qman_fq *tx_fq) +{ + int i; + + for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) + if (priv->egress_fqs[i] == tx_fq) + return i; + + return -EINVAL; +} + +static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) +{ + const struct dpaa_priv *priv; + struct qman_fq *confq = NULL; + struct qm_mcc_initfq initfq; +#ifndef __rtems__ + struct device *dev; +#endif /* __rtems__ */ + struct qman_fq *fq; + int queue_id; + int err; + + priv = netdev_priv(dpaa_fq->net_dev); +#ifndef __rtems__ + dev = dpaa_fq->net_dev->dev.parent; +#endif /* __rtems__ */ + + if (dpaa_fq->fqid == 0) + dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; + + dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); + + err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); + if (err) { +#ifndef __rtems__ + dev_err(dev, "qman_create_fq() failed\n"); #else /* __rtems__ */ BSD_ASSERT(0); #endif /* __rtems__ */ + return err; + } + fq = &dpaa_fq->fq_base; + + if (dpaa_fq->init) { + memset(&initfq, 0, sizeof(initfq)); + + initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL); + /* Note: we may get to keep an empty FQ in cache */ + initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE); + + /* Try to reduce the number of portal interrupts for + * Tx Confirmation FQs. + */ + if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE); + + /* FQ placement */ + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ); + + qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); + + /* Put all egress queues in a congestion group of their own. + * Sensu stricto, the Tx confirmation queues are Rx FQs, + * rather than Tx - but they nonetheless account for the + * memory footprint on behalf of egress traffic. We therefore + * place them in the netdev's CGR, along with the Tx FQs. 
+ */ + if (dpaa_fq->fq_type == FQ_TYPE_TX || + dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || + dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); + initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; + /* Set a fixed overhead accounting, in an attempt to + * reduce the impact of fixed-size skb shells and the + * driver's needed headroom on system memory. This is + * especially the case when the egress traffic is + * composed of small datagrams. + * Unfortunately, QMan's OAL value is capped to an + * insufficient value, but even that is better than + * no overhead accounting at all. + */ + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); + qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); + qm_fqd_set_oal(&initfq.fqd, +#ifndef __rtems__ + min(sizeof(struct sk_buff) + +#else /* __rtems__ */ + min( +#endif /* __rtems__ */ + priv->tx_headroom, + (size_t)FSL_QMAN_MAX_OAL)); + } + + if (td_enable) { + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH); + qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); + initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE); + } + + if (dpaa_fq->fq_type == FQ_TYPE_TX) { + queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); + if (queue_id >= 0) + confq = priv->conf_fqs[queue_id]; + if (confq) { + initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_CONTEXTA); + /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) + * A2V=1 (contextA A2 field is valid) + * A0V=1 (contextA A0 field is valid) + * B0V=1 (contextB field is valid) + * ContextA A2: EBD=1 (deallocate buffers inside FMan) + * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) + */ + qm_fqd_context_a_set64(&initfq.fqd, + 0x1e00000080000000ULL); + } + } + + /* Put all the ingress queues in our "ingress CGR". */ + if (priv->use_ingress_cgr && + (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || + dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) { + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); + initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; + /* Set a fixed overhead accounting, just like for the + * egress CGR. 
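Both the egress and (below) the ingress overhead-accounting settings are bounded by the FQD's OAL field, whose largest value is FSL_QMAN_MAX_OAL (127). Since sizeof(struct sk_buff) by itself is typically a couple of hundred bytes on 64-bit kernels (an assumption about the build, as is the illustrative 64-byte tx_headroom), the min() in practice saturates at the cap:

    OAL = min(sizeof(struct sk_buff) + tx_headroom, FSL_QMAN_MAX_OAL)
        = min(224 + 64, 127)      /* illustrative sizes */
        = 127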
+ */ + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); + qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); + qm_fqd_set_oal(&initfq.fqd, +#ifndef __rtems__ + min(sizeof(struct sk_buff) + +#else /* __rtems__ */ + min( +#endif /* __rtems__ */ + priv->tx_headroom, + (size_t)FSL_QMAN_MAX_OAL)); + } + + /* Initialization common to all ingress queues */ + if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { + initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA); + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE); + initfq.fqd.context_a.stashing.exclusive = + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | + QM_STASHING_EXCL_ANNOTATION; + qm_fqd_set_stashing(&initfq.fqd, 1, 2, + DIV_ROUND_UP(sizeof(struct qman_fq), + 64)); + } + + err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (err < 0) { +#ifndef __rtems__ + dev_err(dev, "qman_init_fq(%u) = %d\n", + qman_fq_fqid(fq), err); +#else /* __rtems__ */ + BSD_ASSERT(0); +#endif /* __rtems__ */ + qman_destroy_fq(fq); + return err; + } + } + + dpaa_fq->fqid = qman_fq_fqid(fq); + + return 0; +} + +#ifndef __rtems__ +static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq) +{ +#ifndef __rtems__ + const struct dpaa_priv *priv; +#endif /* __rtems__ */ + struct dpaa_fq *dpaa_fq; + int err, error; + + err = 0; + + dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); +#ifndef __rtems__ + priv = netdev_priv(dpaa_fq->net_dev); +#endif /* __rtems__ */ + + if (dpaa_fq->init) { + err = qman_retire_fq(fq, NULL); + if (err < 0 && netif_msg_drv(priv)) + dev_err(dev, "qman_retire_fq(%u) = %d\n", + qman_fq_fqid(fq), err); + + error = qman_oos_fq(fq); + if (error < 0 && netif_msg_drv(priv)) { + dev_err(dev, "qman_oos_fq(%u) = %d\n", + qman_fq_fqid(fq), error); + if (err >= 0) + err = error; + } + } + + qman_destroy_fq(fq); + list_del(&dpaa_fq->list); + + return err; +} + +static int dpaa_fq_free(struct device *dev, struct list_head *list) +{ + struct dpaa_fq *dpaa_fq, *tmp; + int err, error; + + err = 0; + list_for_each_entry_safe(dpaa_fq, tmp, list, list) { + error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq); + if (error < 0 && err >= 0) + err = error; + } + + return err; +} +#endif /* __rtems__ */ + +static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, + struct dpaa_fq *defq, + struct dpaa_buffer_layout *buf_layout) +{ + struct fman_buffer_prefix_content buf_prefix_content; + struct fman_port_params params; + int err; + + memset(¶ms, 0, sizeof(params)); + memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); + + buf_prefix_content.priv_data_size = buf_layout->priv_data_size; + buf_prefix_content.pass_prs_result = true; + buf_prefix_content.pass_hash_result = true; + buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; + + params.specific_params.non_rx_params.err_fqid = errq->fqid; + params.specific_params.non_rx_params.dflt_fqid = defq->fqid; + + err = fman_port_config(port, ¶ms); + if (err) + pr_err("%s: fman_port_config failed\n", __func__); + + err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); + if (err) + pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", + __func__); + + err = fman_port_init(port); + if (err) + pr_err("%s: fm_port_init failed\n", __func__); +} + +static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, + size_t count, struct dpaa_fq *errq, + struct dpaa_fq *defq, + struct dpaa_buffer_layout *buf_layout) +{ + struct fman_buffer_prefix_content buf_prefix_content; + struct fman_port_rx_params *rx_p; 
+ struct fman_port_params params; + int i, err; + + memset(¶ms, 0, sizeof(params)); + memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); + + buf_prefix_content.priv_data_size = buf_layout->priv_data_size; + buf_prefix_content.pass_prs_result = true; + buf_prefix_content.pass_hash_result = true; + buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; + + rx_p = ¶ms.specific_params.rx_params; + rx_p->err_fqid = errq->fqid; + rx_p->dflt_fqid = defq->fqid; + + count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count); + rx_p->ext_buf_pools.num_of_pools_used = (u8)count; + for (i = 0; i < count; i++) { + rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid; + rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size; + } + + err = fman_port_config(port, ¶ms); + if (err) + pr_err("%s: fman_port_config failed\n", __func__); + + err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); + if (err) + pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", + __func__); + + err = fman_port_init(port); + if (err) + pr_err("%s: fm_port_init failed\n", __func__); +} + +static void dpaa_eth_init_ports(struct mac_device *mac_dev, + struct dpaa_bp **bps, size_t count, + struct fm_port_fqs *port_fqs, + struct dpaa_buffer_layout *buf_layout, + struct device *dev) +{ + struct fman_port *rxport = mac_dev->port[RX]; + struct fman_port *txport = mac_dev->port[TX]; + + dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, + port_fqs->tx_defq, &buf_layout[TX]); + dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, + port_fqs->rx_defq, &buf_layout[RX]); +} + +static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, + struct bm_buffer *bmb, int cnt) +{ + int err; + + err = bman_release(dpaa_bp->pool, bmb, cnt); + /* Should never occur, address anyway to avoid leaking the buffers */ + if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) + while (cnt-- > 0) + dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); + + return cnt; +} + +static void dpaa_release_sgt_members(struct qm_sg_entry *sgt) +{ + struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX]; + struct dpaa_bp *dpaa_bp; + int i = 0, j; + + memset(bmb, 0, sizeof(bmb)); + + do { + dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); + if (!dpaa_bp) + return; + + j = 0; + do { + WARN_ON(qm_sg_entry_is_ext(&sgt[i])); + + bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i])); + + j++; i++; + } while (j < ARRAY_SIZE(bmb) && + !qm_sg_entry_is_final(&sgt[i - 1]) && + sgt[i - 1].bpid == sgt[i].bpid); + + dpaa_bman_release(dpaa_bp, bmb, j); + } while (!qm_sg_entry_is_final(&sgt[i - 1])); +} + +static void dpaa_fd_release(const struct net_device *net_dev, + const struct qm_fd *fd) +{ + struct qm_sg_entry *sgt; + struct dpaa_bp *dpaa_bp; + struct bm_buffer bmb; + dma_addr_t addr; + void *vaddr; + + bmb.data = 0; + bm_buffer_set64(&bmb, qm_fd_addr(fd)); + + dpaa_bp = dpaa_bpid2pool(fd->bpid); + if (!dpaa_bp) + return; + + if (qm_fd_get_format(fd) == qm_fd_sg) { + vaddr = phys_to_virt(qm_fd_addr(fd)); + sgt = vaddr + qm_fd_get_offset(fd); + +#ifndef __rtems__ + dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size, + DMA_FROM_DEVICE); +#endif /* __rtems__ */ + + dpaa_release_sgt_members(sgt); + +#ifndef __rtems__ + addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dpaa_bp->dev, addr)) { + dev_err(dpaa_bp->dev, "DMA mapping failed"); + return; + } +#else /* __rtems__ */ + addr = (dma_addr_t)vaddr; +#endif /* __rtems__ */ + bm_buffer_set64(&bmb, addr); + } + + 
dpaa_bman_release(dpaa_bp, &bmb, 1); +} + +static void count_ern(struct dpaa_percpu_priv *percpu_priv, + const union qm_mr_entry *msg) +{ + switch (msg->ern.rc & QM_MR_RC_MASK) { + case QM_MR_RC_CGR_TAILDROP: + percpu_priv->ern_cnt.cg_tdrop++; + break; + case QM_MR_RC_WRED: + percpu_priv->ern_cnt.wred++; + break; + case QM_MR_RC_ERROR: + percpu_priv->ern_cnt.err_cond++; + break; + case QM_MR_RC_ORPWINDOW_EARLY: + percpu_priv->ern_cnt.early_window++; + break; + case QM_MR_RC_ORPWINDOW_LATE: + percpu_priv->ern_cnt.late_window++; + break; + case QM_MR_RC_FQ_TAILDROP: + percpu_priv->ern_cnt.fq_tdrop++; + break; + case QM_MR_RC_ORPWINDOW_RETIRED: + percpu_priv->ern_cnt.fq_retired++; + break; + case QM_MR_RC_ORP_ZERO: + percpu_priv->ern_cnt.orp_zero++; + break; + } +} + +#ifndef __rtems__ +/* Turn on HW checksum computation for this outgoing frame. + * If the current protocol is not something we support in this regard + * (or if the stack has already computed the SW checksum), we do nothing. + * + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value + * otherwise. + * + * Note that this function may modify the fd->cmd field and the skb data buffer + * (the Parse Results area). + */ +static int dpaa_enable_tx_csum(struct dpaa_priv *priv, + struct sk_buff *skb, + struct qm_fd *fd, + char *parse_results) +{ + struct fman_prs_result *parse_result; + u16 ethertype = ntohs(skb->protocol); + struct ipv6hdr *ipv6h = NULL; + struct iphdr *iph; + int retval = 0; + u8 l4_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* Note: L3 csum seems to be already computed in sw, but we can't choose + * L4 alone from the FM configuration anyway. + */ + + /* Fill in some fields of the Parse Results array, so the FMan + * can find them as if they came from the FMan Parser. + */ + parse_result = (struct fman_prs_result *)parse_results; + + /* If we're dealing with VLAN, get the real Ethernet type */ + if (ethertype == ETH_P_8021Q) { + /* We can't always assume the MAC header is set correctly + * by the stack, so reset to beginning of skb->data + */ + skb_reset_mac_header(skb); + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + } + + /* Fill in the relevant L3 parse result fields + * and read the L4 protocol type + */ + switch (ethertype) { + case ETH_P_IP: + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); + iph = ip_hdr(skb); + WARN_ON(!iph); + l4_proto = iph->protocol; + break; + case ETH_P_IPV6: + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); + ipv6h = ipv6_hdr(skb); + WARN_ON(!ipv6h); + l4_proto = ipv6h->nexthdr; + break; + default: + /* We shouldn't even be here */ + if (net_ratelimit()) + netif_alert(priv, tx_err, priv->net_dev, + "Can't compute HW csum for L3 proto 0x%x\n", + ntohs(skb->protocol)); + retval = -EIO; + goto return_error; + } + + /* Fill in the relevant L4 parse result fields */ + switch (l4_proto) { + case IPPROTO_UDP: + parse_result->l4r = FM_L4_PARSE_RESULT_UDP; + break; + case IPPROTO_TCP: + parse_result->l4r = FM_L4_PARSE_RESULT_TCP; + break; + default: + if (net_ratelimit()) + netif_alert(priv, tx_err, priv->net_dev, + "Can't compute HW csum for L4 proto 0x%x\n", + l4_proto); + retval = -EIO; + goto return_error; + } + + /* At index 0 is IPOffset_1 as defined in the Parse Results */ + parse_result->ip_off[0] = (u8)skb_network_offset(skb); + parse_result->l4_off = (u8)skb_transport_offset(skb); + + /* Enable L3 (and L4, if TCP or UDP) HW checksum. 
*/ + fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC); + + /* On P1023 and similar platforms fd->cmd interpretation could + * be disabled by setting CONTEXT_A bit ICMD; currently this bit + * is not set so we do not need to check; in the future, if/when + * using context_a we need to check this bit + */ + +return_error: + return retval; +} +#endif /* __rtems__ */ + +static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) +{ +#ifndef __rtems__ + struct device *dev = dpaa_bp->dev; +#endif /* __rtems__ */ + struct bm_buffer bmb[8]; + dma_addr_t addr; +#ifndef __rtems__ + void *new_buf; +#endif /* __rtems__ */ + u8 i; + + for (i = 0; i < 8; i++) { +#ifndef __rtems__ + new_buf = netdev_alloc_frag(dpaa_bp->raw_size); + if (unlikely(!new_buf)) { + dev_err(dev, "netdev_alloc_frag() failed, size %zu\n", + dpaa_bp->raw_size); + goto release_previous_buffs; + } + new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES); + + addr = dma_map_single(dev, new_buf, + dpaa_bp->size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) { + dev_err(dpaa_bp->dev, "DMA map failed"); + goto release_previous_buffs; + } +#else /* __rtems__ */ + struct mbuf *m; + + m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + if (unlikely(m == NULL)) { + goto release_previous_buffs; + } + + RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES, + DPAA_BP_RAW_SIZE); + *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = + m; + addr = mtod(m, dma_addr_t); +#endif /* __rtems__ */ + + bmb[i].data = 0; + bm_buffer_set64(&bmb[i], addr); + } + +release_bufs: + return dpaa_bman_release(dpaa_bp, bmb, i); + +release_previous_buffs: +#ifndef __rtems__ + WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n"); +#endif /* __rtems__ */ + + bm_buffer_set64(&bmb[i], 0); + /* Avoid releasing a completely null buffer; bman_release() requires + * at least one buffer. + */ + if (likely(i)) + goto release_bufs; + + return 0; +} + +static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp) +{ + int i; + + /* Give each CPU an allotment of "config_count" buffers */ + for_each_possible_cpu(i) { + int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); + int j; + + /* Although we access another CPU's counters here + * we do it at boot time so it is safe + */ + for (j = 0; j < dpaa_bp->config_count; j += 8) + *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp); + } + return 0; +} + +/* Add buffers/(pages) for Rx processing whenever bpool count falls below + * REFILL_THRESHOLD. + */ +static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) +{ + int count = *countptr; + int new_bufs; + + if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { + do { + new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); + if (unlikely(!new_bufs)) { + /* Avoid looping forever if we've temporarily + * run out of memory. We'll try again at the + * next NAPI cycle. + */ + break; + } + count += new_bufs; + } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); + + *countptr = count; + if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) + return -ENOMEM; + } + + return 0; +} + +static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) +{ + struct dpaa_bp *dpaa_bp; + int *countptr; + int res, i; + + for (i = 0; i < DPAA_BPS_NUM; i++) { + dpaa_bp = priv->dpaa_bps[i]; + if (!dpaa_bp) + return -EINVAL; + countptr = this_cpu_ptr(dpaa_bp->percpu_count); + res = dpaa_eth_refill_bpool(dpaa_bp, countptr); + if (res) + return res; + } + return 0; +} + +#ifndef __rtems__ +/* Cleanup function for outgoing frame descriptors that were built on Tx path, + * either contiguous frames or scatter/gather ones. 
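The Tx helpers that follow (dpaa_cleanup_tx_fd(), skb_to_contig_fd(), skb_to_sg_fd()) all rely on the same layout of the headroom placed in front of the transmitted data: the skb backpointer is stored first, with the parse-results area after it. A rough sketch for the contiguous case (the exact tx_headroom is computed per port at probe time, so sizes beyond the defines above are illustrative):

    buffer_start = skb->data - tx_headroom
    +------------------------------+  buffer_start
    | struct sk_buff *backpointer  |  DPAA_TX_PRIV_DATA_SIZE (16) bytes
    +------------------------------+  buffer_start + DPAA_TX_PRIV_DATA_SIZE
    | parse results, written by    |  DPAA_PARSE_RESULTS_SIZE bytes
    | dpaa_enable_tx_csum()        |
    +------------------------------+
    | remaining headroom / padding |
    +------------------------------+  buffer_start + tx_headroom == skb->data
    | frame data (skb->len bytes)  |
    +------------------------------+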
+ * Skb freeing is not handled here. + * + * This function may be called on error paths in the Tx function, so guard + * against cases when not all fd relevant fields were filled in. + * + * Return the skb backpointer, since for S/G frames the buffer containing it + * gets freed here. + */ +static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, + const struct qm_fd *fd) +{ + const enum dma_data_direction dma_dir = DMA_TO_DEVICE; + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t addr = qm_fd_addr(fd); + const struct qm_sg_entry *sgt; + struct sk_buff **skbh, *skb; + int nr_frags, i; + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + + if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { + nr_frags = skb_shinfo(skb)->nr_frags; + dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + + sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_dir); + + /* The sgt buffer has been allocated with netdev_alloc_frag(), + * it's from lowmem. + */ + sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); + + /* sgt[0] is from lowmem, was dma_map_single()-ed */ + dma_unmap_single(dev, qm_sg_addr(&sgt[0]), + qm_sg_entry_get_len(&sgt[0]), dma_dir); + + /* remaining pages were mapped with skb_frag_dma_map() */ + for (i = 1; i < nr_frags; i++) { + WARN_ON(qm_sg_entry_is_ext(&sgt[i])); + + dma_unmap_page(dev, qm_sg_addr(&sgt[i]), + qm_sg_entry_get_len(&sgt[i]), dma_dir); + } + + /* Free the page frag that we allocated on Tx */ + skb_free_frag(phys_to_virt(addr)); + } else { + dma_unmap_single(dev, addr, + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); + } + + return skb; +} + +/* Build a linear skb around the received buffer. + * We are guaranteed there is enough room at the end of the data buffer to + * accommodate the shared info area of the skb. + */ +static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, + const struct qm_fd *fd) +{ + ssize_t fd_off = qm_fd_get_offset(fd); + dma_addr_t addr = qm_fd_addr(fd); + struct dpaa_bp *dpaa_bp; + struct sk_buff *skb; + void *vaddr; + + vaddr = phys_to_virt(addr); + WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); + + dpaa_bp = dpaa_bpid2pool(fd->bpid); + if (!dpaa_bp) + goto free_buffer; + + skb = build_skb(vaddr, dpaa_bp->size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + if (unlikely(!skb)) { + WARN_ONCE(1, "Build skb failure on Rx\n"); + goto free_buffer; + } + WARN_ON(fd_off != priv->rx_headroom); + skb_reserve(skb, fd_off); + skb_put(skb, qm_fd_get_length(fd)); + + skb->ip_summed = CHECKSUM_NONE; + + return skb; + +free_buffer: + skb_free_frag(vaddr); + return NULL; +} + +/* Build an skb with the data of the first S/G entry in the linear portion and + * the rest of the frame as skb fragments. + * + * The page fragment holding the S/G Table is recycled here. 
+ */ +static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, + const struct qm_fd *fd) +{ + ssize_t fd_off = qm_fd_get_offset(fd); + dma_addr_t addr = qm_fd_addr(fd); + const struct qm_sg_entry *sgt; + struct page *page, *head_page; + struct dpaa_bp *dpaa_bp; + void *vaddr, *sg_vaddr; + int frag_off, frag_len; + struct sk_buff *skb; + dma_addr_t sg_addr; + int page_offset; + unsigned int sz; + int *count_ptr; + int i; + + vaddr = phys_to_virt(addr); + WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); + + /* Iterate through the SGT entries and add data buffers to the skb */ + sgt = vaddr + fd_off; + for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { + /* Extension bit is not supported */ + WARN_ON(qm_sg_entry_is_ext(&sgt[i])); + + sg_addr = qm_sg_addr(&sgt[i]); + sg_vaddr = phys_to_virt(sg_addr); + WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, + SMP_CACHE_BYTES)); + + /* We may use multiple Rx pools */ + dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); + if (!dpaa_bp) + goto free_buffers; + + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, + DMA_FROM_DEVICE); + if (i == 0) { + sz = dpaa_bp->size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + skb = build_skb(sg_vaddr, sz); + if (WARN_ON(unlikely(!skb))) + goto free_buffers; + + skb->ip_summed = CHECKSUM_NONE; + + /* Make sure forwarded skbs will have enough space + * on Tx, if extra headers are added. + */ + WARN_ON(fd_off != priv->rx_headroom); + skb_reserve(skb, fd_off); + skb_put(skb, qm_sg_entry_get_len(&sgt[i])); + } else { + /* Not the first S/G entry; all data from buffer will + * be added in an skb fragment; fragment index is offset + * by one since first S/G entry was incorporated in the + * linear part of the skb. + * + * Caution: 'page' may be a tail page. + */ + page = virt_to_page(sg_vaddr); + head_page = virt_to_head_page(sg_vaddr); + + /* Compute offset in (possibly tail) page */ + page_offset = ((unsigned long)sg_vaddr & + (PAGE_SIZE - 1)) + + (page_address(page) - page_address(head_page)); + /* page_offset only refers to the beginning of sgt[i]; + * but the buffer itself may have an internal offset. + */ + frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; + frag_len = qm_sg_entry_get_len(&sgt[i]); + /* skb_add_rx_frag() does no checking on the page; if + * we pass it a tail page, we'll end up with + * bad page accounting and eventually with segafults. 
+ */ + skb_add_rx_frag(skb, i - 1, head_page, frag_off, + frag_len, dpaa_bp->size); + } + /* Update the pool count for the current {cpu x bpool} */ + (*count_ptr)--; + + if (qm_sg_entry_is_final(&sgt[i])) + break; + } + WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); + + /* free the SG table buffer */ + skb_free_frag(vaddr); + + return skb; + +free_buffers: + /* compensate sw bpool counter changes */ + for (i--; i >= 0; i--) { + dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); + if (dpaa_bp) { + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + (*count_ptr)++; + } + } + /* free all the SG entries */ + for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { + sg_addr = qm_sg_addr(&sgt[i]); + sg_vaddr = phys_to_virt(sg_addr); + skb_free_frag(sg_vaddr); + dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); + if (dpaa_bp) { + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + (*count_ptr)--; + } + + if (qm_sg_entry_is_final(&sgt[i])) + break; + } + /* free the SGT fragment */ + skb_free_frag(vaddr); + + return NULL; +} + +static int skb_to_contig_fd(struct dpaa_priv *priv, + struct sk_buff *skb, struct qm_fd *fd, + int *offset) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + enum dma_data_direction dma_dir; + unsigned char *buffer_start; + struct sk_buff **skbh; + dma_addr_t addr; + int err; + + /* We are guaranteed to have at least tx_headroom bytes + * available, so just use that for offset. + */ + fd->bpid = FSL_DPAA_BPID_INV; + buffer_start = skb->data - priv->tx_headroom; + dma_dir = DMA_TO_DEVICE; + + skbh = (struct sk_buff **)buffer_start; + *skbh = skb; + + /* Enable L3/L4 hardware checksum computation. + * + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may + * need to write into the skb. + */ + err = dpaa_enable_tx_csum(priv, skb, fd, + ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); + if (unlikely(err < 0)) { + if (net_ratelimit()) + netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", + err); + return err; + } + + /* Fill in the rest of the FD fields */ + qm_fd_set_contig(fd, priv->tx_headroom, skb->len); + fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); + + /* Map the entire buffer size that may be seen by FMan, but no more */ + addr = dma_map_single(dev, skbh, + skb_tail_pointer(skb) - buffer_start, dma_dir); + if (unlikely(dma_mapping_error(dev, addr))) { + if (net_ratelimit()) + netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); + return -EINVAL; + } + qm_fd_addr_set64(fd, addr); + + return 0; +} + +static int skb_to_sg_fd(struct dpaa_priv *priv, + struct sk_buff *skb, struct qm_fd *fd) +{ + const enum dma_data_direction dma_dir = DMA_TO_DEVICE; + const int nr_frags = skb_shinfo(skb)->nr_frags; + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + struct qm_sg_entry *sgt; + struct sk_buff **skbh; + int i, j, err, sz; + void *buffer_start; + skb_frag_t *frag; + dma_addr_t addr; + size_t frag_len; + void *sgt_buf; + + /* get a page frag to store the SGTable */ + sz = SKB_DATA_ALIGN(priv->tx_headroom + + sizeof(struct qm_sg_entry) * (1 + nr_frags)); + sgt_buf = netdev_alloc_frag(sz); + if (unlikely(!sgt_buf)) { + netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", + sz); + return -ENOMEM; + } + + /* Enable L3/L4 hardware checksum computation. + * + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may + * need to write into the skb. 
+ */ + err = dpaa_enable_tx_csum(priv, skb, fd, + sgt_buf + DPAA_TX_PRIV_DATA_SIZE); + if (unlikely(err < 0)) { + if (net_ratelimit()) + netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", + err); + goto csum_failed; + } + + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); + qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); + sgt[0].bpid = FSL_DPAA_BPID_INV; + sgt[0].offset = 0; + addr = dma_map_single(dev, skb->data, + skb_headlen(skb), dma_dir); + if (unlikely(dma_mapping_error(dev, addr))) { + dev_err(dev, "DMA mapping failed"); + err = -EINVAL; + goto sg0_map_failed; + } + qm_sg_entry_set64(&sgt[0], addr); + + /* populate the rest of SGT entries */ + frag = &skb_shinfo(skb)->frags[0]; + frag_len = frag->size; + for (i = 1; i <= nr_frags; i++, frag++) { + WARN_ON(!skb_frag_page(frag)); + addr = skb_frag_dma_map(dev, frag, 0, + frag_len, dma_dir); + if (unlikely(dma_mapping_error(dev, addr))) { + dev_err(dev, "DMA mapping failed"); + err = -EINVAL; + goto sg_map_failed; + } + + qm_sg_entry_set_len(&sgt[i], frag_len); + sgt[i].bpid = FSL_DPAA_BPID_INV; + sgt[i].offset = 0; + + /* keep the offset in the address */ + qm_sg_entry_set64(&sgt[i], addr); + frag_len = frag->size; + } + qm_sg_entry_set_f(&sgt[i - 1], frag_len); + + qm_fd_set_sg(fd, priv->tx_headroom, skb->len); + + /* DMA map the SGT page */ + buffer_start = (void *)sgt - priv->tx_headroom; + skbh = (struct sk_buff **)buffer_start; + *skbh = skb; + + addr = dma_map_single(dev, buffer_start, priv->tx_headroom + + sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_dir); + if (unlikely(dma_mapping_error(dev, addr))) { + dev_err(dev, "DMA mapping failed"); + err = -EINVAL; + goto sgt_map_failed; + } + + fd->bpid = FSL_DPAA_BPID_INV; + fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); + qm_fd_addr_set64(fd, addr); + + return 0; + +sgt_map_failed: +sg_map_failed: + for (j = 0; j < i; j++) + dma_unmap_page(dev, qm_sg_addr(&sgt[j]), + qm_sg_entry_get_len(&sgt[j]), dma_dir); +sg0_map_failed: +csum_failed: + skb_free_frag(sgt_buf); + + return err; +} + +static inline int dpaa_xmit(struct dpaa_priv *priv, + struct rtnl_link_stats64 *percpu_stats, + int queue, + struct qm_fd *fd) +{ + struct qman_fq *egress_fq; + int err, i; + + egress_fq = priv->egress_fqs[queue]; + if (fd->bpid == FSL_DPAA_BPID_INV) + fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); + + /* Trace this Tx fd */ + trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); + + for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { + err = qman_enqueue(egress_fq, fd); + if (err != -EBUSY) + break; + } + + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + percpu_stats->tx_fifo_errors++; + return err; + } + + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += qm_fd_get_length(fd); + + return 0; +} + +static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + const int queue_mapping = skb_get_queue_mapping(skb); + bool nonlinear = skb_is_nonlinear(skb); + struct rtnl_link_stats64 *percpu_stats; + struct dpaa_percpu_priv *percpu_priv; + struct dpaa_priv *priv; + struct qm_fd fd; + int offset = 0; + int err = 0; + + priv = netdev_priv(net_dev); + percpu_priv = this_cpu_ptr(priv->percpu_priv); + percpu_stats = &percpu_priv->stats; + + qm_fd_clear_fd(&fd); + + if (!nonlinear) { + /* We're going to store the skb backpointer at the beginning + * of the data buffer, so we need a privately owned skb + * + * We've made sure skb is not shared in dev->priv_flags, + * we need to verify the skb head is not cloned + */ + if (skb_cow_head(skb, priv->tx_headroom)) + goto 
enomem; + + WARN_ON(skb_is_nonlinear(skb)); + } + + /* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES; + * make sure we don't feed FMan with more fragments than it supports. + */ + if (nonlinear && + likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { + /* Just create a S/G fd based on the skb */ + err = skb_to_sg_fd(priv, skb, &fd); + percpu_priv->tx_frag_skbuffs++; + } else { + /* If the egress skb contains more fragments than we support + * we have no choice but to linearize it ourselves. + */ + if (unlikely(nonlinear) && __skb_linearize(skb)) + goto enomem; + + /* Finally, create a contig FD from this skb */ + err = skb_to_contig_fd(priv, skb, &fd, &offset); + } + if (unlikely(err < 0)) + goto skb_to_fd_failed; + + if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) + return NETDEV_TX_OK; + + dpaa_cleanup_tx_fd(priv, &fd); +skb_to_fd_failed: +enomem: + percpu_stats->tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} +#endif /* __rtems__ */ +static void dpaa_rx_error(struct net_device *net_dev, + const struct dpaa_priv *priv, + struct dpaa_percpu_priv *percpu_priv, + const struct qm_fd *fd, + u32 fqid) +{ #ifndef __rtems__ + if (net_ratelimit()) + netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", + be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); + percpu_priv->stats.rx_errors++; #endif /* __rtems__ */ - if (fd->status & FM_FD_ERR_DMA) + if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) percpu_priv->rx_errors.dme++; - if (fd->status & FM_FD_ERR_PHYSICAL) + if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) percpu_priv->rx_errors.fpe++; - if (fd->status & FM_FD_ERR_SIZE) + if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) percpu_priv->rx_errors.fse++; - if (fd->status & FM_FD_ERR_PRS_HDR_ERR) + if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) percpu_priv->rx_errors.phe++; - dpa_fd_release(net_dev, fd); + dpaa_fd_release(net_dev, fd); } -static void _dpa_tx_error(struct net_device *net_dev, - const struct dpa_priv_s *priv, - struct dpa_percpu_priv_s *percpu_priv, +static void dpaa_tx_error(struct net_device *net_dev, + const struct dpaa_priv *priv, + struct dpaa_percpu_priv *percpu_priv, const struct qm_fd *fd, u32 fqid) { @@ -147,7 +2102,7 @@ static void _dpa_tx_error(struct net_device *net_dev, if (net_ratelimit()) netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", - fd->status & FM_FD_STAT_TX_ERRORS); + be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); percpu_priv->stats.tx_errors++; #else /* __rtems__ */ @@ -156,64 +2111,57 @@ static void _dpa_tx_error(struct net_device *net_dev, if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); #endif /* __rtems__ */ - /* If we intended the buffers from this frame to go into the bpools - * when the FMan transmit was done, we need to put it in manually. 
- */ - if (fd->bpid != 0xff) { - dpa_fd_release(net_dev, fd); - return; - } - #ifndef __rtems__ - skb = _dpa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd); dev_kfree_skb(skb); #else /* __rtems__ */ - _dpa_cleanup_tx_fd(ifp, fd); + dpaa_cleanup_tx_fd(ifp, fd); #endif /* __rtems__ */ } #ifndef __rtems__ static int dpaa_eth_poll(struct napi_struct *napi, int budget) { - struct dpa_napi_portal *np = - container_of(napi, struct dpa_napi_portal, napi); + struct dpaa_napi_portal *np = + container_of(napi, struct dpaa_napi_portal, napi); int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { - int tmp; + napi_complete_done(napi, cleaned); + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); - napi_complete(napi); - tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); - DPA_ERR_ON(tmp); + } else if (np->down) { + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } return cleaned; } #endif /* __rtems__ */ -static void _dpa_tx_conf(struct net_device *net_dev, - const struct dpa_priv_s *priv, - struct dpa_percpu_priv_s *percpu_priv, +static void dpaa_tx_conf(struct net_device *net_dev, + const struct dpaa_priv *priv, + struct dpaa_percpu_priv *percpu_priv, const struct qm_fd *fd, u32 fqid) { #ifndef __rtems__ struct sk_buff *skb; - if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) { + if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { if (net_ratelimit()) netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", - fd->status & FM_FD_STAT_TX_ERRORS); + be32_to_cpu(fd->status) & + FM_FD_STAT_TX_ERRORS); percpu_priv->stats.tx_errors++; } percpu_priv->tx_confirm++; - skb = _dpa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd); - dev_kfree_skb(skb); + consume_skb(skb); #else /* __rtems__ */ struct ifnet *ifp = net_dev->ifp; @@ -221,159 +2169,366 @@ static void _dpa_tx_conf(struct net_device *net_dev, if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } - _dpa_cleanup_tx_fd(ifp, fd); + dpaa_cleanup_tx_fd(ifp, fd); #endif /* __rtems__ */ } -static enum qman_cb_dqrr_result -priv_rx_error_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dq) +static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, + struct qman_portal *portal) { +#ifndef __rtems__ + if (unlikely(in_irq() || !in_serving_softirq())) { + /* Disable QMan IRQ and invoke NAPI */ + qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); + + percpu_priv->np.p = portal; + napi_schedule(&percpu_priv->np.napi); + percpu_priv->in_interrupt++; + return 1; + } +#endif /* __rtems__ */ + return 0; +} + +static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); + struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev; - struct dpa_priv_s *priv; - struct dpa_percpu_priv_s *percpu_priv; - int *count_ptr; + struct dpaa_bp *dpaa_bp; + struct dpaa_priv *priv; - net_dev = ((struct dpa_fq *)fq)->net_dev; + net_dev = dpaa_fq->net_dev; priv = netdev_priv(net_dev); + dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); + if (!dpaa_bp) + return qman_cb_dqrr_consume; - percpu_priv = raw_cpu_ptr(priv->percpu_priv); - count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); + percpu_priv = this_cpu_ptr(priv->percpu_priv); if (dpaa_eth_napi_schedule(percpu_priv, portal)) return qman_cb_dqrr_stop; - if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr))) + if (dpaa_eth_refill_bpools(priv)) /* Unable to refill the buffer pool due to insufficient * system 
memory. Just release the frame back into the pool, * otherwise we'll soon end up with an empty buffer pool. */ - dpa_fd_release(net_dev, &dq->fd); + dpaa_fd_release(net_dev, &dq->fd); else - _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); + dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); return qman_cb_dqrr_consume; } -static enum qman_cb_dqrr_result -priv_rx_default_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dq) +#ifdef __rtems__ +static struct mbuf * +dpaa_bp_addr_to_mbuf(dma_addr_t addr) +{ + void *vaddr = phys_to_virt(addr); + + return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET)); +} + +static struct mbuf * +contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp) +{ + struct mbuf *m; + ssize_t fd_off = qm_fd_get_offset(fd); + dma_addr_t addr = qm_fd_addr(fd); + + m = dpaa_bp_addr_to_mbuf(addr); + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd); + m->m_data = mtod(m, char *) + fd_off; + + return (m); +} + +static void +dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr) +{ + struct bm_buffer bmb; + + bm_buffer_set64(&bmb, addr); + + while (bman_release(dpaa_bp->pool, &bmb, 1)) + cpu_relax(); + + ++(*count_ptr); +} + +static struct mbuf * +sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd, + struct ifnet *ifp, int *count_ptr) +{ + ssize_t fd_off = qm_fd_get_offset(fd); + dma_addr_t addr = qm_fd_addr(fd); + const struct qm_sg_entry *sgt; + int i; + int len; + struct mbuf *m; + struct mbuf *last; + + sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off); + len = 0; + + for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) { + dma_addr_t sg_addr; + int sg_len; + struct mbuf *n; + + BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i])); + BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid)); + + sg_addr = qm_sg_addr(&sgt[i]); + n = dpaa_bp_addr_to_mbuf(sg_addr); + + sg_len = qm_sg_entry_get_len(&sgt[i]); + len += sg_len; + + if (i == 0) { + m = n; + } else { + last->m_next = n; + } + + n->m_len = sg_len; + n->m_data = mtod(n, char *) + sgt[i].offset; + last = n; + + --(*count_ptr); + + if (qm_sg_entry_is_final(&sgt[i])) { + break; + } + } + + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = len; + + dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr); + + return (m); +} + +static void +dpaa_rx(struct net_device *net_dev, struct qman_portal *portal, + const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv, + const struct qm_fd *fd, u32 fqid, int *count_ptr) { + struct dpaa_bp *dpaa_bp; + enum qm_fd_format fd_format; + struct mbuf *m; + struct ifnet *ifp; + + ifp = net_dev->ifp; + + if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) { + if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); + dpaa_fd_release(net_dev, fd); + return; + } + + dpaa_bp = dpaa_bpid2pool(fd->bpid); + fd_format = qm_fd_get_format(fd); + + if (likely(fd_format == qm_fd_contig)) { + m = contig_fd_to_mbuf(fd, ifp); + } else { + BSD_ASSERT(fd_format == qm_fd_sg); + m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr); + } + + /* Account for either the contig buffer or the SGT buffer (depending on + * which case we were in) having been removed from the pool. 
+ */ + (*count_ptr)--; + + if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); + (*ifp->if_input)(ifp, m); +} +#endif /* __rtems__ */ +static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ +#ifndef __rtems__ + struct rtnl_link_stats64 *percpu_stats; +#endif /* __rtems__ */ + struct dpaa_percpu_priv *percpu_priv; +#ifndef __rtems__ + const struct qm_fd *fd = &dq->fd; + dma_addr_t addr = qm_fd_addr(fd); + enum qm_fd_format fd_format; +#endif /* __rtems__ */ struct net_device *net_dev; - struct dpa_priv_s *priv; - struct dpa_percpu_priv_s *percpu_priv; +#ifndef __rtems__ + u32 fd_status = fd->status; +#endif /* __rtems__ */ + struct dpaa_bp *dpaa_bp; + struct dpaa_priv *priv; +#ifndef __rtems__ + unsigned int skb_len; + struct sk_buff *skb; +#endif /* __rtems__ */ int *count_ptr; - struct dpa_bp *dpa_bp; - net_dev = ((struct dpa_fq *)fq)->net_dev; +#ifndef __rtems__ + fd_status = be32_to_cpu(fd->status); + fd_format = qm_fd_get_format(fd); +#endif /* __rtems__ */ + net_dev = ((struct dpaa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - dpa_bp = priv->dpa_bp; + dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); + if (!dpaa_bp) + return qman_cb_dqrr_consume; #ifndef __rtems__ /* Trace the Rx fd */ - trace_dpa_rx_fd(net_dev, fq, &dq->fd); + trace_dpaa_rx_fd(net_dev, fq, &dq->fd); #endif /* __rtems__ */ - /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */ - percpu_priv = raw_cpu_ptr(priv->percpu_priv); - count_ptr = raw_cpu_ptr(dpa_bp->percpu_count); + percpu_priv = this_cpu_ptr(priv->percpu_priv); +#ifndef __rtems__ + percpu_stats = &percpu_priv->stats; +#endif /* __rtems__ */ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) return qman_cb_dqrr_stop; - /* Vale of plenty: make sure we didn't run out of buffers */ - - if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr))) + /* Make sure we didn't run out of buffers */ + if (unlikely(dpaa_eth_refill_bpools(priv))) { #ifdef __rtems__ - { struct ifnet *ifp = net_dev->ifp; if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); #endif /* __rtems__ */ - /* Unable to refill the buffer pool due to insufficient - * system memory. Just release the frame back into the pool, - * otherwise we'll soon end up with an empty buffer pool. - */ - dpa_fd_release(net_dev, &dq->fd); -#ifdef __rtems__ + dpaa_fd_release(net_dev, &dq->fd); + return qman_cb_dqrr_consume; } -#endif /* __rtems__ */ + +#ifndef __rtems__ + if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { + if (net_ratelimit()) + netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", + fd_status & FM_FD_STAT_RX_ERRORS); + + percpu_stats->rx_errors++; + dpaa_fd_release(net_dev, fd); + return qman_cb_dqrr_consume; + } + + dpaa_bp = dpaa_bpid2pool(fd->bpid); + if (!dpaa_bp) + return qman_cb_dqrr_consume; + + dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); + + /* prefetch the first 64 bytes of the frame or the SGT start */ + prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); + + fd_format = qm_fd_get_format(fd); + /* The only FD types that we may receive are contig and S/G */ + WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); + + /* Account for either the contig buffer or the SGT buffer (depending on + * which case we were in) having been removed from the pool. 
+ */ + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + (*count_ptr)--; + + if (likely(fd_format == qm_fd_contig)) + skb = contig_fd_to_skb(priv, fd); + dpa_fd_release(net_dev, &dq->fd); else - _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid, - count_ptr); + skb = sg_fd_to_skb(priv, fd); + if (!skb) + return qman_cb_dqrr_consume; + + skb->protocol = eth_type_trans(skb, net_dev); + + skb_len = skb->len; + + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + return qman_cb_dqrr_consume; + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += skb_len; +#else /* __rtems__ */ + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid, + count_ptr); +#endif /* __rtems__ */ return qman_cb_dqrr_consume; } -static enum qman_cb_dqrr_result -priv_tx_conf_error_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dq) +static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) { + struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev; - struct dpa_priv_s *priv; - struct dpa_percpu_priv_s *percpu_priv; + struct dpaa_priv *priv; - net_dev = ((struct dpa_fq *)fq)->net_dev; + net_dev = ((struct dpaa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - percpu_priv = raw_cpu_ptr(priv->percpu_priv); + percpu_priv = this_cpu_ptr(priv->percpu_priv); if (dpaa_eth_napi_schedule(percpu_priv, portal)) return qman_cb_dqrr_stop; - _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); + dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); return qman_cb_dqrr_consume; } -static enum qman_cb_dqrr_result -priv_tx_conf_default_dqrr(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_dqrr_entry *dq) +static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) { + struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev; - struct dpa_priv_s *priv; - struct dpa_percpu_priv_s *percpu_priv; + struct dpaa_priv *priv; - net_dev = ((struct dpa_fq *)fq)->net_dev; + net_dev = ((struct dpaa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); #ifndef __rtems__ /* Trace the fd */ - trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd); + trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); #endif /* __rtems__ */ - /* Non-migratable context, safe to use raw_cpu_ptr */ - percpu_priv = raw_cpu_ptr(priv->percpu_priv); + percpu_priv = this_cpu_ptr(priv->percpu_priv); if (dpaa_eth_napi_schedule(percpu_priv, portal)) return qman_cb_dqrr_stop; - _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); + dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); return qman_cb_dqrr_consume; } -static void priv_ern(struct qman_portal *portal, - struct qman_fq *fq, - const struct qm_mr_entry *msg) +static void egress_ern(struct qman_portal *portal, + struct qman_fq *fq, + const union qm_mr_entry *msg) { + const struct qm_fd *fd = &msg->ern.fd; + struct dpaa_percpu_priv *percpu_priv; + const struct dpaa_priv *priv; struct net_device *net_dev; - const struct dpa_priv_s *priv; #ifndef __rtems__ struct sk_buff *skb; #else /* __rtems__ */ struct ifnet *ifp; #endif /* __rtems__ */ - struct dpa_percpu_priv_s *percpu_priv; - const struct qm_fd *fd = &msg->ern.fd; - net_dev = ((struct dpa_fq *)fq)->net_dev; + net_dev = ((struct dpaa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - /* Non-migratable context, safe to use raw_cpu_ptr */ - percpu_priv = 
raw_cpu_ptr(priv->percpu_priv); + percpu_priv = this_cpu_ptr(priv->percpu_priv); #ifndef __rtems__ percpu_priv->stats.tx_dropped++; @@ -384,97 +2539,115 @@ static void priv_ern(struct qman_portal *portal, #endif /* __rtems__ */ count_ern(percpu_priv, msg); - /* If we intended this buffer to go into the pool - * when the FM was done, we need to put it in - * manually. - */ - if (msg->ern.fd.bpid != 0xff) { - dpa_fd_release(net_dev, fd); - return; - } - #ifndef __rtems__ - skb = _dpa_cleanup_tx_fd(priv, fd); + skb = dpaa_cleanup_tx_fd(priv, fd); dev_kfree_skb_any(skb); #else /* __rtems__ */ - _dpa_cleanup_tx_fd(ifp, fd); + dpaa_cleanup_tx_fd(ifp, fd); #endif /* __rtems__ */ } -static const struct dpa_fq_cbs_t private_fq_cbs = { - .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } }, - .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } }, - .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } }, - .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } }, - .egress_ern = { .cb = { .ern = priv_ern } } +static const struct dpaa_fq_cbs dpaa_fq_cbs = { + .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, + .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, + .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, + .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, + .egress_ern = { .cb = { .ern = egress_ern } } }; -static void dpaa_eth_napi_enable(struct dpa_priv_s *priv) +static void dpaa_eth_napi_enable(struct dpaa_priv *priv) { #ifndef __rtems__ - struct dpa_percpu_priv_s *percpu_priv; - int i, j; + struct dpaa_percpu_priv *percpu_priv; + int i; for_each_possible_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - for (j = 0; j < qman_portal_max; j++) - napi_enable(&percpu_priv->np[j].napi); + percpu_priv->np.down = 0; + napi_enable(&percpu_priv->np.napi); } #endif /* __rtems__ */ } -static void dpaa_eth_napi_disable(struct dpa_priv_s *priv) +static void dpaa_eth_napi_disable(struct dpaa_priv *priv) { #ifndef __rtems__ - struct dpa_percpu_priv_s *percpu_priv; - int i, j; + struct dpaa_percpu_priv *percpu_priv; + int i; for_each_possible_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - for (j = 0; j < qman_portal_max; j++) - napi_disable(&percpu_priv->np[j].napi); + percpu_priv->np.down = 1; + napi_disable(&percpu_priv->np.napi); } #endif /* __rtems__ */ } #ifndef __rtems__ -static int dpa_eth_priv_start(struct net_device *net_dev) +static int dpaa_open(struct net_device *net_dev) #else /* __rtems__ */ int dpa_eth_priv_start(struct net_device *net_dev) #endif /* __rtems__ */ { - int err; - struct dpa_priv_s *priv; + struct mac_device *mac_dev; + struct dpaa_priv *priv; + int err, i; priv = netdev_priv(net_dev); - + mac_dev = priv->mac_dev; dpaa_eth_napi_enable(priv); - err = dpa_start(net_dev); - if (err < 0) - dpaa_eth_napi_disable(priv); +#ifndef __rtems__ + net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); + if (!net_dev->phydev) { + netif_err(priv, ifup, net_dev, "init_phy() failed\n"); + err = -ENODEV; + goto phy_init_failed; + } +#endif /* __rtems__ */ + + for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + err = fman_port_enable(mac_dev->port[i]); + if (err) + goto mac_start_failed; + } + + err = priv->mac_dev->start(mac_dev); + if (err < 0) { + netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); + goto mac_start_failed; + } + +#ifndef __rtems__ + netif_tx_start_all_queues(net_dev); +#endif /* __rtems__ */ + + return 0; + +mac_start_failed: + for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) + fman_port_disable(mac_dev->port[i]); + +#ifndef __rtems__ 
+phy_init_failed: +#endif /* __rtems__ */ + dpaa_eth_napi_disable(priv); return err; } #ifndef __rtems__ -static int dpa_eth_priv_stop(struct net_device *net_dev) +static int dpaa_eth_stop(struct net_device *net_dev) #else /* __rtems__ */ int dpa_eth_priv_stop(struct net_device *net_dev) #endif /* __rtems__ */ { + struct dpaa_priv *priv; int err; - struct dpa_priv_s *priv; - err = dpa_stop(net_dev); - /* Allow NAPI to consume any frame still in the Rx/TxConfirm - * ingress queues. This is to avoid a race between the current - * context and ksoftirqd which could leave NAPI disabled while - * in fact there's still Rx traffic to be processed. - */ - usleep_range(5000, 10000); + err = dpaa_stop(net_dev); priv = netdev_priv(net_dev); dpaa_eth_napi_disable(priv); @@ -483,131 +2656,89 @@ int dpa_eth_priv_stop(struct net_device *net_dev) } #ifndef __rtems__ -static const struct net_device_ops dpa_private_ops = { - .ndo_open = dpa_eth_priv_start, - .ndo_start_xmit = dpa_tx, - .ndo_stop = dpa_eth_priv_stop, - .ndo_tx_timeout = dpa_timeout, - .ndo_get_stats64 = dpa_get_stats64, - .ndo_set_mac_address = dpa_set_mac_address, +static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) +{ + if (!net_dev->phydev) + return -EINVAL; + return phy_mii_ioctl(net_dev->phydev, rq, cmd); +} + +static const struct net_device_ops dpaa_ops = { + .ndo_open = dpaa_open, + .ndo_start_xmit = dpaa_start_xmit, + .ndo_stop = dpaa_eth_stop, + .ndo_tx_timeout = dpaa_tx_timeout, + .ndo_get_stats64 = dpaa_get_stats64, + .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE - .ndo_select_queue = dpa_select_queue, -#endif - .ndo_change_mtu = dpa_change_mtu, - .ndo_set_rx_mode = dpa_set_rx_mode, - .ndo_init = dpa_ndo_init, - .ndo_set_features = dpa_set_features, - .ndo_fix_features = dpa_fix_features, + .ndo_set_rx_mode = dpaa_set_rx_mode, + .ndo_do_ioctl = dpaa_ioctl, }; -#endif /* __rtems__ */ -static int dpa_private_napi_add(struct net_device *net_dev) +static int dpaa_napi_add(struct net_device *net_dev) { -#ifndef __rtems__ - struct dpa_priv_s *priv = netdev_priv(net_dev); - struct dpa_percpu_priv_s *percpu_priv; - int i, cpu; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct dpaa_percpu_priv *percpu_priv; + int cpu; for_each_possible_cpu(cpu) { percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); - percpu_priv->np = devm_kzalloc(net_dev->dev.parent, - qman_portal_max * sizeof(struct dpa_napi_portal), - GFP_KERNEL); - - if (!percpu_priv->np) - return -ENOMEM; - - for (i = 0; i < qman_portal_max; i++) - netif_napi_add(net_dev, &percpu_priv->np[i].napi, - dpaa_eth_poll, DPA_NAPI_WEIGHT); + netif_napi_add(net_dev, &percpu_priv->np.napi, + dpaa_eth_poll, NAPI_POLL_WEIGHT); } -#endif /* __rtems__ */ return 0; } +#endif /* __rtems__ */ -void dpa_private_napi_del(struct net_device *net_dev) +static void dpaa_napi_del(struct net_device *net_dev) { #ifndef __rtems__ - struct dpa_priv_s *priv = netdev_priv(net_dev); - struct dpa_percpu_priv_s *percpu_priv; - int i, cpu; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct dpaa_percpu_priv *percpu_priv; + int cpu; for_each_possible_cpu(cpu) { percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); - if (percpu_priv->np) { - for (i = 0; i < qman_portal_max; i++) - netif_napi_del(&percpu_priv->np[i].napi); - - devm_kfree(net_dev->dev.parent, percpu_priv->np); - } + netif_napi_del(&percpu_priv->np.napi); } #endif /* __rtems__ */ } -static int dpa_private_netdev_init(struct net_device 
*net_dev) +static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, + struct bm_buffer *bmb) { - int i; - struct dpa_priv_s *priv = netdev_priv(net_dev); - struct dpa_percpu_priv_s *percpu_priv; -#ifndef __rtems__ - const u8 *mac_addr; -#endif /* __rtems__ */ + dma_addr_t addr = bm_buf_addr(bmb); - /* Although we access another CPU's private data here - * we do it at initialization so it is safe - */ #ifndef __rtems__ - for_each_possible_cpu(i) { -#else /* __rtems__ */ - for (i = 0; i < (int)rtems_get_processor_count(); ++i) { -#endif /* __rtems__ */ - percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - percpu_priv->net_dev = net_dev; - } - -#ifndef __rtems__ - net_dev->netdev_ops = &dpa_private_ops; - mac_addr = priv->mac_dev->addr; + dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); - net_dev->mem_start = priv->mac_dev->res->start; - net_dev->mem_end = priv->mac_dev->res->end; - - net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_LLTX); - - /* Advertise S/G and HIGHDMA support for private interfaces */ - net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; - /* Recent kernels enable GSO automatically, if - * we declare NETIF_F_SG. For conformity, we'll - * still declare GSO explicitly. - */ - net_dev->features |= NETIF_F_GSO; - - return dpa_netdev_init(net_dev, mac_addr, tx_timeout); + skb_free_frag(phys_to_virt(addr)); #else /* __rtems__ */ - return 0; + BSD_ASSERT(0); + m_freem(dpaa_bp_addr_to_mbuf(addr)); #endif /* __rtems__ */ } -static struct dpa_bp *dpa_priv_bp_probe(struct device *dev) +/* Alloc the dpaa_bp struct and configure default values */ +static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) { - struct dpa_bp *dpa_bp; + struct dpaa_bp *dpaa_bp; - dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL); - if (!dpa_bp) + dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); + if (!dpaa_bp) return ERR_PTR(-ENOMEM); - dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count); - dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; + dpaa_bp->bpid = FSL_DPAA_BPID_INV; + dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); + dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; - dpa_bp->seed_cb = dpa_bp_priv_seed; - dpa_bp->free_buf_cb = _dpa_bp_free_pf; + dpaa_bp->seed_cb = dpaa_bp_seed; + dpaa_bp->free_buf_cb = dpaa_bp_free_pf; - return dpa_bp; + return dpaa_bp; } /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. @@ -615,7 +2746,7 @@ static struct dpa_bp *dpa_priv_bp_probe(struct device *dev) * this CGR to generate enqueue rejections to FMan in order to drop the frames * before they reach our ingress queues and eat up memory. */ -static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv) +static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) { struct qm_mcc_initcgr initcgr; u32 cs_th; @@ -623,112 +2754,95 @@ static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv) err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); if (err < 0) { - pr_err("Error %d allocating CGR ID\n", err); + if (netif_msg_drv(priv)) + pr_err("Error %d allocating CGR ID\n", err); goto out_error; } /* Enable CS TD, but disable Congestion State Change Notifications. 
*/ - initcgr.we_mask = QM_CGR_WE_CS_THRES; + memset(&initcgr, 0, sizeof(initcgr)); + initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); initcgr.cgr.cscn_en = QM_CGR_EN; cs_th = DPAA_INGRESS_CS_THRESHOLD; qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); - initcgr.we_mask |= QM_CGR_WE_CSTD_EN; + initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); initcgr.cgr.cstd_en = QM_CGR_EN; - /* This is actually a hack, because this CGR will be associated with - * our affine SWP. However, we'll place our ingress FQs in it. + /* This CGR will be associated with the SWP affined to the current CPU. + * However, we'll place all our ingress FQs in it. */ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr); if (err < 0) { - pr_err("Error %d creating ingress CGR with ID %d\n", err, - priv->ingress_cgr.cgrid); + if (netif_msg_drv(priv)) + pr_err("Error %d creating ingress CGR with ID %d\n", + err, priv->ingress_cgr.cgrid); qman_release_cgrid(priv->ingress_cgr.cgrid); goto out_error; } - pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", - priv->ingress_cgr.cgrid, priv->mac_dev->addr); + if (netif_msg_drv(priv)) + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", + priv->ingress_cgr.cgrid, priv->mac_dev->addr); - /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255 - * range), but we have no common initialization path between the - * different variants of the DPAA Eth driver, so we do it here rather - * than modifying every other variant than "private Eth". - */ priv->use_ingress_cgr = true; out_error: return err; } -static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, - size_t count) -{ - struct dpa_priv_s *priv = netdev_priv(net_dev); - int i; - - netif_dbg(priv, probe, net_dev, - "Using private BM buffer pools\n"); - - priv->bp_count = count; - - for (i = 0; i < count; i++) { - int err; - - err = dpa_bp_alloc(&dpa_bp[i]); - if (err < 0) { - dpa_bp_free(priv); - priv->dpa_bp = NULL; - return err; - } +#ifndef __rtems__ +static const struct of_device_id dpaa_match[]; +#endif /* __rtems__ */ - priv->dpa_bp = &dpa_bp[i]; - } +static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) +{ + u16 headroom; + + /* The frame headroom must accommodate: + * - the driver private data area + * - parse results, hash results, timestamp if selected + * If either hash results or time stamp are selected, both will + * be copied to/from the frame headroom, as TS is located between PR and + * HR in the IC and IC copy size has a granularity of 16bytes + * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) + * + * Also make sure the headroom is a multiple of data_align bytes + */ + headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); - dpa_priv_common_bpid = priv->dpa_bp->bpid; - return 0; + return DPAA_FD_DATA_ALIGNMENT ? 
ALIGN(headroom, + DPAA_FD_DATA_ALIGNMENT) : + headroom; } #ifndef __rtems__ -static const struct of_device_id dpa_match[]; - -static int -dpaa_eth_priv_probe(struct platform_device *pdev) +static int dpaa_eth_probe(struct platform_device *pdev) #else /* __rtems__ */ int dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev) #endif /* __rtems__ */ { - int err = 0, i, channel; - struct device *dev; - struct dpa_bp *dpa_bp; - struct dpa_fq *dpa_fq, *tmp; - size_t count = 1; + struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; + struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev = NULL; - struct dpa_priv_s *priv = NULL; - struct dpa_percpu_priv_s *percpu_priv; + struct dpaa_fq *dpaa_fq, *tmp; + struct dpaa_priv *priv = NULL; struct fm_port_fqs port_fqs; - struct dpa_buffer_layout_s *buf_layout = NULL; #ifndef __rtems__ struct mac_device *mac_dev; - struct task_struct *kth; #endif /* __rtems__ */ + int err = 0, i, channel; + struct device *dev; dev = &pdev->dev; - /* Get the buffer pool assigned to this interface; - * run only once the default pool probing code - */ - dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? : - dpa_priv_bp_probe(dev); - if (IS_ERR(dpa_bp)) - return PTR_ERR(dpa_bp); - #ifndef __rtems__ /* Allocate this early, so we can store relevant information in * the private area */ - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); goto alloc_etherdev_mq_failed; @@ -738,12 +2852,6 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev) net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO); #endif /* __rtems__ */ -#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME - snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d", - dpa_mac_fman_index_get(pdev), - dpa_mac_hw_index_get(pdev)); -#endif - /* Do this here, so we can be verbose early */ #ifndef __rtems__ SET_NETDEV_DEV(net_dev, dev); @@ -754,77 +2862,90 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev) priv->net_dev = net_dev; #ifndef __rtems__ - priv->msg_enable = netif_msg_init(debug, -1); + priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); - mac_dev = dpa_mac_dev_get(pdev); - if (IS_ERR(mac_dev) || !mac_dev) { + mac_dev = dpaa_mac_dev_get(pdev); + if (IS_ERR(mac_dev)) { + dev_err(dev, "dpaa_mac_dev_get() failed\n"); err = PTR_ERR(mac_dev); goto mac_probe_failed; } -#endif /* __rtems__ */ - /* We have physical ports, so we need to establish - * the buffer layout. + /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, + * we choose conservatively and let the user explicitly set a higher + * MTU via ifconfig. Otherwise, the user may end up with different MTUs + * in the same LAN. + * If on the other hand fsl_fm_max_frm has been chosen below 1500, + * start with the maximum allowed. 
*/ - buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), - GFP_KERNEL); - if (!buf_layout) - goto alloc_failed; + net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); - dpa_set_buffers_layout(mac_dev, buf_layout); + netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", + net_dev->mtu); +#endif /* __rtems__ */ - /* For private ports, need to compute the size of the default - * buffer pool, based on FMan port buffer layout;also update - * the maximum buffer size for private ports if necessary - */ - dpa_bp->size = dpa_bp_size(&buf_layout[RX]); + priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ + priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ - INIT_LIST_HEAD(&priv->dpa_fq_list); +#ifndef __rtems__ + /* device used for DMA mapping */ + arch_setup_dma_ops(dev, 0, 0, NULL, false); + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (err) { + dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); + goto dev_mask_failed; + } +#endif /* __rtems__ */ - memset(&port_fqs, 0, sizeof(port_fqs)); + /* bp init */ + for (i = 0; i < DPAA_BPS_NUM; i++) { + int err; - err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX); - if (!err) - err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, - &port_fqs, true, TX); + dpaa_bps[i] = dpaa_bp_alloc(dev); + if (IS_ERR(dpaa_bps[i])) + return PTR_ERR(dpaa_bps[i]); + /* the raw size of the buffers used for reception */ + dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); + /* avoid runtime computations by keeping the usable size here */ + dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); + dpaa_bps[i]->dev = dev; - if (err < 0) - goto fq_probe_failed; + err = dpaa_bp_alloc_pool(dpaa_bps[i]); + if (err < 0) { + dpaa_bps_free(priv); + priv->dpaa_bps[i] = NULL; + goto bp_create_failed; + } + priv->dpaa_bps[i] = dpaa_bps[i]; + } - /* bp init */ + INIT_LIST_HEAD(&priv->dpaa_fq_list); - err = dpa_priv_bp_create(net_dev, dpa_bp, count); + memset(&port_fqs, 0, sizeof(port_fqs)); - if (err < 0) - goto bp_create_failed; + err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); + if (err < 0) { + dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); + goto fq_probe_failed; + } priv->mac_dev = mac_dev; - channel = dpa_get_channel(); - + channel = dpaa_get_channel(); if (channel < 0) { + dev_err(dev, "dpaa_get_channel() failed\n"); err = channel; goto get_channel_failed; } priv->channel = (u16)channel; -#ifndef __rtems__ - /* Start a thread that will walk the cpus with affine portals + /* Start a thread that will walk the CPUs with affine portals * and add this pool channel to each's dequeue mask. */ - kth = kthread_run(dpaa_eth_add_channel, - (void *)(unsigned long)priv->channel, - "dpaa_%p:%d", net_dev, priv->channel); - if (!kth) { - err = -ENOMEM; - goto add_channel_failed; - } -#else /* __rtems__ */ - dpaa_eth_add_channel((void *)(unsigned long)priv->channel); -#endif /* __rtems__ */ + dpaa_eth_add_channel(priv->channel); - dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]); + dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); /* Create a congestion group for this netdev, with * dynamically-allocated CGR ID. 
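/* Illustrative sketch only, not part of the patch above: the probe path now
 * calls dpaa_eth_add_channel(priv->channel) directly instead of spawning a
 * kthread. One plausible shape for such a helper, assuming the generic QMan
 * portal API (qman_affine_cpus(), qman_get_affine_portal() and
 * qman_p_static_dequeue_add()) and the QM_SDQCR_CHANNELS_POOL_CONV() macro
 * from <soc/fsl/qman.h>:
 */
static void dpaa_eth_add_channel_sketch(u16 channel)
{
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
	const cpumask_t *cpus = qman_affine_cpus();
	struct qman_portal *portal;
	int cpu;

	/* Add this pool channel to the static dequeue mask of every
	 * CPU-affine portal, so the ingress FQs placed on the pool channel
	 * can be dequeued from any core.
	 */
	for_each_cpu(cpu, cpus) {
		portal = qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
	}
}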
@@ -836,29 +2957,28 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev) dev_err(dev, "Error initializing CGR\n"); goto tx_cgr_init_failed; } - err = dpaa_eth_priv_ingress_cgr_init(priv); + + err = dpaa_ingress_cgr_init(priv); if (err < 0) { dev_err(dev, "Error initializing ingress CGR\n"); goto rx_cgr_init_failed; } /* Add the FQs to the interface, and make them active */ - list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) { - err = dpa_fq_init(dpa_fq, false); + list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { + err = dpaa_fq_init(dpaa_fq, false); if (err < 0) goto fq_alloc_failed; } - priv->buf_layout = buf_layout; - priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]); - priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]); + priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); + priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); /* All real interfaces need their ports initialized */ - dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, - buf_layout, dev); + dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, + &priv->buf_layout[0], dev); priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); - if (!priv->percpu_priv) { dev_err(dev, "devm_alloc_percpu() failed\n"); err = -ENOMEM; @@ -873,31 +2993,32 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev) memset(percpu_priv, 0, sizeof(*percpu_priv)); } +#ifndef __rtems__ /* Initialize NAPI */ - err = dpa_private_napi_add(net_dev); - + err = dpaa_napi_add(net_dev); if (err < 0) goto napi_add_failed; - err = dpa_private_netdev_init(net_dev); - + err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); if (err < 0) goto netdev_init_failed; -#ifndef __rtems__ dpaa_eth_sysfs_init(&net_dev->dev); - pr_info("Probed interface %s\n", net_dev->name); + netif_info(priv, probe, net_dev, "Probed interface %s\n", + net_dev->name); #endif /* __rtems__ */ return 0; +#ifndef __rtems__ netdev_init_failed: napi_add_failed: - dpa_private_napi_del(net_dev); +#endif /* __rtems__ */ + dpaa_napi_del(net_dev); alloc_percpu_failed: #ifndef __rtems__ - dpa_fq_free(dev, &priv->dpa_fq_list); + dpaa_fq_free(dev, &priv->dpaa_fq_list); #endif /* __rtems__ */ fq_alloc_failed: #ifndef __rtems__ @@ -910,79 +3031,111 @@ rx_cgr_init_failed: qman_release_cgrid(priv->cgr_data.cgr.cgrid); #endif /* __rtems__ */ tx_cgr_init_failed: -#ifndef __rtems__ -add_channel_failed: -#endif /* __rtems__ */ get_channel_failed: - dpa_bp_free(priv); + dpaa_bps_free(priv); bp_create_failed: fq_probe_failed: -alloc_failed: #ifndef __rtems__ +dev_mask_failed: mac_probe_failed: #endif /* __rtems__ */ dev_set_drvdata(dev, NULL); #ifndef __rtems__ free_netdev(net_dev); alloc_etherdev_mq_failed: - if (atomic_read(&dpa_bp->refs) == 0) - devm_kfree(dev, dpa_bp); + for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) { + if (atomic_read(&dpaa_bps[i]->refs) == 0) + devm_kfree(dev, dpaa_bps[i]); + } #else /* __rtems__ */ BSD_ASSERT(0); #endif /* __rtems__ */ + return err; +} + +#ifndef __rtems__ +static int dpaa_remove(struct platform_device *pdev) +{ + struct net_device *net_dev; + struct dpaa_priv *priv; + struct device *dev; + int err; + + dev = &pdev->dev; + net_dev = dev_get_drvdata(dev); + + priv = netdev_priv(net_dev); + + dpaa_eth_sysfs_remove(dev); + + dev_set_drvdata(dev, NULL); + unregister_netdev(net_dev); + + err = dpaa_fq_free(dev, &priv->dpaa_fq_list); + + qman_delete_cgr_safe(&priv->ingress_cgr); + qman_release_cgrid(priv->ingress_cgr.cgrid); + 
qman_delete_cgr_safe(&priv->cgr_data.cgr); + qman_release_cgrid(priv->cgr_data.cgr.cgrid); + + dpaa_napi_del(net_dev); + + dpaa_bps_free(priv); + + free_netdev(net_dev); return err; } +#endif /* __rtems__ */ #ifndef __rtems__ -static struct platform_device_id dpa_devtype[] = { +static struct platform_device_id dpaa_devtype[] = { { .name = "dpaa-ethernet", .driver_data = 0, }, { } }; -MODULE_DEVICE_TABLE(platform, dpa_devtype); +MODULE_DEVICE_TABLE(platform, dpaa_devtype); -static struct platform_driver dpa_driver = { +static struct platform_driver dpaa_driver = { .driver = { .name = KBUILD_MODNAME, }, - .id_table = dpa_devtype, - .probe = dpaa_eth_priv_probe, - .remove = dpa_remove + .id_table = dpaa_devtype, + .probe = dpaa_eth_probe, + .remove = dpaa_remove }; -static int __init dpa_load(void) +static int __init dpaa_load(void) { int err; - pr_info(DPA_DESCRIPTION "\n"); + pr_debug("FSL DPAA Ethernet driver\n"); - /* initialise dpaa_eth mirror values */ - dpa_rx_extra_headroom = fman_get_rx_extra_headroom(); - dpa_max_frm = fman_get_max_frm(); + /* initialize dpaa_eth mirror values */ + dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); + dpaa_max_frm = fman_get_max_frm(); - err = platform_driver_register(&dpa_driver); + err = platform_driver_register(&dpaa_driver); if (err < 0) pr_err("Error, platform_driver_register() = %d\n", err); return err; } -module_init(dpa_load); +module_init(dpaa_load); -static void __exit dpa_unload(void) +static void __exit dpaa_unload(void) { - platform_driver_unregister(&dpa_driver); + platform_driver_unregister(&dpaa_driver); - /* Only one channel is used and needs to be relased after all + /* Only one channel is used and needs to be released after all * interfaces are removed */ - dpa_release_channel(); + dpaa_release_channel(); } -module_exit(dpa_unload); +module_exit(dpaa_unload); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>"); -MODULE_DESCRIPTION(DPA_DESCRIPTION); +MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); #endif /* __rtems__ */ diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index 11b11e65..8a2b1189 100644 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -1,4 +1,4 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor Inc. +/* Copyright 2008 - 2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,131 +28,36 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __DPA_H -#define __DPA_H +#ifndef __DPAA_H +#define __DPAA_H #include <linux/netdevice.h> #include <soc/fsl/qman.h> +#include <soc/fsl/bman.h> #include "fman.h" #include "mac.h" #include "dpaa_eth_trace.h" -#ifndef __rtems__ -extern int dpa_rx_extra_headroom; -extern int dpa_max_frm; - -#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom -#define dpa_get_max_frm() dpa_max_frm -#else /* __rtems__ */ -#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom -#define dpa_get_max_frm fman_get_max_frm -#endif /* __rtems__ */ - -#define dpa_get_max_mtu() \ - (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN)) - -/* Simple enum of FQ types - used for array indexing */ -enum port_type {RX, TX}; - -struct dpa_buffer_layout_s { - u16 priv_data_size; - bool parse_results; - bool time_stamp; - bool hash_results; - u16 data_align; -}; - -#define DPA_ERR_ON(cond) - -#define DPA_TX_PRIV_DATA_SIZE 16 -#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) -#define DPA_TIME_STAMP_SIZE 8 -#define DPA_HASH_RESULTS_SIZE 8 -#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \ - dpa_get_rx_extra_headroom()) - -#define FM_FD_STAT_RX_ERRORS \ - (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \ - FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \ - FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \ - FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \ - FM_FD_ERR_PRS_HDR_ERR) +#define DPAA_ETH_TXQ_NUM NR_CPUS -#define FM_FD_STAT_TX_ERRORS \ - (FM_FD_ERR_UNSUPPORTED_FORMAT | \ - FM_FD_ERR_LENGTH | FM_FD_ERR_DMA) - -/* The raw buffer size must be cacheline aligned. - * Normally we use 2K buffers. - */ -#define DPA_BP_RAW_SIZE 2048 - -/* This is what FMan is ever allowed to use. - * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is - * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that, - * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us - * half-page-aligned buffers (can we?), so we reserve some more space - * for start-of-buffer alignment. - */ #ifndef __rtems__ -#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \ - SMP_CACHE_BYTES) +#define DPAA_BPS_NUM 3 /* number of bpools per interface */ #else /* __rtems__ */ -/* - * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive - * frames overwrite this area if < 64 bytes. - */ -#define DPA_OUT_OF_BAND_SIZE 64 -#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE) -#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET +#define DPAA_BPS_NUM 1 #endif /* __rtems__ */ -/* We must ensure that skb_shinfo is always cacheline-aligned. */ -#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1)) - -/* Largest value that the FQD's OAL field can hold. - * This is DPAA-1.x specific. - */ -#define FSL_QMAN_MAX_OAL 127 - -/* Default alignment for start of data in an Rx FD */ -#define DPA_FD_DATA_ALIGNMENT 16 - -/* Values for the L3R field of the FM Parse Results - */ -/* L3 Type field: First IP Present IPv4 */ -#define FM_L3_PARSE_RESULT_IPV4 0x8000 -/* L3 Type field: First IP Present IPv6 */ -#define FM_L3_PARSE_RESULT_IPV6 0x4000 - -/* Values for the L4R field of the FM Parse Results - * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual. 
- */ -/* L4 Type field: UDP */ -#define FM_L4_PARSE_RESULT_UDP 0x40 -/* L4 Type field: TCP */ -#define FM_L4_PARSE_RESULT_TCP 0x20 - -/* number of Tx queues to FMan */ -#define DPAA_ETH_TX_QUEUES NR_CPUS - -#define DPAA_ETH_RX_QUEUES 128 - -#define FSL_DPAA_ETH_MAX_BUF_COUNT 128 -#define FSL_DPAA_ETH_REFILL_THRESHOLD 80 /* More detailed FQ types - used for fine-grained WQ assignments */ -enum dpa_fq_type { +enum dpaa_fq_type { FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */ FQ_TYPE_RX_ERROR, /* Rx Error FQs */ - FQ_TYPE_RX_PCD, /* User-defined PCDs */ FQ_TYPE_TX, /* "Real" Tx FQs */ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */ }; -struct dpa_fq { +struct dpaa_fq { struct qman_fq fq_base; struct list_head list; struct net_device *net_dev; @@ -161,10 +66,10 @@ struct dpa_fq { u32 flags; u16 channel; u8 wq; - enum dpa_fq_type fq_type; + enum dpaa_fq_type fq_type; }; -struct dpa_fq_cbs_t { +struct dpaa_fq_cbs { struct qman_fq rx_defq; struct qman_fq tx_defq; struct qman_fq rx_errq; @@ -172,45 +77,30 @@ struct dpa_fq_cbs_t { struct qman_fq egress_ern; }; -struct fqid_cell { - u32 start; - u32 count; -}; - -struct dpa_bp { - struct bman_pool *pool; - u8 bpid; -#ifndef __rtems__ +struct dpaa_bp { + /* device used in the DMA mapping operations */ struct device *dev; -#endif /* __rtems__ */ - /* the buffer pools used for the private ports are initialized - * with config_count buffers for each CPU; at runtime the - * number of buffers per CPU is constantly brought back to this - * level - */ - int config_count; + /* current number of buffers in the buffer pool alloted to each CPU */ + int __percpu *percpu_count; + /* all buffers allocated for this pool have this raw size */ + size_t raw_size; + /* all buffers in this pool have this same usable size */ size_t size; - bool seed_pool; - /* physical address of the contiguous memory used by the pool to store - * the buffers - */ - dma_addr_t paddr; - /* virtual address of the contiguous memory used by the pool to store - * the buffers + /* the buffer pools are initialized with config_count buffers for each + * CPU; at runtime the number of buffers per CPU is constantly brought + * back to this level */ - void __iomem *vaddr; - /* current number of buffers in the bpool alloted to this CPU */ - int __percpu *percpu_count; + u16 config_count; + u8 bpid; + struct bman_pool *pool; + /* bpool can be seeded before use by this cb */ + int (*seed_cb)(struct dpaa_bp *); + /* bpool can be emptied before freeing by this cb */ + void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *); atomic_t refs; - /* some bpools need to be seeded before use by this cb */ - int (*seed_cb)(struct dpa_bp *); - /* some bpools need to be emptied before freeing; this cb is used - * for freeing of individual buffers taken from the pool - */ - void (*free_buf_cb)(void *addr); }; -struct dpa_rx_errors { +struct dpaa_rx_errors { u64 dme; /* DMA Error */ u64 fpe; /* Frame Physical Error */ u64 fse; /* Frame Size Error */ @@ -218,7 +108,7 @@ struct dpa_rx_errors { }; /* Counters for QMan ERN frames - one counter per rejection code */ -struct dpa_ern_cnt { +struct dpaa_ern_cnt { u64 cg_tdrop; /* Congestion group taildrop */ u64 wred; /* WRED congestion */ u64 err_cond; /* Error condition */ @@ -229,16 +119,17 @@ struct dpa_ern_cnt { u64 orp_zero; /* ORP disabled */ }; -struct dpa_napi_portal { +struct dpaa_napi_portal { #ifndef __rtems__ struct 
napi_struct napi; #endif /* __rtems__ */ struct qman_portal *p; + bool down; }; -struct dpa_percpu_priv_s { +struct dpaa_percpu_priv { struct net_device *net_dev; - struct dpa_napi_portal *np; + struct dpaa_napi_portal np; u64 in_interrupt; u64 tx_confirm; /* fragmented (non-linear) skbuffs received from the stack */ @@ -246,26 +137,28 @@ struct dpa_percpu_priv_s { #ifndef __rtems__ struct rtnl_link_stats64 stats; #endif /* __rtems__ */ - struct dpa_rx_errors rx_errors; - struct dpa_ern_cnt ern_cnt; + struct dpaa_rx_errors rx_errors; + struct dpaa_ern_cnt ern_cnt; }; -struct dpa_priv_s { - struct dpa_percpu_priv_s __percpu *percpu_priv; - struct dpa_bp *dpa_bp; +struct dpaa_buffer_layout { + u16 priv_data_size; +}; + +struct dpaa_priv { + struct dpaa_percpu_priv __percpu *percpu_priv; + struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM]; /* Store here the needed Tx headroom for convenience and speed * (even though it can be computed based on the fields of buf_layout) */ u16 tx_headroom; struct net_device *net_dev; struct mac_device *mac_dev; - struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES]; - struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES]; - - size_t bp_count; + struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM]; + struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM]; - u16 channel; /* "fsl,qman-channel-id" */ - struct list_head dpa_fq_list; + u16 channel; + struct list_head dpaa_fq_list; #ifndef __rtems__ u32 msg_enable; /* net_device message level */ @@ -289,231 +182,28 @@ struct dpa_priv_s { bool use_ingress_cgr; struct qman_cgr ingress_cgr; - struct dpa_buffer_layout_s *buf_layout; + struct dpaa_buffer_layout buf_layout[2]; u16 rx_headroom; }; -struct fm_port_fqs { - struct dpa_fq *tx_defq; - struct dpa_fq *tx_errq; - struct dpa_fq *rx_defq; - struct dpa_fq *rx_errq; -}; +/* from dpaa_ethtool.c */ +extern const struct ethtool_ops dpaa_ethtool_ops; -int dpa_bp_priv_seed(struct dpa_bp *dpa_bp); -int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr); -void _dpa_rx(struct net_device *net_dev, - struct qman_portal *portal, - const struct dpa_priv_s *priv, - struct dpa_percpu_priv_s *percpu_priv, - const struct qm_fd *fd, - u32 fqid, - int *count_ptr); -#ifndef __rtems__ -int dpa_tx(struct sk_buff *skb, struct net_device *net_dev); -struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, - const struct qm_fd *fd); - -/* Turn on HW checksum computation for this outgoing frame. - * If the current protocol is not something we support in this regard - * (or if the stack has already computed the SW checksum), we do nothing. - * - * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value - * otherwise. - * - * Note that this function may modify the fd->cmd field and the skb data buffer - * (the Parse Results area). - */ -int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb, - struct qm_fd *fd, char *parse_results); -#else /* __rtems__ */ -void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd); -#endif /* __rtems__ */ - -static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv, - struct qman_portal *portal) -{ -#ifndef __rtems__ - /* In case of threaded ISR for RT enable kernel, - * in_irq() does not return appropriate value, so use - * in_serving_softirq to distinguish softirq or irq context. 
- */ - if (unlikely(in_irq() || !in_serving_softirq())) { - /* Disable QMan IRQ and invoke NAPI */ - int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); - - if (likely(!ret)) { - const struct qman_portal_config *pc = - qman_p_get_portal_config(portal); - struct dpa_napi_portal *np = - &percpu_priv->np[pc->channel]; - - np->p = portal; - napi_schedule(&np->napi); - percpu_priv->in_interrupt++; - return 1; - } - } -#else /* __rtems__ */ - /* FIXME */ -#endif /* __rtems__ */ - return 0; -} - -static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd) -{ - return fd->length20; -} - -static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd) -{ - return fd->offset; -} - -#ifndef __rtems__ -/* Verifies if the skb length is below the interface MTU */ -static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu) -{ - if (unlikely(skb->len > mtu)) - if ((skb->protocol != htons(ETH_P_8021Q)) || - (skb->len > mtu + 4)) - return -1; - - return 0; -} -#endif /* __rtems__ */ - -static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl) -{ - u16 headroom; - /* The frame headroom must accommodate: - * - the driver private data area - * - parse results, hash results, timestamp if selected - * If either hash results or time stamp are selected, both will - * be copied to/from the frame headroom, as TS is located between PR and - * HR in the IC and IC copy size has a granularity of 16bytes - * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) - * - * Also make sure the headroom is a multiple of data_align bytes - */ - headroom = (u16)(bl->priv_data_size + - (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) + - (bl->hash_results || bl->time_stamp ? - DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0)); - - return bl->data_align ? 
ALIGN(headroom, bl->data_align) : headroom; -} - -#ifndef __rtems__ +/* from dpaa_eth_sysfs.c */ void dpaa_eth_sysfs_remove(struct device *dev); void dpaa_eth_sysfs_init(struct device *dev); - -void dpa_private_napi_del(struct net_device *net_dev); +#ifdef __rtems__ +#include <sys/mbuf.h> + +#define DPAA_ENQUEUE_RETRIES 100000 +#define DPAA_SGT_MAX_ENTRIES 16 +#define DPAA_TX_PRIV_DATA_SIZE 16 +#define FM_L3_PARSE_RESULT_IPV4 0x8000 +#define FM_L3_PARSE_RESULT_IPV6 0x4000 +#define FM_L4_PARSE_RESULT_UDP 0x40 +#define FM_L4_PARSE_RESULT_TCP 0x20 +#define FSL_DPAA_BPID_INV 0xff + +void dpaa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd); #endif /* __rtems__ */ - -static inline void clear_fd(struct qm_fd *fd) -{ - fd->opaque_addr = 0; - fd->opaque = 0; - fd->cmd = 0; -} - -static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv, - struct qman_fq *tx_fq) -{ - int i; - - for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) - if (priv->egress_fqs[i] == tx_fq) - return i; - - return -EINVAL; -} - -#ifndef __rtems__ -static inline int dpa_xmit(struct dpa_priv_s *priv, - struct rtnl_link_stats64 *percpu_stats, - int queue, - struct qm_fd *fd) -{ - int err, i; - struct qman_fq *egress_fq; - - egress_fq = priv->egress_fqs[queue]; - if (fd->bpid == 0xff) - fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]); - - /* Trace this Tx fd */ - trace_dpa_tx_fd(priv->net_dev, egress_fq, fd); - - for (i = 0; i < 100000; i++) { - err = qman_enqueue(egress_fq, fd, 0); - if (err != -EBUSY) - break; - } - - if (unlikely(err < 0)) { - percpu_stats->tx_errors++; - percpu_stats->tx_fifo_errors++; - return err; - } - - percpu_stats->tx_packets++; - percpu_stats->tx_bytes += dpa_fd_length(fd); - - return 0; -} -#endif /* __rtems__ */ - -/* Use multiple WQs for FQ assignment: - * - Tx Confirmation queues go to WQ1. - * - Rx Default and Tx queues go to WQ3 (no differentiation between - * Rx and Tx traffic). - * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance - * to be scheduled, in case there are many more FQs in WQ3). - * This ensures that Tx-confirmed buffers are timely released. In particular, - * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they - * are greatly outnumbered by other FQs in the system, while - * dequeue scheduling is round-robin. 
- */ -static inline void _dpa_assign_wq(struct dpa_fq *fq) -{ - switch (fq->fq_type) { - case FQ_TYPE_TX_CONFIRM: - case FQ_TYPE_TX_CONF_MQ: - fq->wq = 1; - break; - case FQ_TYPE_RX_DEFAULT: - case FQ_TYPE_TX: - fq->wq = 3; - break; - case FQ_TYPE_RX_ERROR: - case FQ_TYPE_TX_ERROR: - fq->wq = 2; - break; - default: - WARN(1, "Invalid FQ type %d for FQID %d!\n", - fq->fq_type, fq->fqid); - } -} - -#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -/* Use in lieu of skb_get_queue_mapping() */ -#define dpa_get_queue_mapping(skb) \ - raw_smp_processor_id() -#else -/* Use the queue selected by XPS */ -#define dpa_get_queue_mapping(skb) \ - skb_get_queue_mapping(skb) -#endif - -static inline void _dpa_bp_free_pf(void *addr) -{ -#ifndef __rtems__ - put_page(virt_to_head_page(addr)); -#else /* __rtems__ */ - BSD_ASSERT(0); -#endif /* __rtems__ */ -} - -#endif /* __DPA_H */ +#endif /* __DPAA_H */ diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c deleted file mode 100644 index 9a4a2184..00000000 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c +++ /dev/null @@ -1,1491 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/of_platform.h> -#include <linux/of_net.h> -#include <linux/etherdevice.h> -#include <linux/kthread.h> -#include <linux/percpu.h> -#ifndef __rtems__ -#include <linux/highmem.h> -#include <linux/sort.h> -#endif /* __rtems__ */ -#include <soc/fsl/qman.h> -#ifndef __rtems__ -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/if_vlan.h> -#endif /* __rtems__ */ -#include "dpaa_eth.h" -#include "dpaa_eth_common.h" -#include "mac.h" - -/* Size in bytes of the FQ taildrop threshold */ -#define DPA_FQ_TD 0x200000 - -#define DPAA_CS_THRESHOLD_1G 0x06000000 -/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000 - * The size in bytes of the egress Congestion State notification threshold on - * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a - * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), - * and the larger the frame size, the more acute the problem. - * So we have to find a balance between these factors: - * - avoiding the device staying congested for a prolonged time (risking - * the netdev watchdog to fire - see also the tx_timeout module param); - * - affecting performance of protocols such as TCP, which otherwise - * behave well under the congestion notification mechanism; - * - preventing the Tx cores from tightly-looping (as if the congestion - * threshold was too low to be effective); - * - running out of memory if the CS threshold is set too high. - */ - -#define DPAA_CS_THRESHOLD_10G 0x10000000 -/* The size in bytes of the egress Congestion State notification threshold on - * 10G ports, range 0x1000 .. 0x10000000 - */ - -static struct dpa_bp *dpa_bp_array[64]; - -#ifndef __rtems__ -int dpa_max_frm; - -int dpa_rx_extra_headroom; -#endif /* __rtems__ */ - -static const struct fqid_cell tx_confirm_fqids[] = { - {0, DPAA_ETH_TX_QUEUES} -}; - -static const struct fqid_cell default_fqids[][3] = { - [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} }, - [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} } -}; - -#ifndef __rtems__ -int dpa_netdev_init(struct net_device *net_dev, - const u8 *mac_addr, - u16 tx_timeout) -{ - int err; - struct dpa_priv_s *priv = netdev_priv(net_dev); - struct device *dev = net_dev->dev.parent; - - net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - /* we do not want shared skbs on TX */ - net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; - - net_dev->features |= net_dev->hw_features; - net_dev->vlan_features = net_dev->features; - - memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); - - net_dev->ethtool_ops = &dpa_ethtool_ops; - - net_dev->needed_headroom = priv->tx_headroom; - net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); - - /* start without the RUNNING flag, phylib controls it later */ - netif_carrier_off(net_dev); - - err = register_netdev(net_dev); - if (err < 0) { - dev_err(dev, "register_netdev() = %d\n", err); - return err; - } - - return 0; -} -#endif /* __rtems__ */ - -int dpa_start(struct net_device *net_dev) -{ - int err, i; - struct dpa_priv_s *priv; - struct mac_device *mac_dev; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - -#ifndef __rtems__ - err = mac_dev->init_phy(net_dev, priv->mac_dev); - if (err < 0) { - netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err); - return err; - } -#endif /* __rtems__ */ - - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { - err = fman_port_enable(mac_dev->port[i]); - if (err) - goto 
mac_start_failed; - } - - err = priv->mac_dev->start(mac_dev); - if (err < 0) { - netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); - goto mac_start_failed; - } - -#ifndef __rtems__ - netif_tx_start_all_queues(net_dev); -#endif /* __rtems__ */ - - return 0; - -mac_start_failed: - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) - fman_port_disable(mac_dev->port[i]); - - return err; -} - -int dpa_stop(struct net_device *net_dev) -{ - int i, err, error; - struct dpa_priv_s *priv; - struct mac_device *mac_dev; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - -#ifndef __rtems__ - netif_tx_stop_all_queues(net_dev); -#endif /* __rtems__ */ - /* Allow the Fman (Tx) port to process in-flight frames before we - * try switching it off. - */ - usleep_range(5000, 10000); - - err = mac_dev->stop(mac_dev); - if (err < 0) - netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n", - err); - - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { - error = fman_port_disable(mac_dev->port[i]); - if (error) - err = error; - } - -#ifndef __rtems__ - if (mac_dev->phy_dev) - phy_disconnect(mac_dev->phy_dev); - mac_dev->phy_dev = NULL; -#endif /* __rtems__ */ - - return err; -} - -#ifndef __rtems__ -void dpa_timeout(struct net_device *net_dev) -{ - const struct dpa_priv_s *priv; - struct dpa_percpu_priv_s *percpu_priv; - - priv = netdev_priv(net_dev); - percpu_priv = raw_cpu_ptr(priv->percpu_priv); - - netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", - jiffies_to_msecs(jiffies - net_dev->trans_start)); - - percpu_priv->stats.tx_errors++; -} - -/* Calculates the statistics for the given device by adding the statistics - * collected by each CPU. - */ -struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) -{ - struct dpa_priv_s *priv = netdev_priv(net_dev); - u64 *cpustats; - u64 *netstats = (u64 *)stats; - int i, j; - struct dpa_percpu_priv_s *percpu_priv; - int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); - - for_each_possible_cpu(i) { - percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - - cpustats = (u64 *)&percpu_priv->stats; - - for (j = 0; j < numstats; j++) - netstats[j] += cpustats[j]; - } - - return stats; -} -#endif /* __rtems__ */ - -int dpa_change_mtu(struct net_device *net_dev, int new_mtu) -{ - const int max_mtu = dpa_get_max_mtu(); - - /* Make sure we don't exceed the Ethernet controller's MAXFRM */ - if (new_mtu < 68 || new_mtu > max_mtu) { - netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n", - new_mtu, 68, max_mtu); - return -EINVAL; - } -#ifndef __rtems__ - net_dev->mtu = new_mtu; -#endif /* __rtems__ */ - - return 0; -} - -#ifndef __rtems__ -/* .ndo_init callback */ -int dpa_ndo_init(struct net_device *net_dev) -{ - /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, - * we choose conservatively and let the user explicitly set a higher - * MTU via ifconfig. Otherwise, the user may end up with different MTUs - * in the same LAN. - * If on the other hand fsl_fm_max_frm has been chosen below 1500, - * start with the maximum allowed. 
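// Illustrative sketch, not part of the original patch: the MTU policy that
// dpa_change_mtu() above and dpa_ndo_init() just below implement -- start at
// min(controller maximum, 1500) and afterwards accept only values in 68..max.
// The EXAMPLE_* names and main() are editor-invented stand-ins, not driver symbols.
#include <stdio.h>

#define EXAMPLE_ETH_DATA_LEN 1500       // the all-common Ethernet MTU
#define EXAMPLE_MIN_MTU      68         // smallest L3 MTU the driver accepts

static int example_initial_mtu(int max_mtu)
{
        // be conservative: never start above 1500 even if MAXFRM allows more
        return max_mtu < EXAMPLE_ETH_DATA_LEN ? max_mtu : EXAMPLE_ETH_DATA_LEN;
}

static int example_change_mtu(int new_mtu, int max_mtu)
{
        // same range check as dpa_change_mtu(): reject anything outside 68..max
        return (new_mtu < EXAMPLE_MIN_MTU || new_mtu > max_mtu) ? -1 : 0;
}

int main(void)
{
        printf("initial MTU, max 9578: %d\n", example_initial_mtu(9578));
        printf("initial MTU, max 1492: %d\n", example_initial_mtu(1492));
        printf("set MTU 9000 (max 9578): %d\n", example_change_mtu(9000, 9578));
        printf("set MTU 60   (max 9578): %d\n", example_change_mtu(60, 9578));
        return 0;
}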
- */ - int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN); - - netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", - init_mtu); - net_dev->mtu = init_mtu; - - return 0; -} - -int dpa_set_features(struct net_device *dev, netdev_features_t features) -{ - /* Not much to do here for now */ - dev->features = features; - return 0; -} - -netdev_features_t dpa_fix_features(struct net_device *dev, - netdev_features_t features) -{ - netdev_features_t unsupported_features = 0; - - /* In theory we should never be requested to enable features that - * we didn't set in netdev->features and netdev->hw_features at probe - * time, but double check just to be on the safe side. - * We don't support enabling Rx csum through ethtool yet - */ - unsupported_features |= NETIF_F_RXCSUM; - - features &= ~unsupported_features; - - return features; -} - -int dpa_remove(struct platform_device *pdev) -{ - int err; - struct device *dev; - struct net_device *net_dev; - struct dpa_priv_s *priv; - - dev = &pdev->dev; - net_dev = dev_get_drvdata(dev); - - priv = netdev_priv(net_dev); - - dpaa_eth_sysfs_remove(dev); - - dev_set_drvdata(dev, NULL); - unregister_netdev(net_dev); - - err = dpa_fq_free(dev, &priv->dpa_fq_list); - - qman_delete_cgr_safe(&priv->ingress_cgr); - qman_release_cgrid(priv->ingress_cgr.cgrid); - qman_delete_cgr_safe(&priv->cgr_data.cgr); - qman_release_cgrid(priv->cgr_data.cgr.cgrid); - - dpa_private_napi_del(net_dev); - - dpa_bp_free(priv); - - if (priv->buf_layout) - devm_kfree(dev, priv->buf_layout); - - free_netdev(net_dev); - - return err; -} - -struct mac_device *dpa_mac_dev_get(struct platform_device *pdev) -{ - struct device *dpa_dev, *dev; - struct device_node *mac_node; - struct platform_device *of_dev; - struct mac_device *mac_dev; - struct dpaa_eth_data *eth_data; - - dpa_dev = &pdev->dev; - eth_data = dpa_dev->platform_data; - if (!eth_data) - return ERR_PTR(-ENODEV); - - mac_node = eth_data->mac_node; - - of_dev = of_find_device_by_node(mac_node); - if (!of_dev) { - dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n", - mac_node->full_name); - of_node_put(mac_node); - return ERR_PTR(-EINVAL); - } - of_node_put(mac_node); - - dev = &of_dev->dev; - - mac_dev = dev_get_drvdata(dev); - if (!mac_dev) { - dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n", - dev_name(dev)); - return ERR_PTR(-EINVAL); - } - - return mac_dev; -} - -int dpa_mac_hw_index_get(struct platform_device *pdev) -{ - struct device *dpa_dev; - struct dpaa_eth_data *eth_data; - - dpa_dev = &pdev->dev; - eth_data = dpa_dev->platform_data; - - return eth_data->mac_hw_id; -} - -int dpa_mac_fman_index_get(struct platform_device *pdev) -{ - struct device *dpa_dev; - struct dpaa_eth_data *eth_data; - - dpa_dev = &pdev->dev; - eth_data = dpa_dev->platform_data; - - return eth_data->fman_hw_id; -} - -int dpa_set_mac_address(struct net_device *net_dev, void *addr) -{ - const struct dpa_priv_s *priv; - int err; - struct mac_device *mac_dev; - - priv = netdev_priv(net_dev); - - err = eth_mac_addr(net_dev, addr); - if (err < 0) { - netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); - return err; - } - - mac_dev = priv->mac_dev; - - err = mac_dev->change_addr(mac_dev->fman_mac, - (enet_addr_t *)net_dev->dev_addr); - if (err < 0) { - netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", - err); - return err; - } - - return 0; -} - -void dpa_set_rx_mode(struct net_device *net_dev) -{ - int err; - const struct dpa_priv_s *priv; - - priv = netdev_priv(net_dev); - - if (!!(net_dev->flags & IFF_PROMISC) != 
priv->mac_dev->promisc) { - priv->mac_dev->promisc = !priv->mac_dev->promisc; - err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, - priv->mac_dev->promisc); - if (err < 0) - netif_err(priv, drv, net_dev, - "mac_dev->set_promisc() = %d\n", - err); - } - - err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); - if (err < 0) - netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", - err); -} -#endif /* __rtems__ */ - -void dpa_set_buffers_layout(struct mac_device *mac_dev, - struct dpa_buffer_layout_s *layout) -{ - /* Rx */ - layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE; - layout[RX].parse_results = true; - layout[RX].hash_results = true; - layout[RX].data_align = DPA_FD_DATA_ALIGNMENT; - - /* Tx */ - layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE; - layout[TX].parse_results = true; - layout[TX].hash_results = true; - layout[TX].data_align = DPA_FD_DATA_ALIGNMENT; -} - -int dpa_bp_alloc(struct dpa_bp *dpa_bp) -{ - int err; - struct bman_pool_params bp_params; -#ifndef __rtems__ - struct platform_device *pdev; -#endif /* __rtems__ */ - - if (dpa_bp->size == 0 || dpa_bp->config_count == 0) { - pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers"); - return -EINVAL; - } - - memset(&bp_params, 0, sizeof(struct bman_pool_params)); - - /* If the pool is already specified, we only create one per bpid */ - if (dpa_bpid2pool_use(dpa_bp->bpid)) - return 0; - - if (dpa_bp->bpid == 0) - bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID; - else - bp_params.bpid = dpa_bp->bpid; - - dpa_bp->pool = bman_new_pool(&bp_params); - if (!dpa_bp->pool) { - pr_err("bman_new_pool() failed\n"); - return -ENODEV; - } - - dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid; - -#ifndef __rtems__ - pdev = platform_device_register_simple("DPAA_bpool", - dpa_bp->bpid, NULL, 0); - if (IS_ERR(pdev)) { - err = PTR_ERR(pdev); - goto pdev_register_failed; - } - - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); - if (err) - goto pdev_mask_failed; - - dpa_bp->dev = &pdev->dev; -#endif /* __rtems__ */ - - if (dpa_bp->seed_cb) { - err = dpa_bp->seed_cb(dpa_bp); - if (err) - goto pool_seed_failed; - } - - dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp); - - return 0; - -pool_seed_failed: -#ifndef __rtems__ -pdev_mask_failed: - platform_device_unregister(pdev); -pdev_register_failed: -#endif /* __rtems__ */ - bman_free_pool(dpa_bp->pool); - - return err; -} - -void dpa_bp_drain(struct dpa_bp *bp) -{ - int ret; - u8 num = 8; - - do { - struct bm_buffer bmb[8]; - int i; - - ret = bman_acquire(bp->pool, bmb, num, 0); - if (ret < 0) { - if (num == 8) { - /* we have less than 8 buffers left; - * drain them one by one - */ - num = 1; - ret = 1; - continue; - } else { - /* Pool is fully drained */ - break; - } - } - - for (i = 0; i < num; i++) { - dma_addr_t addr = bm_buf_addr(&bmb[i]); - -#ifndef __rtems__ - dma_unmap_single(bp->dev, addr, bp->size, - DMA_BIDIRECTIONAL); -#endif /* __rtems__ */ - - bp->free_buf_cb(phys_to_virt(addr)); - } - } while (ret > 0); -} - -static void _dpa_bp_free(struct dpa_bp *dpa_bp) -{ - struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid); - - /* the mapping between bpid and dpa_bp is done very late in the - * allocation procedure; if something failed before the mapping, the bp - * was not configured, therefore we don't need the below instructions - */ - if (!bp) - return; - - if (!atomic_dec_and_test(&bp->refs)) - return; - - if (bp->free_buf_cb) - dpa_bp_drain(bp); - - dpa_bp_array[bp->bpid] = NULL; - bman_free_pool(bp->pool); - -#ifndef __rtems__ - if 
(bp->dev) - platform_device_unregister(to_platform_device(bp->dev)); -#endif /* __rtems__ */ -} - -void dpa_bp_free(struct dpa_priv_s *priv) -{ - int i; - - for (i = 0; i < priv->bp_count; i++) - _dpa_bp_free(&priv->dpa_bp[i]); -} - -struct dpa_bp *dpa_bpid2pool(int bpid) -{ - return dpa_bp_array[bpid]; -} - -void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp) -{ - dpa_bp_array[bpid] = dpa_bp; - atomic_set(&dpa_bp->refs, 1); -} - -bool dpa_bpid2pool_use(int bpid) -{ - if (dpa_bpid2pool(bpid)) { - atomic_inc(&dpa_bp_array[bpid]->refs); - return true; - } - - return false; -} - -#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - return dpa_get_queue_mapping(skb); -} -#endif - -struct dpa_fq *dpa_fq_alloc(struct device *dev, - const struct fqid_cell *fqids, - struct list_head *list, - enum dpa_fq_type fq_type) -{ - int i; - struct dpa_fq *dpa_fq; - - dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL); - if (!dpa_fq) - return NULL; - - for (i = 0; i < fqids->count; i++) { - dpa_fq[i].fq_type = fq_type; - dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0; - list_add_tail(&dpa_fq[i].list, list); - } - - for (i = 0; i < fqids->count; i++) - _dpa_assign_wq(dpa_fq + i); - - return dpa_fq; -} - -int dpa_fq_probe_mac(struct device *dev, struct list_head *list, - struct fm_port_fqs *port_fqs, - bool alloc_tx_conf_fqs, - enum port_type ptype) -{ - const struct fqid_cell *fqids; - struct dpa_fq *dpa_fq; - int num_ranges; - int i; - - if (ptype == TX && alloc_tx_conf_fqs) { - if (!dpa_fq_alloc(dev, tx_confirm_fqids, list, - FQ_TYPE_TX_CONF_MQ)) - goto fq_alloc_failed; - } - - fqids = default_fqids[ptype]; - num_ranges = 3; - - for (i = 0; i < num_ranges; i++) { - switch (i) { - case 0: - /* The first queue is the error queue */ - if (fqids[i].count != 1) - goto invalid_error_queue; - - dpa_fq = dpa_fq_alloc(dev, &fqids[i], list, - ptype == RX ? - FQ_TYPE_RX_ERROR : - FQ_TYPE_TX_ERROR); - if (!dpa_fq) - goto fq_alloc_failed; - - if (ptype == RX) - port_fqs->rx_errq = &dpa_fq[0]; - else - port_fqs->tx_errq = &dpa_fq[0]; - break; - case 1: - /* the second queue is the default queue */ - if (fqids[i].count != 1) - goto invalid_default_queue; - - dpa_fq = dpa_fq_alloc(dev, &fqids[i], list, - ptype == RX ? 
- FQ_TYPE_RX_DEFAULT : - FQ_TYPE_TX_CONFIRM); - if (!dpa_fq) - goto fq_alloc_failed; - - if (ptype == RX) - port_fqs->rx_defq = &dpa_fq[0]; - else - port_fqs->tx_defq = &dpa_fq[0]; - break; - default: - /* all subsequent queues are Tx */ - if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX)) - goto fq_alloc_failed; - break; - } - } - - return 0; - -fq_alloc_failed: - dev_err(dev, "dpa_fq_alloc() failed\n"); - return -ENOMEM; - -invalid_default_queue: -invalid_error_queue: - dev_err(dev, "Too many default or error queues\n"); - return -EINVAL; -} - -static u32 rx_pool_channel; -static DEFINE_SPINLOCK(rx_pool_channel_init); - -int dpa_get_channel(void) -{ - spin_lock(&rx_pool_channel_init); - if (!rx_pool_channel) { - u32 pool; - int ret = qman_alloc_pool(&pool); - - if (!ret) - rx_pool_channel = pool; - } - spin_unlock(&rx_pool_channel_init); - if (!rx_pool_channel) - return -ENOMEM; - return rx_pool_channel; -} - -void dpa_release_channel(void) -{ - qman_release_pool(rx_pool_channel); -} - -int dpaa_eth_add_channel(void *__arg) -{ -#ifndef __rtems__ - const cpumask_t *cpus = qman_affine_cpus(); -#endif /* __rtems__ */ - u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg); - int cpu; - struct qman_portal *portal; - -#ifndef __rtems__ - for_each_cpu(cpu, cpus) { -#else /* __rtems__ */ - for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) { -#endif /* __rtems__ */ - - portal = (struct qman_portal *)qman_get_affine_portal(cpu); - qman_p_static_dequeue_add(portal, pool); - } - return 0; -} - -/* Congestion group state change notification callback. - * Stops the device's egress queues while they are congested and - * wakes them upon exiting congested state. - * Also updates some CGR-related stats. - */ -static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, - int congested) -{ - struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr, - struct dpa_priv_s, cgr_data.cgr); - - if (congested) { - priv->cgr_data.congestion_start_jiffies = jiffies; -#ifndef __rtems__ - netif_tx_stop_all_queues(priv->net_dev); -#else /* __rtems__ */ - BSD_ASSERT(0); -#endif /* __rtems__ */ - priv->cgr_data.cgr_congested_count++; - } else { - priv->cgr_data.congested_jiffies += - (jiffies - priv->cgr_data.congestion_start_jiffies); -#ifndef __rtems__ - netif_tx_wake_all_queues(priv->net_dev); -#else /* __rtems__ */ - BSD_ASSERT(0); -#endif /* __rtems__ */ - } -} - -int dpaa_eth_cgr_init(struct dpa_priv_s *priv) -{ - struct qm_mcc_initcgr initcgr; - u32 cs_th; - int err; - - err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); - if (err < 0) { - pr_err("Error %d allocating CGR ID\n", err); - goto out_error; - } - priv->cgr_data.cgr.cb = dpaa_eth_cgscn; - - /* Enable Congestion State Change Notifications and CS taildrop */ - initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES; - initcgr.cgr.cscn_en = QM_CGR_EN; - - /* Set different thresholds based on the MAC speed. - * This may turn suboptimal if the MAC is reconfigured at a speed - * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. - * In such cases, we ought to reconfigure the threshold, too. 
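// Illustrative sketch, not part of the original patch: how dpaa_eth_cgr_init()
// picks the egress Congestion State threshold from the MAC's speed in the code
// that follows, mirroring DPAA_CS_THRESHOLD_1G / DPAA_CS_THRESHOLD_10G defined
// earlier in this file. The EXAMPLE_* names are editor-invented stand-ins.
#include <stdio.h>
#include <stdbool.h>

#define EXAMPLE_CS_THRESHOLD_1G  0x06000000u    // 96 MiB before CSCN fires
#define EXAMPLE_CS_THRESHOLD_10G 0x10000000u    // 256 MiB before CSCN fires

static unsigned int example_pick_cs_threshold(bool supports_10g)
{
        // a 10G port may buffer considerably more before being marked congested
        return supports_10g ? EXAMPLE_CS_THRESHOLD_10G : EXAMPLE_CS_THRESHOLD_1G;
}

int main(void)
{
        printf("1G port threshold:  0x%08x bytes\n", example_pick_cs_threshold(false));
        printf("10G port threshold: 0x%08x bytes\n", example_pick_cs_threshold(true));
        return 0;
}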
- */ -#ifndef __rtems__ - if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) - cs_th = DPAA_CS_THRESHOLD_10G; - else - cs_th = DPAA_CS_THRESHOLD_1G; -#else /* __rtems__ */ - /* FIXME */ - cs_th = DPAA_CS_THRESHOLD_1G; -#endif /* __rtems__ */ - qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); - - initcgr.we_mask |= QM_CGR_WE_CSTD_EN; - initcgr.cgr.cstd_en = QM_CGR_EN; - - err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, - &initcgr); - if (err < 0) { - pr_err("Error %d creating CGR with ID %d\n", err, - priv->cgr_data.cgr.cgrid); - qman_release_cgrid(priv->cgr_data.cgr.cgrid); - goto out_error; - } - pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", - priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, - priv->cgr_data.cgr.chan); - -out_error: - return err; -} - -static inline void dpa_setup_ingress(const struct dpa_priv_s *priv, - struct dpa_fq *fq, - const struct qman_fq *template) -{ - fq->fq_base = *template; - fq->net_dev = priv->net_dev; - - fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; - fq->channel = priv->channel; -} - -static inline void dpa_setup_egress(const struct dpa_priv_s *priv, - struct dpa_fq *fq, - struct fman_port *port, - const struct qman_fq *template) -{ - fq->fq_base = *template; - fq->net_dev = priv->net_dev; - - if (port) { - fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; - fq->channel = (u16)fman_port_get_qman_channel_id(port); - } else { - fq->flags = QMAN_FQ_FLAG_NO_MODIFY; - } -} - -void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, - struct fman_port *tx_port) -{ - struct dpa_fq *fq; -#ifndef __rtems__ - u16 portals[NR_CPUS]; - int cpu, num_portals = 0; - const cpumask_t *affine_cpus = qman_affine_cpus(); -#endif /* __rtems__ */ - int egress_cnt = 0, conf_cnt = 0; - -#ifndef __rtems__ - for_each_cpu(cpu, affine_cpus) - portals[num_portals++] = qman_affine_channel(cpu); - if (num_portals == 0) - dev_err(priv->net_dev->dev.parent, - "No Qman software (affine) channels found"); -#else /* __rtems__ */ - /* FIXME */ -#endif /* __rtems__ */ - - /* Initialize each FQ in the list */ - list_for_each_entry(fq, &priv->dpa_fq_list, list) { - switch (fq->fq_type) { - case FQ_TYPE_RX_DEFAULT: - DPA_ERR_ON(!priv->mac_dev); - dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq); - break; - case FQ_TYPE_RX_ERROR: - DPA_ERR_ON(!priv->mac_dev); - dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq); - break; - case FQ_TYPE_TX: - dpa_setup_egress(priv, fq, tx_port, - &fq_cbs->egress_ern); - /* If we have more Tx queues than the number of cores, - * just ignore the extra ones. - */ - if (egress_cnt < DPAA_ETH_TX_QUEUES) - priv->egress_fqs[egress_cnt++] = &fq->fq_base; - break; - case FQ_TYPE_TX_CONFIRM: - DPA_ERR_ON(!priv->mac_dev); - dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); - break; - case FQ_TYPE_TX_CONF_MQ: - DPA_ERR_ON(!priv->mac_dev); - dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq); - priv->conf_fqs[conf_cnt++] = &fq->fq_base; - break; - case FQ_TYPE_TX_ERROR: - DPA_ERR_ON(!priv->mac_dev); - dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq); - break; - default: -#ifndef __rtems__ - dev_warn(priv->net_dev->dev.parent, - "Unknown FQ type detected!\n"); -#else /* __rtems__ */ - BSD_ASSERT(0); -#endif /* __rtems__ */ - break; - } - } - - /* The number of Tx queues may be smaller than the number of cores, if - * the Tx queue range is specified in the device tree instead of being - * dynamically allocated. - * Make sure all CPUs receive a corresponding Tx queue. 
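// Illustrative sketch, not part of the original patch: the wrap-around that
// dpa_fq_setup() performs right after this comment -- when fewer Tx FQs exist
// than DPAA_ETH_TX_QUEUES, the available FQs are reused in round-robin order
// so every per-CPU slot gets one. Plain ints stand in for the qman_fq pointers
// and the counts below are made up for the demonstration.
#include <stdio.h>

#define EXAMPLE_TX_QUEUES 8             // stands in for DPAA_ETH_TX_QUEUES

int main(void)
{
        int tx_fqs[] = { 100, 101, 102 };        // only three Tx FQs allocated
        int egress[EXAMPLE_TX_QUEUES];
        int i;

        for (i = 0; i < EXAMPLE_TX_QUEUES; i++)
                egress[i] = tx_fqs[i % 3];       // keep cycling the short list
        for (i = 0; i < EXAMPLE_TX_QUEUES; i++)
                printf("egress_fqs[%d] = FQ %d\n", i, egress[i]);
        return 0;
}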
- */ - while (egress_cnt < DPAA_ETH_TX_QUEUES) { - list_for_each_entry(fq, &priv->dpa_fq_list, list) { - if (fq->fq_type != FQ_TYPE_TX) - continue; - priv->egress_fqs[egress_cnt++] = &fq->fq_base; - if (egress_cnt == DPAA_ETH_TX_QUEUES) - break; - } - } -} - -int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable) -{ - int err; - const struct dpa_priv_s *priv; -#ifndef __rtems__ - struct device *dev; -#endif /* __rtems__ */ - struct qman_fq *fq; - struct qm_mcc_initfq initfq; - struct qman_fq *confq = NULL; - int queue_id; - - priv = netdev_priv(dpa_fq->net_dev); -#ifndef __rtems__ - dev = dpa_fq->net_dev->dev.parent; -#endif /* __rtems__ */ - - if (dpa_fq->fqid == 0) - dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; - - dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); - - err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); - if (err) { -#ifndef __rtems__ - dev_err(dev, "qman_create_fq() failed\n"); -#else /* __rtems__ */ - BSD_ASSERT(0); -#endif /* __rtems__ */ - return err; - } - fq = &dpa_fq->fq_base; - - if (dpa_fq->init) { - memset(&initfq, 0, sizeof(initfq)); - - initfq.we_mask = QM_INITFQ_WE_FQCTRL; - /* Note: we may get to keep an empty FQ in cache */ - initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; - - /* Try to reduce the number of portal interrupts for - * Tx Confirmation FQs. - */ - if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) - initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; - - /* FQ placement */ - initfq.we_mask |= QM_INITFQ_WE_DESTWQ; - - initfq.fqd.dest.channel = dpa_fq->channel; - initfq.fqd.dest.wq = dpa_fq->wq; - - /* Put all egress queues in a congestion group of their own. - * Sensu stricto, the Tx confirmation queues are Rx FQs, - * rather than Tx - but they nonetheless account for the - * memory footprint on behalf of egress traffic. We therefore - * place them in the netdev's CGR, along with the Tx FQs. - */ - if (dpa_fq->fq_type == FQ_TYPE_TX || - dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM || - dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { - initfq.we_mask |= QM_INITFQ_WE_CGID; - initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; - initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; - /* Set a fixed overhead accounting, in an attempt to - * reduce the impact of fixed-size skb shells and the - * driver's needed headroom on system memory. This is - * especially the case when the egress traffic is - * composed of small datagrams. - * Unfortunately, QMan's OAL value is capped to an - * insufficient value, but even that is better than - * no overhead accounting at all. - */ - initfq.we_mask |= QM_INITFQ_WE_OAC; - initfq.fqd.oac_init.oac = QM_OAC_CG; -#ifndef __rtems__ - initfq.fqd.oac_init.oal = - (signed char)(min(sizeof(struct sk_buff) + - priv->tx_headroom, - (size_t)FSL_QMAN_MAX_OAL)); -#else /* __rtems__ */ - /* FIXME */ - initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL; -#endif /* __rtems__ */ - } - - if (td_enable) { - initfq.we_mask |= QM_INITFQ_WE_TDTHRESH; - qm_fqd_taildrop_set(&initfq.fqd.td, - DPA_FQ_TD, 1); - initfq.fqd.fq_ctrl = QM_FQCTRL_TDE; - } - - /* Configure the Tx confirmation queue, now that we know - * which Tx queue it pairs with. 
- */ - if (dpa_fq->fq_type == FQ_TYPE_TX) { - queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base); - if (queue_id >= 0) - confq = priv->conf_fqs[queue_id]; - if (confq) { - initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; - /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) - * A2V=1 (contextA A2 field is valid) - * A0V=1 (contextA A0 field is valid) - * B0V=1 (contextB field is valid) - * ContextA A2: EBD=1 (deallocate buffers inside FMan) - * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) - */ - initfq.fqd.context_a.hi = 0x1e000000; - initfq.fqd.context_a.lo = 0x80000000; - } - } - - /* Put all *private* ingress queues in our "ingress CGR". */ - if (priv->use_ingress_cgr && - (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT || - dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) { - initfq.we_mask |= QM_INITFQ_WE_CGID; - initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; - initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; - /* Set a fixed overhead accounting, just like for the - * egress CGR. - */ - initfq.we_mask |= QM_INITFQ_WE_OAC; - initfq.fqd.oac_init.oac = QM_OAC_CG; -#ifndef __rtems__ - initfq.fqd.oac_init.oal = - (signed char)(min(sizeof(struct sk_buff) + - priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL)); -#else /* __rtems__ */ - /* FIXME */ - initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL; -#endif /* __rtems__ */ - } - - /* Initialization common to all ingress queues */ - if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { - initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; - initfq.fqd.fq_ctrl |= - QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; - initfq.fqd.context_a.stashing.exclusive = - QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | - QM_STASHING_EXCL_ANNOTATION; - initfq.fqd.context_a.stashing.data_cl = 2; - initfq.fqd.context_a.stashing.annotation_cl = 1; - initfq.fqd.context_a.stashing.context_cl = - DIV_ROUND_UP(sizeof(struct qman_fq), 64); - } - - err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); - if (err < 0) { -#ifndef __rtems__ - dev_err(dev, "qman_init_fq(%u) = %d\n", - qman_fq_fqid(fq), err); -#endif /* __rtems__ */ - qman_destroy_fq(fq, 0); - return err; - } - } - - dpa_fq->fqid = qman_fq_fqid(fq); - - return 0; -} - -#ifndef __rtems__ -static int _dpa_fq_free(struct device *dev, struct qman_fq *fq) -{ - int err, error; - struct dpa_fq *dpa_fq; - const struct dpa_priv_s *priv; - - err = 0; - - dpa_fq = container_of(fq, struct dpa_fq, fq_base); - priv = netdev_priv(dpa_fq->net_dev); - - if (dpa_fq->init) { - err = qman_retire_fq(fq, NULL); - if (err < 0 && netif_msg_drv(priv)) - dev_err(dev, "qman_retire_fq(%u) = %d\n", - qman_fq_fqid(fq), err); - - error = qman_oos_fq(fq); - if (error < 0 && netif_msg_drv(priv)) { - dev_err(dev, "qman_oos_fq(%u) = %d\n", - qman_fq_fqid(fq), error); - if (err >= 0) - err = error; - } - } - - qman_destroy_fq(fq, 0); - list_del(&dpa_fq->list); - - return err; -} - -int dpa_fq_free(struct device *dev, struct list_head *list) -{ - int err, error; - struct dpa_fq *dpa_fq, *tmp; - - err = 0; - list_for_each_entry_safe(dpa_fq, tmp, list, list) { - error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq); - if (error < 0 && err >= 0) - err = error; - } - - return err; -} -#endif /* __rtems__ */ - -static void -dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq, - struct dpa_fq *defq, - struct dpa_buffer_layout_s *buf_layout) -{ - struct fman_port_params params; - struct fman_buffer_prefix_content buf_prefix_content; - int err; - - memset(¶ms, 0, sizeof(params)); - memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); - - buf_prefix_content.priv_data_size = 
buf_layout->priv_data_size; - buf_prefix_content.pass_prs_result = buf_layout->parse_results; - buf_prefix_content.pass_hash_result = buf_layout->hash_results; - buf_prefix_content.pass_time_stamp = buf_layout->time_stamp; - buf_prefix_content.data_align = buf_layout->data_align; - - params.specific_params.non_rx_params.err_fqid = errq->fqid; - params.specific_params.non_rx_params.dflt_fqid = defq->fqid; - - err = fman_port_config(port, ¶ms); - if (err) - pr_info("fman_port_config failed\n"); - - err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); - if (err) - pr_info("fman_port_cfg_buf_prefix_content failed\n"); - - err = fman_port_init(port); - if (err) - pr_err("fm_port_init failed\n"); -} - -static void -dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp, - size_t count, struct dpa_fq *errq, struct dpa_fq *defq, - struct dpa_buffer_layout_s *buf_layout) -{ - struct fman_port_params params; - struct fman_buffer_prefix_content buf_prefix_content; - struct fman_port_rx_params *rx_p; - int i, err; - - memset(¶ms, 0, sizeof(params)); - memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); - - buf_prefix_content.priv_data_size = buf_layout->priv_data_size; - buf_prefix_content.pass_prs_result = buf_layout->parse_results; - buf_prefix_content.pass_hash_result = buf_layout->hash_results; - buf_prefix_content.pass_time_stamp = buf_layout->time_stamp; - buf_prefix_content.data_align = buf_layout->data_align; - - rx_p = ¶ms.specific_params.rx_params; - rx_p->err_fqid = errq->fqid; - rx_p->dflt_fqid = defq->fqid; - - count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count); - rx_p->ext_buf_pools.num_of_pools_used = (u8)count; - for (i = 0; i < count; i++) { - rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid; - rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size; - } - - err = fman_port_config(port, ¶ms); - if (err) - pr_info("fman_port_config failed\n"); - - err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); - if (err) - pr_info("fman_port_cfg_buf_prefix_content failed\n"); - - err = fman_port_init(port); - if (err) - pr_err("fm_port_init failed\n"); -} - -void dpaa_eth_init_ports(struct mac_device *mac_dev, - struct dpa_bp *bp, size_t count, - struct fm_port_fqs *port_fqs, - struct dpa_buffer_layout_s *buf_layout, - struct device *dev) -{ - struct fman_port *rxport = mac_dev->port[RX]; - struct fman_port *txport = mac_dev->port[TX]; - - dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, - port_fqs->tx_defq, &buf_layout[TX]); - dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq, - port_fqs->rx_defq, &buf_layout[RX]); -} - -void dpa_release_sgt(struct qm_sg_entry *sgt) -{ - struct dpa_bp *dpa_bp; - struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX]; - u8 i = 0, j; - - memset(bmb, 0, sizeof(bmb)); - - do { - dpa_bp = dpa_bpid2pool(sgt[i].bpid); - DPA_ERR_ON(!dpa_bp); - - j = 0; - do { - DPA_ERR_ON(sgt[i].extension); - - bmb[j].hi = sgt[i].addr_hi; - bmb[j].lo = be32_to_cpu(sgt[i].addr_lo); - - j++; i++; - } while (j < ARRAY_SIZE(bmb) && - !sgt[i - 1].final && - sgt[i - 1].bpid == sgt[i].bpid); - - while (bman_release(dpa_bp->pool, bmb, j, 0)) - cpu_relax(); - } while (!sgt[i - 1].final); -} - -void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd) -{ - struct qm_sg_entry *sgt; - struct dpa_bp *dpa_bp; - struct bm_buffer bmb; - dma_addr_t addr; - void *vaddr; - - memset(&bmb, 0, sizeof(bmb)); - bm_buffer_set64(&bmb, fd->addr); - - dpa_bp = dpa_bpid2pool(fd->bpid); - DPA_ERR_ON(!dpa_bp); - - if (fd->format == 
qm_fd_sg) { - vaddr = phys_to_virt(fd->addr); - sgt = vaddr + dpa_fd_offset(fd); - -#ifndef __rtems__ - dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size, - DMA_BIDIRECTIONAL); -#endif /* __rtems__ */ - - dpa_release_sgt(sgt); - -#ifndef __rtems__ - addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dpa_bp->dev, addr)) { - dev_err(dpa_bp->dev, "DMA mapping failed"); - return; - } -#else /* __rtems__ */ - addr = (dma_addr_t)vaddr; -#endif /* __rtems__ */ - bm_buffer_set64(&bmb, addr); - } - - while (bman_release(dpa_bp->pool, &bmb, 1, 0)) - cpu_relax(); -} - -void count_ern(struct dpa_percpu_priv_s *percpu_priv, - const struct qm_mr_entry *msg) -{ - switch (msg->ern.rc & QM_MR_RC_MASK) { - case QM_MR_RC_CGR_TAILDROP: - percpu_priv->ern_cnt.cg_tdrop++; - break; - case QM_MR_RC_WRED: - percpu_priv->ern_cnt.wred++; - break; - case QM_MR_RC_ERROR: - percpu_priv->ern_cnt.err_cond++; - break; - case QM_MR_RC_ORPWINDOW_EARLY: - percpu_priv->ern_cnt.early_window++; - break; - case QM_MR_RC_ORPWINDOW_LATE: - percpu_priv->ern_cnt.late_window++; - break; - case QM_MR_RC_FQ_TAILDROP: - percpu_priv->ern_cnt.fq_tdrop++; - break; - case QM_MR_RC_ORPWINDOW_RETIRED: - percpu_priv->ern_cnt.fq_retired++; - break; - case QM_MR_RC_ORP_ZERO: - percpu_priv->ern_cnt.orp_zero++; - break; - } -} - -#ifndef __rtems__ -/* Turn on HW checksum computation for this outgoing frame. - * If the current protocol is not something we support in this regard - * (or if the stack has already computed the SW checksum), we do nothing. - * - * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value - * otherwise. - * - * Note that this function may modify the fd->cmd field and the skb data buffer - * (the Parse Results area). - */ -int dpa_enable_tx_csum(struct dpa_priv_s *priv, - struct sk_buff *skb, - struct qm_fd *fd, - char *parse_results) -{ - struct fman_prs_result *parse_result; - struct iphdr *iph; - struct ipv6hdr *ipv6h = NULL; - u8 l4_proto; - u16 ethertype = ntohs(skb->protocol); - int retval = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - /* Note: L3 csum seems to be already computed in sw, but we can't choose - * L4 alone from the FM configuration anyway. - */ - - /* Fill in some fields of the Parse Results array, so the FMan - * can find them as if they came from the FMan Parser. 
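// Illustrative sketch, not part of the original patch: the protocol check that
// dpa_enable_tx_csum() makes below before requesting hardware checksumming --
// look past an optional VLAN tag to find the real ethertype, then accept only
// IPv4/IPv6 carrying TCP or UDP. The numeric constants are the standard
// IEEE/IANA values; the example_* helper is editor-invented.
#include <stdio.h>

#define EXAMPLE_ETH_P_IP    0x0800
#define EXAMPLE_ETH_P_IPV6  0x86DD
#define EXAMPLE_ETH_P_8021Q 0x8100
#define EXAMPLE_IPPROTO_TCP 6
#define EXAMPLE_IPPROTO_UDP 17

// returns 0 when the frame is eligible for HW checksum, -1 otherwise
static int example_can_hw_csum(unsigned int ethertype,
                               unsigned int vlan_inner_type,
                               unsigned int l4_proto)
{
        if (ethertype == EXAMPLE_ETH_P_8021Q)
                ethertype = vlan_inner_type;     // use the encapsulated type
        if (ethertype != EXAMPLE_ETH_P_IP && ethertype != EXAMPLE_ETH_P_IPV6)
                return -1;                       // unsupported L3 protocol
        if (l4_proto != EXAMPLE_IPPROTO_TCP && l4_proto != EXAMPLE_IPPROTO_UDP)
                return -1;                       // unsupported L4 protocol
        return 0;
}

int main(void)
{
        printf("VLAN + IPv4 + UDP: %d\n",
               example_can_hw_csum(EXAMPLE_ETH_P_8021Q, EXAMPLE_ETH_P_IP,
                                   EXAMPLE_IPPROTO_UDP));
        printf("IPv6 + ICMPv6:     %d\n",
               example_can_hw_csum(EXAMPLE_ETH_P_IPV6, 0, 58));
        return 0;
}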
- */ - parse_result = (struct fman_prs_result *)parse_results; - - /* If we're dealing with VLAN, get the real Ethernet type */ - if (ethertype == ETH_P_8021Q) { - /* We can't always assume the MAC header is set correctly - * by the stack, so reset to beginning of skb->data - */ - skb_reset_mac_header(skb); - ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); - } - - /* Fill in the relevant L3 parse result fields - * and read the L4 protocol type - */ - switch (ethertype) { - case ETH_P_IP: - parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); - iph = ip_hdr(skb); - DPA_ERR_ON(!iph); - l4_proto = iph->protocol; - break; - case ETH_P_IPV6: - parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); - ipv6h = ipv6_hdr(skb); - DPA_ERR_ON(!ipv6h); - l4_proto = ipv6h->nexthdr; - break; - default: - /* We shouldn't even be here */ - if (net_ratelimit()) - netif_alert(priv, tx_err, priv->net_dev, - "Can't compute HW csum for L3 proto 0x%x\n", - ntohs(skb->protocol)); - retval = -EIO; - goto return_error; - } - - /* Fill in the relevant L4 parse result fields */ - switch (l4_proto) { - case IPPROTO_UDP: - parse_result->l4r = FM_L4_PARSE_RESULT_UDP; - break; - case IPPROTO_TCP: - parse_result->l4r = FM_L4_PARSE_RESULT_TCP; - break; - default: - /* This can as well be a BUG() */ - if (net_ratelimit()) - netif_alert(priv, tx_err, priv->net_dev, - "Can't compute HW csum for L4 proto 0x%x\n", - l4_proto); - retval = -EIO; - goto return_error; - } - - /* At index 0 is IPOffset_1 as defined in the Parse Results */ - parse_result->ip_off[0] = (u8)skb_network_offset(skb); - parse_result->l4_off = (u8)skb_transport_offset(skb); - - /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ - fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; - - /* On P1023 and similar platforms fd->cmd interpretation could - * be disabled by setting CONTEXT_A bit ICMD; currently this bit - * is not set so we do not need to check; in the future, if/when - * using context_a we need to check this bit - */ - -return_error: - return retval; -} -#endif /* __rtems__ */ diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h deleted file mode 100644 index 954de393..00000000 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h +++ /dev/null @@ -1,113 +0,0 @@ -/* Copyright 2008 - 2015 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __DPAA_ETH_COMMON_H -#define __DPAA_ETH_COMMON_H - -#include <linux/etherdevice.h> -#include <soc/fsl/bman.h> -#include <linux/of_platform.h> - -#include "dpaa_eth.h" - -#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ -#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ - -/* used in napi related functions */ -extern u16 qman_portal_max; - -/* from dpa_ethtool.c */ -extern const struct ethtool_ops dpa_ethtool_ops; - -int dpa_netdev_init(struct net_device *net_dev, - const u8 *mac_addr, - u16 tx_timeout); -int dpa_start(struct net_device *net_dev); -int dpa_stop(struct net_device *net_dev); -void dpa_timeout(struct net_device *net_dev); -struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *stats); -int dpa_change_mtu(struct net_device *net_dev, int new_mtu); -int dpa_ndo_init(struct net_device *net_dev); -#ifndef __rtems__ -int dpa_set_features(struct net_device *dev, netdev_features_t features); -netdev_features_t dpa_fix_features(struct net_device *dev, - netdev_features_t features); -#endif /* __rtems__ */ -int dpa_remove(struct platform_device *pdev); -struct mac_device *dpa_mac_dev_get(struct platform_device *pdev); -int dpa_mac_hw_index_get(struct platform_device *pdev); -int dpa_mac_fman_index_get(struct platform_device *pdev); -int dpa_set_mac_address(struct net_device *net_dev, void *addr); -void dpa_set_rx_mode(struct net_device *net_dev); -void dpa_set_buffers_layout(struct mac_device *mac_dev, - struct dpa_buffer_layout_s *layout); -int dpa_bp_alloc(struct dpa_bp *dpa_bp); -void dpa_bp_free(struct dpa_priv_s *priv); -struct dpa_bp *dpa_bpid2pool(int bpid); -void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp); -bool dpa_bpid2pool_use(int bpid); -void dpa_bp_drain(struct dpa_bp *bp); -#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE -u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); -#endif -struct dpa_fq *dpa_fq_alloc(struct device *dev, - const struct fqid_cell *fqids, - struct list_head *list, - enum dpa_fq_type fq_type); -int dpa_fq_probe_mac(struct device *dev, struct list_head *list, - struct fm_port_fqs *port_fqs, - bool tx_conf_fqs_per_core, - enum port_type ptype); -int dpa_get_channel(void); -void dpa_release_channel(void); -int dpaa_eth_add_channel(void *__arg); -int dpaa_eth_cgr_init(struct dpa_priv_s *priv); -void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs, - struct fman_port *tx_port); -int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable); -int dpa_fq_free(struct device *dev, struct list_head *list); -void dpaa_eth_init_ports(struct mac_device *mac_dev, - struct dpa_bp *bp, size_t count, 
- struct fm_port_fqs *port_fqs, - struct dpa_buffer_layout_s *buf_layout, - struct device *dev); -void dpa_release_sgt(struct qm_sg_entry *sgt); -void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd); -void count_ern(struct dpa_percpu_priv_s *percpu_priv, - const struct qm_mr_entry *msg); -#ifndef __rtems__ -int dpa_enable_tx_csum(struct dpa_priv_s *priv, - struct sk_buff *skb, - struct qm_fd *fd, - char *parse_results); -#endif /* __rtems__ */ -#endif /* __DPAA_ETH_COMMON_H */ diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c deleted file mode 100644 index 2d0903e3..00000000 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c +++ /dev/null @@ -1,710 +0,0 @@ -#include <machine/rtems-bsd-kernel-space.h> - -#include <rtems/bsd/local/opt_dpaa.h> - -/* Copyright 2012 - 2015 Freescale Semiconductor Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/highmem.h> -#include <soc/fsl/bman.h> - -#include "dpaa_eth.h" -#include "dpaa_eth_common.h" - -/* Convenience macros for storing/retrieving the skb back-pointers. - * - * NB: @off is an offset from a (struct sk_buff **) pointer! - */ -#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \ - { \ - skbh = (struct sk_buff **)addr; \ - *(skbh + (off)) = skb; \ - } -#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \ - { \ - skbh = (struct sk_buff **)addr; \ - skb = *(skbh + (off)); \ - } - -/* DMA map and add a page frag back into the bpool. - * @vaddr fragment must have been allocated with netdev_alloc_frag(), - * specifically for fitting into @dpa_bp. 
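// Illustrative sketch, not part of the original patch: the back-pointer trick
// behind DPA_WRITE_SKB_PTR / DPA_READ_SKB_PTR above -- treat the buffer address
// as an array of pointers and park the skb pointer one slot before the data
// (offset -1) or at the data itself (offset 0). struct example_skb is an
// editor-invented stand-in for sk_buff.
#include <stdio.h>

struct example_skb { int id; };

int main(void)
{
        struct example_skb *slots[32];           // pointer-aligned backing buffer
        void *buf_data = &slots[1];              // the "data" starts one slot in
        struct example_skb skb = { .id = 42 };

        // write: stash the back-pointer just before the data address (off = -1)
        ((struct example_skb **)buf_data)[-1] = &skb;

        // read: later, recover the skb from the buffer address alone
        struct example_skb *found = ((struct example_skb **)buf_data)[-1];
        printf("recovered skb id = %d\n", found->id);
        return 0;
}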
- */ -static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr, - int *count_ptr) -{ - struct bm_buffer bmb; - dma_addr_t addr; - - addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { - dev_err(dpa_bp->dev, "DMA mapping failed"); - return; - } - - bm_buffer_set64(&bmb, addr); - - while (bman_release(dpa_bp->pool, &bmb, 1, 0)) - cpu_relax(); - - (*count_ptr)++; -} - -static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp) -{ - struct bm_buffer bmb[8]; - void *new_buf; - dma_addr_t addr; - u8 i; - struct device *dev = dpa_bp->dev; - struct sk_buff *skb, **skbh; - - memset(bmb, 0, sizeof(bmb)); - - for (i = 0; i < 8; i++) { - /* We'll prepend the skb back-pointer; can't use the DPA - * priv space, because FMan will overwrite it (from offset 0) - * if it ends up being the second, third, etc. fragment - * in a S/G frame. - * - * We only need enough space to store a pointer, but allocate - * an entire cacheline for performance reasons. - */ - new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE); - if (unlikely(!new_buf)) - goto netdev_alloc_failed; - new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES); - - skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) { - put_page(virt_to_head_page(new_buf)); - goto build_skb_failed; - } - DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1); - - addr = dma_map_single(dev, new_buf, - dpa_bp->size, DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dev, addr))) - goto dma_map_failed; - - bm_buffer_set64(&bmb[i], addr); - } - -release_bufs: - /* Release the buffers. In case bman is busy, keep trying - * until successful. bman_release() is guaranteed to succeed - * in a reasonable amount of time - */ - while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0))) - cpu_relax(); - return i; - -dma_map_failed: - kfree_skb(skb); - -build_skb_failed: -netdev_alloc_failed: - net_err_ratelimited("dpa_bp_add_8_bufs() failed\n"); - WARN_ONCE(1, "Memory allocation failure on Rx\n"); - - bm_buffer_set64(&bmb[i], 0); - /* Avoid releasing a completely null buffer; bman_release() requires - * at least one buffer. - */ - if (likely(i)) - goto release_bufs; - - return 0; -} - -/* Cold path wrapper over _dpa_bp_add_8_bufs(). */ -static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu) -{ - int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu); - *count_ptr += _dpa_bp_add_8_bufs(dpa_bp); -} - -int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) -{ - int i; - - /* Give each CPU an allotment of "config_count" buffers */ - for_each_possible_cpu(i) { - int j; - - /* Although we access another CPU's counters here - * we do it at boot time so it is safe - */ - for (j = 0; j < dpa_bp->config_count; j += 8) - dpa_bp_add_8_bufs(dpa_bp, i); - } - return 0; -} - -/* Add buffers/(pages) for Rx processing whenever bpool count falls below - * REFILL_THRESHOLD. - */ -int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr) -{ - int count = *countptr; - int new_bufs; - - if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { - do { - new_bufs = _dpa_bp_add_8_bufs(dpa_bp); - if (unlikely(!new_bufs)) { - /* Avoid looping forever if we've temporarily - * run out of memory. We'll try again at the - * next NAPI cycle. 
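// Illustrative sketch, not part of the original patch: the refill policy of
// dpaa_eth_refill_bpools() shown here -- do nothing while the per-CPU buffer
// count is above the threshold, otherwise add buffers in batches of eight until
// the target count is reached or an allocation batch comes back empty (retry at
// the next NAPI cycle). Threshold and target values below are illustrative only.
#include <stdio.h>

#define EXAMPLE_REFILL_THRESHOLD 80
#define EXAMPLE_MAX_BUF_COUNT    128

// pretend allocator: adds up to eight buffers, then simulates running dry
static int example_add_8_bufs(void)
{
        static int batches;
        return (++batches > 5) ? 0 : 8;
}

static int example_refill(int *count)
{
        if (*count >= EXAMPLE_REFILL_THRESHOLD)
                return 0;                        // enough buffers, nothing to do
        do {
                int added = example_add_8_bufs();
                if (!added)
                        break;                   // out of memory: try again later
                *count += added;
        } while (*count < EXAMPLE_MAX_BUF_COUNT);
        return (*count < EXAMPLE_MAX_BUF_COUNT) ? -1 : 0;
}

int main(void)
{
        int count = 64;
        int err = example_refill(&count);
        printf("buffer count after refill: %d (err = %d)\n", count, err);
        return 0;
}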
- */ - break; - } - count += new_bufs; - } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); - - *countptr = count; - if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) - return -ENOMEM; - } - - return 0; -} - -/* Cleanup function for outgoing frame descriptors that were built on Tx path, - * either contiguous frames or scatter/gather ones. - * Skb freeing is not handled here. - * - * This function may be called on error paths in the Tx function, so guard - * against cases when not all fd relevant fields were filled in. - * - * Return the skb backpointer, since for S/G frames the buffer containing it - * gets freed here. - */ -struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, - const struct qm_fd *fd) -{ - const struct qm_sg_entry *sgt; - int i; - struct dpa_bp *dpa_bp = priv->dpa_bp; - dma_addr_t addr = qm_fd_addr(fd); - struct sk_buff **skbh; - struct sk_buff *skb = NULL; - const enum dma_data_direction dma_dir = DMA_TO_DEVICE; - int nr_frags; - - - /* retrieve skb back pointer */ - DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0); - - if (unlikely(fd->format == qm_fd_sg)) { - nr_frags = skb_shinfo(skb)->nr_frags; - dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); - - /* The sgt buffer has been allocated with netdev_alloc_frag(), - * it's from lowmem. - */ - sgt = phys_to_virt(addr + dpa_fd_offset(fd)); - - /* sgt[0] is from lowmem, was dma_map_single()-ed */ - dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr, - sgt[0].length, dma_dir); - - /* remaining pages were mapped with dma_map_page() */ - for (i = 1; i < nr_frags; i++) { - DPA_ERR_ON(sgt[i].extension); - - dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr, - sgt[i].length, dma_dir); - } - - /* Free the page frag that we allocated on Tx */ - put_page(virt_to_head_page(sgt)); - } else { - dma_unmap_single(dpa_bp->dev, addr, - skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); - } - - return skb; -} - -/* Build a linear skb around the received buffer. - * We are guaranteed there is enough room at the end of the data buffer to - * accommodate the shared info area of the skb. - */ -static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv, - const struct qm_fd *fd) -{ - struct sk_buff *skb = NULL, **skbh; - ssize_t fd_off = dpa_fd_offset(fd); - dma_addr_t addr = qm_fd_addr(fd); - void *vaddr; - - vaddr = phys_to_virt(addr); - DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); - - /* Retrieve the skb and adjust data and tail pointers, to make sure - * forwarded skbs will have enough space on Tx if extra headers - * are added. - */ - DPA_READ_SKB_PTR(skb, skbh, vaddr, -1); - - DPA_ERR_ON(fd_off != priv->rx_headroom); - skb_reserve(skb, fd_off); - skb_put(skb, dpa_fd_length(fd)); - - skb->ip_summed = CHECKSUM_NONE; - - return skb; -} - -/* Build an skb with the data of the first S/G entry in the linear portion and - * the rest of the frame as skb fragments. - * - * The page fragment holding the S/G Table is recycled here. 
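// Illustrative sketch, not part of the original patch: the S/G-table walk that
// sg_fd_to_skb() performs below -- consume entries in order, the first becoming
// the linear part and the rest skb fragments, and stop at the entry carrying
// the 'final' bit; running off the 16-entry table without a final bit means a
// malformed frame. struct example_sg_entry is a simplified stand-in for
// qm_sg_entry.
#include <stdio.h>

#define EXAMPLE_SGT_MAX_ENTRIES 16      // mirrors DPA_SGT_MAX_ENTRIES

struct example_sg_entry {
        unsigned int length;
        unsigned int final;             // set on the last entry of the frame
};

int main(void)
{
        struct example_sg_entry sgt[EXAMPLE_SGT_MAX_ENTRIES] = {
                { 128, 0 }, { 1024, 0 }, { 512, 1 },
        };
        unsigned int total = 0;
        int i;

        for (i = 0; i < EXAMPLE_SGT_MAX_ENTRIES; i++) {
                total += sgt[i].length;
                if (sgt[i].final)
                        break;
        }
        if (i == EXAMPLE_SGT_MAX_ENTRIES)
                printf("malformed frame: no final bit in the S/G table\n");
        else
                printf("frame of %u bytes built from %d S/G entries\n", total, i + 1);
        return 0;
}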
- */ -static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv, - const struct qm_fd *fd, - int *count_ptr) -{ - const struct qm_sg_entry *sgt; - dma_addr_t addr = qm_fd_addr(fd); - ssize_t fd_off = dpa_fd_offset(fd); - dma_addr_t sg_addr; - void *vaddr, *sg_vaddr; - struct dpa_bp *dpa_bp; - struct page *page, *head_page; - int frag_offset, frag_len; - int page_offset; - int i; - struct sk_buff *skb = NULL, *skb_tmp, **skbh; - - vaddr = phys_to_virt(addr); - DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); - - dpa_bp = priv->dpa_bp; - /* Iterate through the SGT entries and add data buffers to the skb */ - sgt = vaddr + fd_off; - for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) { - /* Extension bit is not supported */ - DPA_ERR_ON(sgt[i].extension); - - /* We use a single global Rx pool */ - DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid)); - - sg_addr = qm_sg_addr(&sgt[i]); - sg_vaddr = phys_to_virt(sg_addr); - DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr, - SMP_CACHE_BYTES)); - - dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size, - DMA_BIDIRECTIONAL); - if (i == 0) { - DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1); - DPA_ERR_ON(skb->head != sg_vaddr); - - skb->ip_summed = CHECKSUM_NONE; - - /* Make sure forwarded skbs will have enough space - * on Tx, if extra headers are added. - */ - DPA_ERR_ON(fd_off != priv->rx_headroom); - skb_reserve(skb, fd_off); - skb_put(skb, sgt[i].length); - } else { - /* Not the first S/G entry; all data from buffer will - * be added in an skb fragment; fragment index is offset - * by one since first S/G entry was incorporated in the - * linear part of the skb. - * - * Caution: 'page' may be a tail page. - */ - DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1); - page = virt_to_page(sg_vaddr); - head_page = virt_to_head_page(sg_vaddr); - - /* Free (only) the skbuff shell because its data buffer - * is already a frag in the main skb. - */ - get_page(head_page); - dev_kfree_skb(skb_tmp); - - /* Compute offset in (possibly tail) page */ - page_offset = ((unsigned long)sg_vaddr & - (PAGE_SIZE - 1)) + - (page_address(page) - page_address(head_page)); - /* page_offset only refers to the beginning of sgt[i]; - * but the buffer itself may have an internal offset. - */ - frag_offset = sgt[i].offset + page_offset; - frag_len = sgt[i].length; - /* skb_add_rx_frag() does no checking on the page; if - * we pass it a tail page, we'll end up with - * bad page accounting and eventually with segafults. 
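// Illustrative sketch, not part of the original patch: the offset arithmetic
// used just above when turning an S/G entry into an skb fragment. The buffer
// may live in a tail page of a compound allocation, so the fragment offset
// handed to skb_add_rx_frag() must be computed relative to the head page.
// All addresses below are made up purely to show the calculation.
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL

int main(void)
{
        unsigned long head_page_addr = 0x10000000UL;  // start of the head page
        unsigned long page_addr      = 0x10001000UL;  // tail page holding the data
        unsigned long sg_vaddr       = 0x10001140UL;  // where the S/G entry points
        unsigned long sg_entry_off   = 64;            // sgt[i].offset inside the buffer

        // offset of the data within its own page, plus the distance of that
        // page from the head page
        unsigned long page_offset = (sg_vaddr & (EXAMPLE_PAGE_SIZE - 1)) +
                                    (page_addr - head_page_addr);
        unsigned long frag_offset = sg_entry_off + page_offset;

        printf("page_offset = %lu, frag_offset = %lu\n", page_offset, frag_offset);
        return 0;
}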
- */ - skb_add_rx_frag(skb, i - 1, head_page, frag_offset, - frag_len, dpa_bp->size); - } - /* Update the pool count for the current {cpu x bpool} */ - (*count_ptr)--; - - if (sgt[i].final) - break; - } - WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); - - /* recycle the SGT fragment */ - DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); - dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr); - return skb; -} - -void _dpa_rx(struct net_device *net_dev, - struct qman_portal *portal, - const struct dpa_priv_s *priv, - struct dpa_percpu_priv_s *percpu_priv, - const struct qm_fd *fd, - u32 fqid, - int *count_ptr) -{ - struct dpa_bp *dpa_bp; - struct sk_buff *skb; - dma_addr_t addr = qm_fd_addr(fd); - u32 fd_status = fd->status; - unsigned int skb_len; - struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats; - - if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { - if (net_ratelimit()) - netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", - fd_status & FM_FD_STAT_RX_ERRORS); - - percpu_stats->rx_errors++; - goto _release_frame; - } - - dpa_bp = priv->dpa_bp; - DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid)); - - /* prefetch the first 64 bytes of the frame or the SGT start */ - dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL); - prefetch(phys_to_virt(addr) + dpa_fd_offset(fd)); - - /* The only FD types that we may receive are contig and S/G */ - DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg)); - - if (likely(fd->format == qm_fd_contig)) - skb = contig_fd_to_skb(priv, fd); - else - skb = sg_fd_to_skb(priv, fd, count_ptr); - - /* Account for either the contig buffer or the SGT buffer (depending on - * which case we were in) having been removed from the pool. - */ - (*count_ptr)--; - skb->protocol = eth_type_trans(skb, net_dev); - - /* IP Reassembled frames are allowed to be larger than MTU */ - if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) && - !(fd_status & FM_FD_IPR))) { - percpu_stats->rx_dropped++; - goto drop_bad_frame; - } - - skb_len = skb->len; - - if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) - goto packet_dropped; - - percpu_stats->rx_packets++; - percpu_stats->rx_bytes += skb_len; - -packet_dropped: - return; - -drop_bad_frame: - dev_kfree_skb(skb); - return; - -_release_frame: - dpa_fd_release(net_dev, fd); -} - -static int skb_to_contig_fd(struct dpa_priv_s *priv, - struct sk_buff *skb, struct qm_fd *fd, - int *count_ptr, int *offset) -{ - struct sk_buff **skbh; - dma_addr_t addr; - struct dpa_bp *dpa_bp = priv->dpa_bp; - struct net_device *net_dev = priv->net_dev; - int err; - enum dma_data_direction dma_dir; - unsigned char *buffer_start; - - { - /* We are guaranteed to have at least tx_headroom bytes - * available, so just use that for offset. - */ - fd->bpid = 0xff; - buffer_start = skb->data - priv->tx_headroom; - fd->offset = priv->tx_headroom; - dma_dir = DMA_TO_DEVICE; - - DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); - } - - /* Enable L3/L4 hardware checksum computation. - * - * We must do this before dma_map_single(DMA_TO_DEVICE), because we may - * need to write into the skb. 
- */ - err = dpa_enable_tx_csum(priv, skb, fd, - ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE); - if (unlikely(err < 0)) { - if (net_ratelimit()) - netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", - err); - return err; - } - - /* Fill in the rest of the FD fields */ - fd->format = qm_fd_contig; - fd->length20 = skb->len; - fd->cmd |= FM_FD_CMD_FCO; - - /* Map the entire buffer size that may be seen by FMan, but no more */ - addr = dma_map_single(dpa_bp->dev, skbh, - skb_tail_pointer(skb) - buffer_start, dma_dir); - if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { - if (net_ratelimit()) - netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); - return -EINVAL; - } - fd->addr_hi = (u8)upper_32_bits(addr); - fd->addr_lo = lower_32_bits(addr); - - return 0; -} - -static int skb_to_sg_fd(struct dpa_priv_s *priv, - struct sk_buff *skb, struct qm_fd *fd) -{ - struct dpa_bp *dpa_bp = priv->dpa_bp; - dma_addr_t addr; - struct sk_buff **skbh; - struct net_device *net_dev = priv->net_dev; - int err; - - struct qm_sg_entry *sgt; - void *sgt_buf; - void *buffer_start; - skb_frag_t *frag; - int i, j; - const enum dma_data_direction dma_dir = DMA_TO_DEVICE; - const int nr_frags = skb_shinfo(skb)->nr_frags; - - fd->format = qm_fd_sg; - - /* get a page frag to store the SGTable */ - sgt_buf = netdev_alloc_frag(priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags)); - if (unlikely(!sgt_buf)) { - netdev_err(net_dev, "netdev_alloc_frag() failed\n"); - return -ENOMEM; - } - - /* Enable L3/L4 hardware checksum computation. - * - * We must do this before dma_map_single(DMA_TO_DEVICE), because we may - * need to write into the skb. - */ - err = dpa_enable_tx_csum(priv, skb, fd, - sgt_buf + DPA_TX_PRIV_DATA_SIZE); - if (unlikely(err < 0)) { - if (net_ratelimit()) - netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", - err); - goto csum_failed; - } - - sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); - sgt[0].bpid = 0xff; - sgt[0].offset = 0; - sgt[0].length = cpu_to_be32(skb_headlen(skb)); - sgt[0].extension = 0; - sgt[0].final = 0; - addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir); - if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { - dev_err(dpa_bp->dev, "DMA mapping failed"); - err = -EINVAL; - goto sg0_map_failed; - } - sgt[0].addr_hi = (u8)upper_32_bits(addr); - sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr)); - - /* populate the rest of SGT entries */ - for (i = 1; i <= nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i - 1]; - sgt[i].bpid = 0xff; - sgt[i].offset = 0; - sgt[i].length = cpu_to_be32(frag->size); - sgt[i].extension = 0; - sgt[i].final = 0; - - DPA_ERR_ON(!skb_frag_page(frag)); - addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length, - dma_dir); - if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { - dev_err(dpa_bp->dev, "DMA mapping failed"); - err = -EINVAL; - goto sg_map_failed; - } - - /* keep the offset in the address */ - sgt[i].addr_hi = (u8)upper_32_bits(addr); - sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr)); - } - sgt[i - 1].final = 1; - - fd->length20 = skb->len; - fd->offset = priv->tx_headroom; - - /* DMA map the SGT page */ - buffer_start = (void *)sgt - priv->tx_headroom; - DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0); - - addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); - if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) { - dev_err(dpa_bp->dev, "DMA mapping failed"); - err = -EINVAL; - goto sgt_map_failed; - } - - 
fd->bpid = 0xff; - fd->cmd |= FM_FD_CMD_FCO; - fd->addr_hi = (u8)upper_32_bits(addr); - fd->addr_lo = lower_32_bits(addr); - - return 0; - -sgt_map_failed: -sg_map_failed: - for (j = 0; j < i; j++) - dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]), - cpu_to_be32(sgt[j].length), dma_dir); -sg0_map_failed: -csum_failed: - put_page(virt_to_head_page(sgt_buf)); - - return err; -} - -int dpa_tx(struct sk_buff *skb, struct net_device *net_dev) -{ - struct dpa_priv_s *priv; - struct qm_fd fd; - struct dpa_percpu_priv_s *percpu_priv; - struct rtnl_link_stats64 *percpu_stats; - int err = 0; - const int queue_mapping = dpa_get_queue_mapping(skb); - bool nonlinear = skb_is_nonlinear(skb); - int *countptr, offset = 0; - - priv = netdev_priv(net_dev); - /* Non-migratable context, safe to use raw_cpu_ptr */ - percpu_priv = raw_cpu_ptr(priv->percpu_priv); - percpu_stats = &percpu_priv->stats; - countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count); - - clear_fd(&fd); - - if (!nonlinear) { - /* We're going to store the skb backpointer at the beginning - * of the data buffer, so we need a privately owned skb - * - * We've made sure skb is not shared in dev->priv_flags, - * we need to verify the skb head is not cloned - */ - if (skb_cow_head(skb, priv->tx_headroom)) - goto enomem; - - BUG_ON(skb_is_nonlinear(skb)); - } - - /* MAX_SKB_FRAGS is equal or larger than our DPA_SGT_MAX_ENTRIES; - * make sure we don't feed FMan with more fragments than it supports. - * Btw, we're using the first sgt entry to store the linear part of - * the skb, so we're one extra frag short. - */ - if (nonlinear && - likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) { - /* Just create a S/G fd based on the skb */ - err = skb_to_sg_fd(priv, skb, &fd); - percpu_priv->tx_frag_skbuffs++; - } else { - /* If the egress skb contains more fragments than we support - * we have no choice but to linearize it ourselves. 
- */ - if (unlikely(nonlinear) && __skb_linearize(skb)) - goto enomem; - - /* Finally, create a contig FD from this skb */ - err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset); - } - if (unlikely(err < 0)) - goto skb_to_fd_failed; - - if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) - return NETDEV_TX_OK; - - /* dpa_xmit failed */ - if (fd.bpid != 0xff) { - (*countptr)--; - dpa_fd_release(net_dev, &fd); - percpu_stats->tx_errors++; - return NETDEV_TX_OK; - } - _dpa_cleanup_tx_fd(priv, &fd); -skb_to_fd_failed: -enomem: - percpu_stats->tx_errors++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h index 46eca272..ee6dfc9a 100644 --- a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h +++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h @@ -80,9 +80,9 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd, TP_fast_assign( __entry->fqid = fq->fqid; __entry->fd_addr = qm_fd_addr_get64(fd); - __entry->fd_format = fd->format; - __entry->fd_offset = dpa_fd_offset(fd); - __entry->fd_length = dpa_fd_length(fd); + __entry->fd_format = qm_fd_get_format(fd); + __entry->fd_offset = qm_fd_get_offset(fd); + __entry->fd_length = qm_fd_get_length(fd); __entry->fd_status = fd->status; __assign_str(name, netdev->name); ), @@ -99,7 +99,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd, */ /* Tx (egress) fd */ -DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd, +DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd, TP_PROTO(struct net_device *netdev, struct qman_fq *fq, @@ -109,7 +109,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd, ); /* Rx fd */ -DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd, +DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd, TP_PROTO(struct net_device *netdev, struct qman_fq *fq, @@ -119,7 +119,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd, ); /* Tx confirmation fd */ -DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd, +DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd, TP_PROTO(struct net_device *netdev, struct qman_fq *fq, |