Diffstat (limited to 'linux/drivers/net/ethernet/freescale/fman/fman.c')
-rw-r--r--  linux/drivers/net/ethernet/freescale/fman/fman.c | 1293
1 file changed, 770 insertions(+), 523 deletions(-)
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.c b/linux/drivers/net/ethernet/freescale/fman/fman.c
index 5119b400..c0f26b31 100644
--- a/linux/drivers/net/ethernet/freescale/fman/fman.c
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.c
@@ -16,7 +16,7 @@
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
-// *
+ *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -38,8 +38,8 @@
#include "fman.h"
#include "fman_muram.h"
-#include <asm/mpc85xx.h>
+#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -48,12 +48,12 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <linux/libfdt_env.h>
#ifdef __rtems__
#include <bsp/fdt.h>
#include <bsp/qoriq.h>
#endif /* __rtems__ */
-
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
#define MAX_NUM_OF_MACS 10
@@ -88,31 +88,11 @@
#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
#define EX_DMA_SINGLE_PORT_ECC 0x00008000
-#define DFLT_EXCEPTIONS \
- ((EX_DMA_BUS_ERROR) | \
- (EX_DMA_READ_ECC) | \
- (EX_DMA_SYSTEM_WRITE_ECC) | \
- (EX_DMA_FM_WRITE_ECC) | \
- (EX_FPM_STALL_ON_TASKS) | \
- (EX_FPM_SINGLE_ECC) | \
- (EX_FPM_DOUBLE_ECC) | \
- (EX_QMI_DEQ_FROM_UNKNOWN_PORTID) | \
- (EX_BMI_LIST_RAM_ECC) | \
- (EX_BMI_STORAGE_PROFILE_ECC) | \
- (EX_BMI_STATISTICS_RAM_ECC) | \
- (EX_MURAM_ECC) | \
- (EX_BMI_DISPATCH_RAM_ECC) | \
- (EX_QMI_DOUBLE_ECC) | \
- (EX_QMI_SINGLE_ECC))
-
/* DMA defines */
/* masks */
-#define DMA_MODE_AID_OR 0x20000000
-#define DMA_MODE_SBER 0x10000000
#define DMA_MODE_BER 0x00200000
#define DMA_MODE_ECC 0x00000020
#define DMA_MODE_SECURE_PROT 0x00000800
-#define DMA_MODE_EMER_READ 0x00080000
#define DMA_MODE_AXI_DBG_MASK 0x0F000000
#define DMA_TRANSFER_PORTID_MASK 0xFF000000
@@ -130,7 +110,6 @@
#define DMA_MODE_CEN_SHIFT 13
#define DMA_MODE_CEN_MASK 0x00000007
#define DMA_MODE_DBG_SHIFT 7
-#define DMA_MODE_EMER_LVL_SHIFT 6
#define DMA_MODE_AID_MODE_SHIFT 4
#define DMA_THRESH_COMMQ_SHIFT 24
@@ -160,8 +139,6 @@
#define FPM_RAM_MURAM_ECC 0x00008000
#define FPM_RAM_IRAM_ECC 0x00004000
-#define FPM_RAM_MURAM_TEST_ECC 0x20000000
-#define FPM_RAM_IRAM_TEST_ECC 0x10000000
#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
#define FPM_RAM_IRAM_ECC_EN 0x40000000
@@ -247,8 +224,6 @@
#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
#define QMI_INTR_EN_SINGLE_ECC 0x80000000
-#define QMI_TAPC_TAP 22
-
#define QMI_GS_HALT_NOT_BUSY 0x00000002
/* IRAM defines */
@@ -266,7 +241,6 @@
#define DEFAULT_DMA_DBG_CNT_MODE 0
#define DEFAULT_DMA_SOS_EMERGENCY 0
#define DEFAULT_DMA_WATCHDOG 0
-#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
#define DEFAULT_DISP_LIMIT 0
#define DEFAULT_PRS_DISP_TH 16
#define DEFAULT_PLCR_DISP_TH 16
@@ -509,13 +483,9 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
-struct fman_rg {
- struct fman_fpm_regs __iomem *fpm_rg;
- struct fman_dma_regs __iomem *dma_rg;
- struct fman_bmi_regs __iomem *bmi_rg;
- struct fman_qmi_regs __iomem *qmi_rg;
-};
-
+/* Structure that holds current FMan state.
+ * Used for saving run time information.
+ */
struct fman_state_struct {
u8 fm_id;
u16 fm_clk_freq;
@@ -527,7 +497,6 @@ struct fman_state_struct {
u32 accumulated_fifo_size;
u8 accumulated_num_of_open_dmas;
u8 accumulated_num_of_deq_tnums;
- bool low_end_restriction;
u32 exceptions;
u32 extra_fifo_pool_size;
u8 extra_tasks_pool_size;
@@ -558,6 +527,7 @@ struct fman_state_struct {
struct resource *res;
};
+/* Structure that holds FMan initial configuration */
struct fman_cfg {
u8 disp_limit_tsh;
u8 prs_disp_tsh;
@@ -570,7 +540,6 @@ struct fman_cfg {
u8 fm_ctl2_disp_tsh;
int dma_cache_override;
enum fman_dma_aid_mode dma_aid_mode;
- bool dma_aid_override;
u32 dma_axi_dbg_num_of_beats;
u32 dma_cam_num_of_entries;
u32 dma_watchdog;
@@ -582,31 +551,18 @@ struct fman_cfg {
u32 dma_read_buf_tsh_clr_emer;
u32 dma_sos_emergency;
int dma_dbg_cnt_mode;
- bool dma_stop_on_bus_error;
- bool dma_en_emergency;
- u32 dma_emergency_bus_select;
- int dma_emergency_level;
- bool dma_en_emergency_smoother;
- u32 dma_emergency_switch_counter;
- bool halt_on_external_activ;
- bool halt_on_unrecov_ecc_err;
int catastrophic_err;
int dma_err;
- bool en_muram_test_mode;
- bool en_iram_test_mode;
- bool external_ecc_rams_enable;
- u16 tnum_aging_period;
u32 exceptions;
u16 clk_freq;
- bool pedantic_dma;
u32 cam_base_addr;
u32 fifo_base_addr;
u32 total_fifo_size;
u32 total_num_of_tasks;
- bool qmi_deq_option_support;
u32 qmi_def_tnums_thresh;
};
+/* Structure that holds information received from device tree */
struct fman_dts_params {
void __iomem *base_addr; /* FMan virtual address */
#ifndef __rtems__
@@ -621,10 +577,36 @@ struct fman_dts_params {
u32 qman_channel_base; /* QMan channels base */
u32 num_of_qman_channels; /* Number of QMan channels */
- phys_addr_t muram_phy_base_addr; /* MURAM physical address */
- resource_size_t muram_size; /* MURAM size */
+ struct resource muram_res; /* MURAM resource */
};
+/** fman_exceptions_cb
+ * @fman: Pointer to FMan
+ * @exception: The exception.
+ *
+ * Exceptions user callback routine, called upon an exception,
+ * passing the exception identification.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * @fman: Pointer to FMan
+ * @port_id: Port id
+ * @addr: Address that caused the error
+ * @tnum: Owner of error
+ * @liodn: Logical IO device number
+ *
+ * Bus error user callback routine, called upon a bus error,
+ * passing parameters describing the error and the owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
struct fman {
struct device *dev;
void __iomem *base_addr;
@@ -643,12 +625,11 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
- int fifo_offset;
+ unsigned long fifo_offset;
size_t fifo_size;
- bool reset_on_init;
u32 liodn_base[64];
u32 liodn_offset[64];
@@ -656,38 +637,47 @@ struct fman {
struct fman_dts_params dts_params;
};
-static void fman_exceptions(struct fman *fman, enum fman_exceptions exception)
+static irqreturn_t fman_exceptions(struct fman *fman,
+ enum fman_exceptions exception)
{
- pr_debug("FMan[%d] exception %d\n",
- fman->state->fm_id, exception);
+ dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+ __func__, fman->state->fm_id, exception);
+
+ return IRQ_HANDLED;
}
-static void fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
- u64 __maybe_unused addr, u8 __maybe_unused tnum,
- u16 __maybe_unused liodn)
+static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr,
+ u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
{
- pr_debug("FMan[%d] bus error: port_id[%d]\n",
- fman->state->fm_id, port_id);
+ dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+ __func__, fman->state->fm_id, port_id);
+
+ return IRQ_HANDLED;
}
-static inline void call_mac_isr(struct fman *fman, u8 id)
+static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
{
- if (fman->intr_mng[id].isr_cb)
+ if (fman->intr_mng[id].isr_cb) {
fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
}
static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
{
u8 sw_port_id = 0;
- if (hw_port_id >= BASE_TX_PORTID) {
+ if (hw_port_id >= BASE_TX_PORTID)
sw_port_id = hw_port_id - BASE_TX_PORTID;
- } else if (hw_port_id >= BASE_RX_PORTID) {
+ else if (hw_port_id >= BASE_RX_PORTID)
sw_port_id = hw_port_id - BASE_RX_PORTID;
- } else {
+ else
sw_port_id = 0;
- WARN_ON(false);
- }
return sw_port_id;
}
@@ -697,26 +687,26 @@ static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
{
u32 tmp = 0;
- tmp = (u32)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
+ tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
- tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
+ tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
/* order restoration */
if (port_id % 2)
- tmp |= (FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
else
- tmp |= (FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+ tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
iowrite32be(tmp, &fpm_rg->fmfp_prc);
}
-static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
+static void set_port_liodn(struct fman *fman, u8 port_id,
u32 liodn_base, u32 liodn_ofst)
{
u32 tmp;
/* set LIODN base for this port */
- tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
+ tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
tmp &= ~DMA_LIODN_BASE_MASK;
tmp |= liodn_base;
@@ -724,8 +714,8 @@ static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -758,24 +748,14 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
cfg->dma_err = DEFAULT_DMA_ERR;
- cfg->halt_on_external_activ = false;
- cfg->halt_on_unrecov_ecc_err = false;
- cfg->en_iram_test_mode = false;
- cfg->en_muram_test_mode = false;
- cfg->external_ecc_rams_enable = false;
- cfg->dma_aid_override = false;
cfg->dma_aid_mode = DEFAULT_AID_MODE;
cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
- cfg->dma_en_emergency = false;
cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
- cfg->dma_en_emergency_smoother = false;
- cfg->dma_emergency_switch_counter =
- DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
@@ -785,11 +765,6 @@ static void fman_defconfig(struct fman_cfg *cfg)
cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
-
- cfg->pedantic_dma = false;
- cfg->tnum_aging_period = 0;
- cfg->dma_stop_on_bus_error = false;
- cfg->qmi_deq_option_support = false;
}
static int dma_init(struct fman *fman)
@@ -808,37 +783,23 @@ static int dma_init(struct fman *fman)
/* configure mode register */
tmp_reg = 0;
tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
- if (cfg->dma_aid_override)
- tmp_reg |= DMA_MODE_AID_OR;
if (cfg->exceptions & EX_DMA_BUS_ERROR)
tmp_reg |= DMA_MODE_BER;
if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
(cfg->exceptions & EX_DMA_READ_ECC) |
(cfg->exceptions & EX_DMA_FM_WRITE_ECC))
tmp_reg |= DMA_MODE_ECC;
- if (cfg->dma_stop_on_bus_error)
- tmp_reg |= DMA_MODE_SBER;
if (cfg->dma_axi_dbg_num_of_beats)
tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
((cfg->dma_axi_dbg_num_of_beats - 1)
<< DMA_MODE_AXI_DBG_SHIFT));
- if (cfg->dma_en_emergency) {
- tmp_reg |= cfg->dma_emergency_bus_select;
- tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
- if (cfg->dma_en_emergency_smoother)
- iowrite32be(cfg->dma_emergency_switch_counter,
- &dma_rg->fmdmemsr);
- }
tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
tmp_reg |= DMA_MODE_SECURE_PROT;
tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
- if (cfg->pedantic_dma)
- tmp_reg |= DMA_MODE_EMER_READ;
-
iowrite32be(tmp_reg, &dma_rg->fmdmmr);
/* configure thresholds register */
@@ -874,7 +835,8 @@ static int dma_init(struct fman *fman)
(u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
@@ -888,22 +850,24 @@ static int dma_init(struct fman *fman)
fman->cam_offset = fman_muram_alloc(fman->muram,
fman->cam_size);
if (IS_ERR_VALUE(fman->cam_offset)) {
- pr_err("MURAM alloc for DMA CAM failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
return -ENOMEM;
}
if (fman->cfg->dma_cam_num_of_entries % 8 ||
fman->cfg->dma_cam_num_of_entries > 32) {
- pr_err("wrong dma_cam_num_of_entries\n");
+ dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+ __func__);
return -EINVAL;
}
cam_base_addr = (u32 __iomem *)
fman_muram_offset_to_vbase(fman->muram,
fman->cam_offset);
- out_be32(cam_base_addr,
- ~((1 << (32 - fman->cfg->dma_cam_num_of_entries)) -
- 1));
+ iowrite32be(~((1 <<
+ (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+ cam_base_addr);
}
fman->cfg->cam_base_addr = fman->cam_offset;
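The IS_ERR_VALUE() checks above work only because this patch also widens cam_offset and fifo_offset from int to unsigned long: IS_ERR_VALUE() flags the top error range of an unsigned long, and a signed int would not compare correctly. A minimal sketch of the idiom, assuming fman_muram_alloc() returns either a valid MURAM offset or a negative errno cast to unsigned long (muram and size are placeholders):

    unsigned long offset;

    offset = fman_muram_alloc(muram, size);
    if (IS_ERR_VALUE(offset))	/* reliable for unsigned long only */
        return -ENOMEM;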
@@ -948,10 +912,10 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
- if (!cfg->halt_on_external_activ)
- tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
- if (!cfg->halt_on_unrecov_ecc_err)
- tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ /* FMan is not halted upon external halt activation */
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+	/* FMan is not halted upon an unrecoverable ECC error */
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
/* clear all fmCtls event registers */
@@ -964,17 +928,7 @@ static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
*/
/* event bits */
tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
- /* Rams enable not effected by RCR bit,
- * but by a COP configuration
- */
- if (cfg->external_ecc_rams_enable)
- tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
-
- /* enable test mode */
- if (cfg->en_muram_test_mode)
- tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
- if (cfg->en_iram_test_mode)
- tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
+
iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
tmp_reg = 0;
@@ -1031,8 +985,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
struct fman_cfg *cfg)
{
u32 tmp_reg;
- u16 period_in_fm_clocks;
- u8 remainder;
/* Init QMI Registers */
@@ -1048,22 +1000,6 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
/* enable events */
iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
- if (cfg->tnum_aging_period) {
- /* tnum_aging_period is in units of usec, clk_freq in Mhz */
- period_in_fm_clocks = (u16)
- (cfg->tnum_aging_period * cfg->clk_freq);
- /* period_in_fm_clocks must be a 64 multiple */
- remainder = (u8)(period_in_fm_clocks % 64);
- if (remainder) {
- tmp_reg = (u32)((period_in_fm_clocks / 64) + 1);
- } else {
- tmp_reg = (u32)(period_in_fm_clocks / 64);
- if (!tmp_reg)
- tmp_reg = 1;
- }
- tmp_reg <<= QMI_TAPC_TAP;
- iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
- }
tmp_reg = 0;
/* Clear interrupt events */
iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
@@ -1073,163 +1009,163 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
-static int enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
+static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
/* Enable all modules */
- /* clear&enable global counters - calculate reg and save for later,
+ /* clear&enable global counters - calculate reg and save for later,
* because it's the same reg for QMI enable
*/
cfg_reg = QMI_CFG_EN_COUNTERS;
- if (cfg->qmi_deq_option_support)
- cfg_reg |= (u32)(((cfg->qmi_def_tnums_thresh) << 8) |
- cfg->qmi_def_tnums_thresh);
- iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
+ /* Set enqueue and dequeue thresholds */
+ cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+
+ iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
- &fman_rg->qmi_rg->fmqm_gc);
+ &fman->qmi_regs->fmqm_gc);
return 0;
}
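With the qmi_deq_option_support flag removed, enable() now always programs the QMI thresholds. For illustration, both thresholds share one register: the dequeue threshold sits in the low byte and the enqueue threshold one byte higher (the << 8 above is assumed to match the QMI_CFG_ENQ_SHIFT used later in this file), so qmi_def_tnums_thresh = 8 would yield 0x0808 in the low half of fmqm_gc:

    u32 cfg_reg = QMI_CFG_EN_COUNTERS;

    /* dequeue threshold in bits 0-7, enqueue threshold in bits 8-15 */
    cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) |   /* enqueue */
               cfg->qmi_def_tnums_thresh;           /* dequeue */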
-static int set_exception(struct fman_rg *fman_rg,
+static int set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 tmp;
switch (exception) {
case FMAN_EX_DMA_BUS_ERROR:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_BER;
else
tmp &= ~DMA_MODE_BER;
/* disable bus error */
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_DMA_READ_ECC:
case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
case FMAN_EX_DMA_FM_WRITE_ECC:
- tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
if (enable)
tmp |= DMA_MODE_ECC;
else
tmp &= ~DMA_MODE_ECC;
- iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
break;
case FMAN_EX_FPM_STALL_ON_TASKS:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_STALL_EN;
else
tmp &= ~FPM_EV_MASK_STALL_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_FPM_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
if (enable)
tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
else
tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
- iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
break;
case FMAN_EX_QMI_SINGLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
if (enable)
tmp |= QMI_INTR_EN_SINGLE_ECC;
else
tmp &= ~QMI_INTR_EN_SINGLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
break;
case FMAN_EX_QMI_DOUBLE_ECC:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
else
tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
- tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
if (enable)
tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
else
tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
- iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
break;
case FMAN_EX_BMI_LIST_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_STATISTICS_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_BMI_DISPATCH_RAM_ECC:
- tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
if (enable)
tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
else
tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
- iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
break;
case FMAN_EX_IRAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_IRAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
case FMAN_EX_MURAM_ECC:
- tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
if (enable) {
/* enable ECC if not enabled */
- enable_rams_ecc(fman_rg->fpm_rg);
+ enable_rams_ecc(fman->fpm_regs);
/* enable ECC interrupts */
tmp |= FPM_MURAM_ECC_ERR_EX_EN;
} else {
/* ECC mechanism may be disabled,
* depending on driver status
*/
- disable_rams_ecc(fman_rg->fpm_rg);
+ disable_rams_ecc(fman->fpm_regs);
tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
}
- iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
break;
default:
return -EINVAL;
@@ -1346,10 +1282,11 @@ static void free_init_resources(struct fman *fman)
fman->fifo_size);
}
-static void bmi_err_event(struct fman *fman)
+static irqreturn_t bmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&bmi_rg->fmbm_ievr);
mask = ioread32be(&bmi_rg->fmbm_ier);
@@ -1362,19 +1299,22 @@ static void bmi_err_event(struct fman *fman)
iowrite32be(event, &bmi_rg->fmbm_ievr);
if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
- fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+
+ return ret;
}
-static void qmi_err_event(struct fman *fman)
+static irqreturn_t qmi_err_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_eie);
mask = ioread32be(&qmi_rg->fmqm_eien);
@@ -1388,17 +1328,21 @@ static void qmi_err_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_eie);
if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
- fman->exception_cb(fman, FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+ ret = fman->exception_cb(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+
+ return ret;
}
-static void dma_err_event(struct fman *fman)
+static irqreturn_t dma_err_event(struct fman *fman)
{
u32 status, mask, com_id;
u8 tnum, port_id, relative_port_id;
u16 liodn;
struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ irqreturn_t ret = IRQ_NONE;
status = ioread32be(&dma_rg->fmdmsr);
mask = ioread32be(&dma_rg->fmdmmr);
@@ -1431,22 +1375,26 @@ static void dma_err_event(struct fman *fman)
tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
DMA_TRANSFER_TNUM_SHIFT);
liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
- fman->bus_error_cb(fman, relative_port_id, addr, tnum, liodn);
+ ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+ liodn);
}
if (status & DMA_STATUS_FM_SPDAT_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
if (status & DMA_STATUS_READ_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
if (status & DMA_STATUS_FM_WRITE_ECC)
- fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+
+ return ret;
}
-static void fpm_err_event(struct fman *fman)
+static irqreturn_t fpm_err_event(struct fman *fman)
{
u32 event;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fmfp_ee);
/* clear the all occurred events */
@@ -1454,18 +1402,21 @@ static void fpm_err_event(struct fman *fman)
if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
(event & FPM_EV_MASK_DOUBLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
if ((event & FPM_EV_MASK_SINGLE_ECC) &&
(event & FPM_EV_MASK_SINGLE_ECC_EN))
- fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+
+ return ret;
}
-static void muram_err_intr(struct fman *fman)
+static irqreturn_t muram_err_intr(struct fman *fman)
{
u32 event, mask;
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&fpm_rg->fm_rcr);
mask = ioread32be(&fpm_rg->fm_rie);
@@ -1474,13 +1425,16 @@ static void muram_err_intr(struct fman *fman)
iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
- fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+
+ return ret;
}
-static void qmi_event(struct fman *fman)
+static irqreturn_t qmi_event(struct fman *fman)
{
u32 event, mask, force;
struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
event = ioread32be(&qmi_rg->fmqm_ie);
mask = ioread32be(&qmi_rg->fmqm_ien);
@@ -1493,7 +1447,9 @@ static void qmi_event(struct fman *fman)
iowrite32be(event, &qmi_rg->fmqm_ie);
if (event & QMI_INTR_EN_SINGLE_ECC)
- fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+
+ return ret;
}
static void enable_time_stamp(struct fman *fman)
@@ -1534,23 +1490,29 @@ static void enable_time_stamp(struct fman *fman)
static int clear_iram(struct fman *fman)
{
struct fman_iram_regs __iomem *iram;
- int i;
+ int i, count;
- iram = (struct fman_iram_regs __iomem *)(fman->base_addr + IMEM_OFFSET);
+ iram = fman->base_addr + IMEM_OFFSET;
/* Enable the auto-increment */
- out_be32(&iram->iadd, IRAM_IADD_AIE);
- while (in_be32(&iram->iadd) != IRAM_IADD_AIE)
- ;
+ iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+ if (count == 0)
+ return -EBUSY;
for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
- out_be32(&iram->idata, 0xffffffff);
+ iowrite32be(0xffffffff, &iram->idata);
- out_be32(&iram->iadd, fman->state->fm_iram_size - 4);
- /* Memory barrier */
- mb();
- while (in_be32(&iram->idata) != 0xffffffff)
- ;
+ iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+ if (count == 0)
+ return -EBUSY;
return 0;
}
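The rewritten clear_iram() (and fman_reset() below) replaces the old unbounded while loops with a bounded poll that gives up after roughly 100 us. A minimal sketch of the idiom as a standalone helper; the helper name is illustrative, not a driver symbol:

    static int poll_reg_be32(u32 __iomem *reg, u32 expected)
    {
        int count = 100;

        do {
            udelay(1);
        } while ((ioread32be(reg) != expected) && --count);

        return count ? 0 : -EBUSY;	/* timed out after ~100 us */
    }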
@@ -1623,9 +1585,10 @@ static int get_module_event(enum fman_event_modules module, u8 mod_id,
switch (module) {
case FMAN_MOD_MAC:
- event = (intr_type == FMAN_INTR_TYPE_ERR) ?
- (FMAN_EV_ERR_MAC0 + mod_id) :
- (FMAN_EV_MAC0 + mod_id);
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_ERR_MAC0 + mod_id;
+ else
+ event = FMAN_EV_MAC0 + mod_id;
break;
case FMAN_MOD_FMAN_CTRL:
if (intr_type == FMAN_INTR_TYPE_ERR)
@@ -1667,14 +1630,15 @@ static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
if ((fman->state->accumulated_fifo_size + fifo) >
(fman->state->total_fifo_size -
fman->state->extra_fifo_pool_size)) {
- pr_err("Requested fifo size and extra size exceed total FIFO size.\n");
+ dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+ __func__);
return -EAGAIN;
}
/* Read, modify and write to HW */
- tmp = (u32)((fifo / FMAN_BMI_FIFO_UNITS - 1) |
- ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
- BMI_EXTRA_FIFO_SIZE_SHIFT));
+ tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT);
iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
/* update accumulated */
@@ -1693,14 +1657,14 @@ static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
if (extra_tasks)
fman->state->extra_tasks_pool_size =
- (u8)max(fman->state->extra_tasks_pool_size, extra_tasks);
+ max(fman->state->extra_tasks_pool_size, extra_tasks);
/* check that there are enough uncommitted tasks */
if ((fman->state->accumulated_num_of_tasks + tasks) >
(fman->state->total_num_of_tasks -
fman->state->extra_tasks_pool_size)) {
- pr_err("Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
}
/* update accumulated */
@@ -1759,8 +1723,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
if ((fman->state->rev_info.major < 6) &&
(fman->state->accumulated_num_of_open_dmas - current_val +
open_dmas > fman->state->max_num_of_open_dmas)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
- fman->state->fm_id);
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ __func__, fman->state->fm_id);
return -EAGAIN;
} else if ((fman->state->rev_info.major >= 6) &&
!((fman->state->rev_info.major == 6) &&
@@ -1768,8 +1732,8 @@ static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
(fman->state->accumulated_num_of_open_dmas -
current_val + open_dmas >
fman->state->dma_thresh_max_commq + 1)) {
- pr_err("Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
- fman->state->fm_id,
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ __func__, fman->state->fm_id,
fman->state->dma_thresh_max_commq + 1);
return -EAGAIN;
}
@@ -1820,8 +1784,9 @@ static int fman_config(struct fman *fman)
goto err_fm_drv;
/* Initialize MURAM block */
- fman->muram = fman_muram_init(fman->dts_params.muram_phy_base_addr,
- fman->dts_params.muram_size);
+ fman->muram =
+ fman_muram_init(fman->dts_params.muram_res.start,
+ resource_size(&fman->dts_params.muram_res));
if (!fman->muram)
goto err_fm_soc_specific;
@@ -1836,24 +1801,31 @@ static int fman_config(struct fman *fman)
#endif /* __rtems__ */
fman->exception_cb = fman_exceptions;
fman->bus_error_cb = fman_bus_error;
- fman->fpm_regs =
- (struct fman_fpm_regs __iomem *)(base_addr + FPM_OFFSET);
- fman->bmi_regs =
- (struct fman_bmi_regs __iomem *)(base_addr + BMI_OFFSET);
- fman->qmi_regs =
- (struct fman_qmi_regs __iomem *)(base_addr + QMI_OFFSET);
- fman->dma_regs =
- (struct fman_dma_regs __iomem *)(base_addr + DMA_OFFSET);
+ fman->fpm_regs = base_addr + FPM_OFFSET;
+ fman->bmi_regs = base_addr + BMI_OFFSET;
+ fman->qmi_regs = base_addr + QMI_OFFSET;
+ fman->dma_regs = base_addr + DMA_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
fman_defconfig(fman->cfg);
- fman->cfg->qmi_deq_option_support = true;
-
fman->state->extra_fifo_pool_size = 0;
- fman->state->exceptions = DFLT_EXCEPTIONS;
- fman->reset_on_init = true;
+ fman->state->exceptions = (EX_DMA_BUS_ERROR |
+ EX_DMA_READ_ECC |
+ EX_DMA_SYSTEM_WRITE_ECC |
+ EX_DMA_FM_WRITE_ECC |
+ EX_FPM_STALL_ON_TASKS |
+ EX_FPM_SINGLE_ECC |
+ EX_FPM_DOUBLE_ECC |
+ EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+ EX_BMI_LIST_RAM_ECC |
+ EX_BMI_STORAGE_PROFILE_ECC |
+ EX_BMI_STATISTICS_RAM_ECC |
+ EX_MURAM_ECC |
+ EX_BMI_DISPATCH_RAM_ECC |
+ EX_QMI_DOUBLE_ECC |
+ EX_QMI_SINGLE_ECC);
/* Read FMan revision for future use*/
fman_get_revision(fman, &fman->state->rev_info);
@@ -1912,20 +1884,108 @@ err_fm_state:
return -EINVAL;
}
+#ifndef __rtems__
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+#ifdef CONFIG_PPC
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+ __func__, guts_node->full_name);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+#endif
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+#ifdef CONFIG_PPC
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+ err = -EBUSY;
+ goto _return;
+ }
+#ifdef CONFIG_PPC
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+
+ goto _return;
+
+#ifdef CONFIG_PPC
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+#endif
+ }
+_return:
+ return err;
+}
+#endif /* __rtems__ */
+
static int fman_init(struct fman *fman)
{
struct fman_cfg *cfg = NULL;
- struct fman_rg fman_rg;
- int err = 0, i;
+ int err = 0, i, count;
if (is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
cfg = fman->cfg;
@@ -1948,8 +2008,8 @@ static int fman_init(struct fman *fman)
u32 liodn_base;
fman->liodn_offset[i] =
- ioread32be(&fman_rg.bmi_rg->fmbm_spliodn[i - 1]);
- liodn_base = ioread32be(&fman_rg.dma_rg->fmdmplr[i / 2]);
+ ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
liodn_base &= DMA_LIODN_BASE_MASK;
@@ -1961,23 +2021,21 @@ static int fman_init(struct fman *fman)
fman->liodn_base[i] = liodn_base;
}
- /* Reset the FM if required. */
- if (fman->reset_on_init) {
- if (fman->state->rev_info.major >= 6) {
- /* Errata A007273 */
- pr_debug("FManV3 reset is not supported!\n");
- } else {
- out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
- /* Memory barrier */
- mb();
- usleep_range(100, 300);
- }
+ err = fman_reset(fman);
+ if (err)
+ return err;
- if (!!(ioread32be(&fman_rg.qmi_rg->fmqm_gs) &
- QMI_GS_HALT_NOT_BUSY)) {
- resume(fman->fpm_regs);
- usleep_range(100, 300);
- }
+ if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+ resume(fman->fpm_regs);
+ /* Wait until QMI is not in halt not busy state */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+ QMI_GS_HALT_NOT_BUSY) && --count);
+ if (count == 0)
+ dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+ __func__);
}
if (clear_iram(fman) != 0)
@@ -2000,9 +2058,10 @@ static int fman_init(struct fman *fman)
/* allocate MURAM for FIFO according to total size */
fman->fifo_offset = fman_muram_alloc(fman->muram,
fman->state->total_fifo_size);
- if (IS_ERR_VALUE(fman->cam_offset)) {
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
free_init_resources(fman);
- pr_err("MURAM alloc for BMI FIFO failed\n");
+ dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+ __func__);
return -ENOMEM;
}
@@ -2017,7 +2076,7 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
- err = enable(&fman_rg, cfg);
+ err = enable(fman, cfg);
if (err != 0)
return err;
@@ -2033,16 +2092,10 @@ static int fman_set_exception(struct fman *fman,
enum fman_exceptions exception, bool enable)
{
u32 bit_mask = 0;
- struct fman_rg fman_rg;
if (!is_init_done(fman->cfg))
return -EINVAL;
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -2050,13 +2103,27 @@ static int fman_set_exception(struct fman *fman,
else
fman->state->exceptions &= ~bit_mask;
} else {
- pr_err("Undefined exception\n");
+ dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+ __func__, exception);
return -EINVAL;
}
- return set_exception(&fman_rg, exception, enable);
+ return set_exception(fman, exception, enable);
}
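A hypothetical caller, shown only to illustrate the flow (fman_set_exception() is file-static, so real callers go through the driver's own paths): the software mask in fman->state->exceptions is updated first, then set_exception() programs the matching hardware enable bit:

    err = fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
    if (err)
        dev_warn(fman->dev, "could not mask QMI single-ECC events\n");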
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @isr_cb: The interrupt service routine.
+ * @src_arg: Argument to be passed to isr_cb.
+ *
+ * Used to register an event handler to be called by FMan on the
+ * given module event.
+ */
void fman_register_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type,
void (*isr_cb)(void *src_arg), void *src_arg)
@@ -2064,47 +2131,61 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
/* register in local FM structure */
fman->intr_mng[event].isr_cb = isr_cb;
fman->intr_mng[event].src_handle = src_arg;
}
-
+EXPORT_SYMBOL(fman_register_intr);
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister a previously registered event handler.
+ */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
u8 mod_id, enum fman_intr_type intr_type)
{
int event = 0;
event = get_module_event(module, mod_id, intr_type);
- WARN_ON(!(event < FMAN_EV_CNT));
+ WARN_ON(event >= FMAN_EV_CNT);
fman->intr_mng[event].isr_cb = NULL;
fman->intr_mng[event].src_handle = NULL;
}
+EXPORT_SYMBOL(fman_unregister_intr);
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_port_params(struct fman *fman,
struct fman_port_init_params *port_params)
{
int err;
- unsigned long int_flags;
+ unsigned long flags;
u8 port_id = port_params->port_id, mac_id;
- struct fman_rg fman_rg;
-
- fman_rg.bmi_rg = fman->bmi_regs;
- fman_rg.qmi_rg = fman->qmi_regs;
- fman_rg.fpm_rg = fman->fpm_regs;
- fman_rg.dma_rg = fman->dma_regs;
- spin_lock_irqsave(&fman->spinlock, int_flags);
+ spin_lock_irqsave(&fman->spinlock, flags);
err = set_num_of_tasks(fman, port_params->port_id,
&port_params->num_of_tasks,
&port_params->num_of_extra_tasks);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
/* TX Ports */
if (port_params->port_type != FMAN_PORT_TYPE_RX) {
@@ -2113,7 +2194,7 @@ int fman_set_port_params(struct fman *fman,
/* update qmi ENQ/DEQ threshold */
fman->state->accumulated_num_of_deq_tnums +=
port_params->deq_pipeline_depth;
- enq_th = (ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
/* if enq_th is too big, we reduce it to the max value
* that is still 0
@@ -2124,13 +2205,13 @@ int fman_set_port_params(struct fman *fman,
fman->state->qmi_max_num_of_tnums -
fman->state->accumulated_num_of_deq_tnums - 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_ENQ_MASK;
reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
- deq_th = ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+ deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
QMI_CFG_DEQ_MASK;
/* if deq_th is too small, we enlarge it to the min
* value that is still 0.
@@ -2139,59 +2220,70 @@ int fman_set_port_params(struct fman *fman,
*/
if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
(deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
- deq_th =
- fman->state->accumulated_num_of_deq_tnums + 1;
- reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+ deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
reg &= ~QMI_CFG_DEQ_MASK;
reg |= deq_th;
- iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
}
}
err = set_size_of_fifo(fman, port_params->port_id,
&port_params->size_of_fifo,
&port_params->extra_size_of_fifo);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
err = set_num_of_open_dmas(fman, port_params->port_id,
&port_params->num_of_open_dmas,
&port_params->num_of_extra_open_dmas);
- if (err) {
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return err;
- }
+ if (err)
+ goto return_err;
- set_port_liodn(&fman_rg, port_id, fman->liodn_base[port_id],
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
fman->liodn_offset[port_id]);
if (fman->state->rev_info.major < 6)
- set_port_order_restoration(fman_rg.fpm_rg, port_id);
+ set_port_order_restoration(fman->fpm_regs, port_id);
mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
fman->state->port_mfl[mac_id] = port_params->max_frame_length;
} else {
- pr_warn("Port max_frame_length is smaller than MAC current MTU\n");
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
- return -EINVAL;
+ dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+ __func__, port_id, mac_id);
+ err = -EINVAL;
+ goto return_err;
}
- spin_unlock_irqrestore(&fman->spinlock, int_flags);
+ spin_unlock_irqrestore(&fman->spinlock, flags);
return 0;
+
+return_err:
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+ return err;
}
+EXPORT_SYMBOL(fman_set_port_params);
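The rework above funnels every failure through a single unlock site instead of repeating spin_unlock_irqrestore() before each early return. A sketch of the pattern, reusing the patch's return_err label with placeholder step functions:

    spin_lock_irqsave(&fman->spinlock, flags);

    err = first_step(fman);	/* hypothetical helper */
    if (err)
        goto return_err;

    err = second_step(fman);	/* hypothetical helper */
    if (err)
        goto return_err;

    spin_unlock_irqrestore(&fman->spinlock, flags);
    return 0;

return_err:
    spin_unlock_irqrestore(&fman->spinlock, flags);
    return err;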
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_reset_mac(struct fman *fman, u8 mac_id)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u32 msk, timeout = 100;
if (fman->state->rev_info.major >= 6) {
- pr_warn("FMan MAC reset no available for FMan V3!\n");
+ dev_err(fman->dev, "%s: FMan MAC reset not available for FMan V3!\n",
+ __func__);
return -EINVAL;
}
@@ -2228,7 +2320,8 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
msk = FPM_RSTC_MAC9_RESET;
break;
default:
- pr_warn("Illegal MAC Id\n");
+ dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+ __func__, mac_id);
return -EINVAL;
}
@@ -2242,125 +2335,73 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
return 0;
}
+EXPORT_SYMBOL(fman_reset_mac);
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
{
/* if port is already initialized, check that MaxFrameLength is smaller
* or equal to the port's max
*/
if ((!fman->state->port_mfl[mac_id]) ||
- (fman->state->port_mfl[mac_id] &&
- (mfl <= fman->state->port_mfl[mac_id]))) {
+ (mfl <= fman->state->port_mfl[mac_id])) {
fman->state->mac_mfl[mac_id] = mfl;
} else {
- pr_warn("MAC max_frame_length is larger than Port max_frame_length\n");
+ dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+ __func__);
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL(fman_set_mac_max_frame);
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
u16 fman_get_clock_freq(struct fman *fman)
{
return fman->state->fm_clk_freq;
}
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
u32 fman_get_bmi_max_fifo_size(struct fman *fman)
{
return fman->state->bmi_max_fifo_size;
}
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
-static void fman_event_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return;
-
- fpm_rg = fman->fpm_regs;
-
- /* normal interrupts */
- pending = ioread32be(&fpm_rg->fm_npi);
- if (!pending)
- return;
-
- if (pending & INTR_EN_QMI)
- qmi_event(fman);
-
- /* MAC interrupts */
- if (pending & INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_MAC0 + 0);
- if (pending & INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_MAC0 + 1);
- if (pending & INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_MAC0 + 2);
- if (pending & INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_MAC0 + 3);
- if (pending & INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_MAC0 + 4);
- if (pending & INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_MAC0 + 5);
- if (pending & INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_MAC0 + 6);
- if (pending & INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_MAC0 + 7);
- if (pending & INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_MAC0 + 8);
- if (pending & INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_MAC0 + 9);
-}
-
-static int fman_error_isr(struct fman *fman)
-{
- u32 pending;
- struct fman_fpm_regs __iomem *fpm_rg;
-
- if (!is_init_done(fman->cfg))
- return -EINVAL;
-
- fpm_rg = fman->fpm_regs;
-
- /* error interrupts */
- pending = ioread32be(&fpm_rg->fm_epi);
- if (!pending)
- return -EINVAL;
-
- if (pending & ERR_INTR_EN_BMI)
- bmi_err_event(fman);
- if (pending & ERR_INTR_EN_QMI)
- qmi_err_event(fman);
- if (pending & ERR_INTR_EN_FPM)
- fpm_err_event(fman);
- if (pending & ERR_INTR_EN_DMA)
- dma_err_event(fman);
- if (pending & ERR_INTR_EN_MURAM)
- muram_err_intr(fman);
-
- /* MAC error interrupts */
- if (pending & ERR_INTR_EN_MAC0)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
- if (pending & ERR_INTR_EN_MAC1)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
- if (pending & ERR_INTR_EN_MAC2)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
- if (pending & ERR_INTR_EN_MAC3)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
- if (pending & ERR_INTR_EN_MAC4)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
- if (pending & ERR_INTR_EN_MAC5)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
- if (pending & ERR_INTR_EN_MAC6)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
- if (pending & ERR_INTR_EN_MAC7)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
- if (pending & ERR_INTR_EN_MAC8)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
- if (pending & ERR_INTR_EN_MAC9)
- call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
-
- return 0;
-}
-
+/**
+ * fman_get_revision
+ * @fman: A Pointer to FMan device
+ * @rev_info: A structure of revision information parameters.
+ *
+ * Returns the FM revision
+ *
+ * Allowed only following fman_init().
+ */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
{
u32 tmp;
@@ -2370,7 +2411,17 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
FPM_REV1_MAJOR_SHIFT);
rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
}
+EXPORT_SYMBOL(fman_get_revision);
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
{
int i;
@@ -2396,11 +2447,21 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
return fman->state->qman_channel_base + i;
}
+EXPORT_SYMBOL(fman_get_qman_channel_id);
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
struct resource *fman_get_mem_region(struct fman *fman)
{
return fman->state->res;
}
+EXPORT_SYMBOL(fman_get_mem_region);
/* Bootargs defines */
/* Extra headroom for RX buffers - Default, min and max */
@@ -2422,7 +2483,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
* particular forwarding scenarios that add extra headers to the
* forwarded frame.
*/
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
module_param(fsl_fm_rx_extra_headroom, int, 0);
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
@@ -2436,13 +2497,18 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* fm_set_max_frm() callback.
*/
#ifndef __rtems__
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
#else /* __rtems__ */
-int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
#endif /* __rtems__ */
module_param(fsl_fm_max_frm, int, 0);
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
u16 fman_get_max_frm(void)
{
static bool fm_check_mfl;
@@ -2464,6 +2530,11 @@ u16 fman_get_max_frm(void)
}
EXPORT_SYMBOL(fman_get_max_frm);
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
int fman_get_rx_extra_headroom(void)
{
static bool fm_check_rx_extra_headroom;
@@ -2479,7 +2550,7 @@ int fman_get_rx_extra_headroom(void)
fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
}
- fsl_fm_rx_extra_headroom = true;
+ fm_check_rx_extra_headroom = true;
fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
}
@@ -2487,32 +2558,202 @@ int fman_get_rx_extra_headroom(void)
}
EXPORT_SYMBOL(fman_get_rx_extra_headroom);
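Note the one-line bug fix above: the old code assigned true to fsl_fm_rx_extra_headroom itself instead of to the fm_check_rx_extra_headroom guard, so every call re-ran the check and the configured headroom was overwritten with ALIGN(1, 16). A minimal sketch of the intended validate-once idiom; all names here are placeholders:

    static int param = PARAM_DEFAULT;
    static bool param_checked;

    int get_param(void)
    {
        if (!param_checked) {
            if (param < PARAM_MIN || param > PARAM_MAX)
                param = PARAM_DEFAULT;
            param_checked = true;	/* set the guard, not the parameter */
        }
        return param;
    }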
+/**
+ * fman_bind
+ * @dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
struct fman *fman_bind(struct device *fm_dev)
{
return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
}
+EXPORT_SYMBOL(fman_bind);
-void fman_unbind(struct fman *fman)
+static irqreturn_t fman_err_irq(int irq, void *handle)
{
- put_device(fman->dev);
-}
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
-struct device *fman_get_device(struct fman *fman)
-{
- return fman->dev;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & ERR_INTR_EN_BMI) {
+ single_ret = bmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_QMI) {
+ single_ret = qmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_FPM) {
+ single_ret = fpm_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_DMA) {
+ single_ret = dma_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MURAM) {
+ single_ret = muram_err_intr(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
-static irqreturn_t fman_irq(int irq, void *fman)
+static irqreturn_t fman_irq(int irq, void *handle)
{
- fman_event_isr(fman);
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
- return IRQ_HANDLED;
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & INTR_EN_QMI) {
+ single_ret = qmi_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
}
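[Reviewer note: both fman_err_irq() and fman_irq() demultiplex the ten per-MAC pending bits with identical unrolled blocks. The same logic could be table-driven; the sketch below is hypothetical and not part of the patch — demux_mac_intrs() and mac_intr_masks[] are invented names, and it assumes only the INTR_EN_MACn masks, the FMAN_EV_MAC0 event base, call_mac_isr(), and the kernel's ARRAY_SIZE() macro.]

/* Hypothetical refactoring sketch, not part of the patch: equivalent
 * to the ten unrolled "if (pending & INTR_EN_MACn)" blocks above.
 */
static const u32 mac_intr_masks[] = {
        INTR_EN_MAC0, INTR_EN_MAC1, INTR_EN_MAC2, INTR_EN_MAC3,
        INTR_EN_MAC4, INTR_EN_MAC5, INTR_EN_MAC6, INTR_EN_MAC7,
        INTR_EN_MAC8, INTR_EN_MAC9,
};

static irqreturn_t demux_mac_intrs(struct fman *fman, u32 pending)
{
        irqreturn_t single_ret, ret = IRQ_NONE;
        int i;

        for (i = 0; i < ARRAY_SIZE(mac_intr_masks); i++) {
                if (!(pending & mac_intr_masks[i]))
                        continue;
                /* Event enums are laid out contiguously from FMAN_EV_MAC0 */
                single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + i);
                if (single_ret == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        return ret;
}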
#ifndef __rtems__
static const struct of_device_id fman_muram_match[] = {
{
- .compatible = "fsl,fman-muram"},
+ .compatible = "fsl,fman-muram"},
{}
};
MODULE_DEVICE_TABLE(of, fman_muram_match);
@@ -2528,8 +2769,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
const char *fdt = bsp_fdt_get();
struct device_node *fm_node;
#endif /* __rtems__ */
- const u32 *u32_prop;
- int lenp, err, irq;
+ u32 val, range[2];
+ int err, irq;
#ifndef __rtems__
struct clk *clk;
u32 clk_rate;
@@ -2545,22 +2786,20 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fm_node = of_node_get(of_dev->dev.of_node);
- u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, cell-index) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32(fm_node, "cell-index", &val);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32)))
- goto fman_node_put;
-
- fman->dts_params.id = (u8)*u32_prop;
+ fman->dts_params.id = (u8)val;
#ifndef __rtems__
/* Get the FM interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_err("Can't get FMan IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
+ __func__);
goto fman_node_put;
}
irq = res->start;
@@ -2568,7 +2807,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM error interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
if (!res) {
- pr_err("Can't get FMan Error IRQ resource\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
+ __func__);
goto fman_node_put;
}
fman->dts_params.err_irq = res->start;
@@ -2576,12 +2816,14 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- pr_err("Can't get FMan memory resouce\n");
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
+ __func__);
goto fman_node_put;
}
phys_base_addr = res->start;
- mem_size = res->end + 1 - res->start;
+ mem_size = resource_size(res);
+
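[Reviewer note: resource_size() encapsulates the inclusive-bounds arithmetic the removed expression open-coded. For reference, the kernel defines it in include/linux/ioport.h as shown below; the same inclusive-end convention explains the "- 1" in the RTEMS muram_res.end assignment further down.]

/* include/linux/ioport.h (for reference) */
static inline resource_size_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}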
#else /* __rtems__ */
irq = of_irq_to_resource(fm_node, 0, NULL);
fman->dts_params.err_irq = of_irq_to_resource(fm_node, 1, NULL);
@@ -2590,57 +2832,54 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
#endif /* __rtems__ */
#ifndef __rtems__
- clk = of_clk_get_by_name(fm_node, NULL);
+ clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
- pr_err("Failed to get FM%d clock structure\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
- pr_err("Failed to determine FM%d clock rate\n",
- fman->dts_params.id);
+ dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+ __func__, fman->dts_params.id);
goto fman_node_put;
}
/* Rounding to MHz */
- fman->dts_params.clk_freq = (u16)((clk_rate + 500000) / 1000000);
+ fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
#else /* __rtems__ */
/* FIXME */
fman->dts_params.clk_freq = 733;
#endif /* __rtems__ */
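[Reviewer note: the rounding semantics change here — the removed expression rounds to the nearest MHz, while DIV_ROUND_UP rounds up. A worked example with a hypothetical (not board-specific) 733333333 Hz rate:]

/* DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d) in <linux/kernel.h>. */
u32 clk_rate = 733333333;                       /* hypothetical rate */
u16 old_mhz = (clk_rate + 500000) / 1000000;    /* 733: nearest MHz  */
u16 new_mhz = DIV_ROUND_UP(clk_rate, 1000000);  /* 734: rounded up   */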
- u32_prop = (const u32 *)of_get_property(fm_node,
- "fsl,qman-channel-range",
- &lenp);
- if (!u32_prop) {
- pr_err("of_get_property(%s, fsl,qman-channel-range) failed\n",
- fm_node->full_name);
+ err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+ &range[0], 2);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
+ __func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32) * 2))
- goto fman_node_put;
- fman->dts_params.qman_channel_base = u32_prop[0];
- fman->dts_params.num_of_qman_channels = u32_prop[1];
+ fman->dts_params.qman_channel_base = range[0];
+ fman->dts_params.num_of_qman_channels = range[1];
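[Reviewer note: the two cells of fsl,qman-channel-range are consumed positionally. An illustrative device-tree fragment — the node name and property values below are hypothetical — shows what lands in each field:]

/* Illustrative node (values hypothetical):
 *
 *      fman0: fman@400000 {
 *              compatible = "fsl,fman";
 *              cell-index = <0>;
 *              fsl,qman-channel-range = <0x40 0xc>;
 *      };
 *
 * range[0] (0x40) becomes qman_channel_base, the first dedicated
 * QMan channel; range[1] (0xc) becomes num_of_qman_channels.
 */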
/* Get the MURAM base address and size */
#ifndef __rtems__
/* FIXME */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
- pr_err("could not find MURAM node\n");
+ dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ __func__);
goto fman_node_put;
}
- err = of_address_to_resource(muram_node, 0, res);
+ err = of_address_to_resource(muram_node, 0,
+ &fman->dts_params.muram_res);
if (err) {
of_node_put(muram_node);
- pr_err("of_address_to_resource() = %d\n", err);
+ dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ __func__, err);
goto fman_node_put;
}
-
- fman->dts_params.muram_phy_base_addr = res->start;
- fman->dts_params.muram_size = res->end + 1 - res->start;
#else /* __rtems__ */
{
int node = fdt_node_offset_by_compatible(fdt,
@@ -2655,48 +2894,55 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
pr_err("could not find MURAM node\n");
goto fman_node_put;
}
- fman->dts_params.muram_phy_base_addr = phys_base_addr +
- res.start;
- fman->dts_params.muram_size = res.end - res.start;
+ fman->dts_params.muram_res.start = phys_base_addr + res.start;
+ fman->dts_params.muram_res.end = phys_base_addr + res.end - 1;
}
#endif /* __rtems__ */
- {
- /* In B4 rev 2.0 (and above) the MURAM size is 512KB.
- * Check the SVR and update MURAM size if required.
- */
- u32 svr;
-
- svr = mfspr(SPRN_SVR);
-
- if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) >= 2))
- fman->dts_params.muram_size = 0x80000;
- }
-
#ifndef __rtems__
of_node_put(muram_node);
#endif /* __rtems__ */
- of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq,
- IRQF_NO_SUSPEND, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n", irq, err);
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
goto fman_free;
}
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+ fman_err_irq, IRQF_SHARED,
+ "fman-err", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, fman->dts_params.err_irq, err);
+ goto fman_free;
+ }
+ }
+
#ifndef __rtems__
fman->dts_params.res =
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
- pr_err("request_mem_region() failed\n");
+ dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+ __func__);
goto fman_free;
}
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (fman->dts_params.base_addr == 0) {
- pr_err("devm_ioremap() failed\n");
+ if (!fman->dts_params.base_addr) {
+ dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+ goto fman_free;
+ }
+
+ fman->dev = &of_dev->dev;
+
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
goto fman_free;
}
#endif /* __rtems__ */
@@ -2710,14 +2956,6 @@ fman_free:
return NULL;
}
-static irqreturn_t fman_err_irq(int irq, void *fman)
-{
- if (fman_error_isr(fman) == 0)
- return IRQ_HANDLED;
-
- return IRQ_NONE;
-}
-
static int fman_probe(struct platform_device *of_dev)
{
struct fman *fman;
@@ -2730,26 +2968,14 @@ static int fman_probe(struct platform_device *of_dev)
if (!fman)
return -EIO;
- if (fman->dts_params.err_irq != 0) {
- err = devm_request_irq(dev, fman->dts_params.err_irq,
- fman_err_irq,
- IRQF_SHARED | IRQF_NO_SUSPEND,
- "fman-err", fman);
- if (err < 0) {
- pr_err("Error: allocating irq %d (error = %d)\n",
- fman->dts_params.err_irq, err);
- return -EINVAL;
- }
- }
-
err = fman_config(fman);
if (err) {
- pr_err("FMan config failed\n");
+ dev_err(dev, "%s: FMan config failed\n", __func__);
return -EINVAL;
}
if (fman_init(fman) != 0) {
- pr_err("FMan init failed\n");
+ dev_err(dev, "%s: FMan init failed\n", __func__);
return -EINVAL;
}
@@ -2775,9 +3001,7 @@ static int fman_probe(struct platform_device *of_dev)
dev_set_drvdata(dev, fman);
- fman->dev = dev;
-
- pr_debug("FM%d probed\n", fman->dts_params.id);
+ dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
return 0;
}
@@ -2785,21 +3009,42 @@ static int fman_probe(struct platform_device *of_dev)
#ifndef __rtems__
static const struct of_device_id fman_match[] = {
{
- .compatible = "fsl,fman"},
+ .compatible = "fsl,fman"},
{}
};
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
static struct platform_driver fman_driver = {
.driver = {
- .name = "fsl-fman",
- .of_match_table = fman_match,
- },
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
.probe = fman_probe,
};
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+ platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
#else /* __rtems__ */
#include <sys/cdefs.h>
#include <sys/param.h>
@@ -2807,7 +3052,7 @@ builtin_platform_driver(fman_driver);
#include <sys/bus.h>
#include <sys/kernel.h>
-void
+int
fman_reset(struct fman *fman)
{
@@ -2825,6 +3070,8 @@ fman_reset(struct fman *fman)
QMI_GS_HALT_NOT_BUSY)) {
usleep_range(100, 300);
}
+
+ return (0);
}
struct fman_softc {