Diffstat (limited to 'rtemsbsd/sys/powerpc')
-rw-r--r--  rtemsbsd/sys/powerpc/compat.c                                           297
-rw-r--r--  rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c   801
-rw-r--r--  rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h    82
-rw-r--r--  rtemsbsd/sys/powerpc/fdt_phy.c                                          360
-rw-r--r--  rtemsbsd/sys/powerpc/fman_muram.c                                       116
-rw-r--r--  rtemsbsd/sys/powerpc/linux_compat.c                                     965
6 files changed, 2621 insertions, 0 deletions
diff --git a/rtemsbsd/sys/powerpc/compat.c b/rtemsbsd/sys/powerpc/compat.c
new file mode 100644
index 00000000..2ee73a9b
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/compat.c
@@ -0,0 +1,297 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/slab.h>
+
+MALLOC_DEFINE(M_KMALLOC, "kmalloc", "Linux kmalloc compatibility");
+
+#include <bsp/fdt.h>
+
+#include <linux/of.h>
+
+const void *
+of_get_property(const struct device_node *dn, const char *name, int *len)
+{
+ const void *fdt = bsp_fdt_get();
+
+ return (fdt_getprop(fdt, dn->offset, name, len));
+}
+
+bool
+of_device_is_available(const struct device_node *dn)
+{
+ const char *status;
+ int len;
+
+ status = of_get_property(dn, "status", &len);
+ return (status == NULL ||
+ (len > 0 && (strcmp(status, "okay") == 0 ||
+ strcmp(status, "ok") == 0)));
+}
+
+int
+of_device_is_compatible(const struct device_node *dn, const char *name)
+{
+ const void *fdt = bsp_fdt_get();
+
+ return (fdt_node_check_compatible(fdt, dn->offset, name) == 0);
+}
+
+struct device_node *
+of_find_compatible_node(struct device_node *dns, const struct device_node *dn,
+ const char *type, const char *compatible)
+{
+ const void *fdt = bsp_fdt_get();
+ int node;
+
+ (void)type;
+
+ if (dn != NULL) {
+ node = dn->offset;
+ } else {
+ node = 0;
+ }
+
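+	/* The caller provides the node storage; nothing is allocated here */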
+ memset(dns, 0, sizeof(*dns));
+
+ while (1) {
+ int err;
+
+ node = fdt_next_node(fdt, node, NULL);
+ if (node < 0)
+ return (NULL);
+
+ err = fdt_node_check_compatible(fdt, node, compatible);
+ if (err == 0) {
+ dns->offset = node;
+ return (dns);
+ }
+ }
+}
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+static int
+get_cells(const void *fdt, int node, const char *name)
+{
+ const fdt32_t *c;
+ int len;
+ int val;
+
+ do {
+ c = fdt_getprop(fdt, node, name, &len);
+ if (c != NULL) {
+ if (len != sizeof(*c))
+ return (-EINVAL);
+ val = fdt32_to_cpu(*c);
+ if (val <= 0 ||
+ val > sizeof(resource_size_t) / sizeof(*c))
+ return (-EINVAL);
+ return (val);
+ }
+ node = fdt_parent_offset(fdt, node);
+ } while (node >= 0);
+
+ return (-EINVAL);
+}
+
+static int
+get_address_cells(const void *fdt, int node)
+{
+
+ return (get_cells(fdt, node, "#address-cells"));
+}
+
+static int
+get_size_cells(const void *fdt, int node)
+{
+
+ return (get_cells(fdt, node, "#size-cells"));
+}
+
+int
+of_address_to_resource(struct device_node *dn, int index,
+ struct resource *res)
+{
+ const void *fdt = bsp_fdt_get();
+ int ac;
+ int sc;
+ int len;
+ const fdt32_t *p;
+ int i;
+
+ memset(res, 0, sizeof(*res));
+
+ ac = get_address_cells(fdt, dn->offset);
+ if (ac < 0)
+ return (-EINVAL);
+
+ sc = get_size_cells(fdt, dn->offset);
+ if (sc < 0)
+ return (-EINVAL);
+
+ p = fdt_getprop(fdt, dn->offset, "reg", &len);
+ if (p == NULL)
+ return (-EINVAL);
+
+ len /= sizeof(*p);
+ i = index * (ac + sc);
+ if (i + ac + sc > len)
+ return (-EINVAL);
+
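+	/* Fold the big-endian address and size cells into 64-bit values */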
+ while (ac > 0) {
+ res->start = (res->start << 32) | fdt32_to_cpu(p[i]);
+ ++i;
+ --ac;
+ }
+
+ while (sc > 0) {
+ res->end = (res->end << 32) | fdt32_to_cpu(p[i]);
+ ++i;
+ --sc;
+ }
+ res->end += res->start;
+
+ return (0);
+}
+
+int
+of_irq_to_resource(struct device_node *dn, int index,
+ struct resource *res)
+{
+ const void *fdt = bsp_fdt_get();
+ int len;
+ const fdt32_t *p;
+ int i;
+ int irq;
+
+ if (res != NULL)
+ memset(res, 0, sizeof(*res));
+
+ p = fdt_getprop(fdt, dn->offset, "interrupts", &len);
+ if (p == NULL)
+ return (-EINVAL);
+
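+	/* Assume an interrupt specifier size of four cells (16 bytes) */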
+ i = index * 16;
+ if (i + 16 > len)
+ return (-EINVAL);
+
+ irq = (int)fdt32_to_cpu(p[i / sizeof(*p)]);
+#ifdef __PPC__
+ /* FIXME */
+ irq -= 16;
+#endif
+ return (irq);
+}
+
+#include <linux/of_net.h>
+#include <linux/if_ether.h>
+
+static const void *
+get_mac_address(struct device_node *dn, const char *name)
+{
+ const void *fdt = bsp_fdt_get();
+ int len;
+ const fdt32_t *p;
+
+ p = fdt_getprop(fdt, dn->offset, name, &len);
+ if (p == NULL || len != ETH_ALEN) {
+ return (NULL);
+ }
+
+ return (p);
+}
+
+const void *
+of_get_mac_address(struct device_node *dn)
+{
+ const void *addr;
+
+ addr = get_mac_address(dn, "mac-address");
+ if (addr != NULL) {
+ return addr;
+ }
+
+ return get_mac_address(dn, "local-mac-address");
+}
+
+#include <linux/interrupt.h>
+
+struct arg_wrapper {
+ irq_handler_t handler;
+ unsigned int irq;
+ void *arg;
+};
+
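+/* Adapter so that the RTEMS interrupt server can invoke Linux-style handlers */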
+static void
+handler_wrapper(void *arg)
+{
+ struct arg_wrapper *aw = arg;
+
+ (*aw->handler)(aw->irq, aw->arg);
+}
+
+int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *arg)
+{
+ struct arg_wrapper *aw;
+ rtems_status_code sc;
+
+ aw = kmalloc(sizeof(*aw), GFP_KERNEL);
+ if (aw == NULL)
+ return (-ENOMEM);
+
+ aw->handler = handler;
+ aw->irq = irq;
+ aw->arg = arg;
+	sc = rtems_interrupt_server_handler_install(RTEMS_ID_NONE, irq, name,
+	    RTEMS_INTERRUPT_SHARED, handler_wrapper, aw);
+	if (sc != RTEMS_SUCCESSFUL) {
+		kfree(aw);
+		return (-EINVAL);
+	}
+
+ return (0);
+}
+
+#include <linux/bitrev.h>
+
+const uint8_t bitrev_nibbles[16] = {
+ 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
+};
+
+#include <linux/platform_device.h>
+
+struct resource *
+platform_get_resource_impl(struct platform_device *dev,
+ unsigned int type, unsigned int num, struct resource *res)
+{
+
+ return (res);
+}
diff --git a/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c
new file mode 100644
index 00000000..35e83d18
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c
@@ -0,0 +1,801 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "if_fmanmac.h"
+
+#include <sys/sockio.h>
+
+#include <net/if_vlan_var.h>
+#include <netinet/ip.h>
+
+#include <linux/phy.h>
+
+#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h"
+#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h"
+
+#define FMAN_MAC_LOCK(sc) mtx_lock(&(sc)->mtx)
+#define FMAN_MAC_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
+#define FMAN_MAC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
+
+#define FMAN_MAC_CSUM (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | \
+ CSUM_UDP_IPV6)
+
+struct fman_mac_sgt {
+ char priv[DPA_TX_PRIV_DATA_SIZE];
+ struct fman_prs_result prs;
+ struct qm_sg_entry sg[DPA_SGT_MAX_ENTRIES];
+ struct mbuf *m;
+};
+
+static void
+fman_mac_enable_tx_csum(struct mbuf *m, struct qm_fd *fd,
+ struct fman_prs_result *prs)
+{
+ int csum_flags = m->m_pkthdr.csum_flags;
+
+ if ((csum_flags & FMAN_MAC_CSUM) == 0) {
+ return;
+ }
+
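+	/* Construct a parse result so that the FMan computes the checksums */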
+ memset(prs, 0, sizeof(*prs));
+
+ if ((csum_flags & FMAN_MAC_CSUM) == CSUM_IP) {
+ prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+ } else if ((csum_flags & CSUM_TCP) != 0) {
+ prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+ prs->l4r = FM_L4_PARSE_RESULT_TCP;
+ } else if ((csum_flags & CSUM_UDP) != 0) {
+ prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+ prs->l4r = FM_L4_PARSE_RESULT_UDP;
+ } else if ((csum_flags & CSUM_TCP_IPV6) != 0) {
+ prs->l3r = FM_L3_PARSE_RESULT_IPV6;
+ prs->l4r = FM_L4_PARSE_RESULT_TCP;
+ } else if ((csum_flags & CSUM_UDP_IPV6) != 0) {
+ prs->l3r = FM_L3_PARSE_RESULT_IPV6;
+ prs->l4r = FM_L4_PARSE_RESULT_UDP;
+ } else {
+ BSD_ASSERT(0);
+ }
+
+ /* FIXME: VLAN */
+ prs->ip_off[0] = (u8)sizeof(struct ether_header);
+ prs->l4_off = (u8)(sizeof(struct ether_header) + sizeof(struct ip));
+
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+}
+
+static void
+fman_mac_txstart_locked(struct ifnet *ifp, struct fman_mac_softc *sc)
+{
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+
+ for (;;) {
+ struct fman_mac_sgt *sgt;
+ struct mbuf *m;
+ struct mbuf *n;
+ struct qm_fd fd;
+ struct dpa_priv_s *priv;
+ struct qman_fq *egress_fq;
+ int queue = 0;
+ size_t i;
+ uintptr_t addr;
+
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL) {
+ break;
+ }
+
+ sgt = uma_zalloc(sc->sgt_zone, M_NOWAIT);
+ if (sgt == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ m_freem(m);
+ continue;
+ }
+
+ clear_fd(&fd);
+ fd.bpid = 0xff;
+ fd.offset = offsetof(struct fman_mac_sgt, sg);
+ fd.format = qm_fd_sg;
+ fd.length20 = m->m_pkthdr.len;
+ fd.cmd |= FM_FD_CMD_FCO;
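+		/* The FD carries the SGT address; _dpa_cleanup_tx_fd() frees it */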
+ addr = (uintptr_t)sgt;
+ fd.addr_hi = (u8)upper_32_bits(addr);
+ fd.addr_lo = lower_32_bits(addr);
+ fman_mac_enable_tx_csum(m, &fd, &sgt->prs);
+
+repeat_with_collapsed_mbuf_chain:
+
+ i = 0;
+ n = m;
+
+ while (n != NULL && i < DPA_SGT_MAX_ENTRIES) {
+ int len = n->m_len;
+
+ if (len > 0) {
+ sgt->sg[i].bpid = 0xff;
+ sgt->sg[i].offset = 0;
+ sgt->sg[i].length = len;
+ sgt->sg[i].extension = 0;
+ sgt->sg[i].final = 0;
+ addr = mtod(n, uintptr_t);
+ sgt->sg[i].addr_hi = (u8)upper_32_bits(addr);
+ sgt->sg[i].addr_lo =
+ cpu_to_be32(lower_32_bits(addr));
+ ++i;
+ }
+
+ n = n->m_next;
+ }
+
+ if (n != NULL && i == DPA_SGT_MAX_ENTRIES) {
+ struct mbuf *c;
+
+ c = m_collapse(m, M_NOWAIT, DPA_SGT_MAX_ENTRIES);
+ if (c == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ m_freem(m);
+ uma_zfree(sc->sgt_zone, sgt);
+ continue;
+ }
+
+ m = c;
+ goto repeat_with_collapsed_mbuf_chain;
+ }
+
+ sgt->sg[i - 1].final = 1;
+ sgt->m = m;
+ priv = netdev_priv(&sc->mac_dev.net_dev);
+ egress_fq = priv->egress_fqs[queue];
+ fd.cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+ qman_enqueue(egress_fq, &fd, QMAN_ENQUEUE_FLAG_WAIT);
+ }
+}
+
+static void
+fman_mac_txstart(struct ifnet *ifp)
+{
+ struct fman_mac_softc *sc;
+
+ sc = ifp->if_softc;
+
+ FMAN_MAC_LOCK(sc);
+ fman_mac_txstart_locked(ifp, sc);
+ FMAN_MAC_UNLOCK(sc);
+}
+
+static void
+fman_mac_tick(void *arg)
+{
+ struct fman_mac_softc *sc;
+ struct ifnet *ifp;
+
+ sc = arg;
+ ifp = sc->ifp;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ return;
+ }
+
+ mii_tick(sc->mii_softc);
+ callout_reset(&sc->fman_mac_callout, hz, fman_mac_tick, sc);
+}
+
+static void
+fman_mac_set_multi(struct fman_mac_softc *sc)
+{
+ struct mac_device *mac_dev;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+ mac_dev = &sc->mac_dev;
+ (*mac_dev->set_multi)(&mac_dev->net_dev, mac_dev);
+}
+
+static void
+fman_mac_set_promisc(struct fman_mac_softc *sc, int if_flags)
+{
+ struct mac_device *mac_dev;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+ mac_dev = &sc->mac_dev;
+ (*mac_dev->set_promisc)(mac_dev->fman_mac,
+ (if_flags & IFF_PROMISC) != 0);
+}
+
+static int
+fman_mac_set_mtu(struct fman_mac_softc *sc, int mtu)
+{
+ struct ifnet *ifp;
+ int real_mtu;
+
+ ifp = sc->ifp;
+ real_mtu = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ if (real_mtu > fman_get_max_frm() ||
+ real_mtu < ETHER_MIN_LEN) {
+ return (EINVAL);
+ }
+
+ ifp->if_mtu = mtu;
+ return (0);
+}
+
+static void
+fman_mac_init_locked(struct fman_mac_softc *sc)
+{
+ struct ifnet *ifp;
+ int error;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+
+ ifp = sc->ifp;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ return;
+ }
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+ error = dpa_eth_priv_start(&sc->mac_dev.net_dev);
+ BSD_ASSERT(error == 0);
+
+ mii_mediachg(sc->mii_softc);
+ callout_reset(&sc->fman_mac_callout, hz, fman_mac_tick, sc);
+
+ fman_mac_set_multi(sc);
+}
+
+static void
+fman_mac_stop_locked(struct fman_mac_softc *sc)
+{
+ struct ifnet *ifp;
+ int error;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+
+ ifp = sc->ifp;
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ error = dpa_eth_priv_stop(&sc->mac_dev.net_dev);
+ BSD_ASSERT(error == 0);
+}
+
+static void
+fman_mac_init(void *if_softc)
+{
+ struct fman_mac_softc *sc;
+
+ sc = if_softc;
+ FMAN_MAC_LOCK(sc);
+ fman_mac_init_locked(sc);
+ FMAN_MAC_UNLOCK(sc);
+}
+
+static int
+fman_mac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct fman_mac_softc *sc;
+ struct mii_data *mii;
+ struct ifreq *ifr;
+ int error;
+
+ sc = ifp->if_softc;
+ ifr = (struct ifreq *)data;
+
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ FMAN_MAC_LOCK(sc);
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if ((ifp->if_flags ^ sc->if_flags) &
+ IFF_PROMISC)
+ fman_mac_set_promisc(sc,
+ ifp->if_flags);
+ } else {
+ fman_mac_init_locked(sc);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ fman_mac_stop_locked(sc);
+ }
+ }
+ sc->if_flags = ifp->if_flags;
+ FMAN_MAC_UNLOCK(sc);
+ break;
+ case SIOCSIFMTU:
+ error = fman_mac_set_mtu(sc, ifr->ifr_mtu);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ FMAN_MAC_LOCK(sc);
+ fman_mac_set_multi(sc);
+ FMAN_MAC_UNLOCK(sc);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ mii = sc->mii_softc;
+ error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+ break;
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+static int
+fman_mac_media_change(struct ifnet *ifp)
+{
+ struct fman_mac_softc *sc;
+ int error;
+
+ sc = ifp->if_softc;
+ FMAN_MAC_LOCK(sc);
+ error = mii_mediachg(sc->mii_softc);
+ FMAN_MAC_UNLOCK(sc);
+ return (error);
+}
+
+static void
+fman_mac_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct fman_mac_softc *sc;
+ struct mii_data *mii;
+
+ sc = ifp->if_softc;
+ mii = sc->mii_softc;
+ FMAN_MAC_LOCK(sc);
+ mii_pollstat(mii);
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+ FMAN_MAC_UNLOCK(sc);
+}
+
+int
+fman_mac_dev_attach(device_t dev)
+{
+ struct fman_mac_softc *sc;
+ struct fman_ivars *ivars;
+ struct ifnet *ifp;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+
+ callout_init_mtx(&sc->fman_mac_callout, &sc->mtx, 0);
+
+ sc->sgt_zone = uma_zcreate("FMan MAC SGT", sizeof(struct fman_mac_sgt),
+ NULL, NULL, NULL, NULL, 16, 0);
+ if (sc->sgt_zone == NULL) {
+ goto error_0;
+ }
+
+ /* Set up the Ethernet interface */
+ sc->ifp = ifp = if_alloc(IFT_ETHER);
+ if (sc->ifp == NULL) {
+ goto error_1;
+ }
+
+ snprintf(&sc->name[0], sizeof(sc->name), "fm%im",
+ device_get_unit(device_get_parent(dev)));
+
+ ifp->if_softc = sc;
+ if_initname(ifp, &sc->name[0], sc->mac_dev.data.mac_hw_id);
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
+ IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
+ ifp->if_capenable = ifp->if_capabilities;
+ ifp->if_hwassist = FMAN_MAC_CSUM;
+ ifp->if_start = fman_mac_txstart;
+ ifp->if_ioctl = fman_mac_ioctl;
+ ifp->if_init = fman_mac_init;
+ IFQ_SET_MAXLEN(&ifp->if_snd, 128);
+ ifp->if_snd.ifq_drv_maxlen = 128;
+ IFQ_SET_READY(&ifp->if_snd);
+ ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+ /* Attach the MII driver if necessary */
+ ivars = device_get_ivars(dev);
+ sc->phy_dev = fdt_phy_obtain(ivars->of_dev.dev.of_node->offset);
+ if (sc->phy_dev != NULL) {
+ error = mii_attach(dev, &sc->miibus, ifp,
+ fman_mac_media_change, fman_mac_media_status,
+ BMSR_DEFCAPMASK, sc->phy_dev->phy, MII_OFFSET_ANY, 0);
+ if (error != 0) {
+ goto error_2;
+ }
+ sc->mii_softc = device_get_softc(sc->miibus);
+ }
+
+ sc->mac_dev.net_dev.ifp = ifp;
+
+ ether_ifattach(ifp, &sc->mac_dev.addr[0]);
+#if 0
+ fman_mac_set_mtu(sc, ETHERMTU_JUMBO);
+#endif
+
+ return (0);
+
+error_2:
+ if_free(ifp);
+error_1:
+ uma_zdestroy(sc->sgt_zone);
+error_0:
+ mtx_destroy(&sc->mtx);
+ return (ENXIO);
+}
+
+int
+fman_mac_dev_detach(device_t _dev)
+{
+ struct fman_mac_softc *sc = device_get_softc(_dev);
+
+ ether_ifdetach(sc->ifp);
+
+ FMAN_MAC_LOCK(sc);
+ fman_mac_stop_locked(sc);
+ FMAN_MAC_UNLOCK(sc);
+
+ if_free(sc->ifp);
+ uma_zdestroy(sc->sgt_zone);
+ mtx_destroy(&sc->mtx);
+
+ return (bus_generic_detach(_dev));
+}
+
+int
+fman_mac_miibus_read_reg(device_t dev, int phy, int reg)
+{
+ struct fman_mac_softc *sc;
+ struct fdt_phy_device *phy_dev;
+ struct fdt_mdio_device *mdio_dev;
+
+ sc = device_get_softc(dev);
+ phy_dev = sc->phy_dev;
+ BSD_ASSERT(phy == phy_dev->phy);
+ mdio_dev = phy_dev->mdio_dev;
+ return ((*mdio_dev->read)(mdio_dev, phy, reg));
+}
+
+int
+fman_mac_miibus_write_reg(device_t dev, int phy, int reg, int val)
+{
+ struct fman_mac_softc *sc;
+ struct fdt_phy_device *phy_dev;
+ struct fdt_mdio_device *mdio_dev;
+
+ sc = device_get_softc(dev);
+ phy_dev = sc->phy_dev;
+ BSD_ASSERT(phy == phy_dev->phy);
+ mdio_dev = phy_dev->mdio_dev;
+ return ((*mdio_dev->write)(mdio_dev, phy, reg, val));
+}
+
+void
+fman_mac_miibus_statchg(device_t dev)
+{
+ struct fman_mac_softc *sc;
+ struct mac_device *mac_dev;
+ struct mii_data *mii;
+ u16 speed;
+
+ sc = device_get_softc(dev);
+ mac_dev = &sc->mac_dev;
+ mii = sc->mii_softc;
+
+ FMAN_MAC_ASSERT_LOCKED(sc);
+
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_10_2:
+ case IFM_10_5:
+ case IFM_10_STP:
+ case IFM_10_FL:
+ speed = SPEED_10;
+ break;
+ case IFM_100_TX:
+ case IFM_100_FX:
+ case IFM_100_T4:
+ case IFM_100_VG:
+ case IFM_100_T2:
+ speed = SPEED_100;
+ break;
+ case IFM_1000_SX:
+ case IFM_1000_LX:
+ case IFM_1000_CX:
+ case IFM_1000_T:
+ speed = SPEED_1000;
+ break;
+ case IFM_10G_LR:
+ case IFM_10G_SR:
+ case IFM_10G_CX4:
+ case IFM_10G_TWINAX:
+ case IFM_10G_TWINAX_LONG:
+ case IFM_10G_LRM:
+ speed = SPEED_10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ (*mac_dev->adjust_link)(mac_dev, speed);
+}
+
+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
+{
+ struct bm_buffer bmb[8];
+ u8 i;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ for (i = 0; i < 8; ++i) {
+ struct mbuf *m;
+
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (unlikely(m == NULL)) {
+ goto cl_alloc_failed;
+ }
+
+ RTEMS_STATIC_ASSERT(DPA_BP_RAW_SIZE == MCLBYTES, DPA_BP_RAW_SIZE);
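+		/* Store a back-pointer to the mbuf for addr_to_mbuf() */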
+ *(struct mbuf **)(mtod(m, char *) + DPA_MBUF_POINTER_OFFSET) = m;
+
+ bm_buffer_set64(&bmb[i], mtod(m, uintptr_t));
+ }
+
+release_bufs:
+ /* Release the buffers. In case bman is busy, keep trying
+ * until successful. bman_release() is guaranteed to succeed
+ * in a reasonable amount of time
+ */
+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+ cpu_relax();
+ return i;
+
+cl_alloc_failed:
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
+{
+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
+}
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+#ifndef __rtems__
+ for_each_possible_cpu(i) {
+#else /* __rtems__ */
+ for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpa_bp->config_count; j += 8)
+ dpa_bp_add_8_bufs(dpa_bp, i);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct mbuf *
+addr_to_mbuf(dma_addr_t addr)
+{
+ void *vaddr = phys_to_virt(addr);
+
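+	/* Recover the mbuf back-pointer stored by _dpa_bp_add_8_bufs() */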
+ return (*(struct mbuf **)(vaddr + DPA_MBUF_POINTER_OFFSET));
+}
+
+static struct mbuf *
+contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
+{
+ struct mbuf *m;
+ ssize_t fd_off = dpa_fd_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+
+ m = addr_to_mbuf(addr);
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = dpa_fd_length(fd);
+ m->m_data = mtod(m, char *) + fd_off;
+
+ return (m);
+}
+
+static void
+dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, dma_addr_t addr, int *count_ptr)
+{
+ struct bm_buffer bmb;
+
+ bm_buffer_set64(&bmb, addr);
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+
+ ++(*count_ptr);
+}
+
+static struct mbuf *
+sg_fd_to_mbuf(struct dpa_bp *dpa_bp, const struct qm_fd *fd,
+ struct ifnet *ifp, int *count_ptr)
+{
+ ssize_t fd_off = dpa_fd_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ int i;
+ int len;
+ struct mbuf *m;
+ struct mbuf *last;
+
+ sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
+ len = 0;
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; ++i) {
+ dma_addr_t sg_addr;
+ int sg_len;
+ struct mbuf *n;
+
+ BSD_ASSERT(sgt[i].extension == 0);
+ BSD_ASSERT(dpa_bp == dpa_bpid2pool(sgt[i].bpid));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ n = addr_to_mbuf(sg_addr);
+
+ sg_len = sgt[i].length;
+ len += sg_len;
+
+ if (i == 0) {
+ m = n;
+ } else {
+ last->m_next = n;
+ }
+
+ n->m_len = sg_len;
+		n->m_data = mtod(n, char *) + sgt[i].offset;
+ last = n;
+
+ --(*count_ptr);
+
+ if (sgt[i].final) {
+ break;
+ }
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = len;
+
+ dpa_bp_recycle_frag(dpa_bp, addr, count_ptr);
+
+ return (m);
+}
+
+void
+_dpa_rx(struct net_device *net_dev, struct qman_portal *portal,
+ const struct dpa_priv_s *priv, struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd, u32 fqid, int *count_ptr)
+{
+ struct dpa_bp *dpa_bp;
+ struct mbuf *m;
+ struct ifnet *ifp;
+
+ ifp = net_dev->ifp;
+
+	if (unlikely((fd->status & FM_FD_STAT_RX_ERRORS) != 0)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ dpa_fd_release(net_dev, fd);
+ return;
+ }
+
+ dpa_bp = priv->dpa_bp;
+ BSD_ASSERT(dpa_bp == dpa_bpid2pool(fd->bpid));
+
+ if (likely(fd->format == qm_fd_contig)) {
+ m = contig_fd_to_mbuf(fd, ifp);
+ } else {
+ BSD_ASSERT(fd->format == qm_fd_sg);
+ m = sg_fd_to_mbuf(dpa_bp, fd, ifp, count_ptr);
+ }
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ (*count_ptr)--;
+
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+ (*ifp->if_input)(ifp, m);
+}
+
+void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd)
+{
+ struct fman_mac_softc *sc;
+ struct fman_mac_sgt *sgt;
+
+ BSD_ASSERT(fd->format == qm_fd_sg);
+
+ sc = ifp->if_softc;
+ sgt = (struct fman_mac_sgt *)qm_fd_addr(fd);
+
+ m_freem(sgt->m);
+ uma_zfree(sc->sgt_zone, sgt);
+}
diff --git a/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h
new file mode 100644
index 00000000..ba07e362
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_FMANMAC_H
+#define _IF_FMANMAC_H
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_var.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <linux/netdevice.h>
+
+#include <fdt_phy.h>
+
+#include "mac.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct fman_mac_softc {
+ struct mac_device mac_dev;
+ struct fdt_phy_device *phy_dev;
+ device_t miibus;
+ struct mii_data *mii_softc;
+ struct ifnet *ifp;
+ int if_flags;
+ struct mtx mtx;
+ uma_zone_t sgt_zone;
+ struct callout fman_mac_callout;
+ char name[8];
+};
+
+int fman_mac_dev_attach(device_t dev);
+
+int fman_mac_dev_detach(device_t dev);
+
+int fman_mac_miibus_read_reg(device_t dev, int phy, int reg);
+
+int fman_mac_miibus_write_reg(device_t dev, int phy, int reg, int val);
+
+void fman_mac_miibus_statchg(device_t dev);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _IF_FMANMAC_H */
diff --git a/rtemsbsd/sys/powerpc/fdt_phy.c b/rtemsbsd/sys/powerpc/fdt_phy.c
new file mode 100644
index 00000000..b6f87f90
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/fdt_phy.c
@@ -0,0 +1,360 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fdt_phy.h>
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <libfdt.h>
+
+#include <rtems/bsd.h>
+
+#include <bsp/fdt.h>
+
+#define MDIO_LOCK() mtx_lock(&mdio.mutex)
+#define MDIO_UNLOCK() mtx_unlock(&mdio.mutex)
+
+struct mdio_device {
+ struct fdt_mdio_device base;
+ SLIST_ENTRY(mdio_device) next;
+ int node;
+};
+
+static struct {
+ SLIST_HEAD(, mdio_device) instances;
+ struct mtx mutex;
+} mdio = {
+ .instances = SLIST_HEAD_INITIALIZER(mdio.instances)
+};
+
+MTX_SYSINIT(mdio_mutex, &mdio.mutex, "FDT MDIO", MTX_DEF);
+
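+/*
+ * Determine the physical address of a node by accumulating the "reg" base
+ * addresses on the path from the root down to the node.  This is a
+ * simplified translation which does not interpret "ranges" properties.
+ */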
+static uint64_t
+fdt_get_address(const void *fdt, int node)
+{
+ uint64_t addr;
+ int nodes[16];
+ size_t i;
+ int ac;
+
+ i = 0;
+ do {
+ nodes[i] = node;
+ ++i;
+ node = fdt_parent_offset(fdt, node);
+ } while (node >= 0 && i < nitems(nodes));
+
+ if (node >= 0) {
+ return (0);
+ }
+
+ ac = 1;
+ addr = 0;
+ while (i > 0) {
+ const fdt32_t *p;
+ int len;
+
+ p = fdt_getprop(fdt, nodes[i - 1], "reg", &len);
+ if (p != NULL) {
+ if (ac == 1 && len >= 4) {
+ addr += fdt32_to_cpu(p[0]);
+ } else if (ac == 2 && len >= 8) {
+ addr += fdt32_to_cpu(p[1]);
+ addr += (uint64_t)fdt32_to_cpu(p[0]) << 32;
+ } else {
+ return (0);
+ }
+ }
+
+ p = fdt_getprop(fdt, nodes[i - 1], "#address-cells", &len);
+ if (p != NULL) {
+ if (len != 4) {
+ return (0);
+ }
+ ac = (int)fdt32_to_cpu(p[0]);
+ if (ac != 1 && ac != 2) {
+ return (0);
+ }
+ }
+
+ --i;
+ }
+
+ return (addr);
+}
+
+struct fman_mdio_regs {
+ uint32_t reserved[12];
+ uint32_t mdio_cfg;
+ uint32_t mdio_ctrl;
+ uint32_t mdio_data;
+ uint32_t mdio_addr;
+};
+
+#define MDIO_CFG_BSY (1U << 31)
+#define MDIO_CFG_ENC45 (1U << 6)
+#define MDIO_CFG_RD_ERR (1U << 1)
+
+#define MDIO_CTRL_READ (1U << 15)
+#define MDIO_CTRL_REG_ADDR(x) ((x) & 0x1fU)
+#define MDIO_CTRL_PHY_ADDR(x) (((x) & 0x1fU) << 5)
+
+struct fman_mdio_device {
+ struct mdio_device base;
+ volatile struct fman_mdio_regs *regs;
+};
+
+static int
+fman_mdio_wait(volatile struct fman_mdio_regs *regs)
+{
+ struct bintime start;
+
+ rtems_bsd_binuptime(&start);
+
+ while ((regs->mdio_cfg & MDIO_CFG_BSY) != 0) {
+ struct bintime now;
+
+ rtems_bsd_binuptime(&now);
+ if (bttosbt(now) - bttosbt(start) > 100 * SBT_1US) {
+ break;
+ }
+ }
+
+ /* Check again, to take thread pre-emption into account */
+ if ((regs->mdio_cfg & MDIO_CFG_BSY) != 0) {
+ return (EIO);
+ }
+
+ return (0);
+}
+
+static int
+fman_mdio_read(struct fdt_mdio_device *base, int phy, int reg)
+{
+ struct fman_mdio_device *fm;
+ volatile struct fman_mdio_regs *regs;
+ int val;
+ int err;
+
+ fm = (struct fman_mdio_device *)base;
+ regs = fm->regs;
+
+ MDIO_LOCK();
+
+ err = fman_mdio_wait(regs);
+ if (err == 0) {
+ uint32_t mdio_cfg;
+ uint32_t mdio_ctrl;
+
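+		/* Use Clause 22 framing (clear the Clause 45 encoding bit) */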
+ mdio_cfg = regs->mdio_cfg;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ regs->mdio_cfg = mdio_cfg;
+
+ mdio_ctrl = MDIO_CTRL_PHY_ADDR(phy) | MDIO_CTRL_REG_ADDR(reg);
+ regs->mdio_ctrl = mdio_ctrl;
+ mdio_ctrl |= MDIO_CTRL_READ;
+ regs->mdio_ctrl = mdio_ctrl;
+
+ err = fman_mdio_wait(regs);
+ if (err == 0 && (regs->mdio_cfg & MDIO_CFG_RD_ERR) == 0) {
+ val = (int)(regs->mdio_data & 0xffff);
+ } else {
+ val = 0xffff;
+ }
+ } else {
+ val = 0xffff;
+ }
+
+ MDIO_UNLOCK();
+
+ return (val);
+}
+
+static int
+fman_mdio_write(struct fdt_mdio_device *base, int phy, int reg, int val)
+{
+ struct fman_mdio_device *fm;
+ volatile struct fman_mdio_regs *regs;
+ int err;
+
+ fm = (struct fman_mdio_device *)base;
+ regs = fm->regs;
+
+ MDIO_LOCK();
+
+ err = fman_mdio_wait(regs);
+ if (err == 0) {
+ uint32_t mdio_cfg;
+ uint32_t mdio_ctrl;
+
+ mdio_cfg = regs->mdio_cfg;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ regs->mdio_cfg = mdio_cfg;
+
+ mdio_ctrl = MDIO_CTRL_PHY_ADDR(phy) | MDIO_CTRL_REG_ADDR(reg);
+ regs->mdio_ctrl = mdio_ctrl;
+
+ regs->mdio_data = (uint32_t)(val & 0xffff);
+
+ fman_mdio_wait(regs);
+ }
+
+ MDIO_UNLOCK();
+
+ return (0);
+}
+
+static struct mdio_device *
+create_fman_mdio(const void *fdt, int mdio_node)
+{
+ struct fman_mdio_device *fm = NULL;
+
+ fm = malloc(sizeof(*fm), M_TEMP, M_WAITOK | M_ZERO);
+ if (fm == NULL) {
+ return (NULL);
+ }
+
+ fm->regs = (volatile struct fman_mdio_regs *)(uintptr_t)
+ fdt_get_address(fdt, mdio_node);
+ fm->base.base.read = fman_mdio_read;
+ fm->base.base.write = fman_mdio_write;
+
+ return (&fm->base);
+}
+
+static struct mdio_device *
+create_mdio_device(const void *fdt, int mdio_node)
+{
+
+ if (fdt_node_check_compatible(fdt, mdio_node,
+ "fsl,fman-memac-mdio") == 0 ||
+ fdt_node_check_compatible(fdt, mdio_node,
+ "fsl,fman-xmdio") == 0) {
+ return (create_fman_mdio(fdt, mdio_node));
+ } else {
+ return (NULL);
+ }
+}
+
+static int
+find_mdio_device(const void *fdt, int mdio_node,
+ struct fdt_phy_device *phy_dev)
+{
+ struct mdio_device *mdio_dev = NULL;
+
+ SLIST_FOREACH(mdio_dev, &mdio.instances, next) {
+ if (mdio_dev->node == mdio_node) {
+ break;
+ }
+ }
+
+ if (mdio_dev == NULL) {
+ mdio_dev = create_mdio_device(fdt, mdio_node);
+ }
+
+ if (mdio_dev == NULL) {
+ return (ENXIO);
+ }
+
+ phy_dev->mdio_dev = &mdio_dev->base;
+ return (0);
+}
+
+static struct fdt_phy_device *
+phy_obtain(const void *fdt, int mdio_node, int phy)
+{
+ struct fdt_phy_device *phy_dev;
+ int err;
+
+ phy_dev = malloc(sizeof(*phy_dev), M_TEMP, M_WAITOK | M_ZERO);
+ if (phy_dev == NULL) {
+ return (NULL);
+ }
+
+ phy_dev->phy = phy;
+ MDIO_LOCK();
+ err = find_mdio_device(fdt, mdio_node, phy_dev);
+ MDIO_UNLOCK();
+
+ if (err != 0) {
+ free(phy_dev, M_TEMP);
+ return (NULL);
+ }
+
+ return (phy_dev);
+}
+
+struct fdt_phy_device *
+fdt_phy_obtain(int device_node)
+{
+ const void *fdt;
+ const fdt32_t *phandle;
+ const fdt32_t *phy;
+ int len;
+ int node;
+
+ fdt = bsp_fdt_get();
+
+ phandle = fdt_getprop(fdt, device_node, "phy-handle", &len);
+ if (phandle == NULL || len != sizeof(*phandle)) {
+ return (NULL);
+ }
+
+ node = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*phandle));
+ if (node < 0) {
+ return (NULL);
+ }
+
+ phy = fdt_getprop(fdt, node, "reg", &len);
+ if (phy == NULL || len != sizeof(*phy)) {
+ return (NULL);
+ }
+
+ node = fdt_parent_offset(fdt, node);
+ if (node < 0) {
+ return (NULL);
+ }
+
+ return (phy_obtain(fdt, node, (int)fdt32_to_cpu(*phy)));
+}
+
+void
+fdt_phy_release(struct fdt_phy_device *phy_dev)
+{
+
+ free(phy_dev, M_TEMP);
+}
diff --git a/rtemsbsd/sys/powerpc/fman_muram.c b/rtemsbsd/sys/powerpc/fman_muram.c
new file mode 100644
index 00000000..c4a8e8de
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/fman_muram.c
@@ -0,0 +1,116 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../../linux/drivers/net/ethernet/freescale/fman/fman_muram.h"
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <stdlib.h>
+#include <rtems/score/heapimpl.h>
+
+#define MURAM_LOCK(x) (mtx_lock(&(x)->mtx))
+
+#define MURAM_UNLOCK(x) (mtx_unlock(&(x)->mtx))
+
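+/*
+ * An RTEMS heap is placed directly in the memory-mapped MURAM area.  The
+ * allocation functions return offsets relative to the MURAM base address.
+ */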
+struct muram_info {
+ struct mtx mtx;
+ Heap_Control heap;
+ unsigned long base;
+};
+
+static unsigned long
+fman_muram_vbase_to_offset(struct muram_info *muram, unsigned long addr)
+{
+
+ return (addr - muram->base);
+}
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+ struct muram_info *muram;
+ uintptr_t s;
+
+ muram = malloc(sizeof(*muram));
+ if (muram == NULL)
+ return (NULL);
+
+ muram->base = (unsigned long)base;
+ memset((void *)muram->base, 0xab, size);
+
+ s = _Heap_Initialize(&muram->heap, (void *)(uintptr_t)base, size, 64);
+ if (s == 0) {
+ free(muram);
+ return (NULL);
+ }
+
+ mtx_init(&muram->mtx, "FMan MURAM", NULL, MTX_DEF);
+
+ return (muram);
+}
+
+unsigned long
+fman_muram_offset_to_vbase(struct muram_info *muram, unsigned long offset)
+{
+
+ return (offset + muram->base);
+}
+
+int
+fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+ void *p;
+
+ MURAM_LOCK(muram);
+ p = _Heap_Allocate(&muram->heap, size);
+ MURAM_UNLOCK(muram);
+
+ if (p == NULL)
+ return -ENOMEM;
+
+ memset(p, 0, size);
+
+ return (fman_muram_vbase_to_offset(muram, (unsigned long)p));
+}
+
+void
+fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+{
+ void *p = (void *)fman_muram_offset_to_vbase(muram, offset);
+
+ MURAM_LOCK(muram);
+ _Heap_Free(&muram->heap, p);
+ MURAM_UNLOCK(muram);
+}
diff --git a/rtemsbsd/sys/powerpc/linux_compat.c b/rtemsbsd/sys/powerpc/linux_compat.c
new file mode 100644
index 00000000..f54a671c
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/linux_compat.c
@@ -0,0 +1,965 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/proc.h>
+#ifndef __rtems__
+#include <sys/sglist.h>
+#endif /* __rtems__ */
+#include <sys/sleepqueue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/stdarg.h>
+#ifndef __rtems__
+#include <machine/pmap.h>
+#endif /* __rtems__ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#ifdef __rtems__
+#include <linux/completion.h>
+#else /* __rtems__ */
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+
+#include <vm/vm_pager.h>
+
+MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
+
+#include <linux/rbtree.h>
+/* Undo Linux compat changes. */
+#undef RB_ROOT
+#undef file
+#undef cdev
+#define RB_ROOT(head) (head)->rbh_root
+
+struct kobject class_root;
+struct device linux_rootdev;
+struct class miscclass;
+struct list_head pci_drivers;
+struct list_head pci_devices;
+struct net init_net;
+spinlock_t pci_lock;
+
+unsigned long linux_timer_hz_mask;
+
+int
+panic_cmp(struct rb_node *one, struct rb_node *two)
+{
+ panic("no cmp");
+}
+
+RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
+
+int
+kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+
+ return (error);
+}
+
+static inline int
+kobject_add_complete(struct kobject *kobj, struct kobject *parent)
+{
+ struct kobj_type *t;
+ int error;
+
+ kobj->parent = kobject_get(parent);
+ error = sysfs_create_dir(kobj);
+ if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
+ struct attribute **attr;
+ t = kobj->ktype;
+
+ for (attr = t->default_attrs; *attr != NULL; attr++) {
+ error = sysfs_create_file(kobj, *attr);
+ if (error)
+ break;
+ }
+ if (error)
+ sysfs_remove_dir(kobj);
+
+ }
+ return (error);
+}
+
+int
+kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+ if (error)
+ return (error);
+
+ return kobject_add_complete(kobj, parent);
+}
+
+void
+kobject_release(struct kref *kref)
+{
+ struct kobject *kobj;
+ char *name;
+
+ kobj = container_of(kref, struct kobject, kref);
+ sysfs_remove_dir(kobj);
+ if (kobj->parent)
+ kobject_put(kobj->parent);
+ kobj->parent = NULL;
+ name = kobj->name;
+ if (kobj->ktype && kobj->ktype->release)
+ kobj->ktype->release(kobj);
+ kfree(name);
+}
+
+static void
+kobject_kfree(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static void
+kobject_kfree_name(struct kobject *kobj)
+{
+ if (kobj) {
+ kfree(kobj->name);
+ }
+}
+
+struct kobj_type kfree_type = { .release = kobject_kfree };
+
+static void
+dev_release(struct device *dev)
+{
+ pr_debug("dev_release: %s\n", dev_name(dev));
+ kfree(dev);
+}
+
+struct device *
+device_create(struct class *class, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...)
+{
+ struct device *dev;
+ va_list args;
+
+ dev = kzalloc(sizeof(*dev), M_WAITOK);
+ dev->parent = parent;
+ dev->class = class;
+ dev->devt = devt;
+ dev->driver_data = drvdata;
+ dev->release = dev_release;
+ va_start(args, fmt);
+ kobject_set_name_vargs(&dev->kobj, fmt, args);
+ va_end(args);
+ device_register(dev);
+
+ return (dev);
+}
+
+int
+kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ kobject_init(kobj, ktype);
+ kobj->ktype = ktype;
+ kobj->parent = parent;
+ kobj->name = NULL;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+ if (error)
+ return (error);
+ return kobject_add_complete(kobj, parent);
+}
+
+static void
+linux_file_dtor(void *cdp)
+{
+ struct linux_file *filp;
+
+ filp = cdp;
+ filp->f_op->release(filp->f_vnode, filp);
+ vdrop(filp->f_vnode);
+ kfree(filp);
+}
+
+static int
+linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (ENODEV);
+ filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+ filp->f_dentry = &filp->f_dentry_store;
+ filp->f_op = ldev->ops;
+ filp->f_flags = file->f_flag;
+ vhold(file->f_vnode);
+ filp->f_vnode = file->f_vnode;
+ if (filp->f_op->open) {
+ error = -filp->f_op->open(file->f_vnode, filp);
+ if (error) {
+ kfree(filp);
+ return (error);
+ }
+ }
+ error = devfs_set_cdevpriv(filp, linux_file_dtor);
+ if (error) {
+ filp->f_op->release(file->f_vnode, filp);
+ kfree(filp);
+ return (error);
+ }
+
+ return 0;
+}
+
+static int
+linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (0);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ devfs_clear_cdevpriv();
+
+
+ return (0);
+}
+
+static int
+linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (0);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ /*
+ * Linux does not have a generic ioctl copyin/copyout layer. All
+ * linux ioctls must be converted to void ioctls which pass a
+ * pointer to the address of the data. We want the actual user
+ * address so we dereference here.
+ */
+ data = *(void **)data;
+ if (filp->f_op->unlocked_ioctl)
+ error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
+ else
+ error = ENOTTY;
+
+ return (error);
+}
+
+static int
+linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ ssize_t bytes;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (0);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ if (uio->uio_iovcnt != 1)
+ panic("linux_dev_read: uio %p iovcnt %d",
+ uio, uio->uio_iovcnt);
+ if (filp->f_op->read) {
+ bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
+ uio->uio_iov->iov_len, &uio->uio_offset);
+ if (bytes >= 0) {
+ uio->uio_iov->iov_base =
+ ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+ uio->uio_iov->iov_len -= bytes;
+ uio->uio_resid -= bytes;
+ } else
+ error = -bytes;
+ } else
+ error = ENXIO;
+
+ return (error);
+}
+
+static int
+linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ ssize_t bytes;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (0);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ if (uio->uio_iovcnt != 1)
+ panic("linux_dev_write: uio %p iovcnt %d",
+ uio, uio->uio_iovcnt);
+ if (filp->f_op->write) {
+ bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
+ uio->uio_iov->iov_len, &uio->uio_offset);
+ if (bytes >= 0) {
+ uio->uio_iov->iov_base =
+ ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+ uio->uio_iov->iov_len -= bytes;
+ uio->uio_resid -= bytes;
+ } else
+ error = -bytes;
+ } else
+ error = ENXIO;
+
+ return (error);
+}
+
+static int
+linux_dev_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ int revents;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (0);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ if (filp->f_op->poll)
+ revents = filp->f_op->poll(filp, NULL) & events;
+ else
+ revents = 0;
+
+ return (revents);
+}
+
+static int
+linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **object, int nprot)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ struct file *file;
+ struct vm_area_struct vma;
+ int error;
+
+ file = curthread->td_fpop;
+ ldev = dev->si_drv1;
+ if (ldev == NULL)
+ return (ENODEV);
+ if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+ return (error);
+ filp->f_flags = file->f_flag;
+ vma.vm_start = 0;
+ vma.vm_end = size;
+ vma.vm_pgoff = *offset / PAGE_SIZE;
+ vma.vm_pfn = 0;
+ vma.vm_page_prot = 0;
+ if (filp->f_op->mmap) {
+ error = -filp->f_op->mmap(filp, &vma);
+ if (error == 0) {
+ struct sglist *sg;
+
+ sg = sglist_alloc(1, M_WAITOK);
+ sglist_append_phys(sg,
+ (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
+ *object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
+ nprot, 0, curthread->td_ucred);
+ if (*object == NULL) {
+ sglist_free(sg);
+ return (EINVAL);
+ }
+ *offset = 0;
+ if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
+ VM_OBJECT_WLOCK(*object);
+ vm_object_set_memattr(*object,
+ vma.vm_page_prot);
+ VM_OBJECT_WUNLOCK(*object);
+ }
+ }
+ } else
+ error = ENODEV;
+
+ return (error);
+}
+
+struct cdevsw linuxcdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_TRACKCLOSE,
+ .d_open = linux_dev_open,
+ .d_close = linux_dev_close,
+ .d_read = linux_dev_read,
+ .d_write = linux_dev_write,
+ .d_ioctl = linux_dev_ioctl,
+ .d_mmap_single = linux_dev_mmap_single,
+ .d_poll = linux_dev_poll,
+};
+
+static int
+linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
+ int flags, struct thread *td)
+{
+ struct linux_file *filp;
+ ssize_t bytes;
+ int error;
+
+ error = 0;
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ if (uio->uio_iovcnt != 1)
+ panic("linux_file_read: uio %p iovcnt %d",
+ uio, uio->uio_iovcnt);
+ if (filp->f_op->read) {
+ bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
+ uio->uio_iov->iov_len, &uio->uio_offset);
+ if (bytes >= 0) {
+ uio->uio_iov->iov_base =
+ ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+ uio->uio_iov->iov_len -= bytes;
+ uio->uio_resid -= bytes;
+ } else
+ error = -bytes;
+ } else
+ error = ENXIO;
+
+ return (error);
+}
+
+static int
+linux_file_poll(struct file *file, int events, struct ucred *active_cred,
+ struct thread *td)
+{
+ struct linux_file *filp;
+ int revents;
+
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ if (filp->f_op->poll)
+ revents = filp->f_op->poll(filp, NULL) & events;
+ else
+ revents = 0;
+
+	return (revents);
+}
+
+static int
+linux_file_close(struct file *file, struct thread *td)
+{
+ struct linux_file *filp;
+ int error;
+
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ error = -filp->f_op->release(NULL, filp);
+ funsetown(&filp->f_sigio);
+ kfree(filp);
+
+ return (error);
+}
+
+static int
+linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
+ struct thread *td)
+{
+ struct linux_file *filp;
+ int error;
+
+ filp = (struct linux_file *)fp->f_data;
+ filp->f_flags = fp->f_flag;
+ error = 0;
+
+ switch (cmd) {
+ case FIONBIO:
+ break;
+ case FIOASYNC:
+ if (filp->f_op->fasync == NULL)
+ break;
+ error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
+ break;
+ case FIOSETOWN:
+ error = fsetown(*(int *)data, &filp->f_sigio);
+ if (error == 0)
+ error = filp->f_op->fasync(0, filp,
+ fp->f_flag & FASYNC);
+ break;
+ case FIOGETOWN:
+ *(int *)data = fgetown(&filp->f_sigio);
+ break;
+ default:
+ error = ENOTTY;
+ break;
+ }
+ return (error);
+}
+
+static int
+linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (EOPNOTSUPP);
+}
+
+static int
+linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
+ struct filedesc *fdp)
+{
+
+ return (0);
+}
+
+struct fileops linuxfileops = {
+ .fo_read = linux_file_read,
+ .fo_write = invfo_rdwr,
+ .fo_truncate = invfo_truncate,
+ .fo_kqfilter = invfo_kqfilter,
+ .fo_stat = linux_file_stat,
+ .fo_fill_kinfo = linux_file_fill_kinfo,
+ .fo_poll = linux_file_poll,
+ .fo_close = linux_file_close,
+ .fo_ioctl = linux_file_ioctl,
+ .fo_chmod = invfo_chmod,
+ .fo_chown = invfo_chown,
+ .fo_sendfile = invfo_sendfile,
+};
+
+/*
+ * Hash of vmmap addresses. This is infrequently accessed and does not
+ * need to be particularly large. This is done because we must store the
+ * caller's idea of the map size to properly unmap.
+ */
+struct vmmap {
+ LIST_ENTRY(vmmap) vm_next;
+ void *vm_addr;
+ unsigned long vm_size;
+};
+
+struct vmmaphd {
+ struct vmmap *lh_first;
+};
+#define VMMAP_HASH_SIZE 64
+#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
+#define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
+static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
+static struct mtx vmmaplock;
+
+static void
+vmmap_add(void *addr, unsigned long size)
+{
+ struct vmmap *vmmap;
+
+ vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
+ mtx_lock(&vmmaplock);
+ vmmap->vm_size = size;
+ vmmap->vm_addr = addr;
+ LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
+ mtx_unlock(&vmmaplock);
+}
+
+static struct vmmap *
+vmmap_remove(void *addr)
+{
+ struct vmmap *vmmap;
+
+ mtx_lock(&vmmaplock);
+ LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
+ if (vmmap->vm_addr == addr)
+ break;
+ if (vmmap)
+ LIST_REMOVE(vmmap, vm_next);
+ mtx_unlock(&vmmaplock);
+
+ return (vmmap);
+}
+
+void *
+_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
+{
+ void *addr;
+
+ addr = pmap_mapdev_attr(phys_addr, size, attr);
+ if (addr == NULL)
+ return (NULL);
+ vmmap_add(addr, size);
+
+ return (addr);
+}
+
+void
+iounmap(void *addr)
+{
+ struct vmmap *vmmap;
+
+ vmmap = vmmap_remove(addr);
+ if (vmmap == NULL)
+ return;
+ pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
+ kfree(vmmap);
+}
+
+
+void *
+vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
+{
+ vm_offset_t off;
+ size_t size;
+
+ size = count * PAGE_SIZE;
+ off = kva_alloc(size);
+ if (off == 0)
+ return (NULL);
+ vmmap_add((void *)off, size);
+ pmap_qenter(off, pages, count);
+
+ return ((void *)off);
+}
+
+void
+vunmap(void *addr)
+{
+ struct vmmap *vmmap;
+
+ vmmap = vmmap_remove(addr);
+ if (vmmap == NULL)
+ return;
+ pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
+ kva_free((vm_offset_t)addr, vmmap->vm_size);
+ kfree(vmmap);
+}
+
+char *
+kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = kmalloc(len + 1, gfp);
+ if (p != NULL)
+ vsnprintf(p, len + 1, fmt, ap);
+
+ return (p);
+}
+
+char *
+kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(gfp, fmt, ap);
+ va_end(ap);
+
+ return (p);
+}
+
+static int
+linux_timer_jiffies_until(unsigned long expires)
+{
+ int delta = expires - jiffies;
+ /* guard against already expired values */
+ if (delta < 1)
+ delta = 1;
+ return (delta);
+}
+
+static void
+linux_timer_callback_wrapper(void *context)
+{
+ struct timer_list *timer;
+
+ timer = context;
+ timer->function(timer->data);
+}
+
+void
+mod_timer(struct timer_list *timer, unsigned long expires)
+{
+
+ timer->expires = expires;
+ callout_reset(&timer->timer_callout,
+ linux_timer_jiffies_until(expires),
+ &linux_timer_callback_wrapper, timer);
+}
+
+void
+add_timer(struct timer_list *timer)
+{
+
+ callout_reset(&timer->timer_callout,
+ linux_timer_jiffies_until(timer->expires),
+ &linux_timer_callback_wrapper, timer);
+}
+
+static void
+linux_timer_init(void *arg)
+{
+
+ /*
+ * Compute an internal HZ value which can divide 2**32 to
+ * avoid timer rounding problems when the tick value wraps
+ * around 2**32:
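+	 * For example, hz == 1000 results in a mask of 2**10 - 1 = 1023.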
+ */
+ linux_timer_hz_mask = 1;
+ while (linux_timer_hz_mask < (unsigned long)hz)
+ linux_timer_hz_mask *= 2;
+ linux_timer_hz_mask--;
+}
+SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
+#endif /* __rtems__ */
+
+void
+linux_complete_common(struct completion *c, int all)
+{
+ int wakeup_swapper;
+
+ sleepq_lock(c);
+ c->done++;
+ if (all)
+ wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+ else
+ wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(c);
+ if (wakeup_swapper)
+ kick_proc0();
+}
+
+/*
+ * Indefinite wait for done != 0 with or without signals.
+ */
+long
+linux_wait_for_common(struct completion *c, int flags)
+{
+
+ if (flags != 0)
+ flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+ else
+ flags = SLEEPQ_SLEEP;
+ for (;;) {
+ sleepq_lock(c);
+ if (c->done)
+ break;
+ sleepq_add(c, NULL, "completion", flags, 0);
+#ifndef __rtems__
+ if (flags & SLEEPQ_INTERRUPTIBLE) {
+ if (sleepq_wait_sig(c, 0) != 0)
+ return (-ERESTARTSYS);
+ } else
+#endif /* __rtems__ */
+ sleepq_wait(c, 0);
+ }
+ c->done--;
+ sleepq_release(c);
+
+ return (0);
+}
+
+#ifndef __rtems__
+/*
+ * Time limited wait for done != 0 with or without signals.
+ */
+long
+linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
+{
+ long end = jiffies + timeout;
+
+ if (flags != 0)
+ flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+ else
+ flags = SLEEPQ_SLEEP;
+ for (;;) {
+ int ret;
+
+ sleepq_lock(c);
+ if (c->done)
+ break;
+ sleepq_add(c, NULL, "completion", flags, 0);
+ sleepq_set_timeout(c, linux_timer_jiffies_until(end));
+ if (flags & SLEEPQ_INTERRUPTIBLE)
+ ret = sleepq_timedwait_sig(c, 0);
+ else
+ ret = sleepq_timedwait(c, 0);
+ if (ret != 0) {
+ /* check for timeout or signal */
+ if (ret == EWOULDBLOCK)
+ return (0);
+ else
+ return (-ERESTARTSYS);
+ }
+ }
+ c->done--;
+ sleepq_release(c);
+
+ /* return how many jiffies are left */
+ return (linux_timer_jiffies_until(end));
+}
+#endif /* __rtems__ */
+
+int
+linux_try_wait_for_completion(struct completion *c)
+{
+ int isdone;
+
+ isdone = 1;
+ sleepq_lock(c);
+ if (c->done)
+ c->done--;
+ else
+ isdone = 0;
+ sleepq_release(c);
+ return (isdone);
+}
+
+int
+linux_completion_done(struct completion *c)
+{
+ int isdone;
+
+ isdone = 1;
+ sleepq_lock(c);
+ if (c->done == 0)
+ isdone = 0;
+ sleepq_release(c);
+ return (isdone);
+}
+
+#ifndef __rtems__
+static void
+linux_compat_init(void *arg)
+{
+ struct sysctl_oid *rootoid;
+ int i;
+
+ rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
+ OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
+ kobject_init(&class_root, &class_ktype);
+ kobject_set_name(&class_root, "class");
+ class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
+ OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
+ kobject_init(&linux_rootdev.kobj, &dev_ktype);
+ kobject_set_name(&linux_rootdev.kobj, "device");
+ linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
+ SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
+ "device");
+ linux_rootdev.bsddev = root_bus;
+ miscclass.name = "misc";
+ class_register(&miscclass);
+ INIT_LIST_HEAD(&pci_drivers);
+ INIT_LIST_HEAD(&pci_devices);
+ spin_lock_init(&pci_lock);
+ mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
+ for (i = 0; i < VMMAP_HASH_SIZE; i++)
+ LIST_INIT(&vmmaphead[i]);
+}
+SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
+
+static void
+linux_compat_uninit(void *arg)
+{
+ kobject_kfree_name(&class_root);
+ kobject_kfree_name(&linux_rootdev.kobj);
+ kobject_kfree_name(&miscclass.kobj);
+}
+SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
+#endif /* __rtems__ */