@@ -318,7 +318,6 @@ config XEN_NETDEV_BACKEND
The corresponding Linux frontend driver is enabled by the
CONFIG_XEN_NETDEV_FRONTEND configuration option.
-
The backend driver presents a standard network device
endpoint for each paravirtual network device to the driver
domain network stack. These can then be bridged or routed
@@ -330,12 +329,63 @@ config XEN_NETDEV_BACKEND
will be called xen-netback.
config VMXNET3
- tristate "VMware VMXNET3 ethernet driver"
- depends on PCI && INET
+ tristate "VMware VMXNET3 ethernet driver"
+ depends on PCI && INET
+ help
+ This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ To compile this driver as a module, choose M here: the
+ module will be called vmxnet3.
+
+config DPA
+ bool "Freescale Data Path Frame Manager Ethernet"
+ depends on FSL_SOC && FSL_BMAN_PORTAL && FSL_QMAN_PORTAL && FSL_FMAN
+ select PHYLIB
+ help
+ This driver supports the Ethernet interfaces of the Freescale QorIQ
+ Data Path Acceleration Architecture (DPAA), which exchange frames
+ with the hardware via the Frame Manager (FMan) and the QMan/BMan
+ queue and buffer managers.
+
+config DPA_OFFLINE_PORTS
+ bool "Offline Ports support"
+ depends on DPA
+ default y
+ help
+ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
+ most of the functionality of the regular, online ports, except they receive their
+ frames from a core or an accelerator on the SoC, via QMan frame queues,
+ rather than directly from the network.
+ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
+ any online FMan port. They deliver the processed frames to frame queues, according
+ to the applied PCD configurations.
+
+ Choosing this feature will not impact the functionality or performance of the system,
+ so it is safe to enable.
+
+config DPA_MAX_FRM_SIZE
+ int "Maximum L2 frame size"
+ depends on DPA
+ range 64 9600
+ default "1522"
help
- This driver supports VMware's vmxnet3 virtual ethernet NIC.
- To compile this driver as a module, choose M here: the
- module will be called vmxnet3.
+ Configure this in relation to the maximum possible MTU of your network configuration. In particular,
+ one would need to increase this value in order to use jumbo frames. DPA_MAX_FRM_SIZE must accommodate
+ the Ethernet FCS (4 bytes) and one ETH+VLAN header (18 bytes), to a total of 22 bytes in excess of
+ the desired L3 MTU.
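+ For example, for a standard L3 MTU of 1500 bytes, this means a minimum
+ DPA_MAX_FRM_SIZE of 1500 + 22 = 1522 bytes, which is the default above.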
+
+ Note that having too large a DPA_MAX_FRM_SIZE (much larger than the actual MTU) may lead to buffer
+ exhaustion, especially in the case of badly fragmented datagrams on the Rx path. Conversely,
+ having a DPA_MAX_FRM_SIZE smaller than the actual MTU will lead to frames being dropped.
+
+ This can be overridden by specifying "fsl_fman_phy_max_frm" in the kernel bootargs:
+ * in Hypervisor-based scenarios, by adding a "chosen" node with the "bootargs" property specifying
+ "fsl_fman_phy_max_frm=<YourValue>";
+ * in non-Hypervisor-based scenarios, via u-boot's env, by modifying the "bootargs" env variable.
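+
+ For example, to allow for 9600-byte jumbo frames from the u-boot prompt
+ (a sketch; adjust to preserve your existing bootargs):
+ setenv bootargs "$bootargs fsl_fman_phy_max_frm=9600"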
+
+config FSL_DPA_1588
+ tristate "IEEE 1588-compliant timestamping"
+ depends on DPA
+ default n
+ help
+ Enable IEEE 1588 (PTP) hardware timestamping support on the DPAA
+ Ethernet interfaces. To compile this support as a module, choose M
+ here: the module will be called dpaa_1588.
+
+config DPAA_ETH_UNIT_TESTS
+ bool "Run Unit Tests for DPAA Ethernet"
+ depends on DPA
+ default y
source "drivers/net/hyperv/Kconfig"
@@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_DPA) += dpa/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
new file mode 100644
@@ -0,0 +1,19 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+EXTRA_CFLAGS += -DVERSION=\"\"
+#
+# Include NetComm SW specific definitions
+include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk
+
+EXTRA_CFLAGS += -I$(NET_DPA)
+
+# NetComm SW tree
+obj-$(CONFIG_FSL_FMAN) += NetCommSw/
+obj-$(CONFIG_FSL_DPA_1588) += dpaa_1588.o
+obj-$(CONFIG_DPA) += fsl-mac.o fsl-dpa.o
+obj-$(CONFIG_DPA_OFFLINE_PORTS) += fsl-oh.o
+
+fsl-dpa-objs := dpa-ethtool.o dpaa_eth.o xgmac_mdio.o
+fsl-mac-objs := mac.o mac-api.o
+fsl-oh-objs := offline_port.o
@@ -1,7 +1,7 @@
#
# Makefile config for the Freescale NetcommSW
#
-NET_DPA = $(srctree)/drivers/net
+NET_DPA = $(srctree)/drivers/net/ethernet/freescale
DRV_DPA = $(srctree)/drivers/net/dpa
NCSW = $(srctree)/drivers/net/dpa/NetCommSw
new file mode 100644
@@ -0,0 +1,201 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+
+static int __cold dpa_get_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ cpu_netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ cpu_netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
+ if (unlikely(_errno < 0))
+ cpu_netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
+
+ return _errno;
+}
+
+static int __cold dpa_set_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ cpu_netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ cpu_netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
+ if (unlikely(_errno < 0))
+ cpu_netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
+
+ return _errno;
+}
+
+static void __cold dpa_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *drvinfo)
+{
+ int _errno;
+
+ strncpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
+ strncpy(drvinfo->version, VERSION,
+ sizeof(drvinfo->version) - 1)[sizeof(drvinfo->version)-1] = 0;
+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%X", 0);
+
+ if (unlikely(_errno < 0)) {
+ cpu_netdev_warn(net_dev, "snprintf() = %d\n", _errno);
+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
+ } else if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
+ /* Truncated output; checked only after ruling out a negative
+ * _errno, which the signed-to-size_t conversion would
+ * otherwise misclassify as truncation. */
+ cpu_netdev_notice(net_dev, "snprintf() = %d\n", _errno);
+ }
+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info) - 1)[sizeof(drvinfo->bus_info)-1] = 0;
+}
+
+uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
+}
+
+void __cold dpa_set_msglevel(struct net_device *net_dev, uint32_t msg_enable)
+{
+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+int __cold dpa_nway_reset(struct net_device *net_dev)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ cpu_netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ cpu_netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ _errno = 0;
+ if (priv->mac_dev->phy_dev->autoneg) {
+ _errno = phy_start_aneg(priv->mac_dev->phy_dev);
+ if (unlikely(_errno < 0))
+ cpu_netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ _errno);
+ }
+
+ return _errno;
+}
+
+void __cold dpa_get_ringparam(struct net_device *net_dev, struct ethtool_ringparam *et_ringparam)
+{
+ et_ringparam->rx_max_pending = 0;
+ et_ringparam->rx_mini_max_pending = 0;
+ et_ringparam->rx_jumbo_max_pending = 0;
+ et_ringparam->tx_max_pending = 0;
+
+ et_ringparam->rx_pending = 0;
+ et_ringparam->rx_mini_pending = 0;
+ et_ringparam->rx_jumbo_pending = 0;
+ et_ringparam->tx_pending = 0;
+}
+
+void __cold dpa_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam)
+{
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ cpu_netdev_info(net_dev, "This is a MAC-less interface\n");
+ return;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ cpu_netdev_err(net_dev, "phy device not initialized\n");
+ return;
+ }
+
+ et_pauseparam->autoneg = priv->mac_dev->phy_dev->autoneg;
+}
+
+int __cold dpa_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam)
+{
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (priv->mac_dev == NULL) {
+ cpu_netdev_info(net_dev, "This is a MAC-less interface\n");
+ return -ENODEV;
+ }
+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
+ cpu_netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ priv->mac_dev->phy_dev->autoneg = et_pauseparam->autoneg;
+
+ return 0;
+}
+
+const struct ethtool_ops dpa_ethtool_ops __devinitconst = {
+ .get_settings = dpa_get_settings,
+ .set_settings = dpa_set_settings,
+ .get_drvinfo = dpa_get_drvinfo,
+ .get_msglevel = dpa_get_msglevel,
+ .set_msglevel = dpa_set_msglevel,
+ .nway_reset = dpa_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = dpa_get_ringparam,
+ .get_pauseparam = dpa_get_pauseparam,
+ .set_pauseparam = dpa_set_pauseparam,
+};
new file mode 100644
@@ -0,0 +1,562 @@
+/*
+ * drivers/net/dpa/dpaa_1588.c
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2009 IXXAT Automation, GmbH
+ *
+ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <asm/div64.h>
+#include "dpaa_eth.h"
+#include "dpaa_1588.h"
+
+static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+
+ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
+ if (!circ_buf->buf)
+ return -ENOMEM;
+
+ circ_buf->head = 0;
+ circ_buf->tail = 0;
+ ptp_buf->size = size;
+ spin_lock_init(&ptp_buf->ptp_lock);
+
+ return 0;
+}
+
+static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+
+ circ_buf->head = 0;
+ circ_buf->tail = 0;
+ ptp_buf->size = size;
+}
+
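+/*
+ * Add a timestamp entry at the head of the lock-protected ring.
+ * The ring size must be a power of two, since wrapping relies on
+ * (head + 1) & (size - 1). Returns 1 (without overwriting) when full.
+ */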
+static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
+ struct dpa_ptp_data *data)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+ int size = ptp_buf->size;
+ struct dpa_ptp_data *tmp;
+ unsigned long flags;
+ int head, tail;
+
+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
+
+ head = circ_buf->head;
+ tail = circ_buf->tail;
+
+ if (CIRC_SPACE(head, tail, size) <= 0) {
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+ return 1;
+ }
+
+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
+ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
+
+ circ_buf->head = (head + 1) & (size - 1);
+
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+
+ return 0;
+}
+
+static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
+ struct dpa_ptp_ident *src)
+{
+ int ret;
+
+ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
+ return 0;
+
+ if ((dst->netw_prot == src->netw_prot)
+ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
+ if (dst->seq_id != src->seq_id)
+ return 0;
+
+ ret = memcmp(dst->snd_port_id, src->snd_port_id,
+ DPA_PTP_SOURCE_PORT_LENGTH);
+ if (ret)
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
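+/*
+ * Search the ring from tail to head for an entry matching 'ident'.
+ * On a match, copy its timestamp to 'ts' and advance the tail past it,
+ * discarding any older entries; if nothing matches, the ring is
+ * drained (tail is set to head) and 1 is returned.
+ */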
+static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
+ int size = ptp_buf->size;
+ int head, tail, idx;
+ unsigned long flags;
+ struct dpa_ptp_data *tmp;
+ struct dpa_ptp_ident *tmp_ident;
+
+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
+
+ head = circ_buf->head;
+ tail = idx = circ_buf->tail;
+
+ if (CIRC_CNT_TO_END(head, tail, size) == 0) {
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+ return 1;
+ }
+
+ while (idx != head) {
+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
+ tmp_ident = &tmp->ident;
+ if (dpa_ptp_is_ident_match(tmp_ident, ident))
+ break;
+ idx = (idx + 1) & (size - 1);
+ }
+
+ if (idx == head) {
+ circ_buf->tail = head;
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+ return 1;
+ }
+
+ ts->sec = tmp->ts.sec;
+ ts->nsec = tmp->ts.nsec;
+
+ circ_buf->tail = (idx + 1) & (size - 1);
+
+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
+
+ return 0;
+}
+
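+/*
+ * Convert the raw timestamp stored in the frame annotation area into
+ * seconds (*high) and a nanosecond remainder (*low). The raw value is
+ * a tick count of the nominal 100 MHz RTC clock, i.e. each tick is
+ * DPA_PTP_NOMINAL_FREQ_PERIOD (10) ns.
+ */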
+static int dpa_ptp_get_time(dma_addr_t fd_addr, u32 *high, u32 *low)
+{
+ u8 *ts_addr = (u8 *)phys_to_virt(fd_addr);
+ u32 sec, nsec, mod;
+ u64 tmp;
+
+ ts_addr += DPA_PTP_TIMESTAMP_OFFSET;
+ sec = *((u32 *)ts_addr);
+ nsec = *(((u32 *)ts_addr) + 1);
+ tmp = ((u64)sec << 32 | nsec) * DPA_PTP_NOMINAL_FREQ_PERIOD;
+
+ mod = do_div(tmp, NANOSEC_PER_SECOND);
+ *high = (u32)tmp;
+ *low = mod;
+
+ return 0;
+}
+
+/*
+ * Parse the PTP packets
+ *
+ * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
+ * an IEEE 802.3 Ethernet frame. This function returns the position of
+ * the PTP header, or NULL if no PTP header is found
+ */
+static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
+{
+ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
+ u8 *ptp_loc = NULL;
+ u8 msg_type;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct ipv6hdr *ipv6h;
+
+ *eth_type = *((u16 *)pos);
+
+ /* Check if inner tag is here */
+ if (*eth_type == ETH_P_8021Q) {
+ pos += DPA_VLAN_TAG_LEN;
+ *eth_type = *((u16 *)pos);
+ }
+
+ pos += DPA_ETYPE_LEN;
+
+ switch (*eth_type) {
+ /* Transport of PTP over Ethernet */
+ case ETH_P_1588:
+ ptp_loc = pos;
+ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
+ if ((msg_type == PTP_MSGTYPE_SYNC)
+ || (msg_type == PTP_MSGTYPE_DELREQ)
+ || (msg_type == PTP_MSGTYPE_PDELREQ)
+ || (msg_type == PTP_MSGTYPE_PDELRESP))
+ return ptp_loc;
+ break;
+ /* Transport of PTP over IPv4 */
+ case ETH_P_IP:
+ iph = (struct iphdr *)pos;
+ if (iph->protocol != IPPROTO_UDP)
+ return NULL;
+
+ pos += iph->ihl * 4;
+ udph = (struct udphdr *)pos;
+ if (ntohs(udph->dest) != 319)
+ return NULL;
+ ptp_loc = pos + sizeof(struct udphdr);
+ break;
+ /* Transport of PTP over IPv6 */
+ case ETH_P_IPV6:
+ ipv6h = (struct ipv6hdr *)pos;
+ if (ipv6h->nexthdr != IPPROTO_UDP)
+ return NULL;
+
+ pos += sizeof(struct ipv6hdr);
+ udph = (struct udphdr *)pos;
+ if (ntohs(udph->dest) != 319)
+ return NULL;
+ ptp_loc = pos + sizeof(struct udphdr);
+ break;
+ default:
+ break;
+ }
+
+ return ptp_loc;
+}
+
+static int dpa_ptp_store_stamp(struct net_device *dev, struct sk_buff *skb,
+ dma_addr_t fd_addr, struct dpa_ptp_data *ptp_data)
+{
+ u32 sec, nsec;
+ u8 *ptp_loc;
+ u16 eth_type;
+
+ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
+ if (!ptp_loc)
+ return -EINVAL;
+
+ switch (eth_type) {
+ case ETH_P_IP:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
+ break;
+ case ETH_P_IPV6:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
+ break;
+ case ETH_P_1588:
+ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
+ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
+ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
+ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
+ DPA_PTP_SOURCE_PORT_LENGTH);
+
+ dpa_ptp_get_time(fd_addr, &sec, &nsec);
+ ptp_data->ts.sec = (u64)sec;
+ ptp_data->ts.nsec = nsec;
+
+ return 0;
+}
+
+void dpa_ptp_store_txstamp(struct net_device *dev, struct sk_buff *skb,
+ const struct qm_fd *fd)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct dpa_ptp_data ptp_tx_data;
+ dma_addr_t fd_addr = qm_fd_addr(fd);
+ int ret;
+
+ ret = dpa_ptp_store_stamp(dev, skb, fd_addr, &ptp_tx_data);
+ if (ret)
+ return;
+ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
+}
+
+void dpa_ptp_store_rxstamp(struct net_device *dev, struct sk_buff *skb,
+ const struct qm_fd *fd)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct dpa_ptp_data ptp_rx_data;
+ dma_addr_t fd_addr = qm_fd_addr(fd);
+ int ret;
+
+ ret = dpa_ptp_store_stamp(dev, skb, fd_addr, &ptp_rx_data);
+ if (ret)
+ return;
+ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
+}
+
+static int dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct dpa_ptp_tsu *tsu = ptp_tsu;
+ struct dpa_ptp_time tmp;
+ int flag;
+
+ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
+ if (!flag) {
+ ts->sec = tmp.sec;
+ ts->nsec = tmp.nsec;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
+ struct dpa_ptp_ident *ident,
+ struct dpa_ptp_time *ts)
+{
+ struct dpa_ptp_tsu *tsu = ptp_tsu;
+ struct dpa_ptp_time tmp;
+ int flag;
+
+ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
+ if (!flag) {
+ ts->sec = tmp.sec;
+ ts->nsec = tmp.nsec;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *cnt_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp, fiper;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(tsu->dpa_priv->net_dev);
+
+ /* TMR_FIPER1 will pulse every second after ALARM1 expired */
+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
+ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD;
+ if (mac_dev->fm_rtc_set_alarm)
+ mac_dev->fm_rtc_set_alarm(tsu->dpa_priv->net_dev, 0, tmp);
+ if (mac_dev->fm_rtc_set_fiper)
+ mac_dev->fm_rtc_set_fiper(tsu->dpa_priv->net_dev, 0, fiper);
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(tsu->dpa_priv->net_dev);
+}
+
+static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *curr_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp;
+ u32 mod;
+
+ if (mac_dev->fm_rtc_get_cnt)
+ mac_dev->fm_rtc_get_cnt(tsu->dpa_priv->net_dev, &tmp);
+
+ mod = do_div(tmp, NANOSEC_PER_SECOND);
+ curr_time->sec = (u32)tmp;
+ curr_time->nsec = mod;
+}
+
+static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
+ struct dpa_ptp_time *cnt_time)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u64 tmp;
+
+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
+
+ if (mac_dev->fm_rtc_set_cnt)
+ mac_dev->fm_rtc_set_cnt(tsu->dpa_priv->net_dev, tmp);
+
+ /* Restart fiper two seconds later */
+ cnt_time->sec += 2;
+ cnt_time->nsec = 0;
+ dpa_set_fiper_alarm(tsu, cnt_time);
+}
+
+static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+ u32 drift;
+
+ if (mac_dev->fm_rtc_get_drift)
+ mac_dev->fm_rtc_get_drift(tsu->dpa_priv->net_dev, &drift);
+
+ *addend = drift;
+}
+
+static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
+{
+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
+
+ if (mac_dev->fm_rtc_set_drift)
+ mac_dev->fm_rtc_set_drift(tsu->dpa_priv->net_dev, addend);
+}
+
+static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
+{
+ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
+ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
+}
+
+int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct dpa_ptp_data ptp_data;
+ struct dpa_ptp_data *ptp_data_user;
+ struct dpa_ptp_time act_time;
+ u32 addend;
+ int retval = 0;
+
+ if (!tsu || !tsu->valid)
+ return -ENODEV;
+
+ switch (cmd) {
+ case PTP_ENBL_TXTS_IOCTL:
+ tsu->hwts_tx_en_ioctl = 1;
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev);
+ break;
+ case PTP_DSBL_TXTS_IOCTL:
+ tsu->hwts_tx_en_ioctl = 0;
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev);
+ break;
+ case PTP_ENBL_RXTS_IOCTL:
+ tsu->hwts_rx_en_ioctl = 1;
+ break;
+ case PTP_DSBL_RXTS_IOCTL:
+ tsu->hwts_rx_en_ioctl = 0;
+ break;
+ case PTP_GET_RX_TIMESTAMP:
+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
+ if (copy_from_user(&ptp_data.ident,
+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
+ return -EFAULT;
+
+ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
+ return -EAGAIN;
+
+ if (copy_to_user((void __user *)&ptp_data_user->ts,
+ &ptp_data.ts, sizeof(ptp_data.ts)))
+ return -EFAULT;
+ break;
+ case PTP_GET_TX_TIMESTAMP:
+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
+ if (copy_from_user(&ptp_data.ident,
+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
+ return -EFAULT;
+
+ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
+ return -EAGAIN;
+
+ if (copy_to_user((void __user *)&ptp_data_user->ts,
+ &ptp_data.ts, sizeof(ptp_data.ts)))
+ return -EFAULT;
+ break;
+ case PTP_GET_TIME:
+ dpa_get_curr_cnt(tsu, &act_time);
+ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
+ return -EFAULT;
+ break;
+ case PTP_SET_TIME:
+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
+ return -EFAULT;
+ dpa_set_1588cnt(tsu, &act_time);
+ break;
+ case PTP_GET_ADJ:
+ dpa_get_drift(tsu, &addend);
+ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
+ return -EFAULT;
+ break;
+ case PTP_SET_ADJ:
+ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
+ return -EFAULT;
+ dpa_set_drift(tsu, addend);
+ break;
+ case PTP_SET_FIPER_ALARM:
+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
+ return -EFAULT;
+ dpa_set_fiper_alarm(tsu, &act_time);
+ break;
+ case PTP_CLEANUP_TS:
+ dpa_flush_timestamp(tsu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return retval;
+}
+
+int dpa_ptp_init(struct dpa_priv_s *priv)
+{
+ struct dpa_ptp_tsu *tsu;
+
+ /* Allocate memory for PTP structure */
+ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
+ if (!tsu)
+ return -ENOMEM;
+
+ tsu->valid = true;
+ tsu->dpa_priv = priv;
+
+ if (dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ) ||
+     dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ)) {
+ /* vfree(NULL) is a no-op, so a partial init unwinds safely */
+ vfree(tsu->rx_timestamps.circ_buf.buf);
+ kfree(tsu);
+ return -ENOMEM;
+ }
+
+ priv->tsu = tsu;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpa_ptp_init);
+
+void dpa_ptp_cleanup(struct dpa_priv_s *priv)
+{
+ struct dpa_ptp_tsu *tsu = priv->tsu;
+
+ tsu->valid = false;
+ vfree(tsu->rx_timestamps.circ_buf.buf);
+ vfree(tsu->tx_timestamps.circ_buf.buf);
+
+ kfree(tsu);
+}
+EXPORT_SYMBOL(dpa_ptp_cleanup);
+
+static int __init __cold dpa_ptp_load(void)
+{
+ return 0;
+}
+module_init(dpa_ptp_load);
+
+static void __exit __cold dpa_ptp_unload(void)
+{
+}
+module_exit(dpa_ptp_unload);
new file mode 100644
@@ -0,0 +1,141 @@
+/*
+ * drivers/net/dpa/dpaa_1588.h
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef __DPAA_1588_H__
+#define __DPAA_1588_H__
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/circ_buf.h>
+#include <linux/fsl_qman.h>
+
+#define DEFAULT_PTP_RX_BUF_SZ 2048
+#define DEFAULT_PTP_TX_BUF_SZ 512
+
+/* 1588 private ioctl calls */
+#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
+#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
+#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
+#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
+#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
+#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
+#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
+#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
+#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
+#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
+#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
+#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
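+
+/*
+ * These private ioctls are issued from userspace on the network
+ * interface. A minimal sketch (the socket type, interface name and
+ * error handling are illustrative only):
+ *
+ *	struct ifreq ifr = { 0 };
+ *	struct dpa_ptp_time t;
+ *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&t;
+ *	if (ioctl(sock, PTP_GET_TIME, &ifr) == 0)
+ *		printf("%llu.%09u\n", t.sec, t.nsec);
+ */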
+
+/* PTP V2 message type */
+enum {
+ PTP_MSGTYPE_SYNC = 0x0,
+ PTP_MSGTYPE_DELREQ = 0x1,
+ PTP_MSGTYPE_PDELREQ = 0x2,
+ PTP_MSGTYPE_PDELRESP = 0x3,
+ PTP_MSGTYPE_FLWUP = 0x8,
+ PTP_MSGTYPE_DELRESP = 0x9,
+ PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
+ PTP_MSGTYPE_ANNOUNCE = 0xB,
+ PTP_MSGTYPE_SGNLNG = 0xC,
+ PTP_MSGTYPE_MNGMNT = 0xD,
+};
+
+/* Byte offset of data in the PTP V2 headers */
+#define PTP_OFFS_MSG_TYPE 0
+#define PTP_OFFS_VER_PTP 1
+#define PTP_OFFS_MSG_LEN 2
+#define PTP_OFFS_DOM_NMB 4
+#define PTP_OFFS_FLAGS 6
+#define PTP_OFFS_CORFIELD 8
+#define PTP_OFFS_SRCPRTID 20
+#define PTP_OFFS_SEQ_ID 30
+#define PTP_OFFS_CTRL 32
+#define PTP_OFFS_LOGMEAN 33
+
+#define PTP_IP_OFFS 14
+#define PTP_UDP_OFFS 34
+#define PTP_HEADER_OFFS 42
+#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
+#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
+#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
+#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
+
+/* 1588-2008 network protocol enumeration values */
+#define DPA_PTP_PROT_IPV4 1
+#define DPA_PTP_PROT_IPV6 2
+#define DPA_PTP_PROT_802_3 3
+#define DPA_PTP_PROT_DONTCARE 0xFFFF
+
+#define DPA_PTP_SOURCE_PORT_LENGTH 10
+#define DPA_PTP_HEADER_SZE 34
+#define DPA_ETYPE_LEN 2
+#define DPA_VLAN_TAG_LEN 4
+
+#define DPA_PTP_TIMESTAMP_OFFSET 0x30
+#define DPA_PTP_NOMINAL_FREQ_PERIOD 0xa /* 10ns -> 100M */
+#define NANOSEC_PER_SECOND 1000000000
+
+/* Struct needed to identify a timestamp */
+struct dpa_ptp_ident {
+ u8 version;
+ u8 msg_type;
+ u16 netw_prot;
+ u16 seq_id;
+ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
+};
+
+/* Timestamp format in 1588-2008 */
+struct dpa_ptp_time {
+ u64 sec; /* only the low 48 bits are used */
+ u32 nsec;
+};
+
+/* needed for timestamp data over ioctl */
+struct dpa_ptp_data {
+ struct dpa_ptp_ident ident;
+ struct dpa_ptp_time ts;
+};
+
+struct dpa_ptp_circ_buf {
+ struct circ_buf circ_buf;
+ u32 size;
+ spinlock_t ptp_lock;
+};
+
+/* PTP TSU control structure */
+struct dpa_ptp_tsu {
+ struct dpa_priv_s *dpa_priv;
+ bool valid;
+ struct dpa_ptp_circ_buf rx_timestamps;
+ struct dpa_ptp_circ_buf tx_timestamps;
+
+ /* HW timestamping over ioctl enabled flag */
+ int hwts_tx_en_ioctl;
+ int hwts_rx_en_ioctl;
+};
+
+extern int dpa_ptp_init(struct dpa_priv_s *priv);
+extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
+extern void dpa_ptp_store_txstamp(struct net_device *dev, struct sk_buff *skb,
+ const struct qm_fd *fd);
+extern void dpa_ptp_store_rxstamp(struct net_device *dev, struct sk_buff *skb,
+ const struct qm_fd *fd);
+extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
+#endif
new file mode 100644
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_COMMON_H
+#define __DPA_COMMON_H
+
+#include <linux/kernel.h> /* pr_*() */
+#include <linux/device.h> /* dev_*() */
+#include <linux/smp.h> /* smp_processor_id() */
+
+/* The basename of the source file being compiled */
+#define __file__ KBUILD_BASENAME".c"
+
+#define __hot
+
+#define cpu_printk(level, format, arg...) \
+ pr_##level("cpu%d: " format, smp_processor_id(), ##arg)
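+/*
+ * e.g. cpu_pr_err("timeout\n") expands to
+ * pr_err("cpu%d: timeout\n", smp_processor_id()), printing e.g.
+ * "cpu1: timeout" at KERN_ERR for the calling processor.
+ */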
+
+#define cpu_pr_emerg(format, arg...) \
+ cpu_printk(emerg, format, ##arg)
+#define cpu_pr_alert(format, arg...) \
+ cpu_printk(alert, format, ##arg)
+#define cpu_pr_crit(format, arg...) \
+ cpu_printk(crit, format, ##arg)
+#define cpu_pr_err(format, arg...) \
+ cpu_printk(err, format, ##arg)
+#define cpu_pr_warning(format, arg...) \
+ cpu_printk(warning, format, ##arg)
+#define cpu_pr_notice(format, arg...) \
+ cpu_printk(notice, format, ##arg)
+#define cpu_pr_info(format, arg...) \
+ cpu_printk(info, format, ##arg)
+#define cpu_pr_debug(format, arg...) \
+ cpu_printk(debug, format, ##arg)
+
+/* Keep this in sync with the dev_*() definitions from linux/device.h */
+#define cpu_dev_printk(level, dev, format, arg...) \
+ cpu_pr_##level("%s: %s: " format, dev_driver_string(dev), \
+ dev_name(dev), ##arg)
+
+#define cpu_dev_emerg(dev, format, arg...) \
+ cpu_dev_printk(emerg, dev, format, ##arg)
+#define cpu_dev_alert(dev, format, arg...) \
+ cpu_dev_printk(alert, dev, format, ##arg)
+#define cpu_dev_crit(dev, format, arg...) \
+ cpu_dev_printk(crit, dev, format, ##arg)
+#define cpu_dev_err(dev, format, arg...) \
+ cpu_dev_printk(err, dev, format, ##arg)
+#define cpu_dev_warn(dev, format, arg...) \
+ cpu_dev_printk(warning, dev, format, ##arg)
+#define cpu_dev_notice(dev, format, arg...) \
+ cpu_dev_printk(notice, dev, format, ##arg)
+#define cpu_dev_info(dev, format, arg...) \
+ cpu_dev_printk(info, dev, format, ##arg)
+#define cpu_dev_dbg(dev, format, arg...) \
+ cpu_dev_printk(debug, dev, format, ##arg)
+
+#define dpaa_eth_printk(level, dev, format, arg...) \
+ cpu_dev_printk(level, dev, "%s:%hu:%s() " format, \
+ __file__, __LINE__, __func__, ##arg)
+
+#define dpaa_eth_emerg(dev, format, arg...) \
+ dpaa_eth_printk(emerg, dev, format, ##arg)
+#define dpaa_eth_alert(dev, format, arg...) \
+ dpaa_eth_printk(alert, dev, format, ##arg)
+#define dpaa_eth_crit(dev, format, arg...) \
+ dpaa_eth_printk(crit, dev, format, ##arg)
+#define dpaa_eth_err(dev, format, arg...) \
+ dpaa_eth_printk(err, dev, format, ##arg)
+#define dpaa_eth_warning(dev, format, arg...) \
+ dpaa_eth_printk(warning, dev, format, ##arg)
+#define dpaa_eth_notice(dev, format, arg...) \
+ dpaa_eth_printk(notice, dev, format, ##arg)
+#define dpaa_eth_info(dev, format, arg...) \
+ dpaa_eth_printk(info, dev, format, ##arg)
+#define dpaa_eth_debug(dev, format, arg...) \
+ dpaa_eth_printk(debug, dev, format, ##arg)
+
+#define cpu_netdev_emerg(net_dev, format, arg...) \
+ dpaa_eth_emerg((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_alert(net_dev, format, arg...) \
+ dpaa_eth_alert((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_crit(net_dev, format, arg...) \
+ dpaa_eth_crit((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_err(net_dev, format, arg...) \
+ dpaa_eth_err((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_warn(net_dev, format, arg...) \
+ dpaa_eth_warning((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_notice(net_dev, format, arg...) \
+ dpaa_eth_notice((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_info(net_dev, format, arg...) \
+ dpaa_eth_info((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+#define cpu_netdev_dbg(net_dev, format, arg...) \
+ dpaa_eth_debug((net_dev)->dev.parent, "%s: " format, \
+ (net_dev)->name , ##arg)
+
+enum {RX, TX};
+
+#define DPA_PRIV_DATA_SIZE 16
+#define DPA_PARSE_RESULTS_SIZE sizeof(t_FmPrsResult)
+#define DPA_HASH_RESULTS_SIZE 16
+
+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, has_timer) \
+{ \
+ param.errq = errq_id; \
+ param.defq = defq_id; \
+ param.priv_data_size = DPA_PRIV_DATA_SIZE; \
+ param.parse_results = true; \
+ param.hash_results = true; \
+ param.time_stamp = has_timer; \
+ fm_set_##type##_port_params(port, &param); \
+}
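+/*
+ * e.g. dpaa_eth_init_port(rx, port, param, errq, defq, true) configures
+ * an Rx port via fm_set_rx_port_params(port, &param).
+ */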
+
+#endif /* __DPA_COMMON_H */
new file mode 100644
@@ -0,0 +1,3251 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h> /* arp_hdr_len() */
+#include <linux/if_vlan.h> /* VLAN_HLEN */
+#include <linux/icmp.h> /* struct icmphdr */
+#include <linux/ip.h> /* struct iphdr */
+#include <linux/ipv6.h> /* struct ipv6hdr */
+#include <linux/udp.h> /* struct udphdr */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/net.h> /* net_ratelimit() */
+#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#include <asm/smp.h> /* get_hard_smp_processor_id() */
+#include <asm/debug.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <linux/fsl_bman.h>
+
+#include "fsl_fman.h"
+#include "fm_ext.h"
+#include "fm_port_ext.h"
+
+#include "mac.h"
+#include "dpaa_eth.h"
+#include "dpaa_1588.h"
+
+#define ARRAY2_SIZE(arr) (ARRAY_SIZE(arr) * ARRAY_SIZE((arr)[0]))
+
+#define DPA_NETIF_FEATURES (NETIF_F_HW_QDISC)
+#define DEFAULT_COUNT 64
+#define DEFAULT_BUF_SIZE DPA_BP_SIZE(fsl_fman_phy_maxfrm)
+#define DPA_MAX_TX_BACKLOG 512
+#define DPA_NAPI_WEIGHT 64
+
+#define DPA_BP_REFILL (1 | (smp_processor_id() << 16))
+#define DPA_BP_FINE ((smp_processor_id() << 16))
+#define DPA_BP_REFILL_NEEDED 1
+
+/* Bootarg used to override the Kconfig DPA_MAX_FRM_SIZE value */
+#define FSL_FMAN_PHY_MAXFRM_BOOTARG "fsl_fman_phy_max_frm"
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+
+/*
+ * Values for the L4R field of the FM Parse Results
+ */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+/*
+ * FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+static uint16_t __devinitdata tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dpa_debugfs_root;
+#endif
+
+/*
+ * Max frame size, across all interfaces.
+ * Configurable from Kconfig or bootargs, to avoid allocating
+ * oversized (socket) buffers when not using jumbo frames.
+ * Must be large enough to accommodate the network MTU, but small enough
+ * to avoid wasting skb memory.
+ *
+ * Could be overridden once, at boot-time, via the
+ * fsl_fman_phy_set_max_frm() callback.
+ */
+int fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE;
+
+static const char rtx[][3] = {
+ [RX] = "RX",
+ [TX] = "TX"
+};
+
+struct dpa_fq {
+ struct qman_fq fq_base;
+ struct list_head list;
+ struct net_device *net_dev;
+ bool init;
+ uint32_t fqid;
+ uint32_t flags;
+ uint16_t channel;
+ uint8_t wq;
+};
+
+/* BM */
+
+#ifdef DEBUG
+#define GFP_DPA_BP (GFP_DMA | __GFP_ZERO | GFP_ATOMIC)
+#else
+#define GFP_DPA_BP (GFP_DMA | GFP_ATOMIC)
+#endif
+
+#define DPA_BP_HEAD (DPA_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE + \
+ DPA_HASH_RESULTS_SIZE)
+#define DPA_BP_SIZE(s) (DPA_BP_HEAD + (s))
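+/*
+ * i.e. each buffer starts with DPA_BP_HEAD bytes of per-frame
+ * annotations (private data, parse results, hash results), followed
+ * by the frame data itself.
+ */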
+
+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+
+#define FM_FD_STAT_ERRORS \
+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
+
+static struct dpa_bp *dpa_bp_array[64];
+
+static struct dpa_bp *default_pool;
+
+static struct dpa_bp *dpa_bpid2pool(int bpid)
+{
+ return dpa_bp_array[bpid];
+}
+
+static void dpa_bp_depletion(struct bman_portal *portal,
+ struct bman_pool *pool, void *cb_ctx, int depleted)
+{
+ if (net_ratelimit())
+ pr_err("Invalid Pool depleted notification!\n");
+}
+
+static void bmb_free(struct dpa_bp *bp, struct bm_buffer *bmb)
+{
+ int i;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+
+ for (i = 0; i < 8; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+ if (!addr)
+ break;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+
+ dev_kfree_skb(skb);
+ }
+}
+
+static void dpa_bp_add_8(struct dpa_bp *dpa_bp)
+{
+ struct bm_buffer bmb[8];
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ int i;
+ struct sk_buff *skb;
+ int err;
+ int *count_ptr;
+
+ count_ptr = per_cpu_ptr(dpa_bp->percpu_count, smp_processor_id());
+
+ for (i = 0; i < 8; i++) {
+ /*
+ * The buffers tend to be aligned all to the same cache
+ * index. A standard dequeue operation pulls in 15 packets.
+ * This means that when it stashes, it evicts half of the
+ * packets it's stashing. In order to prevent that, we pad
+ * by a variable number of cache lines, to reduce collisions.
+ * We always pad by at least 1 cache line, because we want
+ * a little extra room at the beginning for IPSec and to
+ * accommodate NET_IP_ALIGN.
+ */
+ int pad = (i + 1) * L1_CACHE_BYTES;
+
+ skb = dev_alloc_skb(dpa_bp->size + pad);
+ if (unlikely(!skb)) {
+ printk(KERN_ERR "dev_alloc_skb() failed\n");
+ bm_buffer_set64(&bmb[i], 0);
+ break;
+ }
+
+ skbh = (struct sk_buff **)(skb->head + pad);
+ *skbh = skb;
+
+ addr = dma_map_single(dpa_bp->dev, skb->head + pad,
+ dpa_bp->size, DMA_FROM_DEVICE);
+
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buf. */
+ if (likely(i)) {
+ err = bman_release(dpa_bp->pool, bmb, i, 0);
+
+ if (unlikely(err < 0))
+ bmb_free(dpa_bp, bmb);
+ else
+ *count_ptr += i;
+ }
+}
+
+static void dpa_make_private_pool(struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ dpa_bp->percpu_count = __alloc_percpu(sizeof(*dpa_bp->percpu_count),
+ __alignof__(*dpa_bp->percpu_count));
+
+ /* Give each cpu an allotment of "count" buffers */
+ for_each_online_cpu(i) {
+ int *thiscount;
+ int *countptr;
+ int j;
+ thiscount = per_cpu_ptr(dpa_bp->percpu_count,
+ smp_processor_id());
+ countptr = per_cpu_ptr(dpa_bp->percpu_count, i);
+
+ for (j = 0; j < dpa_bp->count; j += 8)
+ dpa_bp_add_8(dpa_bp);
+
+ /* Adjust the counts */
+ *countptr = j;
+
+ if (countptr != thiscount)
+ *thiscount = *thiscount - j;
+ }
+}
+
+
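+/*
+ * Seed a shared pool: carve the contiguous region at bp->paddr into
+ * bp->count buffers of bp->size bytes and release them to BMan in
+ * batches of up to 8 (the bman_release() maximum).
+ */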
+static void dpaa_eth_seed_pool(struct dpa_bp *bp)
+{
+ size_t count = bp->count;
+ u64 addr = bp->paddr;
+
+ while (count) {
+ struct bm_buffer bufs[8];
+ int num_bufs = 0;
+
+ do {
+ BUG_ON(addr > 0xffffffffffffull);
+ bufs[num_bufs].bpid = bp->bpid;
+ bm_buffer_set64(&bufs[num_bufs++], addr);
+ addr += bp->size;
+
+ } while (--count && (num_bufs < 8));
+
+ while (bman_release(bp->pool, bufs, num_bufs, 0))
+ cpu_relax();
+ }
+}
+
+static int dpa_make_shared_pool(struct dpa_bp *bp)
+{
+ devm_request_mem_region(bp->dev, bp->paddr, bp->size * bp->count,
+ KBUILD_MODNAME);
+ bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr,
+ bp->size * bp->count, 0);
+ if (bp->vaddr == NULL) {
+ cpu_pr_err("Could not map memory for pool %d\n", bp->bpid);
+ return -EIO;
+ }
+
+ if (bp->seed_pool)
+ dpaa_eth_seed_pool(bp);
+
+ return 0;
+}
+
+static int __devinit __must_check __attribute__((nonnull))
+dpa_bp_alloc(struct dpa_bp *dpa_bp)
+{
+ int err;
+ struct bman_pool_params bp_params;
+ struct platform_device *pdev;
+
+ BUG_ON(dpa_bp->size == 0);
+ BUG_ON(dpa_bp->count == 0);
+
+ bp_params.flags = BMAN_POOL_FLAG_DEPLETION;
+ bp_params.cb = dpa_bp_depletion;
+ bp_params.cb_ctx = dpa_bp;
+
+ /* We support two options. Either a global shared pool, or
+ * a specified pool. If the pool is specified, we only
+ * create one per bpid */
+ if (dpa_bp->kernel_pool && default_pool) {
+ atomic_inc(&default_pool->refs);
+ return 0;
+ }
+
+ if (dpa_bp_array[dpa_bp->bpid]) {
+ atomic_inc(&dpa_bp_array[dpa_bp->bpid]->refs);
+ return 0;
+ }
+
+ if (dpa_bp->bpid == 0)
+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+ else
+ bp_params.bpid = dpa_bp->bpid;
+
+ dpa_bp->pool = bman_new_pool(&bp_params);
+ if (unlikely(dpa_bp->pool == NULL)) {
+ cpu_pr_err("bman_new_pool() failed\n");
+ return -ENODEV;
+ }
+
+ dpa_bp->bpid = bman_get_params(dpa_bp->pool)->bpid;
+
+ pdev = platform_device_register_simple("dpaa_eth_bpool",
+ dpa_bp->bpid, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_register_failed;
+ }
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+
+ dpa_bp->dev = &pdev->dev;
+
+ if (dpa_bp->kernel_pool) {
+ dpa_make_private_pool(dpa_bp);
+ if (!default_pool)
+ default_pool = dpa_bp;
+ } else {
+ err = dpa_make_shared_pool(dpa_bp);
+ if (err)
+ goto make_shared_pool_failed;
+ }
+
+ dpa_bp_array[dpa_bp->bpid] = dpa_bp;
+
+ atomic_set(&dpa_bp->refs, 1);
+
+ return 0;
+
+make_shared_pool_failed:
+pdev_mask_failed:
+ platform_device_unregister(pdev);
+pdev_register_failed:
+ bman_free_pool(dpa_bp->pool);
+
+ return err;
+}
+
+static void __cold __attribute__((nonnull))
+_dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->kernel_pool) {
+ int num;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ num = bman_acquire(bp->pool, bmb, 8, 0);
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+ struct sk_buff **skbh = phys_to_virt(addr);
+ struct sk_buff *skb = *skbh;
+
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ }
+ } while (num == 8);
+ }
+
+ dpa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+static void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ _dpa_bp_free(&priv->dpa_bp[i]);
+}
+
+/* QM */
+
+static int __devinit __must_check __attribute__((nonnull))
+_dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+ struct device *dev;
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+ /* Set the QMan taildrop threshold high enough to accommodate
+ * one 64k frame, plus an extra (here, 16k) for
+ * other frames awaiting Tx. */
+ const u32 qman_taildrop_threshold = 0x14000;
+
+ priv = netdev_priv(dpa_fq->net_dev);
+ dev = dpa_fq->net_dev->dev.parent;
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+ if (_errno) {
+ dpaa_eth_err(dev, "qman_create_fq() failed\n");
+ return _errno;
+ }
+ fq = &dpa_fq->fq_base;
+
+ if (dpa_fq->init) {
+ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH | QM_INITFQ_WE_FQCTRL;
+ qm_fqd_taildrop_set(&initfq.fqd.td, qman_taildrop_threshold, 1);
+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE | QM_FQCTRL_PREFERINCACHE;
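+ /*
+ * For ingress FQs (which the driver dequeues from, hence the
+ * NO_ENQUEUE flag), also stash dequeued frame data, annotations
+ * and FQ context into the CPU cache, to speed up the hot path.
+ */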
+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |=
+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+ }
+
+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (_errno < 0) {
+ dpaa_eth_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+ qman_destroy_fq(fq, 0);
+ return _errno;
+ }
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+ list_add_tail(&dpa_fq->list, list);
+
+ return 0;
+}
+
+static int __cold __attribute__((nonnull))
+_dpa_fq_free(struct device *dev, struct qman_fq *fq)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq;
+ const struct dpa_priv_s *priv;
+
+ _errno = 0;
+
+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
+ priv = netdev_priv(dpa_fq->net_dev);
+
+ if (dpa_fq->init) {
+ _errno = qman_retire_fq(fq, NULL);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ dpaa_eth_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+
+ __errno = qman_oos_fq(fq);
+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
+ dpaa_eth_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), __errno);
+ if (_errno >= 0)
+ _errno = __errno;
+ }
+ }
+
+ qman_destroy_fq(fq, 0);
+ list_del(&dpa_fq->list);
+
+ return _errno;
+}
+
+static int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq, *tmp;
+
+ _errno = 0;
+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+ if (unlikely(__errno < 0) && _errno >= 0)
+ _errno = __errno;
+ }
+
+ return _errno;
+}
+
+
+static inline ssize_t __const __must_check __attribute__((nonnull))
+dpa_fd_length(const struct qm_fd *fd)
+{
+ return fd->length20;
+}
+
+static inline ssize_t __const __must_check __attribute__((nonnull))
+dpa_fd_offset(const struct qm_fd *fd)
+{
+ return fd->offset;
+}
+
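+/*
+ * Return the buffer(s) of a frame descriptor to their BMan pool(s).
+ * For scatter/gather frames, walk the S/G table and batch-release runs
+ * of entries sharing a bpid (up to 8 per bman_release() call), then
+ * release the buffer holding the S/G table itself.
+ */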
+static int __must_check __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
+{
+ int _errno, __errno, i, j;
+ const struct dpa_priv_s *priv;
+ const struct qm_sg_entry *sgt;
+ struct dpa_bp *_dpa_bp, *dpa_bp;
+ struct bm_buffer _bmb, bmb[8];
+
+ priv = netdev_priv(net_dev);
+
+ _bmb.hi = fd->addr_hi;
+ _bmb.lo = fd->addr_lo;
+
+ _dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(_dpa_bp == NULL);
+
+ _errno = 0;
+ if (fd->format == qm_fd_sg) {
+ sgt = (phys_to_virt(bm_buf_addr(&_bmb)) + dpa_fd_offset(fd));
+
+ i = 0;
+ do {
+ dpa_bp = dpa_bpid2pool(sgt[i].bpid);
+ BUG_ON(dpa_bp == NULL);
+
+ j = 0;
+ do {
+ BUG_ON(sgt[i].extension);
+
+ bmb[j].hi = sgt[i].addr_hi;
+ bmb[j].lo = sgt[i].addr_lo;
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !sgt[i-1].final &&
+ sgt[i-1].bpid == sgt[i].bpid);
+
+ __errno = bman_release(dpa_bp->pool, bmb, j, 0);
+ if (unlikely(__errno < 0)) {
+ if (netif_msg_drv(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev,
+ "bman_release(%hu) = %d\n",
+ dpa_bp->bpid, __errno);
+ if (_errno >= 0)
+ _errno = __errno;
+ }
+ } while (!sgt[i-1].final);
+ }
+
+ __errno = bman_release(_dpa_bp->pool, &_bmb, 1, 0);
+ if (unlikely(__errno < 0)) {
+ if (netif_msg_drv(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev, "bman_release(%hu) = %d\n",
+ _dpa_bp->bpid, __errno);
+ if (_errno >= 0)
+ _errno = __errno;
+ }
+
+ return _errno;
+}
+
+/* net_device */
+
+#define NN_ALLOCATED_SPACE(net_dev) \
+ max((size_t)arp_hdr_len(net_dev), sizeof(struct iphdr))
+#define NN_RESERVED_SPACE(net_dev) \
+ min((size_t)arp_hdr_len(net_dev), sizeof(struct iphdr))
+
+#define TT_ALLOCATED_SPACE(net_dev) \
+ max(sizeof(struct icmphdr), max(sizeof(struct udphdr), \
+ sizeof(struct tcphdr)))
+#define TT_RESERVED_SPACE(net_dev) \
+ min(sizeof(struct icmphdr), min(sizeof(struct udphdr), \
+ sizeof(struct tcphdr)))
+
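+/*
+ * Sum the per-CPU counters into net_dev->stats. Both structures are
+ * walked as arrays of unsigned long, so their layouts must match.
+ */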
+static struct net_device_stats * __cold
+dpa_get_stats(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ unsigned long *netstats;
+ unsigned long *cpustats;
+ int i, j;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int numstats = sizeof(net_dev->stats) / sizeof(unsigned long);
+
+ netstats = (unsigned long *)&net_dev->stats;
+
+ memset(netstats, 0, sizeof(net_dev->stats));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (unsigned long *)&percpu_priv->stats;
+
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return &net_dev->stats;
+}
+
+static int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ const struct dpa_priv_s *priv;
+ const int max_mtu = fsl_fman_phy_maxfrm - (VLAN_ETH_HLEN + ETH_FCS_LEN);
+ const int min_mtu = 64;
+
+ priv = netdev_priv(net_dev);
+
+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */
+ if (new_mtu < min_mtu || new_mtu > max_mtu) {
+ cpu_netdev_err(net_dev, "Invalid L3 mtu %d "
+ "(must be between %d and %d).\n",
+ new_mtu, min_mtu, max_mtu);
+ return -EINVAL;
+ }
+ net_dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int dpa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv;
+ int _errno;
+
+ priv = netdev_priv(net_dev);
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ cpu_netdev_err(net_dev,
+ "eth_mac_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ if (!priv->mac_dev)
+ /* MAC-less interface, so nothing more to do here */
+ return 0;
+
+ _errno = priv->mac_dev->change_addr(priv->mac_dev, net_dev->dev_addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ cpu_netdev_err(net_dev,
+ "mac_dev->change_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ return 0;
+}
+
+static void __cold dpa_change_rx_flags(struct net_device *net_dev, int flags)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!priv->mac_dev)
+ return;
+
+ if ((flags & IFF_PROMISC) != 0) {
+ _errno = priv->mac_dev->change_promisc(priv->mac_dev);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ cpu_netdev_err(net_dev,
+ "mac_dev->change_promisc() = %d\n",
+ _errno);
+ }
+}
+
+static void dpa_set_multicast_list(struct net_device *net_dev)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!priv->mac_dev) {
+ if (netif_msg_drv(priv))
+ cpu_netdev_warn(net_dev,
+ "%s() called on MAC-less interface\n",
+ __func__);
+ return;
+ }
+
+ _errno = priv->mac_dev->set_multi(net_dev);
+ if ((_errno < 0) && netif_msg_drv(priv))
+ cpu_netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
+}
+
+#ifdef CONFIG_FSL_DPA_1588
+static int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
+ if (priv->tsu && priv->tsu->valid)
+ ret = dpa_ioctl_1588(dev, rq, cmd);
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+#endif
+
+/*
+ * When we put the buffer into the pool, we purposefully added
+ * some padding to the address so that the buffers wouldn't all
+ * be page-aligned. But the skb has been reset to a default state,
+ * so it is pointing up to DPAA_ETH_MAX_PAD - L1_CACHE_BYTES bytes
+ * before the actual data. We subtract skb->head from the fd addr,
+ * and then mask off the translated part to get the actual distance.
+ */
+static int dpa_process_one(struct dpa_percpu_priv_s *percpu_priv,
+ struct sk_buff *skb, struct dpa_bp *bp, const struct qm_fd *fd)
+{
+ dma_addr_t addr = qm_fd_addr(fd);
+ u32 addrlo = lower_32_bits(addr);
+ u32 skblo = lower_32_bits((unsigned long)skb->head);
+ u32 pad = (addrlo - skblo) & (PAGE_SIZE - 1);
+ unsigned int data_start;
+
+ (*percpu_priv->dpa_bp_count)--;
+
+ /*
+ * The skb is currently pointed at head + NET_SKB_PAD. The packet
+ * starts at skb->head + pad + fd offset.
+ */
+ data_start = pad + dpa_fd_offset(fd) - NET_SKB_PAD;
+ skb_put(skb, dpa_fd_length(fd) + data_start);
+ skb_pull(skb, data_start);
+
+ return 0;
+}
+
+static void _dpa_rx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd)
+{
+ int _errno;
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ _errno = dpa_fd_release(net_dev, fd);
+ if (unlikely(_errno < 0)) {
+ dump_stack();
+ panic("Can't release buffer to the BM during RX\n");
+ }
+}
+
+static void _dpa_tx_error(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd)
+{
+ struct sk_buff *skb;
+ struct sk_buff **skbh;
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpa_bp *bp = priv->dpa_bp;
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+}
+
+static void __hot _dpa_rx(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd)
+{
+ int _errno;
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct sk_buff **skbh;
+ dma_addr_t addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+
+	if (unlikely((fd->status & FM_FD_STAT_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto _return_dpa_fd_release;
+ }
+
+ if (unlikely(fd->format != qm_fd_contig)) {
+ percpu_priv->stats.rx_dropped++;
+ if (netif_msg_rx_status(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "Dropping a SG frame\n");
+ goto _return_dpa_fd_release;
+ }
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+
+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
+ DMA_FROM_DEVICE);
+
+ skb = *skbh;
+ prefetch(skb);
+
+ /* Fill the SKB */
+ dpa_process_one(percpu_priv, skb, dpa_bp, fd);
+
+ prefetch(skb_shinfo(skb));
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid)
+ dpa_ptp_store_rxstamp(net_dev, skb, fd);
+#endif
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ if (unlikely(skb->len > net_dev->mtu)) {
+		if ((skb->protocol != htons(ETH_P_8021Q)) ||
+				(skb->len > net_dev->mtu + VLAN_HLEN)) {
+ percpu_priv->stats.rx_dropped++;
+ goto drop_large_frame;
+ }
+ }
+
+ /* Check if the FMan Parser has already validated the L4 csum. */
+ if (fd->status & FM_FD_STAT_L4CV) {
+		/* If we're here, the csum must be valid (had it not been,
+		 * the frame would have arrived on the Error FQ and taken
+		 * the _dpa_rx_error() path instead). */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ percpu_priv->stats.rx_dropped++;
+ else {
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
+ }
+
+ net_dev->last_rx = jiffies;
+
+ return;
+
+drop_large_frame:
+ (*percpu_priv->dpa_bp_count)++;
+ skb_recycle(skb);
+_return_dpa_fd_release:
+ _errno = dpa_fd_release(net_dev, fd);
+ if (unlikely(_errno < 0)) {
+ dump_stack();
+ panic("Can't release buffer to the BM during RX\n");
+ }
+}
+
+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i;
+
+ if (priv->shared)
+ return;
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ napi_disable(&percpu_priv->napi);
+ }
+}
+
+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int i;
+
+ if (priv->shared)
+ return;
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ napi_enable(&percpu_priv->napi);
+ }
+}
+
+static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpa_percpu_priv_s *percpu_priv;
+ int cleaned = qman_poll_dqrr(budget);
+ int count;
+
+ percpu_priv = container_of(napi, struct dpa_percpu_priv_s, napi);
+
+ count = *percpu_priv->dpa_bp_count;
+
+ if (count < DEFAULT_COUNT / 4) {
+ int i;
+
+ for (i = count; i < DEFAULT_COUNT; i += 8)
+ dpa_bp_add_8(percpu_priv->dpa_bp);
+ }
+
+ if (cleaned < budget) {
+ int tmp;
+ napi_complete(napi);
+ tmp = qman_irqsource_add(QM_PIRQ_DQRI);
+ BUG_ON(tmp);
+ }
+
+ return cleaned;
+}
+
+static void __hot _dpa_tx(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd)
+{
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpa_bp *bp = priv->dpa_bp;
+
+ /* This might not perfectly reflect the reality, if the core dequeueing
+ * the Tx confirmation is different from the one that did the enqueue,
+ * but at least it'll show up in the total count. */
+ percpu_priv->tx_confirm++;
+
+	if (unlikely((fd->status & FM_FD_STAT_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid)
+ dpa_ptp_store_txstamp(net_dev, skb, fd);
+#endif
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+}
+
+static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ if (DPA_BP_SIZE(size) <= priv->dpa_bp[i].size)
+ return dpa_bpid2pool(priv->dpa_bp[i].bpid);
+ return ERR_PTR(-ENODEV);
+}
+
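+/*
+ * Translate a buffer's bus address back into the pool's kernel mapping;
+ * this only holds for pools mapped as a single contiguous region.
+ * Illustrative example (made-up addresses): with dpa_bp->paddr =
+ * 0x7f000000 and dpa_bp->vaddr = 0xc0000000, an FD address of
+ * 0x7f000440 resolves to 0xc0000440.
+ */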
+static inline void * __must_check __attribute__((nonnull))
+dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
+{
+ return dpa_bp->vaddr + (addr - dpa_bp->paddr);
+}
+
+/**
+ * Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static inline int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
+{
+ t_FmPrsResult *parse_result;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h = NULL;
+ int l4_proto;
+ int ethertype = ntohs(skb->protocol);
+ int retval = 0;
+
+ if (!priv->mac_dev || skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway. */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser. */
+ parse_result = (t_FmPrsResult *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
+ iph = ip_hdr(skb);
+ BUG_ON(iph == NULL);
+		l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
+ ipv6h = ipv6_hdr(skb);
+ BUG_ON(ipv6h == NULL);
+		l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_alert(priv->net_dev, "Can't compute HW csum "
+ "for L3 proto 0x%x\n", ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+		/* This could just as well be a BUG() */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_alert(priv->net_dev, "Can't compute HW csum "
+ "for L4 proto 0x%x\n", l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = skb_network_offset(skb);
+ parse_result->l4_off = skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+return_error:
+ return retval;
+}
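+
+/*
+ * Example of the effect of dpa_enable_tx_csum() on a TCP/IPv4 frame
+ * with no VLAN tag and no IP options (offsets illustrative):
+ *	parse_result->l3r       = FM_L3_PARSE_RESULT_IPV4
+ *	parse_result->l4r       = FM_L4_PARSE_RESULT_TCP
+ *	parse_result->ip_off[0] = 14   (Ethernet header)
+ *	parse_result->l4_off    = 34   (14 + 20-byte IPv4 header)
+ * while fd->cmd gains FM_FD_CMD_RPD | FM_FD_CMD_DTC, telling the FMan
+ * to fill in the L3 (and L4) checksums on the way out.
+ */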
+
+static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu, int queue,
+ struct qm_fd *fd)
+{
+ int err;
+
+ prefetchw(&percpu->start_tx);
+ err = qman_enqueue(priv->egress_fqs[queue], fd, 0);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_err(priv->net_dev, "qman_enqueue() = %d\n",
+ err);
+ percpu->stats.tx_errors++;
+ percpu->stats.tx_fifo_errors++;
+ return err;
+ }
+
+ percpu->stats.tx_packets++;
+ percpu->stats.tx_bytes += dpa_fd_length(fd);
+
+ return NETDEV_TX_OK;
+}
+
+static int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ int queue_mapping;
+ int err;
+ void *dpa_bp_vaddr;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ queue_mapping = skb_get_queue_mapping(skb);
+
+ dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
+ if (unlikely(IS_ERR(dpa_bp))) {
+ err = PTR_ERR(dpa_bp);
+ goto bpools_too_small_error;
+ }
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ percpu_priv->stats.tx_errors++;
+ if (err == 0)
+ err = -ENOMEM;
+ goto buf_acquire_failed;
+ }
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = skb_headlen(skb);
+ fd.cmd = FM_FD_CMD_FCO;
+ fd.addr_hi = bmb.hi;
+ fd.addr_lo = bmb.lo;
+ fd.offset = DPA_BP_HEAD;
+
+ dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
+
+ /* Copy the packet payload */
+ skb_copy_from_linear_data(skb, dpa_bp_vaddr + dpa_fd_offset(&fd),
+ dpa_fd_length(&fd));
+
+ /* Enable L3/L4 hardware checksum computation, if applicable */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ dpa_bp_vaddr + DPA_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev, "Tx HW csum error: %d\n", err);
+ percpu_priv->stats.tx_errors++;
+ goto l3_l4_csum_failed;
+ }
+
+ err = dpa_xmit(priv, percpu_priv, queue_mapping, &fd);
+
+l3_l4_csum_failed:
+bpools_too_small_error:
+buf_acquire_failed:
+	/*
+	 * The skb was consumed (copied and freed) either way; ndo_start_xmit()
+	 * may only return NETDEV_TX_OK or NETDEV_TX_BUSY, never a negative
+	 * errno, so report success to the stack.
+	 */
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ unsigned int headroom;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ struct dpa_bp *dpa_bp;
+ int queue_mapping;
+ int err;
+ unsigned int pad;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ headroom = skb_headroom(skb);
+ queue_mapping = skb_get_queue_mapping(skb);
+
+ if (headroom < DPA_BP_HEAD) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, DPA_BP_HEAD);
+ if (!skb_new) {
+ percpu_priv->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ headroom = skb_headroom(skb);
+ }
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+
+ if (!skb)
+ return NETDEV_TX_OK;
+
+ /*
+ * We are guaranteed that we have at least DPA_BP_HEAD of headroom.
+ * Buffers we allocated are padded to improve cache usage. In order
+ * to increase buffer re-use, we aim to keep any such buffers the
+ * same. This means the address passed to the FM should be DPA_BP_HEAD
+ * before the data, and we might as well do the same for buffers
+ * from elsewhere in the kernel.
+ */
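+	/*
+	 * Illustrative layout (assuming, for the sake of the example, that
+	 * DPA_BP_HEAD were 256 bytes): for an skb with 320 bytes of
+	 * headroom, skbh lands at skb->data - 256 and pad is 64; since the
+	 * FD carries offset = DPA_BP_HEAD, the FMan reads the frame from
+	 * skbh + 256, i.e. precisely at skb->data.
+	 */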
+ skbh = (struct sk_buff **)(skb->data - DPA_BP_HEAD);
+ pad = headroom - DPA_BP_HEAD;
+
+ *skbh = skb;
+
+ dpa_bp = priv->dpa_bp;
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb. */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ ((char *)skbh) + DPA_PRIV_DATA_SIZE);
+
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev, "HW csum error: %d\n", err);
+ percpu_priv->stats.tx_errors++;
+ goto l3_l4_csum_failed;
+ }
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid)
+ fd.cmd |= FM_FD_CMD_UPD;
+#endif
+
+ fd.length20 = skb->len;
+ fd.offset = DPA_BP_HEAD; /* This is now guaranteed */
+
+ if (likely(skb_is_recycleable(skb, dpa_bp->size + pad)
+ && (*percpu_priv->dpa_bp_count + 1 <= dpa_bp->count))) {
+ fd.cmd |= FM_FD_CMD_FCO;
+ fd.bpid = dpa_bp->bpid;
+ skb_recycle(skb);
+ skb = NULL;
+ (*percpu_priv->dpa_bp_count)++;
+ percpu_priv->tx_returned++;
+ }
+
+ addr = dma_map_single(dpa_bp->dev, skbh, dpa_bp->size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev, "dma_map_single() failed\n");
+ goto dma_map_failed;
+ }
+
+ fd.addr_hi = upper_32_bits(addr);
+ fd.addr_lo = lower_32_bits(addr);
+
+ if (unlikely(dpa_xmit(priv, percpu_priv, queue_mapping, &fd) < 0))
+ goto xmit_failed;
+
+ net_dev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+
+xmit_failed:
+	dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_TO_DEVICE);
+
+dma_map_failed:
+ if (fd.cmd & FM_FD_CMD_FCO)
+ (*percpu_priv->dpa_bp_count)--;
+
+l3_l4_csum_failed:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static enum qman_cb_dqrr_result
+ingress_rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (dpaa_eth_napi_schedule(percpu_priv)) {
+ percpu_priv->in_interrupt++;
+ return qman_cb_dqrr_stop;
+ }
+
+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int err;
+ const struct qm_fd *fd = &dq->fd;
+ struct dpa_bp *dpa_bp;
+ size_t size;
+ struct sk_buff *skb;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+	if (unlikely((fd->status & FM_FD_STAT_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto out;
+ }
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ if (fd->format == qm_fd_sg) {
+ percpu_priv->stats.rx_dropped++;
+ if (netif_msg_rx_status(priv) && net_ratelimit())
+ cpu_netdev_warn(net_dev,
+ "%s:%hu:%s(): Dropping a SG frame\n",
+ __file__, __LINE__, __func__);
+ goto out;
+ }
+
+ size = dpa_fd_length(fd);
+
+ skb = __netdev_alloc_skb(net_dev, DPA_BP_HEAD + size, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ cpu_netdev_err(net_dev, "Could not alloc skb\n");
+
+ percpu_priv->stats.rx_dropped++;
+
+ goto out;
+ }
+
+ skb_reserve(skb, DPA_BP_HEAD);
+
+ /* Fill the SKB */
+ memcpy(skb_put(skb, dpa_fd_length(fd)),
+ dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
+ dpa_fd_offset(fd), dpa_fd_length(fd));
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ if (unlikely(skb->len > net_dev->mtu)) {
+		if ((skb->protocol != htons(ETH_P_8021Q)) ||
+			(skb->len > net_dev->mtu + VLAN_HLEN)) {
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+ }
+
+ if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
+ percpu_priv->stats.rx_dropped++;
+ else {
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
+ }
+
+ net_dev->last_rx = jiffies;
+
+out:
+ err = dpa_fd_release(net_dev, fd);
+ if (unlikely(err < 0)) {
+ dump_stack();
+ panic("Can't release buffer to the BM during RX\n");
+ }
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+ingress_rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv))) {
+ percpu_priv->in_interrupt++;
+ return qman_cb_dqrr_stop;
+ }
+
+ prefetchw(&percpu_priv->ingress_calls);
+
+ _dpa_rx(net_dev, priv, percpu_priv, &dq->fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+ingress_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (dpaa_eth_napi_schedule(percpu_priv)) {
+ percpu_priv->in_interrupt++;
+ return qman_cb_dqrr_stop;
+ }
+
+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+ingress_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (dpaa_eth_napi_schedule(percpu_priv)) {
+ percpu_priv->in_interrupt++;
+ return qman_cb_dqrr_stop;
+ }
+
+ _dpa_tx(net_dev, priv, percpu_priv, &dq->fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ int err;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
+
+ net_dev = dpa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ err = dpa_fd_release(net_dev, &msg->ern.fd);
+ if (unlikely(err < 0)) {
+ dump_stack();
+ panic("Can't release buffer to the BM during a TX\n");
+ }
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+}
+
+static void egress_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ struct sk_buff *skb;
+ struct sk_buff **skbh;
+ struct dpa_percpu_priv_s *percpu_priv;
+ dma_addr_t addr = qm_fd_addr(&msg->ern.fd);
+ struct dpa_bp *bp;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ bp = priv->dpa_bp;
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+
+ /*
+ * If we intended this buffer to go into the pool
+ * when the FM was done, we need to put it in
+ * manually.
+ */
+ if (msg->ern.fd.cmd & FM_FD_CMD_FCO) {
+ struct bm_buffer bmb;
+
+ bm_buffer_set64(&bmb, addr);
+ while (bman_release(bp->pool, &bmb, 1, 0))
+ cpu_relax();
+
+ return;
+ }
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(skb);
+}
+
+static const struct qman_fq rx_shared_fq __devinitconst = {
+ .cb = {shared_rx_dqrr, NULL, NULL, NULL}
+};
+static const struct qman_fq rx_private_defq __devinitconst = {
+ .cb = {ingress_rx_default_dqrr, NULL, NULL, NULL}
+};
+static const struct qman_fq rx_private_errq __devinitconst = {
+ .cb = {ingress_rx_error_dqrr, NULL, NULL, NULL}
+};
+static const struct qman_fq tx_private_defq __devinitconst = {
+ .cb = {ingress_tx_default_dqrr, NULL, NULL, NULL}
+};
+static const struct qman_fq tx_private_errq __devinitconst = {
+ .cb = {ingress_tx_error_dqrr, NULL, NULL, NULL}
+};
+static const struct qman_fq dummyq __devinitconst = {
+ .cb = {NULL, NULL, NULL, NULL}
+};
+static const struct qman_fq private_egress_fq __devinitconst = {
+ .cb = {NULL, egress_ern, NULL, NULL}
+};
+static const struct qman_fq shared_egress_fq __devinitconst = {
+ .cb = {NULL, shared_ern, NULL, NULL}
+};
+
+#ifdef CONFIG_DPAA_ETH_UNIT_TESTS
+static bool __devinitdata tx_unit_test_passed = true;
+
+static void __devinit tx_unit_test_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd;
+ dma_addr_t addr;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ tx_unit_test_passed = false;
+
+ fd = &msg->ern.fd;
+
+ addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (!skb || !is_kernel_addr((unsigned long)skb))
+ panic("Corrupt skb in ERN!\n");
+
+ kfree_skb(skb);
+}
+
+static unsigned char *tx_unit_skb_head __devinitdata;
+static unsigned char *tx_unit_skb_end __devinitdata;
+static int __devinitdata tx_unit_tested;
+
+static enum qman_cb_dqrr_result __devinit tx_unit_test_dqrr(
+ struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd;
+ dma_addr_t addr;
+ unsigned char *startaddr;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ tx_unit_test_passed = false;
+
+ tx_unit_tested++;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ fd = &dq->fd;
+
+ addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ startaddr = (unsigned char *)skbh;
+ skb = *skbh;
+
+ if (!skb || !is_kernel_addr((unsigned long)skb))
+ panic("Invalid skb address in TX Unit Test FD\n");
+
+ /* Make sure we're dealing with the same skb */
+ if (skb->head != tx_unit_skb_head
+ || skb_end_pointer(skb) != tx_unit_skb_end)
+ goto out;
+
+ /*
+ * If we recycled, then there must be enough room between fd.addr
+ * and skb->end for a new RX buffer
+ */
+ if (fd->cmd & FM_FD_CMD_FCO) {
+ size_t bufsize = skb_end_pointer(skb) - startaddr;
+
+ if (bufsize < fsl_fman_phy_maxfrm)
+ goto out;
+ } else {
+ /*
+ * If we didn't recycle, but the buffer was big enough,
+ * increment the counter to put it back
+ */
+ if (skb_end_pointer(skb) - skb->head >= fsl_fman_phy_maxfrm)
+ (*percpu_priv->dpa_bp_count)++;
+
+ /* If we didn't recycle, the data pointer should be good */
+ if (skb->data != startaddr + dpa_fd_offset(fd))
+ goto out;
+ }
+
+ tx_unit_test_passed = true;
+out:
+ /* The skb is no longer needed, and belongs to us */
+ kfree_skb(skb);
+
+ return qman_cb_dqrr_consume;
+}
+
+static const struct qman_fq tx_unit_test_fq __devinitconst = {
+ .cb = {tx_unit_test_dqrr, tx_unit_test_ern, NULL, NULL}
+};
+
+static struct dpa_fq unit_fq __devinitdata;
+
+static bool __devinitdata tx_unit_test_ran; /* Starts as false */
+
+static int __devinit dpa_tx_unit_test(struct net_device *net_dev)
+{
+ /* Create a new FQ */
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct qman_fq *oldq;
+ int size, headroom;
+ struct dpa_percpu_priv_s *percpu_priv;
+	cpumask_t oldcpus;
+ int test_count = 0;
+ int err = 0;
+ int tests_failed = 0;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+	/* Save the affinity by value: set_cpus_allowed_ptr() below rewrites
+	 * the task's own mask, so keeping a pointer into it would turn the
+	 * restore at the end into a no-op. */
+	cpumask_copy(&oldcpus, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpus);
+ /* disable bottom halves */
+ local_bh_disable();
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ qman_irqsource_remove(QM_PIRQ_DQRI);
+ unit_fq.net_dev = net_dev;
+ unit_fq.fq_base = tx_unit_test_fq;
+
+ /* Save old queue */
+ oldq = priv->egress_fqs[smp_processor_id()];
+
+ err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &unit_fq.fq_base);
+
+ if (err < 0) {
+ pr_err("UNIT test FQ create failed: %d\n", err);
+ goto fq_create_fail;
+ }
+
+ err = qman_init_fq(&unit_fq.fq_base,
+ QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err < 0) {
+ pr_err("UNIT test FQ init failed: %d\n", err);
+ goto fq_init_fail;
+ }
+
+ pr_err("TX Unit Test using FQ %d\n", qman_fq_fqid(&unit_fq.fq_base));
+
+ /* Replace queue 0 with this queue */
+ priv->egress_fqs[smp_processor_id()] = &unit_fq.fq_base;
+
+ /* Try packet sizes from 64-bytes to just above the maximum */
+ for (size = 64; size <= 9600 + 128; size += 64) {
+ for (headroom = DPA_BP_HEAD; headroom < 0x800; headroom += 16) {
+ int ret;
+ struct sk_buff *skb;
+
+ test_count++;
+
+ skb = dev_alloc_skb(size + headroom);
+
+ if (!skb) {
+ pr_err("Failed to allocate skb\n");
+ err = -ENOMEM;
+ goto end_test;
+ }
+
+ if (skb_end_pointer(skb) - skb->head >=
+ fsl_fman_phy_maxfrm)
+ (*percpu_priv->dpa_bp_count)--;
+
+ skb_put(skb, size + headroom);
+ skb_pull(skb, headroom);
+
+ tx_unit_skb_head = skb->head;
+ tx_unit_skb_end = skb_end_pointer(skb);
+
+ skb_set_queue_mapping(skb, smp_processor_id());
+
+ /* tx */
+ ret = net_dev->netdev_ops->ndo_start_xmit(skb, net_dev);
+
+ if (ret != NETDEV_TX_OK) {
+ pr_err("Failed to TX with err %d\n", ret);
+ err = -EIO;
+ goto end_test;
+ }
+
+ /* Wait for it to arrive */
+ ret = spin_event_timeout(qman_poll_dqrr(1) != 0,
+ 100000, 1);
+
+ if (!ret)
+ pr_err("TX Packet never arrived\n");
+
+ /* Was it good? */
+			if (!tx_unit_test_passed) {
+ pr_err("Test failed:\n");
+ pr_err("size: %d pad: %d head: %p end: %p\n",
+ size, headroom, tx_unit_skb_head,
+ tx_unit_skb_end);
+ tests_failed++;
+ }
+ }
+ }
+
+end_test:
+ err = qman_retire_fq(&unit_fq.fq_base, NULL);
+ if (unlikely(err < 0))
+ pr_err("Could not retire TX Unit Test FQ (%d)\n", err);
+
+ err = qman_oos_fq(&unit_fq.fq_base);
+ if (unlikely(err < 0))
+ pr_err("Could not OOS TX Unit Test FQ (%d)\n", err);
+
+fq_init_fail:
+ qman_destroy_fq(&unit_fq.fq_base, 0);
+
+fq_create_fail:
+ priv->egress_fqs[smp_processor_id()] = oldq;
+ local_bh_enable();
+ qman_irqsource_add(QM_PIRQ_DQRI);
+ tx_unit_test_ran = true;
+	set_cpus_allowed_ptr(current, &oldcpus);
+
+ pr_err("Tested %d/%d packets. %d failed\n", test_count, tx_unit_tested,
+ tests_failed);
+
+ if (tests_failed)
+ err = -EINVAL;
+
+ return err;
+}
+#endif
+
+static int __cold dpa_start(struct net_device *net_dev)
+{
+ int err, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (!mac_dev)
+ goto no_mac;
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid) {
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(net_dev);
+ }
+#endif
+
+ dpaa_eth_napi_enable(priv);
+
+ err = mac_dev->init_phy(net_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ cpu_netdev_err(net_dev, "init_phy() = %d\n", err);
+ goto init_phy_failed;
+ }
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_enable(mac_dev->port_dev[i]);
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ cpu_netdev_err(net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+no_mac:
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+
+mac_start_failed:
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+init_phy_failed:
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+static int __cold dpa_stop(struct net_device *net_dev)
+{
+ int _errno, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ netif_tx_stop_all_queues(net_dev);
+
+ if (!mac_dev)
+ return 0;
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid) {
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(net_dev);
+ }
+#endif
+
+ _errno = mac_dev->stop(mac_dev);
+ if (unlikely(_errno < 0))
+ if (netif_msg_ifdown(priv))
+ cpu_netdev_err(net_dev, "mac_dev->stop() = %d\n",
+ _errno);
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+ if (mac_dev->phy_dev)
+ phy_disconnect(mac_dev->phy_dev);
+ mac_dev->phy_dev = NULL;
+
+ dpaa_eth_napi_disable(priv);
+
+ return _errno;
+}
+
+static void __cold dpa_timeout(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (netif_msg_timer(priv))
+ cpu_netdev_crit(net_dev, "Transmit timeout latency: %lu ms\n",
+ (jiffies - net_dev->trans_start) * 1000 / HZ);
+
+ percpu_priv->stats.tx_errors++;
+}
+
+static int __devinit dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
+{
+	return ((const struct dpa_bp *)dpa_bp0)->size -
+			((const struct dpa_bp *)dpa_bp1)->size;
+}
+
+static struct dpa_bp * __devinit __cold __must_check __attribute__((nonnull))
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
+{
+ int i, lenp, na, ns;
+ struct device *dev;
+ struct device_node *dev_node;
+ const phandle *phandle_prop;
+ const uint32_t *bpid;
+ const uint32_t *bpool_cfg;
+ struct dpa_bp *dpa_bp;
+ int has_kernel_pool = 0;
+ int has_shared_pool = 0;
+
+ dev = &_of_dev->dev;
+
+ /* The default is one, if there's no property */
+ *count = 1;
+
+ /* There are three types of buffer pool configuration:
+ * 1) No bp assignment
+ * 2) A static assignment to an empty configuration
+ * 3) A static assignment to one or more configured pools
+ *
+ * We don't support using multiple unconfigured pools.
+ */
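+	/*
+	 * Illustrative device tree fragment (not from a real board file)
+	 * for case 3, a static assignment to a configured pool:
+	 *
+	 *	ethernet@0 {
+	 *		compatible = "fsl,dpa-ethernet";
+	 *		fsl,bman-buffer-pools = <&bp7>;
+	 *	};
+	 *	bp7: buffer-pool@7 {
+	 *		compatible = "fsl,bpool";
+	 *		fsl,bpid = <7>;
+	 *		fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+	 *	};
+	 *
+	 * With two size cells and two address cells, the cfg property above
+	 * packs <count size paddr>, i.e. 2048 buffers of 1728 bytes with no
+	 * static base address (paddr 0).
+	 */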
+
+ /* Get the buffer pools to be used */
+ phandle_prop = of_get_property(dev->of_node,
+ "fsl,bman-buffer-pools", &lenp);
+
+ if (phandle_prop)
+ *count = lenp / sizeof(phandle);
+ else {
+ if (default_pool)
+ return default_pool;
+
+ has_kernel_pool = 1;
+ }
+
+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
+ if (unlikely(dpa_bp == NULL)) {
+ dpaa_eth_err(dev, "devm_kzalloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev_node = of_find_node_by_path("/");
+ if (unlikely(dev_node == NULL)) {
+ dpaa_eth_err(dev, "of_find_node_by_path(/) failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ na = of_n_addr_cells(dev_node);
+ ns = of_n_size_cells(dev_node);
+
+ for (i = 0; i < *count && phandle_prop; i++) {
+ of_node_put(dev_node);
+ dev_node = of_find_node_by_phandle(phandle_prop[i]);
+ if (unlikely(dev_node == NULL)) {
+ dpaa_eth_err(dev, "of_find_node_by_phandle() failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
+ dpaa_eth_err(dev,
+ "!of_device_is_compatible(%s, fsl,bpool)\n",
+ dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+
+ bpid = of_get_property(dev_node, "fsl,bpid", &lenp);
+ if ((bpid == NULL) || (lenp != sizeof(*bpid))) {
+ dpaa_eth_err(dev, "fsl,bpid property not found.\n");
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+ dpa_bp[i].bpid = *bpid;
+
+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
+ &lenp);
+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
+ const uint32_t *seed_pool;
+
+ dpa_bp[i].count = of_read_number(bpool_cfg, ns);
+ dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns);
+ dpa_bp[i].paddr =
+ of_read_number(bpool_cfg + 2 * ns, na);
+
+ seed_pool = of_get_property(dev_node,
+ "fsl,bpool-ethernet-seeds", &lenp);
+ dpa_bp[i].seed_pool = !!seed_pool;
+
+ has_shared_pool = 1;
+ } else {
+ has_kernel_pool = 1;
+ }
+
+ if (i > 0)
+ has_shared_pool = 1;
+ }
+
+ if (has_kernel_pool && has_shared_pool) {
+ dpaa_eth_err(dev, "Invalid buffer pool configuration "
+ "for node %s\n", dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ } else if (has_kernel_pool) {
+ dpa_bp->count = DEFAULT_COUNT;
+ dpa_bp->size = DEFAULT_BUF_SIZE;
+ dpa_bp->kernel_pool = 1;
+ }
+
+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
+
+ return dpa_bp;
+
+_return_of_node_put:
+ if (dev_node)
+ of_node_put(dev_node);
+
+ return dpa_bp;
+}
+
+static int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
+
+ if (dpa_bp->kernel_pool) {
+ priv->shared = 0;
+
+ if (netif_msg_probe(priv))
+ cpu_dev_info(net_dev->dev.parent,
+ "Using private BM buffer pools\n");
+ } else {
+ priv->shared = 1;
+ }
+
+ priv->dpa_bp = dpa_bp;
+ priv->bp_count = count;
+
+ for (i = 0; i < count; i++) {
+ int err;
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv, dpa_bp);
+ priv->dpa_bp = NULL;
+ return err;
+ }
+
+ /* For now, just point to the default pool.
+ * We can add support for more pools, later
+ */
+ if (dpa_bp->kernel_pool)
+ priv->dpa_bp = default_pool;
+ }
+
+ return 0;
+}
+
+static struct mac_device * __devinit __cold __must_check
+__attribute__((nonnull))
+dpa_mac_probe(struct platform_device *_of_dev)
+{
+ struct device *dpa_dev, *dev;
+ struct device_node *mac_node;
+ int lenp;
+ const phandle *phandle_prop;
+ struct platform_device *of_dev;
+ struct mac_device *mac_dev;
+#ifdef CONFIG_FSL_DPA_1588
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct device_node *timer_node;
+#endif
+
+ phandle_prop = of_get_property(_of_dev->dev.of_node, "fsl,fman-mac", &lenp);
+ if (phandle_prop == NULL)
+ return NULL;
+
+ BUG_ON(lenp != sizeof(phandle));
+
+ dpa_dev = &_of_dev->dev;
+
+ mac_node = of_find_node_by_phandle(*phandle_prop);
+ if (unlikely(mac_node == NULL)) {
+ dpaa_eth_err(dpa_dev, "of_find_node_by_phandle() failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (unlikely(of_dev == NULL)) {
+ dpaa_eth_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (unlikely(mac_dev == NULL)) {
+ dpaa_eth_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+#ifdef CONFIG_FSL_DPA_1588
+ phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
+	if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
+			(mac_dev->speed == SPEED_1000))) {
+ timer_node = of_find_node_by_phandle(*phandle_prop);
+ if (timer_node && (net_dev = dev_get_drvdata(dpa_dev))) {
+ priv = netdev_priv(net_dev);
+ if (!dpa_ptp_init(priv))
+ dpaa_eth_info(dev, "%s: ptp-timer enabled\n",
+ mac_node->full_name);
+ }
+ }
+#endif
+
+ return mac_dev;
+}
+
+static const char fsl_qman_frame_queues[][25] __devinitconst = {
+ [RX] = "fsl,qman-frame-queues-rx",
+ [TX] = "fsl,qman-frame-queues-tx"
+};
+
+#ifdef CONFIG_DEBUG_FS
+static int __cold dpa_debugfs_show(struct seq_file *file, void *offset)
+{
+ int i;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv, total;
+ struct dpa_bp *dpa_bp;
+ unsigned int count_total = 0;
+
+ BUG_ON(offset == NULL);
+
+ priv = netdev_priv((struct net_device *)file->private);
+
+ dpa_bp = priv->dpa_bp;
+
+ memset(&total, 0, sizeof(total));
+
+ seq_printf(file, "\tirqs\trx\ttx\trecycle\tconfirm\ttx err\trx err" \
+ "\tbp count\n");
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ total.in_interrupt += percpu_priv->in_interrupt;
+ total.ingress_calls += percpu_priv->stats.rx_packets;
+ total.stats.tx_packets += percpu_priv->stats.tx_packets;
+ total.tx_returned += percpu_priv->tx_returned;
+ total.tx_confirm += percpu_priv->tx_confirm;
+ total.stats.tx_errors += percpu_priv->stats.tx_errors;
+ total.stats.rx_errors += percpu_priv->stats.rx_errors;
+ count_total += *percpu_priv->dpa_bp_count;
+
+ seq_printf(file, "%hu/%hu\t%u\t%lu\t%lu\t%u\t%u\t%lu\t%lu" \
+ "\t%d\n",
+ get_hard_smp_processor_id(i), i,
+ percpu_priv->in_interrupt,
+ percpu_priv->stats.rx_packets,
+ percpu_priv->stats.tx_packets,
+ percpu_priv->tx_returned,
+ percpu_priv->tx_confirm,
+ percpu_priv->stats.tx_errors,
+ percpu_priv->stats.rx_errors,
+ *percpu_priv->dpa_bp_count);
+ }
+ seq_printf(file, "Total\t%u\t%u\t%lu\t%u\t%u\t%lu\t%lu\t%d\n",
+ total.in_interrupt,
+ total.ingress_calls,
+ total.stats.tx_packets,
+ total.tx_returned,
+ total.tx_confirm,
+ total.stats.tx_errors,
+ total.stats.rx_errors,
+ count_total);
+
+ return 0;
+}
+
+static int __cold dpa_debugfs_open(struct inode *inode, struct file *file)
+{
+ int _errno;
+ const struct net_device *net_dev;
+
+ _errno = single_open(file, dpa_debugfs_show, inode->i_private);
+ if (unlikely(_errno < 0)) {
+ net_dev = (struct net_device *)inode->i_private;
+
+ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
+ cpu_netdev_err(net_dev, "single_open() = %d\n",
+ _errno);
+ }
+ return _errno;
+}
+
+static const struct file_operations dpa_debugfs_fops = {
+ .open = dpa_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+static const struct net_device_ops dpa_private_ops = {
+ .ndo_open = dpa_start,
+ .ndo_start_xmit = dpa_tx,
+ .ndo_stop = dpa_stop,
+ .ndo_change_rx_flags = dpa_change_rx_flags,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats = dpa_get_stats,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = dpa_select_queue,
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_multicast_list,
+#ifdef CONFIG_FSL_DPA_1588
+ .ndo_do_ioctl = dpa_ioctl,
+#endif
+};
+
+static const struct net_device_ops dpa_shared_ops = {
+ .ndo_open = dpa_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_stop,
+ .ndo_change_rx_flags = dpa_change_rx_flags,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats = dpa_get_stats,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = dpa_set_multicast_list,
+#ifdef CONFIG_FSL_DPA_1588
+ .ndo_do_ioctl = dpa_ioctl,
+#endif
+};
+
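+/*
+ * Illustrative device tree fragment (made-up values) for the lookup
+ * below: the interface node carries a phandle to a QMan node that
+ * holds the actual channel id:
+ *
+ *	ethernet@0 {
+ *		fsl,qman-channel = <&qpool1>;
+ *	};
+ *	qpool1: qman-channel@1 {
+ *		fsl,qman-channel-id = <0x21>;
+ *	};
+ */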
+static int __devinit dpa_get_channel(struct device *dev,
+ struct device_node *dpa_node)
+{
+ struct device_node *dev_node;
+ const uint32_t *channel_id;
+ int lenp;
+
+ dev_node = of_parse_phandle(dpa_node, "fsl,qman-channel", 0);
+ if (dev_node == NULL) {
+ dpaa_eth_err(dev, "Could not find fsl,qman-channel property\n");
+ return -EFAULT;
+ }
+
+ channel_id = of_get_property(dev_node, "fsl,qman-channel-id", &lenp);
+ if ((channel_id == NULL) || (lenp < sizeof(*channel_id))) {
+ dpaa_eth_err(dev, "Could not get fsl,qman-channel-id in %s\n",
+ dev_node->full_name);
+ of_node_put(dev_node);
+ return -EINVAL;
+ }
+ of_node_put(dev_node);
+ return *channel_id;
+}
+
+struct fqid_cell {
+ uint32_t start;
+ uint32_t count;
+};
+
+static const struct fqid_cell default_fqids[][3] __devinitconst = {
+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
+};
+
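+/*
+ * Illustrative "fsl,qman-frame-queues-rx" property, parsed below as
+ * {start, count} pairs (FQIDs made up):
+ *
+ *	fsl,qman-frame-queues-rx = <0x100 1 0x101 1 0x180 8>;
+ *
+ * i.e. one error FQ (0x100), one default FQ (0x101) and eight further
+ * ingress FQs starting at 0x180.  A start of 0 requests dynamically
+ * allocated FQIDs.
+ */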
+static int __devinit
+dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list,
+ struct dpa_fq **defq, struct dpa_fq **errq,
+ struct dpa_fq **fqs, int ptype)
+{
+ struct device *dev = &_of_dev->dev;
+ struct device_node *np = dev->of_node;
+ const struct fqid_cell *fqids;
+ int i, j, lenp;
+ int num_fqids;
+ struct dpa_fq *dpa_fq;
+ int err = 0;
+
+ fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
+ if (fqids == NULL) {
+ fqids = default_fqids[ptype];
+ num_fqids = 3;
+ } else
+ num_fqids = lenp / sizeof(*fqids);
+
+ for (i = 0; i < num_fqids; i++) {
+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[i].count,
+ GFP_KERNEL);
+ if (dpa_fq == NULL) {
+ dpaa_eth_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ /* The first queue is the Error queue */
+ if (i == 0 && errq) {
+ *errq = dpa_fq;
+
+ if (fqids[i].count != 1) {
+ dpaa_eth_err(dev, "Too many error queues!\n");
+ err = -EINVAL;
+ goto invalid_error_queues;
+ }
+ }
+
+		/* The second queue is the Default queue */
+ if (i == 1 && defq) {
+ *defq = dpa_fq;
+
+ if (fqids[i].count != 1) {
+ dpaa_eth_err(dev, "Too many default queues!\n");
+ err = -EINVAL;
+ goto invalid_default_queues;
+ }
+ }
+
+ /*
+ * All subsequent queues are gathered together.
+		 * The first 8 will be used by the private Linux interface
+		 * if these are TX queues.
+ */
+ if (i == 2 || (!errq && i == 0 && fqs))
+ *fqs = dpa_fq;
+
+#warning We lost the 8-queue enforcement
+
+#define DPA_NUM_WQS 8
+ for (j = 0; j < fqids[i].count; j++) {
+ dpa_fq[j].fqid = fqids[i].start ?
+ fqids[i].start + j : 0;
+ dpa_fq[j].wq = dpa_fq[j].fqid ?
+ dpa_fq[j].fqid % DPA_NUM_WQS : DPA_NUM_WQS - 1;
+ list_add_tail(&dpa_fq[j].list, list);
+ }
+ }
+
+invalid_default_queues:
+invalid_error_queues:
+ return err;
+}
+
+static void dpa_setup_ingress(struct dpa_priv_s *priv, struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static void dpa_setup_egress(struct dpa_priv_s *priv,
+ struct list_head *head, struct dpa_fq *fq,
+ struct fm_port *port)
+{
+ struct list_head *ptr = &fq->list;
+ int i = 0;
+
+ while (true) {
+ struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
+ if (priv->shared)
+ iter->fq_base = shared_egress_fq;
+ else
+ iter->fq_base = private_egress_fq;
+
+ iter->net_dev = priv->net_dev;
+ priv->egress_fqs[i++] = &iter->fq_base;
+
+ if (port) {
+ iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ iter->channel = fm_get_tx_port_channel(port);
+ } else
+ iter->flags = QMAN_FQ_FLAG_NO_MODIFY;
+
+ if (list_is_last(ptr, head))
+ break;
+
+ ptr = ptr->next;
+ }
+}
+
+static void dpa_setup_ingress_queues(struct dpa_priv_s *priv,
+ struct list_head *head, struct dpa_fq *fq)
+{
+ struct list_head *ptr = &fq->list;
+ u32 fqid;
+ int portals[NR_CPUS];
+ int num_portals;
+ int i;
+ struct device_node *qm_node;
+ struct device_node *cpu_node;
+ const uint32_t *uint32_prop;
+ const phandle *ph;
+ int lenp;
+ int cpu;
+ bool found;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+
+ /*
+ * Make a list of the available portals.
+ * We're only interested in those portals which have an affine core
+ * and moreover that core is included in the cpumask provided by QMan
+ */
+ num_portals = 0;
+ for_each_compatible_node(qm_node, NULL, "fsl,qman-portal") {
+ /* Check if portal has an affine core */
+ ph = of_get_property(qm_node, "cpu-handle", &lenp);
+ if (!ph || (lenp != sizeof(phandle)))
+ continue;
+
+ /* Get the hardware id of the affine core */
+ cpu_node = of_find_node_by_phandle(*ph);
+ if (!cpu_node)
+ continue;
+ uint32_prop = of_get_property(cpu_node, "reg", &lenp);
+ if (!uint32_prop || (lenp != sizeof(uint32_t))) {
+ dpaa_eth_err(fq->net_dev->dev.parent,
+ "failed to get property %s for node %s",
+ "reg", cpu_node->full_name);
+ continue;
+ }
+
+ /* If it's not included in the cpumask we got from QMan,
+ * skip portal */
+ found = false;
+ for_each_cpu(cpu, affine_cpus) {
+ if (*uint32_prop == get_hard_smp_processor_id(cpu)
+ && !of_get_property(qm_node,
+ "fsl,usdpaa-portal", NULL)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ continue;
+
+ /* This portal is good, store its sw channel */
+ uint32_prop = of_get_property(qm_node,
+ "fsl,qman-channel-id", &lenp);
+ if (!uint32_prop || (lenp != sizeof(uint32_t))) {
+ dpaa_eth_err(fq->net_dev->dev.parent,
+ "Failed to get property %s for node %s",
+ "fsl,qman-channel-id", qm_node->full_name);
+ continue;
+ }
+ portals[num_portals++] = *uint32_prop;
+ }
+ if (num_portals == 0) {
+ dpaa_eth_err(fq->net_dev->dev.parent,
+ "No adequate Qman portals found");
+ return;
+ }
+
+ i = 0;
+ fqid = 0;
+ if (priv->mac_dev)
+ fqid = (priv->mac_dev->res->start & 0x1fffff) >> 6;
+
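+	/*
+	 * Round-robin example (illustrative): with two affine portals on
+	 * channels 2 and 3 and eight ingress FQs, the loop below puts FQs
+	 * 0, 2, 4, 6 on channel 2 and FQs 1, 3, 5, 7 on channel 3.
+	 */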
+ while (true) {
+ struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
+
+ if (priv->shared)
+ dpa_setup_ingress(priv, iter, &rx_shared_fq);
+ else
+ dpa_setup_ingress(priv, iter, &rx_private_defq);
+
+ if (!iter->fqid)
+ iter->fqid = fqid++;
+
+ /* Assign the queues to a channel in a round-robin fashion */
+ iter->channel = portals[i];
+ i = (i + 1) % num_portals;
+
+ if (list_is_last(ptr, head))
+ break;
+
+ ptr = ptr->next;
+ }
+}
+
+static void __devinit
+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
+ struct dpa_fq *defq, bool has_timer)
+{
+ struct fm_port_non_rx_params tx_port_param;
+
+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
+ has_timer);
+}
+
+static void __devinit
+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
+ struct dpa_fq *errq, struct dpa_fq *defq, bool has_timer)
+{
+ struct fm_port_rx_params rx_port_param;
+ int i;
+
+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
+ rx_port_param.num_pools = count;
+	for (i = 0; i < count; i++) {
+		rx_port_param.pool_param[i].id = bp[i].bpid;
+		rx_port_param.pool_param[i].size = bp[i].size;
+	}
+
+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
+ has_timer);
+}
+
+static void dpa_rx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
+ struct dpa_fq *defq, struct dpa_fq *errq,
+ struct dpa_fq *fqs)
+{
+ if (fqs)
+ dpa_setup_ingress_queues(priv, head, fqs);
+
+ /* Only real devices need default/error queues set up */
+ if (!priv->mac_dev)
+ return;
+
+ if (defq->fqid == 0 && netif_msg_probe(priv))
+ cpu_pr_info("Using dynamic RX QM frame queues\n");
+
+ if (priv->shared) {
+ dpa_setup_ingress(priv, defq, &rx_shared_fq);
+ dpa_setup_ingress(priv, errq, &rx_shared_fq);
+ } else {
+ dpa_setup_ingress(priv, defq, &rx_private_defq);
+ dpa_setup_ingress(priv, errq, &rx_private_errq);
+ }
+}
+
+static void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
+ struct dpa_fq *defq, struct dpa_fq *errq,
+ struct dpa_fq *fqs, struct fm_port *port)
+{
+ if (fqs)
+ dpa_setup_egress(priv, head, fqs, port);
+
+ /* Only real devices need default/error queues set up */
+ if (!priv->mac_dev)
+ return;
+
+ if (defq->fqid == 0 && netif_msg_probe(priv))
+ cpu_pr_info("Using dynamic TX QM frame queues\n");
+
+ /* The shared driver doesn't use tx confirmation */
+ if (priv->shared) {
+ dpa_setup_ingress(priv, defq, &dummyq);
+ dpa_setup_ingress(priv, errq, &dummyq);
+ } else {
+ dpa_setup_ingress(priv, defq, &tx_private_defq);
+ dpa_setup_ingress(priv, errq, &tx_private_errq);
+ }
+}
+
+static int dpa_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ int err;
+ const uint8_t *mac_addr;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+
+ net_dev->features |= DPA_NETIF_FEATURES;
+ net_dev->vlan_features |= DPA_NETIF_FEATURES;
+
+ if (!priv->mac_dev) {
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(dpa_node);
+ if (mac_addr == NULL) {
+ if (netif_msg_probe(priv))
+ dpaa_eth_err(dev, "No MAC address found!\n");
+ return -EINVAL;
+ }
+ } else {
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ mac_addr = priv->mac_dev->addr;
+ net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ net_dev->vlan_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ SET_ETHTOOL_OPS(net_dev, &dpa_ethtool_ops);
+ net_dev->needed_headroom = DPA_BP_HEAD;
+ net_dev->watchdog_timeo = tx_timeout * HZ / 1000;
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dpaa_eth_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ priv->debugfs_file = debugfs_create_file(net_dev->name, S_IRUGO,
+ dpa_debugfs_root, net_dev,
+ &dpa_debugfs_fops);
+ if (unlikely(priv->debugfs_file == NULL)) {
+ cpu_netdev_err(net_dev, "debugfs_create_file(%s/%s/%s) = %d\n",
+ powerpc_debugfs_root->d_iname,
+ dpa_debugfs_root->d_iname,
+ net_dev->name, err);
+
+ unregister_netdev(net_dev);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static int dpa_shared_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ net_dev->netdev_ops = &dpa_shared_ops;
+
+ return dpa_netdev_init(dpa_node, net_dev);
+}
+
+static int dpa_private_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ int i;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+
+ percpu_priv->dpa_bp = priv->dpa_bp;
+ percpu_priv->dpa_bp_count =
+ per_cpu_ptr(priv->dpa_bp->percpu_count, i);
+ netif_napi_add(net_dev, &percpu_priv->napi, dpaa_eth_poll,
+ DPA_NAPI_WEIGHT);
+ }
+
+ net_dev->netdev_ops = &dpa_private_ops;
+
+ return dpa_netdev_init(dpa_node, net_dev);
+}
+
+static int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
+ uint8_t alignment, uint32_t *base_fqid)
+{
+ dpaa_eth_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
+static int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
+{
+
+ dpaa_eth_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none");
+}
+
+static DEVICE_ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL);
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+ int i = 0;
+ char *str;
+ struct dpa_fq *fq;
+ struct dpa_fq *tmp;
+ struct dpa_fq *prev = NULL;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ char *prevstr = NULL;
+
+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
+ void *dqrr = fq->fq_base.cb.dqrr;
+ if (dqrr == ingress_rx_error_dqrr)
+ str = "error";
+ else if (i == 1 && dqrr == ingress_rx_default_dqrr)
+ str = "default";
+ else if (dqrr == ingress_rx_error_dqrr ||
+ dqrr == ingress_rx_default_dqrr)
+ str = "RX";
+ else if (dqrr == ingress_tx_default_dqrr)
+ str = "TX confirmation";
+ else if (dqrr == ingress_tx_error_dqrr)
+ str = "TX error";
+ else if (dqrr == NULL)
+ str = "TX";
+ else
+ str = "unknown";
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
+ last_fqid = fq->fqid;
+ else
+ first_fqid = last_fqid = fq->fqid;
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+
+ return bytes;
+}
+
+static DEVICE_ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL);
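+
+/*
+ * Reading the "fqids" attribute yields one line per contiguous FQID
+ * range, classified by its dequeue callback; e.g. (FQIDs made up):
+ *
+ *	error: 260
+ *	default: 261
+ *	RX: 384 - 391
+ *	TX error: 262
+ *	TX confirmation: 263
+ *	TX: 448 - 455
+ */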
+
+static void __devinit dpaa_eth_sysfs_init(struct device *dev)
+{
+ if (device_create_file(dev, &dev_attr_device_addr))
+ dev_err(dev, "Error creating dpaa_eth addr file\n");
+ if (device_create_file(dev, &dev_attr_fqids))
+ dev_err(dev, "Error creating dpaa_eth fqids file\n");
+}
+
+static const struct of_device_id dpa_match[] __devinitconst;
+static int __devinit
+dpaa_eth_probe(struct platform_device *_of_dev)
+{
+ int err, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ struct list_head rxfqlist;
+ struct list_head txfqlist;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_fq *rxdefault = NULL;
+ struct dpa_fq *txdefault = NULL;
+ struct dpa_fq *rxerror = NULL;
+ struct dpa_fq *txerror = NULL;
+ struct dpa_fq *rxextra = NULL;
+ struct dpa_fq *txfqs = NULL;
+ struct fm_port *rxport = NULL;
+ struct fm_port *txport = NULL;
+	bool has_timer = false;
+ struct mac_device *mac_dev;
+ int proxy_enet;
+ const struct of_device_id *match;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ match = of_match_device(dpa_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /*
+ * If it's not an fsl,dpa-ethernet node, we just serve as a proxy
+ * initializer driver, and don't do any linux device setup
+ */
+ proxy_enet = strcmp(match->compatible, "fsl,dpa-ethernet");
+
+ /*
+ * Allocate this early, so we can store relevant information in
+ * the private area
+ */
+ if (!proxy_enet) {
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dpaa_eth_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->msg_enable = netif_msg_init(debug, -1);
+ }
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp)) {
+ err = PTR_ERR(dpa_bp);
+ goto bp_probe_failed;
+ }
+
+ mac_dev = dpa_mac_probe(_of_dev);
+ if (IS_ERR(mac_dev)) {
+ err = PTR_ERR(mac_dev);
+ goto mac_probe_failed;
+ } else if (mac_dev) {
+ rxport = mac_dev->port_dev[RX];
+ txport = mac_dev->port_dev[TX];
+ }
+
+ INIT_LIST_HEAD(&rxfqlist);
+ INIT_LIST_HEAD(&txfqlist);
+
+ if (rxport)
+ err = dpa_fq_probe(_of_dev, &rxfqlist, &rxdefault, &rxerror,
+ &rxextra, RX);
+ else
+ err = dpa_fq_probe(_of_dev, &rxfqlist, NULL, NULL,
+ &rxextra, RX);
+
+ if (err < 0)
+ goto rx_fq_probe_failed;
+
+ if (txport)
+ err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
+ &txfqs, TX);
+ else
+ err = dpa_fq_probe(_of_dev, &txfqlist, NULL, NULL, &txfqs, TX);
+
+ if (err < 0)
+ goto tx_fq_probe_failed;
+
+ /*
+ * Now we have all of the configuration information.
+ * We support a number of configurations:
+ * 1) Private interface - An optimized linux ethernet driver with
+ * a real network connection.
+ * 2) Shared interface - A device intended for virtual connections
+ * or for a real interface that is shared between partitions
+ * 3) Proxy initializer - Just configures the MAC on behalf of
+ * another partition
+ */
+
+ /* bp init */
+ if (net_dev) {
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ priv->channel = dpa_get_channel(dev, dpa_node);
+
+ if (priv->channel < 0) {
+ err = priv->channel;
+ goto get_channel_failed;
+ }
+
+ dpa_rx_fq_init(priv, &rxfqlist, rxdefault, rxerror, rxextra);
+ dpa_tx_fq_init(priv, &txfqlist, txdefault, txerror, txfqs,
+ txport);
+
+ /* Add the FQs to the interface, and make them active */
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ list_for_each_entry_safe(dpa_fq, tmp, &rxfqlist, list) {
+ err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ list_for_each_entry_safe(dpa_fq, tmp, &txfqlist, list) {
+ err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ if (priv->tsu && priv->tsu->valid)
+			has_timer = true;
+ }
+
+ /* All real interfaces need their ports initialized */
+ if (mac_dev) {
+ struct fm_port_pcd_param rx_port_pcd_param;
+
+ dpaa_eth_init_rx_port(rxport, dpa_bp, count, rxerror,
+ rxdefault, has_timer);
+ dpaa_eth_init_tx_port(txport, txerror, txdefault, has_timer);
+
+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
+ rx_port_pcd_param.dev = dev;
+ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
+ }
+
+ /*
+ * Proxy interfaces need to be started, and the allocated
+ * memory freed
+ */
+ if (!net_dev) {
+ devm_kfree(&_of_dev->dev, dpa_bp);
+ devm_kfree(&_of_dev->dev, rxdefault);
+ devm_kfree(&_of_dev->dev, rxerror);
+ devm_kfree(&_of_dev->dev, txdefault);
+ devm_kfree(&_of_dev->dev, txerror);
+
+ if (mac_dev)
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_enable(mac_dev->port_dev[i]);
+
+ return 0;
+ }
+
+ /* Now we need to initialize either a private or shared interface */
+ priv->percpu_priv = __alloc_percpu(sizeof(*priv->percpu_priv),
+ __alignof__(*priv->percpu_priv));
+ if (priv->percpu_priv == NULL) {
+ dpaa_eth_err(dev, "__alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+
+ if (priv->shared)
+ err = dpa_shared_netdev_init(dpa_node, net_dev);
+ else
+ err = dpa_private_netdev_init(dpa_node, net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+#ifdef CONFIG_DPAA_ETH_UNIT_TESTS
+ /* The unit test is designed to test private interfaces */
+ if (!priv->shared && !tx_unit_test_ran) {
+ err = dpa_tx_unit_test(net_dev);
+
+ BUG_ON(err);
+ }
+#endif
+
+ return 0;
+
+netdev_init_failed:
+ if (net_dev)
+ free_percpu(priv->percpu_priv);
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev)
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv, priv->dpa_bp);
+bp_create_failed:
+tx_fq_probe_failed:
+rx_fq_probe_failed:
+mac_probe_failed:
+bp_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct of_device_id dpa_match[] __devinitconst = {
+ {
+ .compatible = "fsl,dpa-ethernet"
+ },
+ {
+ .compatible = "fsl,dpa-ethernet-init"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_match);
+
+static int __devexit __cold dpa_remove(struct platform_device *of_dev)
+{
+ int err;
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+
+ dev = &of_dev->dev;
+	net_dev = dev_get_drvdata(dev);
+	/* Proxy initializers don't allocate a net_device, so there is
+	 * nothing to undo for them */
+	if (net_dev == NULL)
+		return 0;
+	priv = netdev_priv(net_dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+ free_percpu(priv->percpu_priv);
+
+ dpa_bp_free(priv, priv->dpa_bp);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(priv->debugfs_file);
+#endif
+
+#ifdef CONFIG_FSL_DPA_1588
+ if (priv->tsu && priv->tsu->valid)
+ dpa_ptp_cleanup(priv);
+#endif
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static struct platform_driver dpa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_probe,
+ .remove = __devexit_p(dpa_remove)
+};
+
+static int __init __cold dpa_load(void)
+{
+ int _errno;
+
+ cpu_pr_info(KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+
+#ifdef CONFIG_DEBUG_FS
+ dpa_debugfs_root = debugfs_create_dir(KBUILD_MODNAME,
+ powerpc_debugfs_root);
+ if (unlikely(dpa_debugfs_root == NULL)) {
+ _errno = -ENOMEM;
+ cpu_pr_err(KBUILD_MODNAME ": %s:%hu:%s(): "
+ "debugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
+ __file__, __LINE__, __func__,
+ powerpc_debugfs_root->d_iname, _errno);
+ goto _return;
+ }
+#endif
+
+ _errno = platform_driver_register(&dpa_driver);
+ if (unlikely(_errno < 0)) {
+ cpu_pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ __file__, __LINE__, __func__, _errno);
+ goto _return_debugfs_remove;
+ }
+
+ goto _return;
+
+_return_debugfs_remove:
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(dpa_debugfs_root);
+#endif
+_return:
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+
+ return _errno;
+}
+module_init(dpa_load);
+
+static void __exit __cold dpa_unload(void)
+{
+ cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__);
+
+ platform_driver_unregister(&dpa_driver);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(dpa_debugfs_root);
+#endif
+
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+}
+module_exit(dpa_unload);
+
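+/*
+ * Parse the fsl_fman_phy_max_frm bootarg. For example (the value is only
+ * illustrative), booting with "fsl_fman_phy_max_frm=9600" selects the
+ * largest accepted L2 frame size; a malformed option, or a value outside
+ * the 64-9600 range, falls back to the Kconfig default.
+ */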
+static int __init fsl_fman_phy_set_max_frm(char *str)
+{
+ int ret = 0;
+
+ ret = get_option(&str, &fsl_fman_phy_maxfrm);
+ if (ret != 1) {
+ /* This will only work if CONFIG_EARLY_PRINTK is compiled in,
+ * and something like "earlyprintk=serial,uart0,115200" is
+ * specified in the bootargs */
+		printk(KERN_WARNING "No suitable %s=<int> option in bootargs; "
+ "will use the default DPA_MAX_FRM_SIZE (%d) "
+ "from Kconfig.\n",
+ FSL_FMAN_PHY_MAXFRM_BOOTARG, CONFIG_DPA_MAX_FRM_SIZE);
+
+ fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE;
+ return 1;
+ }
+
+ /* Don't allow invalid bootargs; fallback to the Kconfig value */
+ if (fsl_fman_phy_maxfrm < 64 || fsl_fman_phy_maxfrm > 9600) {
+ printk(KERN_WARNING "Invalid %s=%d in bootargs, valid range is "
+ "64-9600. Falling back to the DPA_MAX_FRM_SIZE (%d) "
+ "from Kconfig.\n",
+ FSL_FMAN_PHY_MAXFRM_BOOTARG, fsl_fman_phy_maxfrm,
+ CONFIG_DPA_MAX_FRM_SIZE);
+
+ fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE;
+ return 1;
+ }
+
+ printk(KERN_INFO "Using fsl_fman_phy_maxfrm=%d from bootargs\n",
+ fsl_fman_phy_maxfrm);
+ return 0;
+}
+early_param(FSL_FMAN_PHY_MAXFRM_BOOTARG, fsl_fman_phy_set_max_frm);
new file mode 100644
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_H
+#define __DPA_H
+
+#include <linux/ethtool.h> /* struct ethtool_ops */
+#include <linux/netdevice.h>
+#include <linux/list.h> /* struct list_head */
+#include <linux/workqueue.h> /* struct work_struct */
+#include <linux/skbuff.h>
+#include <linux/hardirq.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/dcache.h> /* struct dentry */
+#endif
+
+#include <linux/fsl_qman.h> /* struct qman_fq */
+
+#include "dpaa_eth-common.h"
+
+#include "mac.h" /* struct mac_device */
+
+/* Number of Tx and Rx queues to/from the FMan */
+#define DPAA_ETH_TX_QUEUES 8
+#define DPAA_ETH_RX_QUEUES 128
+
+struct pcd_range {
+ uint32_t base;
+ uint32_t count;
+};
+
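+/*
+ * Per-interface view of a BMan buffer pool: the pool handle and id, the
+ * count and size of the buffers backing it, and the per-CPU bookkeeping
+ * used when the kernel owns (seeds and refills) the pool.
+ */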
+struct dpa_bp {
+ struct bman_pool *pool;
+ uint8_t bpid;
+ struct device *dev;
+ size_t count;
+ size_t size;
+ bool seed_pool;
+ dma_addr_t paddr;
+ void *vaddr;
+ int kernel_pool;
+ int *percpu_count;
+ int *needs_refill;
+ atomic_t refs;
+};
+
+struct dpa_percpu_priv_s {
+ struct net_device *net_dev;
+ int *dpa_bp_count;
+ struct dpa_bp *dpa_bp;
+ struct napi_struct napi;
+ u32 start_tx;
+ u32 in_interrupt;
+ u32 ingress_calls;
+ u32 tx_returned;
+ u32 tx_confirm;
+ struct net_device_stats stats;
+};
+
+struct dpa_priv_s {
+ struct dpa_bp *dpa_bp;
+ size_t bp_count;
+ int shared;
+ struct net_device *net_dev;
+
+ uint16_t channel; /* "fsl,qman-channel-id" */
+ struct list_head dpa_fq_list;
+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
+
+ struct mac_device *mac_dev;
+
+ struct dpa_percpu_priv_s *percpu_priv;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+
+ uint32_t msg_enable; /* net_device message level */
+ struct dpa_ptp_tsu *tsu;
+};
+
+extern const struct ethtool_ops dpa_ethtool_ops;
+extern int fsl_fman_phy_maxfrm;
+
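+/*
+ * Called on the ingress path: when running in hard-IRQ context, mask the
+ * QMan dequeue interrupt and defer the work to NAPI. A sketch of the
+ * matching re-enable in the poll callback (assuming the usual NAPI
+ * pattern; the variable names are illustrative):
+ *
+ *	if (cleaned < budget) {
+ *		napi_complete(napi);
+ *		qman_irqsource_add(QM_PIRQ_DQRI);
+ *	}
+ */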
+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv)
+{
+ if (unlikely(in_irq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ int ret = qman_irqsource_remove(QM_PIRQ_DQRI);
+ if (likely(!ret)) {
+ napi_schedule(&percpu_priv->napi);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#endif /* __DPA_H */
new file mode 100644
@@ -0,0 +1,674 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "dpaa_eth-common.h"
+#include "dpaa_eth.h"
+#include "mac.h"
+
+#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
+#include "fm_mac_ext.h"
+#include "fm_rtc_ext.h"
+
+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
+
+MODULE_DESCRIPTION(MAC_DESCRIPTION);
+
+struct mac_priv_s {
+ t_Handle mac;
+};
+
+const char *mac_driver_description __initconst = MAC_DESCRIPTION;
+const size_t mac_sizeof_priv[] __devinitconst = {
+ [DTSEC] = sizeof(struct mac_priv_s),
+ [XGMAC] = sizeof(struct mac_priv_s)
+};
+
+static const e_EnetMode _100[] __devinitconst = {
+ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
+};
+
+static const e_EnetMode _1000[] __devinitconst = {
+ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
+};
+
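+/*
+ * Translate the MAC's maximum speed and PHY interface type into the FMan
+ * driver's e_EnetMode; unhandled combinations conservatively fall back to
+ * 100 Mbps MII.
+ */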
+static e_EnetMode __devinit __cold __attribute__((nonnull))
+macdev2enetinterface(const struct mac_device *mac_dev)
+{
+ switch (mac_dev->max_speed) {
+ case SPEED_100:
+ return _100[mac_dev->phy_if];
+ case SPEED_1000:
+ return _1000[mac_dev->phy_if];
+ case SPEED_10000:
+ return e_ENET_MODE_XGMII_10000;
+ default:
+ return e_ENET_MODE_MII_100;
+ }
+}
+
+static void mac_exception(t_Handle _mac_dev, e_FmMacExceptions exception)
+{
+ struct mac_device *mac_dev;
+
+ mac_dev = (struct mac_device *)_mac_dev;
+
+ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
+ /* don't flag RX FIFO after the first */
+ FM_MAC_SetException(
+ ((struct mac_priv_s *)macdev_priv(_mac_dev))->mac,
+ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+ printk(KERN_ERR "10G MAC got RX FIFO Error = %x\n", exception);
+ }
+
+ cpu_dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", __file__, __func__,
+ exception);
+}
+
+static int __devinit __cold init(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+ struct mac_priv_s *priv;
+ t_FmMacParams param;
+ uint32_t version;
+
+ priv = macdev_priv(mac_dev);
+
+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
+ mac_dev->dev, mac_dev->res->start, 0x2000);
+ param.enetMode = macdev2enetinterface(mac_dev);
+	memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
+		sizeof(mac_dev->addr)));
+ param.macId = mac_dev->cell_index;
+ param.h_Fm = (t_Handle)mac_dev->fm;
+ param.mdioIrq = NO_IRQ;
+ param.f_Exception = mac_exception;
+ param.f_Event = mac_exception;
+ param.h_App = mac_dev;
+
+	priv->mac = FM_MAC_Config(&param);
+ if (unlikely(priv->mac == NULL)) {
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Config() failed\n");
+ _errno = -EINVAL;
+ goto _return;
+ }
+
+ err = FM_MAC_ConfigMaxFrameLength(priv->mac, fsl_fman_phy_maxfrm);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
+ /* 10G always works with pad and CRC */
+ err = FM_MAC_ConfigPadAndCrc(priv->mac, true);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+
+ err = FM_MAC_ConfigHalfDuplex(priv->mac, mac_dev->half_duplex);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+	} else {
+ err = FM_MAC_ConfigResetOnInit(priv->mac, true);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+ }
+
+ err = FM_MAC_Init(priv->mac);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Init() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+
+#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
+ err = FM_MAC_SetException(priv->mac,
+ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_SetException() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+ }
+#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
+
+ /* For 10G MAC, disable Tx ECC exception */
+ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
+ err = FM_MAC_SetException(priv->mac,
+ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_SetException() = 0x%08x\n", err);
+ goto _return_fm_mac_free;
+ }
+ }
+
+ err = FM_MAC_GetVesrion(priv->mac, &version);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_GetVesrion() = 0x%08x\n",
+ err);
+ goto _return_fm_mac_free;
+ }
+ cpu_dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
+ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
+ "dTSEC" : "XGEC"), version);
+
+ goto _return;
+
+_return_fm_mac_free:
+ err = FM_MAC_Free(priv->mac);
+ if (unlikely(-GET_ERROR_TYPE(err) < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err);
+_return:
+ return _errno;
+}
+
+static int __cold start(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+
+ err = FM_MAC_Enable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
+ e_COMM_MODE_RX_AND_TX);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Enable() = 0x%08x\n", err);
+
+ if (phy_dev) {
+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
+ phy_start(phy_dev);
+ else if (phy_dev->drv->read_status)
+ phy_dev->drv->read_status(phy_dev);
+ }
+
+ return _errno;
+}
+
+static int __cold stop(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+ if (mac_dev->phy_dev &&
+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
+ phy_stop(mac_dev->phy_dev);
+
+ err = FM_MAC_Disable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
+ e_COMM_MODE_RX_AND_TX);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold change_promisc(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+	mac_dev->promisc = !mac_dev->promisc;
+	err = FM_MAC_SetPromiscuous(
+		((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
+		mac_dev->promisc);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_SetPromiscuous() = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold set_multi(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct mac_priv_s *mac_priv;
+ struct mac_address *old_addr, *tmp;
+ struct netdev_hw_addr *ha;
+ int _errno;
+ t_Error err;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ mac_priv = macdev_priv(mac_dev);
+
+ /* Clear previous address list */
+ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
+ err = FM_MAC_RemoveHashMacAddr(mac_priv->mac,
+ (t_EnetAddr *)old_addr->addr);
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0) {
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err);
+ return _errno;
+ }
+ list_del(&old_addr->list);
+ kfree(old_addr);
+ }
+
+	/* Add all the addresses from the new list */
+	netdev_for_each_mc_addr(ha, net_dev) {
+		/* Allocate the list entry first, so that a failed
+		 * allocation cannot leave the hardware hash and our
+		 * list out of sync */
+		tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+		if (!tmp) {
+			dpaa_eth_err(mac_dev->dev, "Out of memory\n");
+			return -ENOMEM;
+		}
+		err = FM_MAC_AddHashMacAddr(mac_priv->mac,
+			(t_EnetAddr *)ha->addr);
+		_errno = -GET_ERROR_TYPE(err);
+		if (_errno < 0) {
+			dpaa_eth_err(mac_dev->dev,
+				"FM_MAC_AddHashMacAddr() = 0x%08x\n", err);
+			kfree(tmp);
+			return _errno;
+		}
+		memcpy(tmp->addr, ha->addr, ETH_ALEN);
+		list_add(&tmp->list, &mac_dev->mc_addr_list);
+	}
+ return 0;
+}
+
+static int __cold change_addr(struct mac_device *mac_dev, uint8_t *addr)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_ModifyMacAddr(
+ ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
+ (t_EnetAddr *)addr);
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0)
+ dpaa_eth_err(mac_dev->dev,
+ "FM_MAC_ModifyMacAddr() = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static void adjust_link(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ int _errno;
+ t_Error err;
+
+ if (!phy_dev->link)
+ return;
+
+ err = FM_MAC_AdjustLink(
+ ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
+ phy_dev->speed, phy_dev->duplex);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_AdjustLink() = 0x%08x\n",
+ err);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int dtsec_init_phy(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (!mac_dev->phy_node)
+ phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
+ &adjust_link, 0, mac_dev->phy_if);
+ else
+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ &adjust_link, 0, mac_dev->phy_if);
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ cpu_netdev_err(net_dev, "Could not connect to PHY %s\n",
+ mac_dev->phy_node ?
+ mac_dev->phy_node->full_name :
+ mac_dev->fixed_bus_id);
+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= priv->mac_dev->if_support;
+ phy_dev->advertising = phy_dev->supported;
+
+ priv->mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int xgmac_init_phy(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct phy_device *phy_dev;
+
+ if (!mac_dev->phy_node)
+ phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id, 0,
+ mac_dev->phy_if);
+ else
+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
+ mac_dev->phy_if);
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ cpu_netdev_err(net_dev, "Could not attach to PHY %s\n",
+ mac_dev->phy_node ?
+ mac_dev->phy_node->full_name :
+ mac_dev->fixed_bus_id);
+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
+ }
+
+ phy_dev->supported &= priv->mac_dev->if_support;
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+
+ return 0;
+}
+
+static int __cold uninit(struct mac_device *mac_dev)
+{
+ int _errno, __errno;
+ t_Error err;
+ const struct mac_priv_s *priv;
+
+ priv = macdev_priv(mac_dev);
+
+ err = FM_MAC_Disable(priv->mac, e_COMM_MODE_RX_AND_TX);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err);
+
+ err = FM_MAC_Free(priv->mac);
+ __errno = -GET_ERROR_TYPE(err);
+ if (unlikely(__errno < 0)) {
+ dpaa_eth_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err);
+ if (_errno < 0)
+ _errno = __errno;
+ }
+
+ return _errno;
+}
+
+static int __cold ptp_enable(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+ const struct mac_priv_s *priv;
+
+ priv = macdev_priv(mac_dev);
+
+ err = FM_MAC_Enable1588TimeStamp(priv->mac);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev,
+			"FM_MAC_Enable1588TimeStamp() = 0x%08x\n", err);
+ return _errno;
+}
+
+static int __cold ptp_disable(struct mac_device *mac_dev)
+{
+ int _errno;
+ t_Error err;
+ const struct mac_priv_s *priv;
+
+ priv = macdev_priv(mac_dev);
+
+ err = FM_MAC_Disable1588TimeStamp(priv->mac);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev,
+			"FM_MAC_Disable1588TimeStamp() = 0x%08x\n", err);
+ return _errno;
+}
+
+static int __cold fm_rtc_enable(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_Enable(fm_get_rtc_handle(mac_dev->fm_dev), 0);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_RTC_Enable = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_disable(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_Disable(fm_get_rtc_handle(mac_dev->fm_dev));
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_RTC_Disable = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_get_cnt(struct net_device *net_dev, uint64_t *ts)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_RTC_GetCurrentTime = 0x%08x\n",
+ err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_set_cnt(struct net_device *net_dev, uint64_t ts)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ dpaa_eth_err(mac_dev->dev, "FM_RTC_SetCurrentTime = 0x%08x\n",
+ err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_get_drift(struct net_device *net_dev, uint32_t *drift)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev),
+ drift);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev,
+			"FM_RTC_GetFreqCompensation = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_set_drift(struct net_device *net_dev, uint32_t drift)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev),
+ drift);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev,
+			"FM_RTC_SetFreqCompensation = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_set_alarm(struct net_device *net_dev, uint32_t id,
+ uint64_t time)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ t_FmRtcAlarmParams alarm;
+ int _errno;
+ t_Error err;
+
+ alarm.alarmId = id;
+ alarm.alarmTime = time;
+ alarm.f_AlarmCallback = NULL;
+ err = FM_RTC_SetAlarm(fm_get_rtc_handle(mac_dev->fm_dev),
+ &alarm);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev, "FM_RTC_SetAlarm = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static int __cold fm_rtc_set_fiper(struct net_device *net_dev, uint32_t id,
+ uint64_t fiper)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+ t_FmRtcPeriodicPulseParams pp;
+ int _errno;
+ t_Error err;
+
+ pp.periodicPulseId = id;
+ pp.periodicPulsePeriod = fiper;
+ pp.f_PeriodicPulseCallback = NULL;
+ err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(mac_dev->fm_dev), &pp);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+		dpaa_eth_err(mac_dev->dev,
+			"FM_RTC_SetPeriodicPulse = 0x%08x\n", err);
+
+ return _errno;
+}
+
+static void __devinit __cold setup_dtsec(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = dtsec_init_phy;
+ mac_dev->init = init;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+ mac_dev->change_promisc = change_promisc;
+ mac_dev->change_addr = change_addr;
+ mac_dev->set_multi = set_multi;
+ mac_dev->uninit = uninit;
+ mac_dev->ptp_enable = ptp_enable;
+ mac_dev->ptp_disable = ptp_disable;
+ mac_dev->fm_rtc_enable = fm_rtc_enable;
+ mac_dev->fm_rtc_disable = fm_rtc_disable;
+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
+}
+
+static void __devinit __cold setup_xgmac(struct mac_device *mac_dev)
+{
+ mac_dev->init_phy = xgmac_init_phy;
+ mac_dev->init = init;
+ mac_dev->start = start;
+ mac_dev->stop = stop;
+ mac_dev->change_promisc = change_promisc;
+ mac_dev->change_addr = change_addr;
+ mac_dev->set_multi = set_multi;
+ mac_dev->uninit = uninit;
+}
+
+void (*const mac_setup[])(struct mac_device *mac_dev) __devinitconst = {
+ [DTSEC] = setup_dtsec,
+ [XGMAC] = setup_xgmac
+};
new file mode 100644
@@ -0,0 +1,428 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+
+#include "dpaa_eth-common.h"
+
+#include "lnxwrp_fm_ext.h"
+
+#include "fsl_pq_mdio.h"
+#include "mac.h"
+
+#define DTSEC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_MII)
+
+static const char phy_str[][11] __devinitconst = {
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii"
+};
+
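+/*
+ * Map a "phy-connection-type" string from the device tree onto the
+ * kernel's phy_interface_t values; unknown strings conservatively fall
+ * back to MII.
+ */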
+static phy_interface_t __devinit __pure __attribute__((nonnull))
+str2phy(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
+ if (strcmp(str, phy_str[i]) == 0)
+ return (phy_interface_t)i;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static const uint16_t phy2speed[] __devinitconst = {
+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
+};
+
+static struct mac_device * __devinit __cold
+alloc_macdev(struct device *dev, size_t sizeof_priv,
+	     void (*setup)(struct mac_device *mac_dev))
+{
+ struct mac_device *mac_dev;
+
+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(mac_dev == NULL))
+ mac_dev = ERR_PTR(-ENOMEM);
+ else {
+ mac_dev->dev = dev;
+ dev_set_drvdata(dev, mac_dev);
+ setup(mac_dev);
+ }
+
+ return mac_dev;
+}
+
+static int __devexit __cold free_macdev(struct mac_device *mac_dev)
+{
+ dev_set_drvdata(mac_dev->dev, NULL);
+
+ return mac_dev->uninit(mac_dev);
+}
+
+static const struct of_device_id mac_match[] __devinitconst = {
+ [DTSEC] = {
+ .compatible = "fsl,fman-1g-mac"
+ },
+ [XGMAC] = {
+ .compatible = "fsl,fman-10g-mac"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+
+static int __devinit __cold mac_probe(struct platform_device *_of_dev)
+{
+ int _errno, i, lenp;
+ struct device *dev;
+ struct device_node *mac_node, *dev_node;
+ struct mac_device *mac_dev;
+ struct platform_device *of_dev;
+ struct resource res;
+ const uint8_t *mac_addr;
+ const char *char_prop;
+ const phandle *phandle_prop;
+ const uint32_t *uint32_prop;
+ const struct of_device_id *match;
+
+ dev = &_of_dev->dev;
+ mac_node = dev->of_node;
+
+ match = of_match_device(mac_match, dev);
+ if (!match)
+ return -EINVAL;
+
+	/* Recover the index of the matched entry within mac_match */
+	i = match - mac_match;
+	BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
+
+ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
+ if (IS_ERR(mac_dev)) {
+ _errno = PTR_ERR(mac_dev);
+ dpaa_eth_err(dev, "alloc_macdev() = %d\n", _errno);
+ goto _return;
+ }
+
+ INIT_LIST_HEAD(&mac_dev->mc_addr_list);
+
+ /* Get the FM node */
+ dev_node = of_get_parent(mac_node);
+ if (unlikely(dev_node == NULL)) {
+ dpaa_eth_err(dev, "of_get_parent(%s) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (unlikely(of_dev == NULL)) {
+ dpaa_eth_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->fm_dev = fm_bind(&of_dev->dev);
+ if (unlikely(mac_dev->fm_dev == NULL)) {
+ dpaa_eth_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
+ _errno = -ENODEV;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
+ of_node_put(dev_node);
+
+ /* Get the address of the memory mapped registers */
+ _errno = of_address_to_resource(mac_node, 0, &res);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(dev, "of_address_to_resource(%s) = %d\n",
+ mac_node->full_name, _errno);
+ goto _return_dev_set_drvdata;
+ }
+
+ mac_dev->res = __devm_request_region(
+ dev,
+ fm_get_mem_region(mac_dev->fm_dev),
+ res.start, res.end + 1 - res.start, "mac");
+ if (unlikely(mac_dev->res == NULL)) {
+		dpaa_eth_err(dev, "__devm_request_region(mac) failed\n");
+ _errno = -EBUSY;
+ goto _return_dev_set_drvdata;
+ }
+
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ mac_dev->res->end + 1 - mac_dev->res->start);
+ if (unlikely(mac_dev->vaddr == NULL)) {
+ dpaa_eth_err(dev, "devm_ioremap() failed\n");
+ _errno = -EIO;
+ goto _return_dev_set_drvdata;
+ }
+
+ /*
+ * XXX: Warning, future versions of Linux will most likely not even
+ * call the driver code to allow us to override the TBIPA value,
+ * we'll need to address this when we move to newer kernel rev
+ */
+#define TBIPA_OFFSET 0x1c
+#define TBIPA_DEFAULT_ADDR 5
+ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (mac_dev->tbi_node) {
+ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
+
+ uint32_prop = of_get_property(mac_dev->tbi_node, "reg", NULL);
+ if (uint32_prop)
+ tbiaddr = *uint32_prop;
+ out_be32(mac_dev->vaddr + TBIPA_OFFSET, tbiaddr);
+ }
+
+ if (!of_device_is_available(mac_node)) {
+ devm_iounmap(dev, mac_dev->vaddr);
+ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
+ res.start, res.end + 1 - res.start);
+ fm_unbind(mac_dev->fm_dev);
+ devm_kfree(dev, mac_dev);
+ dev_set_drvdata(dev, NULL);
+ return -ENODEV;
+ }
+
+ /* Get the cell-index */
+ uint32_prop = of_get_property(mac_node, "cell-index", &lenp);
+ if (unlikely(uint32_prop == NULL)) {
+ dpaa_eth_err(dev, "of_get_property(%s, cell-index) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ BUG_ON(lenp != sizeof(uint32_t));
+ mac_dev->cell_index = *uint32_prop;
+
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(mac_node);
+ if (unlikely(mac_addr == NULL)) {
+ dpaa_eth_err(dev, "of_get_mac_address(%s) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+
+ /* Get the port handles */
+ phandle_prop = of_get_property(mac_node, "fsl,port-handles", &lenp);
+ if (unlikely(phandle_prop == NULL)) {
+ dpaa_eth_err(dev, "of_get_property(%s, port-handles) failed\n",
+ mac_node->full_name);
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+ BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port_dev));
+
+ for_each_port_device(i, mac_dev->port_dev) {
+ /* Find the port node */
+ dev_node = of_find_node_by_phandle(phandle_prop[i]);
+ if (unlikely(dev_node == NULL)) {
+ dpaa_eth_err(dev, "of_find_node_by_phandle() failed\n");
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (unlikely(of_dev == NULL)) {
+ dpaa_eth_err(dev, "of_find_device_by_node(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
+ if (unlikely(mac_dev->port_dev[i] == NULL)) {
+ dpaa_eth_err(dev, "dev_get_drvdata(%s) failed\n",
+ dev_node->full_name);
+ _errno = -EINVAL;
+ goto _return_of_node_put;
+ }
+ of_node_put(dev_node);
+ }
+
+ /* Get the PHY connection type */
+ char_prop = (const char *)of_get_property(mac_node,
+ "phy-connection-type", NULL);
+ if (unlikely(char_prop == NULL)) {
+ dpaa_eth_warning(dev,
+ "of_get_property(%s, phy-connection-type) "
+ "failed. Defaulting to MII\n",
+ mac_node->full_name);
+ mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
+ } else
+ mac_dev->phy_if = str2phy(char_prop);
+
+ mac_dev->link = false;
+ mac_dev->half_duplex = false;
+ mac_dev->speed = phy2speed[mac_dev->phy_if];
+ mac_dev->max_speed = mac_dev->speed;
+ mac_dev->if_support = DTSEC_SUPPORTED;
+ /* We don't support half-duplex in SGMII mode */
+	if (char_prop && strstr(char_prop, "sgmii"))
+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_100baseT_Half);
+
+ /* Gigabit support (no half-duplex) */
+ if (mac_dev->max_speed == 1000)
+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+ /* The 10G interface only supports one mode */
+	if (char_prop && strstr(char_prop, "xgmii"))
+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
+
+ /* Get the rest of the PHY information */
+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+ if (mac_dev->phy_node == NULL) {
+ int sz;
+ const u32 *phy_id = of_get_property(mac_node, "fixed-link",
+ &sz);
+ if (!phy_id || sz < sizeof(*phy_id)) {
+ cpu_dev_err(dev, "No PHY (or fixed link) found\n");
+ _errno = -EINVAL;
+ goto _return_dev_set_drvdata;
+ }
+
+ sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "0", phy_id[0]);
+ }
+
+ _errno = mac_dev->init(mac_dev);
+ if (unlikely(_errno < 0)) {
+ dpaa_eth_err(dev, "mac_dev->init() = %d\n", _errno);
+ goto _return_dev_set_drvdata;
+ }
+
+ cpu_dev_info(dev,
+ "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+ goto _return;
+
+_return_of_node_put:
+ of_node_put(dev_node);
+_return_dev_set_drvdata:
+ dev_set_drvdata(dev, NULL);
+_return:
+ return _errno;
+}
+
+static int __devexit __cold mac_remove(struct platform_device *of_dev)
+{
+ int i, _errno;
+ struct device *dev;
+ struct mac_device *mac_dev;
+
+ dev = &of_dev->dev;
+ mac_dev = (struct mac_device *)dev_get_drvdata(dev);
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_unbind(mac_dev->port_dev[i]);
+
+ fm_unbind(mac_dev->fm_dev);
+
+ _errno = free_macdev(mac_dev);
+
+ return _errno;
+}
+
+static struct platform_driver mac_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mac_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = mac_probe,
+ .remove = __devexit_p(mac_remove)
+};
+
+static int __init __cold mac_load(void)
+{
+ int _errno;
+
+ cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__);
+
+ cpu_pr_info(KBUILD_MODNAME ": %s (" VERSION ")\n", mac_driver_description);
+
+	_errno = platform_driver_register(&mac_driver);
+	if (unlikely(_errno < 0))
+		cpu_pr_err(KBUILD_MODNAME
+			": %s:%hu:%s(): platform_driver_register() = %d\n",
+			__file__, __LINE__, __func__, _errno);
+
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+
+ return _errno;
+}
+module_init(mac_load);
+
+static void __exit __cold mac_unload(void)
+{
+ cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__);
+
+ platform_driver_unregister(&mac_driver);
+
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+}
+module_exit(mac_unload);
new file mode 100644
@@ -0,0 +1,113 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h> /* struct device, BUS_ID_SIZE */
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/phy.h> /* phy_interface_t, struct phy_device */
+#include <linux/list.h>
+
+#include "fsl_fman.h" /* struct port_device */
+
+#ifndef CONFIG_DPA_MAX_FRM_SIZE
+#define CONFIG_DPA_MAX_FRM_SIZE 0
+#endif
+
+enum {DTSEC, XGMAC};
+
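+/*
+ * One instance per FMan MAC (dTSEC or XGMAC): device-tree derived
+ * configuration, the bound FMan and port handles, PHY state, and the
+ * MAC/RTC operations installed by setup_dtsec()/setup_xgmac().
+ */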
+struct mac_device {
+ struct device *dev;
+ void *priv;
+ uint8_t cell_index;
+ struct resource *res;
+ void *vaddr;
+ uint8_t addr[ETH_ALEN];
+ bool promisc;
+
+ struct fm *fm_dev;
+ struct fm_port *port_dev[2];
+
+ phy_interface_t phy_if;
+ u32 if_support;
+ bool link;
+ bool half_duplex;
+ uint16_t speed;
+ uint16_t max_speed;
+ struct device_node *phy_node;
+ char fixed_bus_id[MII_BUS_ID_SIZE + 3];
+ struct device_node *tbi_node;
+ struct phy_device *phy_dev;
+ void *fm;
+ /* List of multicast addresses */
+ struct list_head mc_addr_list;
+
+ int (*init_phy)(struct net_device *net_dev);
+ int (*init)(struct mac_device *mac_dev);
+ int (*start)(struct mac_device *mac_dev);
+ int (*stop)(struct mac_device *mac_dev);
+ int (*change_promisc)(struct mac_device *mac_dev);
+ int (*change_addr)(struct mac_device *mac_dev, uint8_t *addr);
+ int (*set_multi)(struct net_device *net_dev);
+ int (*uninit)(struct mac_device *mac_dev);
+ int (*ptp_enable)(struct mac_device *mac_dev);
+ int (*ptp_disable)(struct mac_device *mac_dev);
+ int (*fm_rtc_enable)(struct net_device *net_dev);
+ int (*fm_rtc_disable)(struct net_device *net_dev);
+ int (*fm_rtc_get_cnt)(struct net_device *net_dev, uint64_t *ts);
+ int (*fm_rtc_set_cnt)(struct net_device *net_dev, uint64_t ts);
+ int (*fm_rtc_get_drift)(struct net_device *net_dev, uint32_t *drift);
+ int (*fm_rtc_set_drift)(struct net_device *net_dev, uint32_t drift);
+ int (*fm_rtc_set_alarm)(struct net_device *net_dev, uint32_t id,
+ uint64_t time);
+ int (*fm_rtc_set_fiper)(struct net_device *net_dev, uint32_t id,
+ uint64_t fiper);
+};
+
+struct mac_address {
+ uint8_t addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define for_each_port_device(i, port_dev) \
+ for (i = 0; i < ARRAY_SIZE(port_dev); i++)
+
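+/*
+ * The MAC-specific private area is allocated contiguously with struct
+ * mac_device (alloc_macdev() reserves sizeof_priv extra bytes), so the
+ * private data starts at the first byte past the structure.
+ */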
+static inline void * __attribute__((nonnull))
+macdev_priv(const struct mac_device *mac_dev)
+{
+ return (void *)mac_dev + sizeof(*mac_dev);
+}
+
+extern const char *mac_driver_description;
+extern const size_t mac_sizeof_priv[];
+extern void (*const mac_setup[])(struct mac_device *mac_dev);
+
+#endif /* __MAC_H */
new file mode 100644
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Offline Parsing / Host Command port driver for FSL QorIQ FMan.
+ * Validates device-tree configuration and sets up the offline ports.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include "offline_port.h"
+#include "dpaa_eth-common.h"
+
+#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
+MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
+
+static const struct of_device_id oh_port_match_table[] __devinitconst = {
+ {
+ .compatible = "fsl,dpa-oh"
+ },
+ {
+ .compatible = "fsl,dpa-oh-shared"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, oh_port_match_table);
+
+static int oh_port_remove(struct platform_device *_of_dev);
+static int oh_port_probe(struct platform_device *_of_dev);
+
+static struct platform_driver oh_port_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = oh_port_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = oh_port_probe,
+ .remove = __devexit_p(oh_port_remove)
+};
+
+/* Allocation code for the OH port's PCD frame queues */
+static int __devinit __cold oh_alloc_pcd_fqids(struct device *dev,
+ uint32_t num,
+ uint8_t alignment,
+ uint32_t *base_fqid)
+{
+ cpu_dev_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
+static int __devinit __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
+{
+	cpu_dev_crit(dev, "callback not implemented!\n");
+ BUG();
+
+ return 0;
+}
+
+static int __devinit
+oh_port_probe(struct platform_device *_of_dev)
+{
+ struct device *dpa_oh_dev;
+ struct device_node *dpa_oh_node;
+ int lenp, _errno = 0, fq_idx;
+ const phandle *oh_port_handle;
+ struct platform_device *oh_of_dev;
+ struct device_node *oh_node;
+ struct device *oh_dev;
+ struct dpa_oh_config_s *oh_config;
+ uint32_t *oh_all_queues;
+ uint32_t queues_count;
+ uint32_t crt_fqid_base;
+ uint32_t crt_fq_count;
+ struct fm_port_non_rx_params oh_port_tx_params;
+ struct fm_port_pcd_param oh_port_pcd_params;
+ /* True if the current partition owns the OH port. */
+ bool init_oh_port;
+ const struct of_device_id *match;
+
+ dpa_oh_dev = &_of_dev->dev;
+ dpa_oh_node = dpa_oh_dev->of_node;
+ BUG_ON(dpa_oh_node == NULL);
+
+ match = of_match_device(oh_port_match_table, dpa_oh_dev);
+ if (!match)
+ return -EINVAL;
+
+ cpu_dev_dbg(dpa_oh_dev, "Probing OH port...\n");
+
+ /*
+ * Find the referenced OH node
+ */
+
+ oh_port_handle = of_get_property(dpa_oh_node,
+ "fsl,fman-oh-port", &lenp);
+ if (oh_port_handle == NULL) {
+ cpu_dev_err(dpa_oh_dev, "No OH port handle found in node %s\n",
+ dpa_oh_node->full_name);
+ return -EINVAL;
+ }
+
+ BUG_ON(lenp % sizeof(*oh_port_handle));
+ if (lenp != sizeof(*oh_port_handle)) {
+		cpu_dev_err(dpa_oh_dev, "Found %lu OH port bindings in node %s, "
+			"only 1 phandle is allowed.\n",
+			(unsigned long)(lenp / sizeof(*oh_port_handle)),
+			dpa_oh_node->full_name);
+ return -EINVAL;
+ }
+
+ /* Read configuration for the OH port */
+ oh_node = of_find_node_by_phandle(*oh_port_handle);
+ if (oh_node == NULL) {
+ cpu_dev_err(dpa_oh_dev, "Can't find OH node referenced from "
+ "node %s\n", dpa_oh_node->full_name);
+ return -EINVAL;
+ }
+ cpu_dev_info(dpa_oh_dev, "Found OH node handle compatible with %s.\n",
+ match->compatible);
+
+	oh_of_dev = of_find_device_by_node(oh_node);
+	BUG_ON(oh_of_dev == NULL);
+	oh_dev = &oh_of_dev->dev;
+	/* Hold the oh_node reference for as long as its name is used below */
+
+ /*
+ * The OH port must be initialized exactly once.
+ * The following scenarios are of interest:
+ * - the node is Linux-private (will always initialize it);
+ * - the node is shared between two Linux partitions
+ * (only one of them will initialize it);
+ * - the node is shared between a Linux and a LWE partition
+ * (Linux will initialize it) - "fsl,dpa-oh-shared"
+ */
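+	/*
+	 * An illustrative binding for a Linux-owned port (the phandle and
+	 * FQIDs are hypothetical):
+	 *
+	 *	dpa-oh@1 {
+	 *		compatible = "fsl,dpa-oh";
+	 *		fsl,fman-oh-port = <&fman0_oh1>;
+	 *		fsl,qman-frame-queues-oh = <0x46 1 0x47 1>;
+	 *	};
+	 */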
+
+ /* Check if the current partition owns the OH port
+ * and ought to initialize it. It may be the case that we leave this
+ * to another (also Linux) partition. */
+ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
+
+ /* If we aren't the "owner" of the OH node, we're done here. */
+	if (!init_oh_port) {
+		cpu_dev_dbg(dpa_oh_dev, "Not owning the shared OH port %s, "
+			"will not initialize it.\n", oh_node->full_name);
+		of_node_put(oh_node);
+		return 0;
+	}
+
+ /* Allocate OH dev private data */
+ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
+	if (oh_config == NULL) {
+		cpu_dev_err(dpa_oh_dev, "Can't allocate private data for "
+			"OH node %s referenced from node %s!\n",
+			oh_node->full_name, dpa_oh_node->full_name);
+		of_node_put(oh_node);
+		return -ENOMEM;
+	}
+
+ /*
+ * Read FQ ids/nums for the DPA OH node
+ */
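+	/*
+	 * The property holds (base, count) pairs: the error FQ first, then
+	 * the default FQ, each with a count of exactly 1 (see the checks
+	 * below).
+	 */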
+ oh_all_queues = (uint32_t *)of_get_property(dpa_oh_node,
+ "fsl,qman-frame-queues-oh", &lenp);
+ if (oh_all_queues == NULL) {
+ cpu_dev_err(dpa_oh_dev, "No frame queues have been "
+ "defined for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ /* Check that the OH error and default FQs are there */
+ BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
+ queues_count = lenp / (2 * sizeof(*oh_all_queues));
+ if (queues_count != 2) {
+ dpaa_eth_err(dpa_oh_dev, "Error and Default queues must be "
+ "defined for OH node %s referenced from node %s\n",
+ oh_node->full_name, dpa_oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ /* Read the FQIDs defined for this OH port */
+ cpu_dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
+ fq_idx = 0;
+
+ /* Error FQID - must be present */
+ crt_fqid_base = oh_all_queues[fq_idx++];
+ crt_fq_count = oh_all_queues[fq_idx++];
+ if (crt_fq_count != 1) {
+ cpu_dev_err(dpa_oh_dev, "Only 1 Error FQ allowed in OH node %s "
+ "referenced from node %s (read: %d FQIDs).\n",
+ oh_node->full_name, dpa_oh_node->full_name,
+ crt_fq_count);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ oh_config->error_fqid = crt_fqid_base;
+ cpu_dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
+ oh_config->error_fqid, oh_node->full_name);
+
+ /* Default FQID - must be present */
+ crt_fqid_base = oh_all_queues[fq_idx++];
+ crt_fq_count = oh_all_queues[fq_idx++];
+ if (crt_fq_count != 1) {
+ cpu_dev_err(dpa_oh_dev, "Only 1 Default FQ allowed "
+ "in OH node %s referenced from %s (read: %d FQIDs).\n",
+ oh_node->full_name, dpa_oh_node->full_name,
+ crt_fq_count);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+ oh_config->default_fqid = crt_fqid_base;
+ cpu_dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
+ oh_config->default_fqid, oh_node->full_name);
+
+ /* Get a handle to the fm_port so we can set
+ * its configuration params */
+ oh_config->oh_port = fm_port_bind(oh_dev);
+ if (oh_config->oh_port == NULL) {
+ cpu_dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
+ oh_node->full_name);
+ _errno = -EINVAL;
+ goto return_kfree;
+ }
+
+ /* Set Tx params */
+ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
+ oh_config->error_fqid, oh_config->default_fqid, FALSE);
+ /* Set PCD params */
+ oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
+ oh_port_pcd_params.cbf = oh_free_pcd_fqids;
+ oh_port_pcd_params.dev = dpa_oh_dev;
+ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
+
+ dev_set_drvdata(dpa_oh_dev, oh_config);
+
+	/* Enable the OH port */
+	fm_port_enable(oh_config->oh_port);
+	cpu_dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
+	of_node_put(oh_node);
+
+	return 0;
+
+return_kfree:
+	of_node_put(oh_node);
+	devm_kfree(dpa_oh_dev, oh_config);
+	return _errno;
+}
+
+static int __devexit __cold oh_port_remove(struct platform_device *_of_dev)
+{
+ int _errno = 0;
+ struct dpa_oh_config_s *oh_config;
+
+ cpu_pr_info("Removing OH port...\n");
+
+ oh_config = dev_get_drvdata(&_of_dev->dev);
+ if (oh_config == NULL) {
+ cpu_pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): No OH config in device private data!\n",
+ __file__, __LINE__, __func__);
+ _errno = -ENODEV;
+ goto return_error;
+ }
+ if (oh_config->oh_port == NULL) {
+ cpu_pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): No fm port in device private data!\n",
+ __file__, __LINE__, __func__);
+ _errno = -EINVAL;
+ goto return_error;
+ }
+
+ fm_port_disable(oh_config->oh_port);
+ devm_kfree(&_of_dev->dev, oh_config);
+ dev_set_drvdata(&_of_dev->dev, NULL);
+
+return_error:
+ return _errno;
+}
+
+static int __init __cold oh_port_load(void)
+{
+ int _errno;
+
+ cpu_pr_info(KBUILD_MODNAME ": " OH_MOD_DESCRIPTION " (" VERSION ")\n");
+
+ _errno = platform_driver_register(&oh_port_driver);
+ if (_errno < 0) {
+ cpu_pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ __file__, __LINE__, __func__, _errno);
+ }
+
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+ return _errno;
+}
+module_init(oh_port_load);
+
+static void __exit __cold oh_port_unload(void)
+{
+ cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__);
+
+ platform_driver_unregister(&oh_port_driver);
+
+ cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__);
+}
+module_exit(oh_port_unload);
new file mode 100644
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OFFLINE_PORT_H
+#define __OFFLINE_PORT_H
+
+#include "fsl_fman.h"
+
+/* OH port configuration */
+struct dpa_oh_config_s {
+ uint32_t error_fqid; /* FQID for frames received in error */
+ uint32_t default_fqid; /* default FQID for processed frames */
+ struct fm_port *oh_port; /* handle to the underlying FMan port */
+};
+
+#endif /* __OFFLINE_PORT_H */
new file mode 100644
@@ -0,0 +1,286 @@
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QorIQ 10-G MDIO Controller
+ *
+ * Author: Andy Fleming <afleming@freescale.com>
+ *
+ * Based on fsl_pq_mdio.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+
+#include "xgmac_mdio.h"
+
+/*
+ * Write value to the PHY for this device to the register at regnum,
+ * waiting until the write is done before returning. All PHY
+ * configuration is done through this controller's MDIO management
+ * registers.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int port_addr,
+ int dev_addr, int regnum, u16 value)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ u32 mdio_ctl, mdio_stat;
+
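+ /* Clause 22 frames carry no device address; this controller
+ * only handles device-addressed (clause 45) transactions */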
+ if (dev_addr == MDIO_DEVAD_NONE)
+ return -EINVAL;
+
+ /* Setup the MII Mgmt clock speed */
+ mdio_stat = MDIO_STAT_CLKDIV(100);
+ out_be32(&regs->mdio_stat, mdio_stat);
+
+ /* Wait till the bus is free */
+ while ((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ cpu_relax();
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(port_addr) | MDIO_CTL_DEV_ADDR(dev_addr);
+ out_be32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ /* Wait till the bus is free */
+ while ((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ cpu_relax();
+
+ /* Write the value to the register */
+ out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+ /* Wait till the MDIO write is complete */
+ while ((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY)
+ cpu_relax();
+
+ return 0;
+}
+
+
+/*
+ * Read from register regnum in the PHY for device dev_addr, returning
+ * the value. All PHY configuration is done through this controller's
+ * MDIO management registers.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int port_addr, int dev_addr,
+ int regnum)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ u32 mdio_ctl, mdio_stat;
+
+ /* Setup the MII Mgmt clock speed */
+ mdio_stat = MDIO_STAT_CLKDIV(100);
+ out_be32(&regs->mdio_stat, mdio_stat);
+
+ /* Wait till the bus is free */
+ while ((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ cpu_relax();
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(port_addr) | MDIO_CTL_DEV_ADDR(dev_addr);
+ out_be32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ /* Wait till the bus is free */
+ while ((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ cpu_relax();
+
+ /* Initiate the read */
+ mdio_ctl |= MDIO_CTL_READ;
+ out_be32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Wait till the MDIO read is complete */
+ while ((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY)
+ cpu_relax();
+
+ /* Return all Fs if nothing was there */
+ if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER)
+ return 0xffff;
+
+ return in_be32(&regs->mdio_data) & 0xffff;
+}
+
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ int timeout = PHY_INIT_TIMEOUT;
+ u32 mdio_stat;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Setup the MII Mgmt clock speed */
+ mdio_stat = MDIO_STAT_CLKDIV(100);
+ out_be32(&regs->mdio_stat, mdio_stat);
+
+ /* Wait till the bus is free */
+ while (((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY) && timeout--)
+ cpu_relax();
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (timeout < 0) {
+ printk(KERN_ERR "%s: The MII Bus is stuck!\n",
+ bus->name);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+
+static int xgmac_mdio_probe(struct platform_device *ofdev)
+{
+ struct tgec_mdio_controller __iomem *regs;
+ struct device_node *np = ofdev->dev.of_node;
+ struct mii_bus *new_bus;
+ const __be32 *paddr;
+ u64 addr, size;
+ int err = 0;
+
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ new_bus = mdiobus_alloc();
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "Freescale XGMAC MDIO Bus";
+ new_bus->read = &xgmac_mdio_read;
+ new_bus->write = &xgmac_mdio_write;
+ new_bus->reset = &xgmac_mdio_reset;
+
+ /* Set the PHY base address; bail out if the node has no "reg" */
+ paddr = of_get_address(np, 0, &size, NULL);
+ if (paddr == NULL) {
+ err = -EINVAL;
+ goto err_ioremap;
+ }
+ addr = of_translate_address(np, paddr);
+ regs = ioremap(addr, size);
+
+ if (NULL == regs) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ new_bus->priv = (void __force *)regs;
+
+ new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+
+ if (NULL == new_bus->irq) {
+ err = -ENOMEM;
+ goto err_irq_alloc;
+ }
+
+ new_bus->parent = &ofdev->dev;
+ dev_set_drvdata(&ofdev->dev, new_bus);
+
+ /* Bus ids are limited to MII_BUS_ID_SIZE; don't overflow it */
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s", np->name);
+
+ err = of_mdiobus_register(new_bus, np);
+
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto err_registration;
+ }
+
+ return 0;
+
+err_registration:
+ kfree(new_bus->irq);
+err_irq_alloc:
+ iounmap(regs);
+err_ioremap:
+ mdiobus_free(new_bus);
+ return err;
+}
+
+
+static int xgmac_mdio_remove(struct platform_device *ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct mii_bus *bus = dev_get_drvdata(device);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(device, NULL);
+
+ iounmap((void __iomem *)bus->priv);
+ bus->priv = NULL;
+ kfree(bus->irq);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
+ {},
+};
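+/* Export the match table so the module can be autoloaded from the DT */
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);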
+
+static struct platform_driver xgmac_mdio_driver = {
+ .driver = {
+ .name = "fsl-fman_xmdio",
+ .of_match_table = xgmac_mdio_match,
+ },
+ .probe = xgmac_mdio_probe,
+ .remove = xgmac_mdio_remove,
+};
+
+static int __init xgmac_mdio_init(void)
+{
+ return platform_driver_register(&xgmac_mdio_driver);
+}
+
+static void __exit xgmac_mdio_exit(void)
+{
+ platform_driver_unregister(&xgmac_mdio_driver);
+}
+subsys_initcall_sync(xgmac_mdio_init);
+module_exit(xgmac_mdio_exit);
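+
+/* Module metadata, matching the dual BSD/GPL license in the file header */
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");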
new file mode 100644
@@ -0,0 +1,61 @@
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Freescale FMAN XGMAC MDIO Driver -- MDIO Management Bus Implementation
+ * Driver for the MDIO bus controller on QorIQ 10G ports
+ *
+ * Author: Andy Fleming
+ */
+
+#ifndef __XGMAC_MDIO_H
+#define __XGMAC_MDIO_H
+
+struct tgec_mdio_controller {
+ u32 res0[0xc]; /* reserved */
+ u32 mdio_stat; /* MDIO configuration and status */
+ u32 mdio_ctl; /* MDIO control */
+ u32 mdio_data; /* MDIO data */
+ u32 mdio_addr; /* MDIO address */
+} __attribute__ ((packed));
+
+#define MDIO_STAT_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
+#define MDIO_STAT_BSY (1 << 0)
+#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS (1 << 10)
+#define MDIO_CTL_SCAN_EN (1 << 11)
+#define MDIO_CTL_POST_INC (1 << 14)
+#define MDIO_CTL_READ (1 << 15)
+
+#define MDIO_DATA(x) ((x) & 0xffff)
+#define MDIO_DATA_BSY (1u << 31)
+
+#endif /* __XGMAC_MDIO_H */