qualcommbe: v6.12: add PPE driver (part 2)

Add the second part of the PPE driver. This includes the EDMA and
network device support. This part does not appear to have been
officially submitted for upstream review. The series is taken from
target/linux/qualcommbe/patches-6.6, and had to be heavily modified
in order to compile on v6.12. Changes to patches are noted in the
respective patch body.

Also add the PPE and EDMA nodes in this series.

Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Link: https://github.com/openwrt/openwrt/pull/18796
Signed-off-by: Robert Marko <robimarko@gmail.com>
Alexandru Gagniuc 2025-05-13 22:03:38 -05:00 committed by Robert Marko
parent cd9f3b8d33
commit f3fc278fcb
18 changed files with 10080 additions and 0 deletions

From 93cf3297818ee61607f0a8d1d34e4fb7fcde3cdf Mon Sep 17 00:00:00 2001
From: Luo Jie <quic_luoj@quicinc.com>
Date: Tue, 26 Dec 2023 20:18:09 +0800
Subject: [PATCH] net: ethernet: qualcomm: Add PPE scheduler config
PPE scheduler config determines the priority of scheduling the
packet. The scheduler config is used for supporting the QoS
offload in PPE hardware.
Change-Id: I4811bd133074757371775a6a69a1cc3cfaa8d0d0
Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
Alex G: rebase patch on top of PPE driver submission from 20250209.
Add the ppe_queue_priority_set() function and its
dependencies. They will be used in the edma support in
subsequent changes.
ppe_queue_priority_set() used to be part of ppe_api.c, and
is hereby moved to ppe_config.c.
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
.../net/ethernet/qualcomm/ppe/ppe_config.c | 141 ++++++++++++++++++
.../net/ethernet/qualcomm/ppe/ppe_config.h | 5 +
2 files changed, 146 insertions(+)
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
@@ -864,6 +864,51 @@ static int ppe_scheduler_l0_queue_map_se
val);
}
+/* Get the first level scheduler configuration. */
+static int ppe_scheduler_l0_queue_map_get(struct ppe_device *ppe_dev,
+ int node_id, int *port,
+ struct ppe_scheduler_cfg *scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->flow_id = FIELD_GET(PPE_L0_FLOW_MAP_TBL_FLOW_ID, val);
+ scheduler_cfg->pri = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_PRI, val);
+ scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, val);
+
+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
+ PPE_L0_C_FLOW_CFG_TBL_INC;
+
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->drr_node_id = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, val);
+ scheduler_cfg->unit_is_packet = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
+
+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ *port = FIELD_GET(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, val);
+
+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->frame_mode = FIELD_GET(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, val);
+
+ return 0;
+}
+
/* Set the PPE flow level scheduler configuration. */
static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
int node_id, int port,
@@ -916,6 +961,50 @@ static int ppe_scheduler_l1_queue_map_se
return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
}
+/* Get the second level scheduler configuration. */
+static int ppe_scheduler_l1_queue_map_get(struct ppe_device *ppe_dev,
+ int node_id, int *port,
+ struct ppe_scheduler_cfg *scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->flow_id = FIELD_GET(PPE_L1_FLOW_MAP_TBL_FLOW_ID, val);
+ scheduler_cfg->pri = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_PRI, val);
+ scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, val);
+
+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
+ PPE_L1_C_FLOW_CFG_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->drr_node_id = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, val);
+ scheduler_cfg->unit_is_packet = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
+
+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ *port = FIELD_GET(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, val);
+
+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ scheduler_cfg->frame_mode = FIELD_GET(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
+
+ return 0;
+}
+
/**
* ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
* @ppe_dev: PPE device
@@ -942,6 +1031,58 @@ int ppe_queue_scheduler_set(struct ppe_d
}
/**
+ * ppe_queue_scheduler_get - get QoS scheduler of PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE node ID
+ * @flow_level: Flow level scheduler or queue level scheduler
+ * @port: PPE port ID to get scheduler config
+ * @scheduler_cfg: QoS scheduler configuration
+ *
+ * The hardware QoS function is supported by the PPE. The current scheduler
+ * configuration can be acquired based on the queue ID of the PPE port.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int *port,
+ struct ppe_scheduler_cfg *scheduler_cfg)
+{
+ if (flow_level)
+ return ppe_scheduler_l1_queue_map_get(ppe_dev, node_id,
+ port, scheduler_cfg);
+
+ return ppe_scheduler_l0_queue_map_get(ppe_dev, node_id,
+ port, scheduler_cfg);
+}
+
+
+/**
+ * ppe_queue_priority_set - set scheduler priority of PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE hardware node ID, which is either queue ID or flow ID
+ * @priority: QoS scheduler priority
+ *
+ * Configure the scheduler priority of a PPE hardware queue. The maximum
+ * node ID supported is PPE_QUEUE_ID_NUM plus PPE_FLOW_ID_NUM; queue IDs
+ * belong to level 0 and flow IDs to level 1 in the packet pipeline.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int ppe_queue_priority_set(struct ppe_device *ppe_dev,
+ int node_id, int priority)
+{
+ struct ppe_scheduler_cfg sch_cfg;
+ int ret, port, level = 0;
+
+ ret = ppe_queue_scheduler_get(ppe_dev, node_id, level, &port, &sch_cfg);
+ if (ret)
+ return ret;
+
+ sch_cfg.pri = priority;
+ return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
+}
+
+/**
* ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
* @ppe_dev: PPE device
* @queue_dst: PPE queue destination configuration
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
@@ -291,6 +291,11 @@ int ppe_hw_config(struct ppe_device *ppe
int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
int node_id, bool flow_level, int port,
struct ppe_scheduler_cfg scheduler_cfg);
+int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int *port,
+ struct ppe_scheduler_cfg *scheduler_cfg);
+int ppe_queue_priority_set(struct ppe_device *ppe_dev,
+ int queue_id, int priority);
int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
struct ppe_queue_ucast_dest queue_dst,
int queue_base,
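
The patch above moves ppe_queue_priority_set() into ppe_config.c and exports it for the EDMA code added later in this series. A minimal usage sketch follows, assuming a valid ppe_dev obtained during probe and "ppe_config.h" included; EXAMPLE_QUEUE_BASE, EXAMPLE_QUEUE_NUM and the surrounding function are hypothetical names, not part of the series:

/* Sketch only: assign per-queue scheduler priorities for a group of
 * CPU-port queues, roughly what the EDMA code later in this series needs.
 * EXAMPLE_QUEUE_BASE and EXAMPLE_QUEUE_NUM are made-up names.
 */
static int example_cpu_queue_priority_setup(struct ppe_device *ppe_dev)
{
	int qid, ret;

	for (qid = 0; qid < EXAMPLE_QUEUE_NUM; qid++) {
		/* Level-0 node IDs are queue IDs; the scheduler priority
		 * range is assumed to be 0..7 here, so higher queues reuse
		 * the top priority.
		 */
		ret = ppe_queue_priority_set(ppe_dev,
					     EXAMPLE_QUEUE_BASE + qid,
					     min(qid, 7));
		if (ret)
			return ret;
	}

	return 0;
}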

From dbcc0d01241a1353d8e11e764cf7fcd390ae3f1f Mon Sep 17 00:00:00 2001
From: Lei Wei <quic_leiwei@quicinc.com>
Date: Thu, 29 Feb 2024 20:16:14 +0800
Subject: [PATCH] net: ethernet: qualcomm: Add PPE port MAC MIB statistics
functions
Add PPE port MAC MIB statistics functions which are used by netdev
ops and ethtool. For GMAC, a polling task is scheduled to read the
MIB counters periodically to avoid 32bit register counter overflow.
Change-Id: Ic20e240061278f77d703f652e1f7d959db8fac37
Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
---
drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 465 +++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 13 +
drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 91 ++++
3 files changed, 569 insertions(+)
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
@@ -23,6 +23,122 @@
/* PPE BM port start for PPE MAC ports */
#define PPE_BM_PORT_MAC_START 7
+/* Poll interval for reading the GMAC MIBs for overflow protection. The
+ * interval must ensure that a 32-bit GMAC packet counter register
+ * cannot overflow within this time at line-rate speed with 64-byte
+ * packets.
+ */
+#define PPE_GMIB_POLL_INTERVAL_MS 120000
+
+#define PPE_MAC_MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+/* PPE MAC MIB description */
+struct ppe_mac_mib_info {
+ u32 size;
+ u32 offset;
+ const char *name;
+};
+
+/* PPE GMAC MIB statistics type */
+enum ppe_gmib_stats_type {
+ gmib_rx_broadcast,
+ gmib_rx_pause,
+ gmib_rx_multicast,
+ gmib_rx_fcserr,
+ gmib_rx_alignerr,
+ gmib_rx_runt,
+ gmib_rx_frag,
+ gmib_rx_jumbofcserr,
+ gmib_rx_jumboalignerr,
+ gmib_rx_pkt64,
+ gmib_rx_pkt65to127,
+ gmib_rx_pkt128to255,
+ gmib_rx_pkt256to511,
+ gmib_rx_pkt512to1023,
+ gmib_rx_pkt1024to1518,
+ gmib_rx_pkt1519tomax,
+ gmib_rx_toolong,
+ gmib_rx_bytes_g,
+ gmib_rx_bytes_b,
+ gmib_rx_unicast,
+ gmib_tx_broadcast,
+ gmib_tx_pause,
+ gmib_tx_multicast,
+ gmib_tx_underrun,
+ gmib_tx_pkt64,
+ gmib_tx_pkt65to127,
+ gmib_tx_pkt128to255,
+ gmib_tx_pkt256to511,
+ gmib_tx_pkt512to1023,
+ gmib_tx_pkt1024to1518,
+ gmib_tx_pkt1519tomax,
+ gmib_tx_bytes,
+ gmib_tx_collisions,
+ gmib_tx_abortcol,
+ gmib_tx_multicol,
+ gmib_tx_singlecol,
+ gmib_tx_excdeffer,
+ gmib_tx_deffer,
+ gmib_tx_latecol,
+ gmib_tx_unicast,
+};
+
+/* PPE XGMAC MIB statistics type */
+enum ppe_xgmib_stats_type {
+ xgmib_tx_bytes,
+ xgmib_tx_frames,
+ xgmib_tx_broadcast_g,
+ xgmib_tx_multicast_g,
+ xgmib_tx_pkt64,
+ xgmib_tx_pkt65to127,
+ xgmib_tx_pkt128to255,
+ xgmib_tx_pkt256to511,
+ xgmib_tx_pkt512to1023,
+ xgmib_tx_pkt1024tomax,
+ xgmib_tx_unicast,
+ xgmib_tx_multicast,
+ xgmib_tx_broadcast,
+ xgmib_tx_underflow_err,
+ xgmib_tx_bytes_g,
+ xgmib_tx_frames_g,
+ xgmib_tx_pause,
+ xgmib_tx_vlan_g,
+ xgmib_tx_lpi_usec,
+ xgmib_tx_lpi_tran,
+ xgmib_rx_frames,
+ xgmib_rx_bytes,
+ xgmib_rx_bytes_g,
+ xgmib_rx_broadcast_g,
+ xgmib_rx_multicast_g,
+ xgmib_rx_crc_err,
+ xgmib_rx_runt_err,
+ xgmib_rx_jabber_err,
+ xgmib_rx_undersize_g,
+ xgmib_rx_oversize_g,
+ xgmib_rx_pkt64,
+ xgmib_rx_pkt65to127,
+ xgmib_rx_pkt128to255,
+ xgmib_rx_pkt256to511,
+ xgmib_rx_pkt512to1023,
+ xgmib_rx_pkt1024tomax,
+ xgmib_rx_unicast_g,
+ xgmib_rx_len_err,
+ xgmib_rx_outofrange_err,
+ xgmib_rx_pause,
+ xgmib_rx_fifo_overflow,
+ xgmib_rx_vlan,
+ xgmib_rx_wdog_err,
+ xgmib_rx_lpi_usec,
+ xgmib_rx_lpi_tran,
+ xgmib_rx_drop_frames,
+ xgmib_rx_drop_bytes,
+};
+
/* PPE port clock and reset name */
static const char * const ppe_port_clk_rst_name[] = {
[PPE_PORT_CLK_RST_MAC] = "port_mac",
@@ -30,6 +146,322 @@ static const char * const ppe_port_clk_r
[PPE_PORT_CLK_RST_TX] = "port_tx",
};
+/* PPE GMAC MIB statistics description information */
+static const struct ppe_mac_mib_info gmib_info[] = {
+ PPE_MAC_MIB_DESC(4, GMAC_RXBROAD_ADDR, "rx_broadcast"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPAUSE_ADDR, "rx_pause"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXMULTI_ADDR, "rx_multicast"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXFCSERR_ADDR, "rx_fcserr"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXALIGNERR_ADDR, "rx_alignerr"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXRUNT_ADDR, "rx_runt"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXFRAG_ADDR, "rx_frag"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOFCSERR_ADDR, "rx_jumbofcserr"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOALIGNERR_ADDR, "rx_jumboalignerr"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT64_ADDR, "rx_pkt64"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT65TO127_ADDR, "rx_pkt65to127"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT128TO255_ADDR, "rx_pkt128to255"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT256TO511_ADDR, "rx_pkt256to511"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT512TO1023_ADDR, "rx_pkt512to1023"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT1024TO1518_ADDR, "rx_pkt1024to1518"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXPKT1519TOX_ADDR, "rx_pkt1519tomax"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXTOOLONG_ADDR, "rx_toolong"),
+ PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
+ PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_B_ADDR, "rx_bytes_b"),
+ PPE_MAC_MIB_DESC(4, GMAC_RXUNI_ADDR, "rx_unicast"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXBROAD_ADDR, "tx_broadcast"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPAUSE_ADDR, "tx_pause"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXMULTI_ADDR, "tx_multicast"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXUNDERRUN_ADDR, "tx_underrun"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT64_ADDR, "tx_pkt64"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT65TO127_ADDR, "tx_pkt65to127"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT128TO255_ADDR, "tx_pkt128to255"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT256TO511_ADDR, "tx_pkt256to511"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT512TO1023_ADDR, "tx_pkt512to1023"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT1024TO1518_ADDR, "tx_pkt1024to1518"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXPKT1519TOX_ADDR, "tx_pkt1519tomax"),
+ PPE_MAC_MIB_DESC(8, GMAC_TXBYTE_ADDR, "tx_bytes"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXCOLLISIONS_ADDR, "tx_collisions"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXABORTCOL_ADDR, "tx_abortcol"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXMULTICOL_ADDR, "tx_multicol"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXSINGLECOL_ADDR, "tx_singlecol"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXEXCESSIVEDEFER_ADDR, "tx_excdeffer"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXDEFER_ADDR, "tx_deffer"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXLATECOL_ADDR, "tx_latecol"),
+ PPE_MAC_MIB_DESC(4, GMAC_TXUNI_ADDR, "tx_unicast"),
+};
+
+/* PPE XGMAC MIB statistics description information */
+static const struct ppe_mac_mib_info xgmib_info[] = {
+ PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_GB_ADDR, "tx_bytes"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_GB_ADDR, "tx_frames"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_G_ADDR, "tx_broadcast_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_G_ADDR, "tx_multicast_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT64_GB_ADDR, "tx_pkt64"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT65TO127_GB_ADDR, "tx_pkt65to127"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT128TO255_GB_ADDR, "tx_pkt128to255"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT256TO511_GB_ADDR, "tx_pkt256to511"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT512TO1023_GB_ADDR, "tx_pkt512to1023"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT1024TOMAX_GB_ADDR, "tx_pkt1024tomax"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXUNI_GB_ADDR, "tx_unicast"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_GB_ADDR, "tx_multicast"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_GB_ADDR, "tx_broadcast"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXUNDERFLOW_ERR_ADDR, "tx_underflow_err"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_G_ADDR, "tx_bytes_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_G_ADDR, "tx_frames_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXPAUSE_ADDR, "tx_pause"),
+ PPE_MAC_MIB_DESC(8, XGMAC_TXVLAN_G_ADDR, "tx_vlan_g"),
+ PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_USEC_ADDR, "tx_lpi_usec"),
+ PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_TRAN_ADDR, "tx_lpi_tran"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT_GB_ADDR, "rx_frames"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_GB_ADDR, "rx_bytes"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXBROAD_G_ADDR, "rx_broadcast_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXMULTI_G_ADDR, "rx_multicast_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXCRC_ERR_ADDR, "rx_crc_err"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXRUNT_ERR_ADDR, "rx_runt_err"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXJABBER_ERR_ADDR, "rx_jabber_err"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXUNDERSIZE_G_ADDR, "rx_undersize_g"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXOVERSIZE_G_ADDR, "rx_oversize_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT64_GB_ADDR, "rx_pkt64"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT65TO127_GB_ADDR, "rx_pkt65to127"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT128TO255_GB_ADDR, "rx_pkt128to255"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT256TO511_GB_ADDR, "rx_pkt256to511"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT512TO1023_GB_ADDR, "rx_pkt512to1023"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPKT1024TOMAX_GB_ADDR, "rx_pkt1024tomax"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXUNI_G_ADDR, "rx_unicast_g"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXLEN_ERR_ADDR, "rx_len_err"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXOUTOFRANGE_ADDR, "rx_outofrange_err"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXPAUSE_ADDR, "rx_pause"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXFIFOOVERFLOW_ADDR, "rx_fifo_overflow"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXVLAN_GB_ADDR, "rx_vlan"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXWATCHDOG_ERR_ADDR, "rx_wdog_err"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_USEC_ADDR, "rx_lpi_usec"),
+ PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_TRAN_ADDR, "rx_lpi_tran"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARD_GB_ADDR, "rx_drop_frames"),
+ PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARDBYTE_GB_ADDR, "rx_drop_bytes"),
+};
+
+/* Get GMAC MIBs from registers and accumulate to PPE port GMIB stats array */
+static void ppe_port_gmib_update(struct ppe_port *ppe_port)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ const struct ppe_mac_mib_info *mib;
+ int port = ppe_port->port_id;
+ u32 reg, val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++) {
+ mib = &gmib_info[i];
+ reg = PPE_PORT_GMAC_ADDR(port) + mib->offset;
+
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
+ continue;
+ }
+
+ ppe_port->gmib_stats[i] += val;
+ if (mib->size == 8) {
+ ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
+ if (ret) {
+ dev_warn(ppe_dev->dev, "%s: %d\n",
+ __func__, ret);
+ continue;
+ }
+
+ ppe_port->gmib_stats[i] += (u64)val << 32;
+ }
+ }
+}
+
+/* Polling task to read GMIB statistics to avoid GMIB 32bit register overflow */
+static void ppe_port_gmib_stats_poll(struct work_struct *work)
+{
+ struct ppe_port *ppe_port = container_of(work, struct ppe_port,
+ gmib_read.work);
+ spin_lock(&ppe_port->gmib_stats_lock);
+ ppe_port_gmib_update(ppe_port);
+ spin_unlock(&ppe_port->gmib_stats_lock);
+
+ schedule_delayed_work(&ppe_port->gmib_read,
+ msecs_to_jiffies(PPE_GMIB_POLL_INTERVAL_MS));
+}
+
+/* Get the XGMAC MIB counter based on the specific MIB stats type */
+static u64 ppe_port_xgmib_get(struct ppe_port *ppe_port,
+ enum ppe_xgmib_stats_type xgmib_type)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ const struct ppe_mac_mib_info *mib;
+ int port = ppe_port->port_id;
+ u32 reg, val;
+ u64 data = 0;
+ int ret;
+
+ mib = &xgmib_info[xgmib_type];
+ reg = PPE_PORT_XGMAC_ADDR(port) + mib->offset;
+
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
+ goto data_return;
+ }
+
+ data = val;
+ if (mib->size == 8) {
+ ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
+ if (ret) {
+ dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
+ goto data_return;
+ }
+
+ data |= (u64)val << 32;
+ }
+
+data_return:
+ return data;
+}
+
+/**
+ * ppe_port_get_sset_count() - Get PPE port statistics string count
+ * @ppe_port: PPE port
+ * @sset: string set ID
+ *
+ * Description: Get the MAC statistics string count for the PPE port
+ * specified by @ppe_port.
+ *
+ * Return: The count of statistics strings.
+ */
+int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC)
+ return ARRAY_SIZE(gmib_info);
+ else
+ return ARRAY_SIZE(xgmib_info);
+}
+
+/**
+ * ppe_port_get_strings() - Get PPE port statistics strings
+ * @ppe_port: PPE port
+ * @stringset: string set ID
+ * @data: pointer to statistics strings
+ *
+ * Description: Get the MAC statistics strings for the PPE port
+ * specified by @ppe_port. The strings are stored in the buffer
+ * indicated by @data, which is used in the ethtool ops.
+ */
+void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
+ strscpy(data + i * ETH_GSTRING_LEN, gmib_info[i].name,
+ ETH_GSTRING_LEN);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
+ strscpy(data + i * ETH_GSTRING_LEN, xgmib_info[i].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+/**
+ * ppe_port_get_ethtool_stats() - Get PPE port ethtool statistics
+ * @ppe_port: PPE port
+ * @data: pointer to statistics data
+ *
+ * Description: Get the MAC statistics for the PPE port specified
+ * by @ppe_port. The statistics are stored in the buffer indicated
+ * by @data, which is used in the ethtool ops.
+ */
+void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data)
+{
+ int i;
+
+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
+ spin_lock(&ppe_port->gmib_stats_lock);
+
+ ppe_port_gmib_update(ppe_port);
+ for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
+ data[i] = ppe_port->gmib_stats[i];
+
+ spin_unlock(&ppe_port->gmib_stats_lock);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
+ data[i] = ppe_port_xgmib_get(ppe_port, i);
+ }
+}
+
+/**
+ * ppe_port_get_stats64() - Get PPE port statistics
+ * @ppe_port: PPE port
+ * @s: statistics pointer
+ *
+ * Description: Get the MAC statistics for the PPE port specified
+ * by @ppe_port.
+ */
+void ppe_port_get_stats64(struct ppe_port *ppe_port,
+ struct rtnl_link_stats64 *s)
+{
+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
+ u64 *src = ppe_port->gmib_stats;
+
+ spin_lock(&ppe_port->gmib_stats_lock);
+
+ ppe_port_gmib_update(ppe_port);
+
+ s->rx_packets = src[gmib_rx_unicast] +
+ src[gmib_rx_broadcast] + src[gmib_rx_multicast];
+
+ s->tx_packets = src[gmib_tx_unicast] +
+ src[gmib_tx_broadcast] + src[gmib_tx_multicast];
+
+ s->rx_bytes = src[gmib_rx_bytes_g];
+ s->tx_bytes = src[gmib_tx_bytes];
+ s->multicast = src[gmib_rx_multicast];
+
+ s->rx_crc_errors = src[gmib_rx_fcserr] + src[gmib_rx_frag];
+ s->rx_frame_errors = src[gmib_rx_alignerr];
+ s->rx_errors = s->rx_crc_errors + s->rx_frame_errors;
+ s->rx_dropped = src[gmib_rx_toolong] + s->rx_errors;
+
+ s->tx_fifo_errors = src[gmib_tx_underrun];
+ s->tx_aborted_errors = src[gmib_tx_abortcol];
+ s->tx_errors = s->tx_fifo_errors + s->tx_aborted_errors;
+ s->collisions = src[gmib_tx_collisions];
+
+ spin_unlock(&ppe_port->gmib_stats_lock);
+ } else {
+ s->multicast = ppe_port_xgmib_get(ppe_port, xgmib_rx_multicast_g);
+
+ s->rx_packets = s->multicast;
+ s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_unicast_g);
+ s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_broadcast_g);
+
+ s->tx_packets = ppe_port_xgmib_get(ppe_port, xgmib_tx_frames);
+ s->rx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_rx_bytes);
+ s->tx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_tx_bytes);
+
+ s->rx_crc_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_crc_err);
+ s->rx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_fifo_overflow);
+
+ s->rx_length_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_len_err);
+ s->rx_errors = s->rx_crc_errors +
+ s->rx_fifo_errors + s->rx_length_errors;
+ s->rx_dropped = s->rx_errors;
+
+ s->tx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_tx_underflow_err);
+ s->tx_errors = s->tx_packets -
+ ppe_port_xgmib_get(ppe_port, xgmib_tx_frames_g);
+ }
+}
+
/* PPE port and MAC reset */
static int ppe_port_mac_reset(struct ppe_port *ppe_port)
{
@@ -261,6 +693,9 @@ static void ppe_port_mac_link_up(struct
int ret, port = ppe_port->port_id;
u32 reg, val;
+ /* Start GMIB statistics polling */
+ schedule_delayed_work(&ppe_port->gmib_read, 0);
+
if (mac_type == PPE_MAC_TYPE_GMAC)
ret = ppe_port_gmac_link_up(ppe_port,
speed, duplex, tx_pause, rx_pause);
@@ -306,6 +741,9 @@ static void ppe_port_mac_link_down(struc
int ret, port = ppe_port->port_id;
u32 reg;
+ /* Stop GMIB statistics polling */
+ cancel_delayed_work_sync(&ppe_port->gmib_read);
+
/* Disable PPE port TX */
reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
ret = regmap_update_bits(ppe_dev->regmap, reg,
@@ -627,6 +1065,27 @@ static int ppe_port_mac_hw_init(struct p
return ret;
}
+/* PPE port MAC MIB work task initialization */
+static int ppe_port_mac_mib_work_init(struct ppe_port *ppe_port)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ u64 *gstats;
+
+ gstats = devm_kzalloc(ppe_dev->dev,
+ sizeof(*gstats) * ARRAY_SIZE(gmib_info),
+ GFP_KERNEL);
+ if (!gstats)
+ return -ENOMEM;
+
+ ppe_port->gmib_stats = gstats;
+
+ spin_lock_init(&ppe_port->gmib_stats_lock);
+ INIT_DELAYED_WORK(&ppe_port->gmib_read,
+ ppe_port_gmib_stats_poll);
+
+ return 0;
+}
+
/**
* ppe_port_mac_init() - Initialization of PPE ports for the PPE device
* @ppe_dev: PPE device
@@ -693,6 +1152,12 @@ int ppe_port_mac_init(struct ppe_device
goto err_port_node;
}
+ ret = ppe_port_mac_mib_work_init(&ppe_ports->port[i]);
+ if (ret) {
+ dev_err(ppe_dev->dev, "Failed to initialize MAC MIB work\n");
+ goto err_port_node;
+ }
+
i++;
}
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
@@ -8,6 +8,8 @@
#include <linux/phylink.h>
+struct rtnl_link_stats64;
+
/**
* enum ppe_port_clk_rst_type - PPE port clock and reset ID type
* @PPE_PORT_CLK_RST_MAC: The clock and reset ID for port MAC
@@ -44,6 +46,9 @@ enum ppe_mac_type {
* @port_id: Port ID
* @clks: Port clocks
* @rstcs: Port resets
+ * @gmib_read: Delayed work for the GMAC MIB statistics polling function
+ * @gmib_stats: GMAC MIB statistics array
+ * @gmib_stats_lock: Lock to protect GMAC MIB statistics
*/
struct ppe_port {
struct phylink *phylink;
@@ -56,6 +61,9 @@ struct ppe_port {
int port_id;
struct clk *clks[PPE_PORT_CLK_RST_MAX];
struct reset_control *rstcs[PPE_PORT_CLK_RST_MAX];
+ struct delayed_work gmib_read;
+ u64 *gmib_stats;
+ spinlock_t gmib_stats_lock; /* Protects GMIB stats */
};
/**
@@ -73,4 +81,9 @@ void ppe_port_mac_deinit(struct ppe_devi
int ppe_port_phylink_setup(struct ppe_port *ppe_port,
struct net_device *netdev);
void ppe_port_phylink_destroy(struct ppe_port *ppe_port);
+int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset);
+void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data);
+void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
+void ppe_port_get_stats64(struct ppe_port *ppe_port,
+ struct rtnl_link_stats64 *s);
#endif
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -618,6 +618,48 @@
#define GMAC_MIB_CTRL_MASK \
(GMAC_MIB_RD_CLR | GMAC_MIB_RST | GMAC_MIB_EN)
+/* GMAC MIB counter registers */
+#define GMAC_RXBROAD_ADDR 0x40
+#define GMAC_RXPAUSE_ADDR 0x44
+#define GMAC_RXMULTI_ADDR 0x48
+#define GMAC_RXFCSERR_ADDR 0x4C
+#define GMAC_RXALIGNERR_ADDR 0x50
+#define GMAC_RXRUNT_ADDR 0x54
+#define GMAC_RXFRAG_ADDR 0x58
+#define GMAC_RXJUMBOFCSERR_ADDR 0x5C
+#define GMAC_RXJUMBOALIGNERR_ADDR 0x60
+#define GMAC_RXPKT64_ADDR 0x64
+#define GMAC_RXPKT65TO127_ADDR 0x68
+#define GMAC_RXPKT128TO255_ADDR 0x6C
+#define GMAC_RXPKT256TO511_ADDR 0x70
+#define GMAC_RXPKT512TO1023_ADDR 0x74
+#define GMAC_RXPKT1024TO1518_ADDR 0x78
+#define GMAC_RXPKT1519TOX_ADDR 0x7C
+#define GMAC_RXTOOLONG_ADDR 0x80
+#define GMAC_RXBYTE_G_ADDR 0x84
+#define GMAC_RXBYTE_B_ADDR 0x8C
+#define GMAC_RXUNI_ADDR 0x94
+#define GMAC_TXBROAD_ADDR 0xA0
+#define GMAC_TXPAUSE_ADDR 0xA4
+#define GMAC_TXMULTI_ADDR 0xA8
+#define GMAC_TXUNDERRUN_ADDR 0xAC
+#define GMAC_TXPKT64_ADDR 0xB0
+#define GMAC_TXPKT65TO127_ADDR 0xB4
+#define GMAC_TXPKT128TO255_ADDR 0xB8
+#define GMAC_TXPKT256TO511_ADDR 0xBC
+#define GMAC_TXPKT512TO1023_ADDR 0xC0
+#define GMAC_TXPKT1024TO1518_ADDR 0xC4
+#define GMAC_TXPKT1519TOX_ADDR 0xC8
+#define GMAC_TXBYTE_ADDR 0xCC
+#define GMAC_TXCOLLISIONS_ADDR 0xD4
+#define GMAC_TXABORTCOL_ADDR 0xD8
+#define GMAC_TXMULTICOL_ADDR 0xDC
+#define GMAC_TXSINGLECOL_ADDR 0xE0
+#define GMAC_TXEXCESSIVEDEFER_ADDR 0xE4
+#define GMAC_TXDEFER_ADDR 0xE8
+#define GMAC_TXLATECOL_ADDR 0xEC
+#define GMAC_TXUNI_ADDR 0xF0
+
/* XGMAC TX configuration register */
#define XGMAC_TX_CONFIG_ADDR 0x0
#define XGMAC_SPEED_M GENMASK(31, 29)
@@ -680,4 +722,53 @@
#define XGMAC_MCF BIT(3)
#define XGMAC_CNTRST BIT(0)
+/* XGMAC MIB counter registers */
+#define XGMAC_TXBYTE_GB_ADDR 0x814
+#define XGMAC_TXPKT_GB_ADDR 0x81C
+#define XGMAC_TXBROAD_G_ADDR 0x824
+#define XGMAC_TXMULTI_G_ADDR 0x82C
+#define XGMAC_TXPKT64_GB_ADDR 0x834
+#define XGMAC_TXPKT65TO127_GB_ADDR 0x83C
+#define XGMAC_TXPKT128TO255_GB_ADDR 0x844
+#define XGMAC_TXPKT256TO511_GB_ADDR 0x84C
+#define XGMAC_TXPKT512TO1023_GB_ADDR 0x854
+#define XGMAC_TXPKT1024TOMAX_GB_ADDR 0x85C
+#define XGMAC_TXUNI_GB_ADDR 0x864
+#define XGMAC_TXMULTI_GB_ADDR 0x86C
+#define XGMAC_TXBROAD_GB_ADDR 0x874
+#define XGMAC_TXUNDERFLOW_ERR_ADDR 0x87C
+#define XGMAC_TXBYTE_G_ADDR 0x884
+#define XGMAC_TXPKT_G_ADDR 0x88C
+#define XGMAC_TXPAUSE_ADDR 0x894
+#define XGMAC_TXVLAN_G_ADDR 0x89C
+#define XGMAC_TXLPI_USEC_ADDR 0x8A4
+#define XGMAC_TXLPI_TRAN_ADDR 0x8A8
+#define XGMAC_RXPKT_GB_ADDR 0x900
+#define XGMAC_RXBYTE_GB_ADDR 0x908
+#define XGMAC_RXBYTE_G_ADDR 0x910
+#define XGMAC_RXBROAD_G_ADDR 0x918
+#define XGMAC_RXMULTI_G_ADDR 0x920
+#define XGMAC_RXCRC_ERR_ADDR 0x928
+#define XGMAC_RXRUNT_ERR_ADDR 0x930
+#define XGMAC_RXJABBER_ERR_ADDR 0x934
+#define XGMAC_RXUNDERSIZE_G_ADDR 0x938
+#define XGMAC_RXOVERSIZE_G_ADDR 0x93C
+#define XGMAC_RXPKT64_GB_ADDR 0x940
+#define XGMAC_RXPKT65TO127_GB_ADDR 0x948
+#define XGMAC_RXPKT128TO255_GB_ADDR 0x950
+#define XGMAC_RXPKT256TO511_GB_ADDR 0x958
+#define XGMAC_RXPKT512TO1023_GB_ADDR 0x960
+#define XGMAC_RXPKT1024TOMAX_GB_ADDR 0x968
+#define XGMAC_RXUNI_G_ADDR 0x970
+#define XGMAC_RXLEN_ERR_ADDR 0x978
+#define XGMAC_RXOUTOFRANGE_ADDR 0x980
+#define XGMAC_RXPAUSE_ADDR 0x988
+#define XGMAC_RXFIFOOVERFLOW_ADDR 0x990
+#define XGMAC_RXVLAN_GB_ADDR 0x998
+#define XGMAC_RXWATCHDOG_ERR_ADDR 0x9A0
+#define XGMAC_RXLPI_USEC_ADDR 0x9A4
+#define XGMAC_RXLPI_TRAN_ADDR 0x9A8
+#define XGMAC_RXDISCARD_GB_ADDR 0x9AC
+#define XGMAC_RXDISCARDBYTE_GB_ADDR 0x9B4
+
#endif
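
As a sanity check on the 120 s poll interval chosen above: assuming a 10 Gb/s port and 64-byte frames (84 bytes on the wire including preamble and inter-frame gap), a port can receive roughly 14.9 million packets per second, so a 32-bit packet counter wraps after about 2^32 / 14.9e6, roughly 288 seconds; polling every 120 seconds therefore reads each counter at least twice per wrap period.

Below is a minimal sketch of how a net device (added later in this series) might forward its ethtool callbacks to the new helpers. The edma_port_priv_example structure and the example_* names are assumptions for illustration, not the driver's actual code, and <linux/ethtool.h>, <linux/netdevice.h> and "ppe_port.h" are assumed to be included:

/* Sketch only: forward ethtool statistics callbacks to the PPE port
 * helpers. The private-data layout below is an assumption.
 */
struct edma_port_priv_example {
	struct ppe_port *ppe_port;
};

static int example_get_sset_count(struct net_device *netdev, int sset)
{
	struct edma_port_priv_example *priv = netdev_priv(netdev);

	return ppe_port_get_sset_count(priv->ppe_port, sset);
}

static void example_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct edma_port_priv_example *priv = netdev_priv(netdev);

	ppe_port_get_strings(priv->ppe_port, stringset, data);
}

static void example_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct edma_port_priv_example *priv = netdev_priv(netdev);

	ppe_port_get_ethtool_stats(priv->ppe_port, data);
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_sset_count		= example_get_sset_count,
	.get_strings		= example_get_strings,
	.get_ethtool_stats	= example_get_ethtool_stats,
};

ppe_port_get_stats64() would back the net device's .ndo_get_stats64 callback in the same way.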

From 55fbbc8ef90df27a16bca1613a793a578b79a384 Mon Sep 17 00:00:00 2001
From: Lei Wei <quic_leiwei@quicinc.com>
Date: Fri, 1 Mar 2024 13:36:26 +0800
Subject: [PATCH] net: ethernet: qualcomm: Add PPE port MAC address and EEE
functions
Add PPE port MAC address set and EEE set API functions which
will be used by netdev ops and ethtool.
Change-Id: Id2b3b06ae940b3b6f5227d927316329cdf3caeaa
Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
Alex G: use struct ethtool_keee instead of ethtool_eee
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 75 ++++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 3 +
drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 29 ++++++++
3 files changed, 107 insertions(+)
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
@@ -462,6 +462,81 @@ void ppe_port_get_stats64(struct ppe_por
}
}
+/**
+ * ppe_port_set_mac_address() - Set PPE port MAC address
+ * @ppe_port: PPE port
+ * @addr: MAC address
+ *
+ * Description: Set MAC address for the given PPE port.
+ *
+ * Return: 0 upon success or a negative error upon failure.
+ */
+int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ int port = ppe_port->port_id;
+ u32 reg, val;
+ int ret;
+
+ if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
+ reg = PPE_PORT_GMAC_ADDR(port);
+ val = (addr[5] << 8) | addr[4];
+ ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR0_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3];
+ ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR1_ADDR, val);
+ if (ret)
+ return ret;
+ } else {
+ reg = PPE_PORT_XGMAC_ADDR(port);
+ val = (addr[5] << 8) | addr[4] | XGMAC_ADDR_EN;
+ ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_H_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | addr[0];
+ ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_L_ADDR, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ppe_port_set_mac_eee() - Set EEE configuration for PPE port MAC
+ * @ppe_port: PPE port
+ * @eee: EEE settings
+ *
+ * Description: Set port MAC EEE settings for the given PPE port.
+ *
+ * Return: 0 upon success or a negative error upon failure.
+ */
+int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_keee *eee)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ int port = ppe_port->port_id;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(ppe_dev->regmap, PPE_LPI_EN_ADDR, &val);
+ if (ret)
+ return ret;
+
+ if (eee->tx_lpi_enabled)
+ val |= PPE_LPI_PORT_EN(port);
+ else
+ val &= ~PPE_LPI_PORT_EN(port);
+
+ ret = regmap_write(ppe_dev->regmap, PPE_LPI_EN_ADDR, val);
+
+ return ret;
+}
+
/* PPE port and MAC reset */
static int ppe_port_mac_reset(struct ppe_port *ppe_port)
{
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
@@ -8,6 +8,7 @@
#include <linux/phylink.h>
+struct ethtool_keee;
struct rtnl_link_stats64;
/**
@@ -86,4 +87,6 @@ void ppe_port_get_strings(struct ppe_por
void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
void ppe_port_get_stats64(struct ppe_port *ppe_port,
struct rtnl_link_stats64 *s);
+int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
+int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_keee *eee);
#endif
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -20,6 +20,16 @@
#define PPE_PORT5_SEL_PCS1 BIT(4)
#define PPE_PORT_SEL_XGMAC(x) (BIT(8) << ((x) - 1))
+/* PPE port LPI enable register */
+#define PPE_LPI_EN_ADDR 0x400
+#define PPE_LPI_PORT1_EN BIT(0)
+#define PPE_LPI_PORT2_EN BIT(1)
+#define PPE_LPI_PORT3_EN BIT(2)
+#define PPE_LPI_PORT4_EN BIT(3)
+#define PPE_LPI_PORT5_EN BIT(4)
+#define PPE_LPI_PORT6_EN BIT(5)
+#define PPE_LPI_PORT_EN(x) (BIT(0) << ((x) - 1))
+
/* PPE scheduler configurations for buffer manager block. */
#define PPE_BM_SCH_CTRL_ADDR 0xb000
#define PPE_BM_SCH_CTRL_INC 4
@@ -592,6 +602,17 @@
#define GMAC_SPEED_100 1
#define GMAC_SPEED_1000 2
+/* GMAC MAC address register */
+#define GMAC_GOL_ADDR0_ADDR 0x8
+#define GMAC_ADDR_BYTE5 GENMASK(15, 8)
+#define GMAC_ADDR_BYTE4 GENMASK(7, 0)
+
+#define GMAC_GOL_ADDR1_ADDR 0xC
+#define GMAC_ADDR_BYTE0 GENMASK(31, 24)
+#define GMAC_ADDR_BYTE1 GENMASK(23, 16)
+#define GMAC_ADDR_BYTE2 GENMASK(15, 8)
+#define GMAC_ADDR_BYTE3 GENMASK(7, 0)
+
/* GMAC control register */
#define GMAC_CTRL_ADDR 0x18
#define GMAC_TX_THD_M GENMASK(27, 24)
@@ -717,6 +738,14 @@
#define XGMAC_RX_FLOW_CTRL_ADDR 0x90
#define XGMAC_RXFCEN BIT(0)
+/* XGMAC MAC address register */
+#define XGMAC_ADDR0_H_ADDR 0x300
+#define XGMAC_ADDR_EN BIT(31)
+#define XGMAC_ADDRH GENMASK(15, 0)
+
+#define XGMAC_ADDR0_L_ADDR 0x304
+#define XGMAC_ADDRL GENMASK(31, 0)
+
/* XGMAC management counters control register */
#define XGMAC_MMC_CTRL_ADDR 0x800
#define XGMAC_MCF BIT(3)
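
A minimal sketch of how a net device could expose ppe_port_set_mac_address() through .ndo_set_mac_address is shown below; ppe_port_set_mac_eee() would back the ethtool EEE callback in a similar way. The handler name and the edma_port_priv_example layout (from the sketch after the previous patch) are assumptions for illustration:

/* Sketch only: an .ndo_set_mac_address handler built on
 * ppe_port_set_mac_address(). Assumes <linux/etherdevice.h>.
 */
static int example_set_mac_address(struct net_device *netdev, void *addr)
{
	struct edma_port_priv_example *priv = netdev_priv(netdev);
	struct sockaddr *sa = addr;
	int ret;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	/* The helper handles the GMAC/XGMAC register layout and byte order. */
	ret = ppe_port_set_mac_address(priv->ppe_port, sa->sa_data);
	if (ret)
		return ret;

	eth_hw_addr_set(netdev, sa->sa_data);

	return 0;
}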

From 3981aeae5dd43dea94a0ec10f0b2977ebd102560 Mon Sep 17 00:00:00 2001
From: Luo Jie <quic_luoj@quicinc.com>
Date: Tue, 5 Mar 2024 16:42:56 +0800
Subject: [PATCH] net: ethernet: qualcomm: Add API to configure PPE port max
frame size
This function is called when the MTU of an Ethernet port is
configured. It limits the size of packets passed through the
Ethernet port.
Change-Id: I2a4dcd04407156d73770d2becbb7cbc0d56b3754
Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
---
drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 44 ++++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/ppe_port.h | 1 +
2 files changed, 45 insertions(+)
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
@@ -537,6 +537,50 @@ int ppe_port_set_mac_eee(struct ppe_port
return ret;
}
+/**
+ * ppe_port_set_maxframe() - Set port maximum frame size
+ * @ppe_port: PPE port structure
+ * @maxframe_size: Maximum frame size supported by PPE port
+ *
+ * Description: Set MTU of network interface specified by @ppe_port.
+ *
+ * Return: 0 upon success or a negative error upon failure.
+ */
+int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size)
+{
+ struct ppe_device *ppe_dev = ppe_port->ppe_dev;
+ u32 reg, val, mru_mtu_val[3];
+ int port = ppe_port->port_id;
+ int ret;
+
+ /* The max frame size in the PPE is the MTU plus ETH_HLEN. */
+ maxframe_size += ETH_HLEN;
+
+ /* The MAC accounts for the FCS when calculating the frame size. */
+ if (maxframe_size > PPE_PORT_MAC_MAX_FRAME_SIZE - ETH_FCS_LEN)
+ return -EINVAL;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU, maxframe_size);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_MC_MTU_CTRL_TBL_MTU,
+ val);
+ if (ret)
+ return ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ PPE_MRU_MTU_CTRL_SET_MRU(mru_mtu_val, maxframe_size);
+ PPE_MRU_MTU_CTRL_SET_MTU(mru_mtu_val, maxframe_size);
+
+ return regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+}
+
/* PPE port and MAC reset */
static int ppe_port_mac_reset(struct ppe_port *ppe_port)
{
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
@@ -89,4 +89,5 @@ void ppe_port_get_stats64(struct ppe_por
struct rtnl_link_stats64 *s);
int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_keee *eee);
+int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size);
#endif
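
A minimal sketch of an .ndo_change_mtu handler built on ppe_port_set_maxframe(), again reusing the assumed edma_port_priv_example layout from the earlier sketches; the handler name is hypothetical:

/* Sketch only: an .ndo_change_mtu handler using ppe_port_set_maxframe(). */
static int example_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct edma_port_priv_example *priv = netdev_priv(netdev);
	int ret;

	/* ppe_port_set_maxframe() adds ETH_HLEN itself and rejects values
	 * that would exceed the PPE MAC frame size limit.
	 */
	ret = ppe_port_set_maxframe(priv->ppe_port, new_mtu);
	if (ret)
		return ret;

	netdev->mtu = new_mtu;

	return 0;
}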

From 00d4f3cb4f5d1e6924151a4551f06b6a82bf0146 Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Wed, 28 Feb 2024 11:25:15 +0530
Subject: [PATCH] net: ethernet: qualcomm: Add EDMA support for QCOM IPQ9574
chipset.
Add the infrastructure functions such as Makefile,
EDMA hardware configuration, clock and IRQ initializations.
Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
Alex G: use "ppe_config.h" header instead of "ppe_api.h"
add missing definitions and functions from ppe_api:
- enum ppe_queue_class_type {}
- ppe_edma_queue_offset_config()
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
drivers/net/ethernet/qualcomm/ppe/Makefile | 3 +
drivers/net/ethernet/qualcomm/ppe/edma.c | 480 +++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/edma.h | 113 +++++
drivers/net/ethernet/qualcomm/ppe/ppe.c | 10 +-
drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
5 files changed, 858 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -5,3 +5,6 @@
obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
+
+#EDMA
+qcom-ppe-objs += edma.o
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+ /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ /* Qualcomm Ethernet DMA driver setup, HW configuration, clocks and
+ * interrupt initializations.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "edma.h"
+#include "ppe_regs.h"
+
+#define EDMA_IRQ_NAME_SIZE 32
+
+/* Global EDMA context. */
+struct edma_context *edma_ctx;
+
+/* Priority to multi-queue mapping. */
+static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
+
+enum edma_clk_id {
+ EDMA_CLK,
+ EDMA_CFG_CLK,
+ EDMA_CLK_MAX
+};
+
+static const char * const clock_name[EDMA_CLK_MAX] = {
+ [EDMA_CLK] = "edma",
+ [EDMA_CFG_CLK] = "edma-cfg",
+};
+
+/* Rx Fill ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rxfill_ring_info = {
+ .max_rings = 8,
+ .ring_start = 4,
+ .num_rings = 4,
+};
+
+/* Rx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rx_ring_info = {
+ .max_rings = 24,
+ .ring_start = 20,
+ .num_rings = 4,
+};
+
+/* Tx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_tx_ring_info = {
+ .max_rings = 32,
+ .ring_start = 8,
+ .num_rings = 24,
+};
+
+/* Tx complete ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_txcmpl_ring_info = {
+ .max_rings = 32,
+ .ring_start = 8,
+ .num_rings = 24,
+};
+
+/* HW info for IPQ9574. */
+static struct edma_hw_info ipq9574_hw_info = {
+ .rxfill = &ipq9574_rxfill_ring_info,
+ .rx = &ipq9574_rx_ring_info,
+ .tx = &ipq9574_tx_ring_info,
+ .txcmpl = &ipq9574_txcmpl_ring_info,
+ .max_ports = 6,
+ .napi_budget_rx = 128,
+ .napi_budget_tx = 512,
+};
+
+static int edma_clock_set_and_enable(struct device *dev,
+ const char *id, unsigned long rate)
+{
+ struct device_node *edma_np;
+ struct clk *clk = NULL;
+ int ret;
+
+ edma_np = of_get_child_by_name(dev->of_node, "edma");
+
+ clk = devm_get_clk_from_child(dev, edma_np, id);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clk %s get failed\n", id);
+ of_node_put(edma_np);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "set %lu rate for %s failed\n", rate, id);
+ of_node_put(edma_np);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "clk %s enable failed\n", id);
+ of_node_put(edma_np);
+ return ret;
+ }
+
+ of_node_put(edma_np);
+
+ dev_dbg(dev, "set %lu rate for %s\n", rate, id);
+
+ return 0;
+}
+
+static int edma_clock_init(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct device *dev = ppe_dev->dev;
+ unsigned long ppe_rate;
+ int ret;
+
+ ppe_rate = ppe_dev->clk_rate;
+
+ ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
+ ppe_rate);
+ if (ret)
+ return ret;
+
+ ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
+ ppe_rate);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
+ *
+ * Map int_priority values to priority class and initialize
+ * unicast priority map table for default profile_id.
+ */
+static int edma_configure_ucast_prio_map_tbl(void)
+{
+ u8 pri_class, int_pri;
+ int ret = 0;
+
+ /* Set the priority class value for every possible priority. */
+ for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
+ pri_class = edma_pri_map[int_pri];
+
+ /* Priority offset should be less than maximum supported
+ * queue priority.
+ */
+ if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
+ pr_err("Configured incorrect priority offset: %d\n",
+ pri_class);
+ return -EINVAL;
+ }
+
+ ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
+ PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
+
+ if (ret) {
+ pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
+ ret, int_pri, 0);
+ return ret;
+ }
+
+ pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
+ 0, int_pri, pri_class);
+ }
+
+ return ret;
+}
+
+static int edma_irq_init(void)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct edma_ring_info *rx = hw_info->rx;
+ char edma_irq_name[EDMA_IRQ_NAME_SIZE];
+ struct device *dev = ppe_dev->dev;
+ struct platform_device *pdev;
+ struct device_node *edma_np;
+ u32 i;
+
+ pdev = to_platform_device(dev);
+ edma_np = of_get_child_by_name(dev->of_node, "edma");
+ edma_ctx->intr_info.intr_txcmpl = kzalloc((sizeof(*edma_ctx->intr_info.intr_txcmpl) *
+ txcmpl->num_rings), GFP_KERNEL);
+ if (!edma_ctx->intr_info.intr_txcmpl) {
+ of_node_put(edma_np);
+ return -ENOMEM;
+ }
+
+ /* Get TXCMPL rings IRQ numbers. */
+ for (i = 0; i < txcmpl->num_rings; i++) {
+ snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
+ txcmpl->ring_start + i);
+ edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
+ if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
+ dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
+ edma_np->name, i);
+ of_node_put(edma_np);
+ kfree(edma_ctx->intr_info.intr_txcmpl);
+ return edma_ctx->intr_info.intr_txcmpl[i];
+ }
+
+ dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %u\n",
+ edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
+ }
+
+ edma_ctx->intr_info.intr_rx = kzalloc((sizeof(*edma_ctx->intr_info.intr_rx) *
+ rx->num_rings), GFP_KERNEL);
+ if (!edma_ctx->intr_info.intr_rx) {
+ of_node_put(edma_np);
+ kfree(edma_ctx->intr_info.intr_txcmpl);
+ return -ENOMEM;
+ }
+
+ /* Get RXDESC rings IRQ numbers. */
+ for (i = 0; i < rx->num_rings; i++) {
+ snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
+ rx->ring_start + i);
+ edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
+ if (edma_ctx->intr_info.intr_rx[i] < 0) {
+ dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
+ edma_np->name, i);
+ of_node_put(edma_np);
+ kfree(edma_ctx->intr_info.intr_rx);
+ kfree(edma_ctx->intr_info.intr_txcmpl);
+ return edma_ctx->intr_info.intr_rx[i];
+ }
+
+ dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %u\n",
+ edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
+ }
+
+ /* Get misc IRQ number. */
+ edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
+ if (edma_ctx->intr_info.intr_misc < 0) {
+ dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
+ of_node_put(edma_np);
+ kfree(edma_ctx->intr_info.intr_rx);
+ kfree(edma_ctx->intr_info.intr_txcmpl);
+ return edma_ctx->intr_info.intr_misc;
+ }
+
+ of_node_put(edma_np);
+
+ dev_dbg(dev, "%s: misc IRQ:%u\n", edma_np->name,
+ edma_ctx->intr_info.intr_misc);
+
+ return 0;
+}
+
+static int edma_hw_reset(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct device *dev = ppe_dev->dev;
+ struct reset_control *edma_hw_rst;
+ struct device_node *edma_np;
+ const char *reset_string;
+ u32 count, i;
+ int ret;
+
+ /* Count and parse reset names from DTSI. */
+ edma_np = of_get_child_by_name(dev->of_node, "edma");
+ count = of_property_count_strings(edma_np, "reset-names");
+ if (count < 0) {
+ dev_err(dev, "EDMA reset entry not found\n");
+ of_node_put(edma_np);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = of_property_read_string_index(edma_np, "reset-names",
+ i, &reset_string);
+ if (ret) {
+ dev_err(dev, "Error reading reset-names");
+ of_node_put(edma_np);
+ return -EINVAL;
+ }
+
+ edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
+ if (IS_ERR(edma_hw_rst)) {
+ of_node_put(edma_np);
+ return PTR_ERR(edma_hw_rst);
+ }
+
+ /* 100ms delay is required by hardware to reset EDMA. */
+ reset_control_assert(edma_hw_rst);
+ fsleep(100);
+
+ reset_control_deassert(edma_hw_rst);
+ fsleep(100);
+
+ reset_control_put(edma_hw_rst);
+ dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
+ }
+
+ of_node_put(edma_np);
+
+ return 0;
+}
+
+static int edma_hw_configure(void)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 data, reg;
+ int ret;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
+ ret = regmap_read(regmap, reg, &data);
+ if (ret)
+ return ret;
+
+ pr_debug("EDMA ver %d hw init\n", data);
+
+ /* Setup private data structure. */
+ edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
+ edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
+
+ /* Reset EDMA. */
+ ret = edma_hw_reset();
+ if (ret) {
+ pr_err("Error in resetting the hardware. ret: %d\n", ret);
+ return ret;
+ }
+
+ /* Allocate memory for netdevices. */
+ edma_ctx->netdev_arr = kzalloc((sizeof(**edma_ctx->netdev_arr) *
+ hw_info->max_ports),
+ GFP_KERNEL);
+ if (!edma_ctx->netdev_arr)
+ return -ENOMEM;
+
+ /* Configure DMA request priority, DMA read burst length,
+ * and AXI write size.
+ */
+ data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
+ data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
+ data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
+ data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
+ data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
+ ret = regmap_write(regmap, reg, data);
+ if (ret)
+ return ret;
+
+ /* Configure Tx Timeout Threshold. */
+ data = EDMA_TX_TIMEOUT_THRESH_VAL;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
+ ret = regmap_write(regmap, reg, data);
+ if (ret)
+ return ret;
+
+ /* Set Miscellaneous error mask. */
+ data = EDMA_MISC_AXI_RD_ERR_MASK |
+ EDMA_MISC_AXI_WR_ERR_MASK |
+ EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
+ EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
+ EDMA_MISC_TX_SRAM_FULL_MASK |
+ EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
+ EDMA_MISC_DATA_LEN_ERR_MASK;
+ data |= EDMA_MISC_TX_TIMEOUT_MASK;
+ edma_ctx->intr_info.intr_mask_misc = data;
+
+ /* Global EDMA enable and padding enable. */
+ data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
+ ret = regmap_write(regmap, reg, data);
+ if (ret)
+ return ret;
+
+ /* Initialize unicast priority map table. */
+ ret = (int)edma_configure_ucast_prio_map_tbl();
+ if (ret) {
+ pr_err("Failed to initialize unicast priority map table: %d\n",
+ ret);
+ kfree(edma_ctx->netdev_arr);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * edma_destroy - EDMA Destroy.
+ * @ppe_dev: PPE device
+ *
+ * Free the memory allocated during setup.
+ */
+void edma_destroy(struct ppe_device *ppe_dev)
+{
+ kfree(edma_ctx->intr_info.intr_rx);
+ kfree(edma_ctx->intr_info.intr_txcmpl);
+ kfree(edma_ctx->netdev_arr);
+}
+
+/**
+ * edma_setup - EDMA Setup.
+ * @ppe_dev: PPE device
+ *
+ * Configure Ethernet global ctx, clocks, hardware and interrupts.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int edma_setup(struct ppe_device *ppe_dev)
+{
+ struct device *dev = ppe_dev->dev;
+ int ret;
+
+ edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
+ if (!edma_ctx)
+ return -ENOMEM;
+
+ edma_ctx->hw_info = &ipq9574_hw_info;
+ edma_ctx->ppe_dev = ppe_dev;
+
+ /* Configure the EDMA common clocks. */
+ ret = edma_clock_init();
+ if (ret) {
+ dev_err(dev, "Error in configuring the EDMA clocks\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
+
+ ret = edma_hw_configure();
+ if (ret) {
+ dev_err(dev, "Error in edma configuration\n");
+ return ret;
+ }
+
+ ret = edma_irq_init();
+ if (ret) {
+ dev_err(dev, "Error in irq initialization\n");
+ return ret;
+ }
+
+ dev_info(dev, "EDMA configuration successful\n");
+
+ return 0;
+}
+
+/**
+ * ppe_edma_queue_offset_config - Configure queue offset for EDMA interface
+ * @ppe_dev: PPE device
+ * @class: The class to configure queue offset
+ * @index: Class index, internal priority or hash value
+ * @queue_offset: Queue offset value
+ *
+ * PPE EDMA queue offset is configured based on the PPE internal priority or
+ * RSS hash value, the profile ID is fixed to 0 for EDMA interface.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
+ enum ppe_queue_class_type class,
+ int index, int queue_offset)
+{
+ if (class == PPE_QUEUE_CLASS_PRIORITY)
+ return ppe_queue_ucast_offset_pri_set(ppe_dev, 0,
+ index, queue_offset);
+
+ return ppe_queue_ucast_offset_hash_set(ppe_dev, 0,
+ index, queue_offset);
+}
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __EDMA_MAIN__
+#define __EDMA_MAIN__
+
+#include "ppe_config.h"
+
+/* One clock cycle = 1/(EDMA clock frequency in MHz) microseconds.
+ *
+ * One timer unit is 128 clock cycles.
+ *
+ * Therefore, the microsecond to timer unit conversion is:
+ * Timer unit = time in microseconds / (one clock cycle in microseconds * cycles in 1 timer unit)
+ *            = ('x' microseconds * EDMA clock frequency in MHz ('y')) / 128.
+ *
+ */
+#define EDMA_CYCLE_PER_TIMER_UNIT 128
+#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y) ((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
+#define MHZ 1000000UL
+
+/* EDMA profile ID. */
+#define EDMA_CPU_PORT_PROFILE_ID 0
+
+/* Number of PPE queue priorities supported per ARM core. */
+#define EDMA_PRI_MAX_PER_CORE 8
+
+/**
+ * enum ppe_queue_class_type - PPE queue class type
+ * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
+ * @PPE_QUEUE_CLASS_HASH: Queue offset configured from RSS hash.
+ */
+enum ppe_queue_class_type {
+ PPE_QUEUE_CLASS_PRIORITY,
+ PPE_QUEUE_CLASS_HASH,
+};
+
+/**
+ * struct edma_ring_info - EDMA ring data structure.
+ * @max_rings: Maximum number of rings
+ * @ring_start: Ring start ID
+ * @num_rings: Number of rings
+ */
+struct edma_ring_info {
+ u32 max_rings;
+ u32 ring_start;
+ u32 num_rings;
+};
+
+/**
+ * struct edma_hw_info - EDMA hardware data structure.
+ * @rxfill: Rx Fill ring information
+ * @rx: Rx Desc ring information
+ * @tx: Tx Desc ring information
+ * @txcmpl: Tx complete ring information
+ * @max_ports: Maximum number of ports
+ * @napi_budget_rx: Rx NAPI budget
+ * @napi_budget_tx: Tx NAPI budget
+ */
+struct edma_hw_info {
+ struct edma_ring_info *rxfill;
+ struct edma_ring_info *rx;
+ struct edma_ring_info *tx;
+ struct edma_ring_info *txcmpl;
+ u32 max_ports;
+ u32 napi_budget_rx;
+ u32 napi_budget_tx;
+};
+
+/**
+ * struct edma_intr_info - EDMA interrupt data structure.
+ * @intr_mask_rx: RX interrupt mask
+ * @intr_rx: Rx interrupts
+ * @intr_mask_txcmpl: Tx completion interrupt mask
+ * @intr_txcmpl: Tx completion interrupts
+ * @intr_mask_misc: Miscellaneous interrupt mask
+ * @intr_misc: Miscellaneous interrupts
+ */
+struct edma_intr_info {
+ u32 intr_mask_rx;
+ u32 *intr_rx;
+ u32 intr_mask_txcmpl;
+ u32 *intr_txcmpl;
+ u32 intr_mask_misc;
+ u32 intr_misc;
+};
+
+/**
+ * struct edma_context - EDMA context.
+ * @netdev_arr: Net device for each EDMA port
+ * @ppe_dev: PPE device
+ * @hw_info: EDMA Hardware info
+ * @intr_info: EDMA Interrupt info
+ */
+struct edma_context {
+ struct net_device **netdev_arr;
+ struct ppe_device *ppe_dev;
+ struct edma_hw_info *hw_info;
+ struct edma_intr_info intr_info;
+};
+
+/* Global EDMA context. */
+extern struct edma_context *edma_ctx;
+
+void edma_destroy(struct ppe_device *ppe_dev);
+int edma_setup(struct ppe_device *ppe_dev);
+int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
+ enum ppe_queue_class_type class,
+ int index, int queue_offset);
+
+
+#endif
--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "edma.h"
#include "ppe.h"
#include "ppe_config.h"
#include "ppe_debugfs.h"
@@ -201,10 +202,16 @@ static int qcom_ppe_probe(struct platfor
if (ret)
return dev_err_probe(dev, ret, "PPE HW config failed\n");
- ret = ppe_port_mac_init(ppe_dev);
+ ret = edma_setup(ppe_dev);
if (ret)
+ return dev_err_probe(dev, ret, "EDMA setup failed\n");
+
+ ret = ppe_port_mac_init(ppe_dev);
+ if (ret) {
+ edma_destroy(ppe_dev);
return dev_err_probe(dev, ret,
"PPE Port MAC initialization failed\n");
+ }
ppe_debugfs_setup(ppe_dev);
platform_set_drvdata(pdev, ppe_dev);
@@ -219,6 +226,7 @@ static void qcom_ppe_remove(struct platf
ppe_dev = platform_get_drvdata(pdev);
ppe_debugfs_teardown(ppe_dev);
ppe_port_mac_deinit(ppe_dev);
+ edma_destroy(ppe_dev);
platform_set_drvdata(pdev, NULL);
}
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -800,4 +800,257 @@
#define XGMAC_RXDISCARD_GB_ADDR 0x9AC
#define XGMAC_RXDISCARDBYTE_GB_ADDR 0x9B4
+#define EDMA_BASE_OFFSET 0xb00000
+
+/* EDMA register offsets */
+#define EDMA_REG_MAS_CTRL_ADDR 0x0
+#define EDMA_REG_PORT_CTRL_ADDR 0x4
+#define EDMA_REG_VLAN_CTRL_ADDR 0x8
+#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR 0x14
+#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR 0x18
+#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR 0x1c
+#define EDMA_REG_TXQ_CTRL_ADDR 0x20
+#define EDMA_REG_TXQ_CTRL_2_ADDR 0x24
+#define EDMA_REG_TXQ_FC_0_ADDR 0x28
+#define EDMA_REG_TXQ_FC_1_ADDR 0x30
+#define EDMA_REG_TXQ_FC_2_ADDR 0x34
+#define EDMA_REG_TXQ_FC_3_ADDR 0x38
+#define EDMA_REG_RXQ_CTRL_ADDR 0x3c
+#define EDMA_REG_MISC_ERR_QID_ADDR 0x40
+#define EDMA_REG_RXQ_FC_THRE_ADDR 0x44
+#define EDMA_REG_DMAR_CTRL_ADDR 0x48
+#define EDMA_REG_AXIR_CTRL_ADDR 0x4c
+#define EDMA_REG_AXIW_CTRL_ADDR 0x50
+#define EDMA_REG_MIN_MSS_ADDR 0x54
+#define EDMA_REG_LOOPBACK_CTRL_ADDR 0x58
+#define EDMA_REG_MISC_INT_STAT_ADDR 0x5c
+#define EDMA_REG_MISC_INT_MASK_ADDR 0x60
+#define EDMA_REG_DBG_CTRL_ADDR 0x64
+#define EDMA_REG_DBG_DATA_ADDR 0x68
+#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR 0x6c
+#define EDMA_REG_REQ0_FIFO_THRESH_ADDR 0x80
+#define EDMA_REG_WB_OS_THRESH_ADDR 0x84
+#define EDMA_REG_MISC_ERR_QID_REG2_ADDR 0x88
+#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR 0x8c
+#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR 0x90
+#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR 0x94
+#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR 0x98
+#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR 0x9c
+#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR 0xa0
+
+/* Tx descriptor ring configuration register addresses */
+#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_BA2(n) (0x1014 + (0x1000 * (n)))
+
+/* RxFill ring configuration register addresses */
+#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * (n)))
+
+/* Rx descriptor ring configuration register addresses */
+#define EDMA_REG_RXDESC_BA(n) (0x39000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE(n) (0x39020 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE_DONE(n) (0x39024 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PREHEADER_BA(n) (0x39028 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_STAT(n) (0x59000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_MASK(n) (0x59004 + (0x1000 * (n)))
+
+#define EDMA_REG_RX_MOD_TIMER(n) (0x59008 + (0x1000 * (n)))
+#define EDMA_REG_RX_INT_CTRL(n) (0x5900c + (0x1000 * (n)))
+
+/* Tx completion ring configuration register addresses */
+#define EDMA_REG_TXCMPL_BA(n) (0x79000 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_PROD_IDX(n) (0x79004 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CONS_IDX(n) (0x79008 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_RING_SIZE(n) (0x7900c + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_UGT_THRE(n) (0x79010 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CTRL(n) (0x79014 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_BPC(n) (0x79018 + (0x1000 * (n)))
+
+#define EDMA_REG_TX_INT_STAT(n) (0x99000 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_MASK(n) (0x99004 + (0x1000 * (n)))
+#define EDMA_REG_TX_MOD_TIMER(n) (0x99008 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_CTRL(n) (0x9900c + (0x1000 * (n)))
+
+/* EDMA_QID2RID_TABLE_MEM register field masks */
+#define EDMA_RX_RING_ID_QUEUE0_MASK GENMASK(7, 0)
+#define EDMA_RX_RING_ID_QUEUE1_MASK GENMASK(15, 8)
+#define EDMA_RX_RING_ID_QUEUE2_MASK GENMASK(23, 16)
+#define EDMA_RX_RING_ID_QUEUE3_MASK GENMASK(31, 24)
+
+/* EDMA_REG_PORT_CTRL register bit definitions */
+#define EDMA_PORT_PAD_EN 0x1
+#define EDMA_PORT_EDMA_EN 0x2
+
+/* EDMA_REG_DMAR_CTRL register field masks */
+#define EDMA_DMAR_REQ_PRI_MASK GENMASK(2, 0)
+#define EDMA_DMAR_BURST_LEN_MASK BIT(3)
+#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK GENMASK(8, 4)
+#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK GENMASK(11, 9)
+#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK GENMASK(14, 12)
+
+#define EDMA_BURST_LEN_ENABLE 0
+
+/* Tx timeout threshold */
+#define EDMA_TX_TIMEOUT_THRESH_VAL 0xFFFF
+
+/* Rx descriptor ring base address mask */
+#define EDMA_RXDESC_BA_MASK 0xffffffff
+
+/* Rx Descriptor ring pre-header base address mask */
+#define EDMA_RXDESC_PREHEADER_BA_MASK 0xffffffff
+
+/* Tx descriptor prod ring index mask */
+#define EDMA_TXDESC_PROD_IDX_MASK 0xffff
+
+/* Tx descriptor consumer ring index mask */
+#define EDMA_TXDESC_CONS_IDX_MASK 0xffff
+
+/* Tx descriptor ring size mask */
+#define EDMA_TXDESC_RING_SIZE_MASK 0xffff
+
+/* Tx descriptor ring enable */
+#define EDMA_TXDESC_TX_ENABLE 0x1
+
+#define EDMA_TXDESC_CTRL_TXEN_MASK BIT(0)
+#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK GENMASK(3, 1)
+
+/* Tx completion ring prod index mask */
+#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff
+
+/* Tx completion ring urgent threshold mask */
+#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff
+#define EDMA_TXCMPL_LOW_THRE_SHIFT 0
+
+/* EDMA_REG_TX_MOD_TIMER mask */
+#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
+#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0
+
+/* Rx fill ring prod index mask */
+#define EDMA_RXFILL_PROD_IDX_MASK 0xffff
+
+/* Rx fill ring consumer index mask */
+#define EDMA_RXFILL_CONS_IDX_MASK 0xffff
+
+/* Rx fill ring size mask */
+#define EDMA_RXFILL_RING_SIZE_MASK 0xffff
+
+/* Rx fill ring flow control threshold masks */
+#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff
+#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12
+#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff
+#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0
+
+/* Rx fill ring enable bit */
+#define EDMA_RXFILL_RING_EN 0x1
+
+/* Rx desc ring prod index mask */
+#define EDMA_RXDESC_PROD_IDX_MASK 0xffff
+
+/* Rx descriptor ring cons index mask */
+#define EDMA_RXDESC_CONS_IDX_MASK 0xffff
+
+/* Rx descriptor ring size masks */
+#define EDMA_RXDESC_RING_SIZE_MASK 0xffff
+#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff
+#define EDMA_RXDESC_PL_OFFSET_SHIFT 16
+#define EDMA_RXDESC_PL_DEFAULT_VALUE 0
+
+/* Rx descriptor ring flow control threshold masks */
+#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff
+#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12
+#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff
+#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0
+
+/* Rx descriptor ring urgent threshold mask */
+#define EDMA_RXDESC_LOW_THRE_MASK 0xffff
+#define EDMA_RXDESC_LOW_THRE_SHIFT 0
+
+/* Rx descriptor ring enable bit */
+#define EDMA_RXDESC_RX_EN 0x1
+
+/* Tx interrupt status bit */
+#define EDMA_TX_INT_MASK_PKT_INT 0x1
+
+/* Rx interrupt mask */
+#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1
+
+#define EDMA_MASK_INT_DISABLE 0x0
+#define EDMA_MASK_INT_CLEAR 0x0
+
+/* EDMA_REG_RX_MOD_TIMER register field masks */
+#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
+#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0
+
+/* EDMA Ring mask */
+#define EDMA_RING_DMA_MASK 0xffffffff
+
+/* RXDESC threshold interrupt. */
+#define EDMA_RXDESC_UGT_INT_STAT 0x2
+
+/* RXDESC timer interrupt */
+#define EDMA_RXDESC_PKT_INT_STAT 0x1
+
+/* RXDESC Interrupt status mask */
+#define EDMA_RXDESC_RING_INT_STATUS_MASK \
+ (EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
+
+/* TXCMPL threshold interrupt. */
+#define EDMA_TXCMPL_UGT_INT_STAT 0x2
+
+/* TXCMPL timer interrupt */
+#define EDMA_TXCMPL_PKT_INT_STAT 0x1
+
+/* TXCMPL Interrupt status mask */
+#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
+ (EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
+
+#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0
+
+#define EDMA_RXDESC_LOW_THRE 0
+#define EDMA_RX_MOD_TIMER_INIT 1000
+#define EDMA_RX_NE_INT_EN 0x2
+
+#define EDMA_TX_MOD_TIMER 150
+
+#define EDMA_TX_INITIAL_PROD_IDX 0x0
+#define EDMA_TX_NE_INT_EN 0x2
+
+/* EDMA misc error mask */
+#define EDMA_MISC_AXI_RD_ERR_MASK BIT(0)
+#define EDMA_MISC_AXI_WR_ERR_MASK BIT(1)
+#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK BIT(2)
+#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK BIT(3)
+#define EDMA_MISC_TX_SRAM_FULL_MASK BIT(4)
+#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK BIT(5)
+
+#define EDMA_MISC_DATA_LEN_ERR_MASK BIT(6)
+#define EDMA_MISC_TX_TIMEOUT_MASK BIT(7)
+
+/* EDMA txdesc2cmpl map */
+#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK 0x1F
+
+/* EDMA rxdesc2fill map */
+#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK 0x7
+
#endif
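For illustration, not part of the patch: the EDMA block sits at EDMA_BASE_OFFSET inside the PPE register space, so the driver forms an absolute offset from EDMA_BASE_OFFSET plus one of the macros above and accesses it through the PPE regmap. A sketch, with a made-up helper name and ring_id/dma_addr as placeholders:

	static void edma_program_txdesc_base(struct ppe_device *ppe_dev,
					     u32 ring_id, dma_addr_t dma_addr)
	{
		u32 reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA(ring_id);

		/* Lower 32 bits of the ring base address go into the BA register. */
		regmap_write(ppe_dev->regmap, reg, (u32)dma_addr & EDMA_RING_DMA_MASK);
	}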

View file

@@ -0,0 +1,397 @@
From 5dc80c468c668d855d76b323f09bbadb95cc3147 Mon Sep 17 00:00:00 2001
From: Suruchi Agarwal <quic_suruchia@quicinc.com>
Date: Thu, 21 Mar 2024 16:14:46 -0700
Subject: [PATCH] net: ethernet: qualcomm: Add netdevice support for QCOM
IPQ9574 chipset.
Add EDMA ports and netdevice operations for QCOM IPQ9574 chipset.
Change-Id: I08b2eff52b4ef0d6d428c1c416f5580ef010973f
Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
---
drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
drivers/net/ethernet/qualcomm/ppe/edma.h | 3 +
drivers/net/ethernet/qualcomm/ppe/edma_port.c | 270 ++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/edma_port.h | 31 ++
drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 19 ++
5 files changed, 324 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.c
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.h
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
#EDMA
-qcom-ppe-objs += edma.o
+qcom-ppe-objs += edma.o edma_port.o
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -26,6 +26,9 @@
/* Number of PPE queue priorities supported per ARM core. */
#define EDMA_PRI_MAX_PER_CORE 8
+/* Interface ID start. */
+#define EDMA_START_IFNUM 1
+
/**
* enum ppe_queue_class_type - PPE queue class type
* @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* EDMA port initialization, configuration and netdevice ops handling */
+
+#include <linux/etherdevice.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+#include <linux/printk.h>
+
+#include "edma.h"
+#include "edma_port.h"
+#include "ppe_regs.h"
+
+/* Number of netdev queues. */
+#define EDMA_NETDEV_QUEUE_NUM 4
+
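+/* Select the Tx queue from the CPU that is transmitting, effectively giving
+ * each core its own netdev queue (EDMA_NETDEV_QUEUE_NUM queues are allocated
+ * in edma_port_setup() below).
+ */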
+static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
+ __maybe_unused struct sk_buff *skb,
+ __maybe_unused struct net_device *sb_dev)
+{
+ int cpu = get_cpu();
+
+ put_cpu();
+
+ return cpu;
+}
+
+static int edma_port_open(struct net_device *netdev)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+	/* Inform the networking stack of the hardware checksum offload and
+	 * other capabilities. Each port is responsible for maintaining the
+	 * feature set it supports.
+	 */
+ netdev->features |= EDMA_NETDEV_FEATURES;
+ netdev->hw_features |= EDMA_NETDEV_FEATURES;
+ netdev->vlan_features |= EDMA_NETDEV_FEATURES;
+ netdev->wanted_features |= EDMA_NETDEV_FEATURES;
+
+ ppe_port = port_priv->ppe_port;
+
+ if (ppe_port->phylink)
+ phylink_start(ppe_port->phylink);
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static int edma_port_close(struct net_device *netdev)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ netif_stop_queue(netdev);
+
+ ppe_port = port_priv->ppe_port;
+
+ /* Phylink close. */
+ if (ppe_port->phylink)
+ phylink_stop(ppe_port->phylink);
+
+ return 0;
+}
+
+static int edma_port_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *ppe_port;
+ int ret = -EINVAL;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ ppe_port = port_priv->ppe_port;
+ if (ppe_port->phylink)
+ return phylink_mii_ioctl(ppe_port->phylink, ifr, cmd);
+
+ return ret;
+}
+
+static int edma_port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+
+ if (!port_priv)
+ return -EINVAL;
+
+ netdev->mtu = mtu;
+
+ return ppe_port_set_maxframe(port_priv->ppe_port, mtu);
+}
+
+static netdev_features_t edma_port_feature_check(__maybe_unused struct sk_buff *skb,
+ __maybe_unused struct net_device *netdev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static void edma_port_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+
+ if (!port_priv)
+ return;
+
+ ppe_port_get_stats64(port_priv->ppe_port, stats);
+}
+
+static int edma_port_set_mac_address(struct net_device *netdev, void *macaddr)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)macaddr;
+ int ret;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ netdev_dbg(netdev, "AddrFamily: %d, %0x:%0x:%0x:%0x:%0x:%0x\n",
+ addr->sa_family, addr->sa_data[0], addr->sa_data[1],
+ addr->sa_data[2], addr->sa_data[3], addr->sa_data[4],
+ addr->sa_data[5]);
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret)
+ return ret;
+
+ if (ppe_port_set_mac_address(port_priv->ppe_port, (u8 *)addr)) {
+ netdev_err(netdev, "set mac address failed for dev: %s\n", netdev->name);
+ return -EINVAL;
+ }
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+static const struct net_device_ops edma_port_netdev_ops = {
+ .ndo_open = edma_port_open,
+ .ndo_stop = edma_port_close,
+ .ndo_get_stats64 = edma_port_get_stats64,
+ .ndo_set_mac_address = edma_port_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = edma_port_change_mtu,
+ .ndo_eth_ioctl = edma_port_ioctl,
+ .ndo_features_check = edma_port_feature_check,
+ .ndo_select_queue = edma_port_select_queue,
+};
+
+/**
+ * edma_port_destroy - EDMA port destroy.
+ * @port: PPE port
+ *
+ * Unregister and free the netdevice.
+ */
+void edma_port_destroy(struct ppe_port *port)
+{
+ int port_id = port->port_id;
+ struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ ppe_port_phylink_destroy(port);
+ edma_ctx->netdev_arr[port_id - 1] = NULL;
+}
+
+/**
+ * edma_port_setup - EDMA port Setup.
+ * @port: PPE port
+ *
+ * Initialize and register the netdevice.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int edma_port_setup(struct ppe_port *port)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct device_node *np = port->np;
+ struct edma_port_priv *port_priv;
+ int port_id = port->port_id;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret = 0;
+ u8 *maddr;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
+ EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
+ if (!netdev) {
+		pr_err("alloc_etherdev_mqs() failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, ppe_dev->dev);
+ netdev->dev.of_node = np;
+
+ /* max_mtu is set to 1500 in ether_setup(). */
+ netdev->max_mtu = ETH_MAX_MTU;
+
+ port_priv = netdev_priv(netdev);
+ memset((void *)port_priv, 0, sizeof(struct edma_port_priv));
+
+ port_priv->ppe_port = port;
+ port_priv->netdev = netdev;
+ netdev->watchdog_timeo = 5 * HZ;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netdev->netdev_ops = &edma_port_netdev_ops;
+ netdev->gso_max_segs = GSO_MAX_SEGS;
+
+ maddr = mac_addr;
+ if (of_get_mac_address(np, maddr))
+ maddr = NULL;
+
+ if (maddr && is_valid_ether_addr(maddr)) {
+ eth_hw_addr_set(netdev, maddr);
+ } else {
+ eth_hw_addr_random(netdev);
+ netdev_info(netdev, "GMAC%d Using random MAC address - %pM\n",
+ port_id, netdev->dev_addr);
+ }
+
+	netdev_dbg(netdev, "Configuring the port %s (qcom-id:%d)\n",
+ netdev->name, port_id);
+
+	/* The 'port_id' corresponds to the port number on the SoC. Port
+	 * numbers begin at '1', hence one is subtracted when using the
+	 * port_id as an array index.
+	 */
+ edma_ctx->netdev_arr[port_id - 1] = netdev;
+
+ /* Setup phylink. */
+ ret = ppe_port_phylink_setup(port, netdev);
+ if (ret) {
+		netdev_dbg(netdev, "EDMA port phylink setup failed for netdevice %s\n",
+ netdev->name);
+ goto port_phylink_setup_fail;
+ }
+
+ /* Register the network interface. */
+ ret = register_netdev(netdev);
+ if (ret) {
+ netdev_dbg(netdev, "Error registering netdevice %s\n",
+ netdev->name);
+ goto register_netdev_fail;
+ }
+
+ netdev_dbg(netdev, "Setup EDMA port GMAC%d done\n", port_id);
+ return ret;
+
+register_netdev_fail:
+ ppe_port_phylink_destroy(port);
+port_phylink_setup_fail:
+ free_netdev(netdev);
+ edma_ctx->netdev_arr[port_id - 1] = NULL;
+
+ return ret;
+}
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __EDMA_PORTS__
+#define __EDMA_PORTS__
+
+#include "ppe_port.h"
+
+#define EDMA_NETDEV_FEATURES (NETIF_F_FRAGLIST \
+ | NETIF_F_SG \
+ | NETIF_F_RXCSUM \
+ | NETIF_F_HW_CSUM \
+ | NETIF_F_TSO \
+ | NETIF_F_TSO6)
+
+/**
+ * struct edma_port_priv - EDMA port priv structure.
+ * @ppe_port: Pointer to PPE port
+ * @netdev: Corresponding netdevice
+ * @flags: Feature flags
+ */
+struct edma_port_priv {
+ struct ppe_port *ppe_port;
+ struct net_device *netdev;
+ unsigned long flags;
+};
+
+void edma_port_destroy(struct ppe_port *port);
+int edma_port_setup(struct ppe_port *port);
+#endif
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
+#include "edma_port.h"
#include "ppe.h"
#include "ppe_port.h"
#include "ppe_regs.h"
@@ -1277,12 +1278,26 @@ int ppe_port_mac_init(struct ppe_device
goto err_port_node;
}
+ ret = edma_port_setup(&ppe_ports->port[i]);
+ if (ret) {
+ dev_err(ppe_dev->dev, "QCOM EDMA port setup failed\n");
+ i--;
+ goto err_port_setup;
+ }
+
i++;
}
of_node_put(ports_node);
return 0;
+err_port_setup:
+	/* Destroy the EDMA ports created so far. */
+ while (i >= 0) {
+ edma_port_destroy(&ppe_ports->port[i]);
+ i--;
+ }
+
err_port_clk:
for (j = 0; j < i; j++)
ppe_port_clock_deinit(&ppe_ports->port[j]);
@@ -1307,6 +1322,10 @@ void ppe_port_mac_deinit(struct ppe_devi
for (i = 0; i < ppe_dev->ports->num; i++) {
ppe_port = &ppe_dev->ports->port[i];
+
+	/* Destroy all phylink instances and EDMA ports. */
+ edma_port_destroy(ppe_port);
+
ppe_port_clock_deinit(ppe_port);
}
}

View file

@@ -0,0 +1,730 @@
From 8a924457c0b71acee96c8f78ef386e2a354a2aca Mon Sep 17 00:00:00 2001
From: Suruchi Agarwal <quic_suruchia@quicinc.com>
Date: Thu, 21 Mar 2024 16:31:04 -0700
Subject: [PATCH] net: ethernet: qualcomm: Add miscellaneous error interrupts
and counters
Add support for the EDMA miscellaneous error interrupts, and expose the
EDMA Tx/Rx and error counters through the debugfs framework.
Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
---
drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
drivers/net/ethernet/qualcomm/ppe/edma.c | 162 ++++++++
drivers/net/ethernet/qualcomm/ppe/edma.h | 30 ++
.../net/ethernet/qualcomm/ppe/edma_debugfs.c | 370 ++++++++++++++++++
.../net/ethernet/qualcomm/ppe/ppe_debugfs.c | 17 +
5 files changed, 580 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
#EDMA
-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -152,6 +152,42 @@ static int edma_clock_init(void)
}
/**
+ * edma_err_stats_alloc - Allocate stats memory
+ *
+ * Allocate memory for per-CPU error stats.
+ */
+int edma_err_stats_alloc(void)
+{
+ u32 i;
+
+ edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
+ if (!edma_ctx->err_stats)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ struct edma_err_stats *stats;
+
+ stats = per_cpu_ptr(edma_ctx->err_stats, i);
+ u64_stats_init(&stats->syncp);
+ }
+
+ return 0;
+}
+
+/**
+ * edma_err_stats_free - Free stats memory
+ *
+ * Free memory of per-CPU error stats.
+ */
+void edma_err_stats_free(void)
+{
+ if (edma_ctx->err_stats) {
+ free_percpu(edma_ctx->err_stats);
+ edma_ctx->err_stats = NULL;
+ }
+}
+
+/**
* edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
*
* Map int_priority values to priority class and initialize
@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map
return ret;
}
+static void edma_disable_misc_interrupt(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 reg;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+ regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
+}
+
+static void edma_enable_misc_interrupt(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 reg;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
+}
+
+static irqreturn_t edma_misc_handle_irq(int irq,
+ __maybe_unused void *ctx)
+{
+ struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 misc_intr_status, data, reg;
+
+ /* Read Misc intr status */
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
+ regmap_read(regmap, reg, &data);
+ misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
+
+ pr_debug("Received misc irq %d, status: %d\n", irq, misc_intr_status);
+
+ if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
+ pr_err("MISC AXI read error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_axi_read_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
+ pr_err("MISC AXI write error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_axi_write_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Rx descriptor fifo full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_rxdesc_fifo_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Rx buffer size error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_rx_buf_size_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx SRAM full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_sram_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx complete buffer full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_txcmpl_buf_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC data length error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_data_len_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx timeout error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_timeout;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int edma_irq_register(void)
{
struct edma_hw_info *hw_info = edma_ctx->hw_info;
struct edma_ring_info *txcmpl = hw_info->txcmpl;
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
struct edma_ring_info *rx = hw_info->rx;
+ struct device *dev = ppe_dev->dev;
int ret;
u32 i;
@@ -270,8 +408,25 @@ static int edma_irq_register(void)
edma_rxdesc_irq_name[i]);
}
+ /* Request Misc IRQ */
+ ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
+ IRQF_SHARED, "edma_misc",
+ (void *)dev);
+ if (ret) {
+ pr_err("MISC IRQ:%d request failed\n",
+ edma_ctx->intr_info.intr_misc);
+ goto misc_intr_req_fail;
+ }
+
return 0;
+misc_intr_req_fail:
+ /* Free IRQ for RXDESC rings */
+ for (i = 0; i < rx->num_rings; i++) {
+ synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
+ free_irq(edma_ctx->intr_info.intr_rx[i],
+ (void *)&edma_ctx->rx_rings[i]);
+ }
rx_desc_ring_intr_req_fail:
for (i = 0; i < rx->num_rings; i++)
kfree(edma_rxdesc_irq_name[i]);
@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
edma_cfg_tx_disable_interrupts(i);
edma_cfg_rx_disable_interrupts();
+ edma_disable_misc_interrupt();
edma_cfg_rx_rings_disable();
@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe
edma_cfg_tx_disable_interrupts(i);
edma_cfg_rx_disable_interrupts();
+ edma_disable_misc_interrupt();
/* Free IRQ for TXCMPL rings. */
for (i = 0; i < txcmpl->num_rings; i++) {
@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe
}
kfree(edma_rxdesc_irq_name);
+ /* Free Misc IRQ */
+ synchronize_irq(edma_ctx->intr_info.intr_misc);
+ free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
+
kfree(edma_ctx->intr_info.intr_rx);
kfree(edma_ctx->intr_info.intr_txcmpl);
@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_de
}
edma_cfg_rx_enable_interrupts();
+ edma_enable_misc_interrupt();
dev_info(dev, "EDMA configuration successful\n");
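Since the miscellaneous interrupt is requested with IRQF_SHARED and the PPE struct device pointer as the cookie, edma_destroy() passes the same pointer back to free_irq(), as required when releasing a shared interrupt handler.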
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -47,6 +47,30 @@ enum ppe_queue_class_type {
};
/**
+ * struct edma_err_stats - EDMA error stats
+ * @edma_axi_read_err: AXI read error
+ * @edma_axi_write_err: AXI write error
+ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
+ * @edma_rx_buf_size_err: Rx buffer size too small error
+ * @edma_tx_sram_full: Tx packet SRAM buffer full error
+ * @edma_tx_data_len_err: Tx data length error
+ * @edma_tx_timeout: Tx timeout error
+ * @edma_txcmpl_buf_full: Tx completion buffer full error
+ * @syncp: Synchronization point for the 64-bit statistics
+ */
+struct edma_err_stats {
+ u64 edma_axi_read_err;
+ u64 edma_axi_write_err;
+ u64 edma_rxdesc_fifo_full;
+ u64 edma_rx_buf_size_err;
+ u64 edma_tx_sram_full;
+ u64 edma_tx_data_len_err;
+ u64 edma_tx_timeout;
+ u64 edma_txcmpl_buf_full;
+ struct u64_stats_sync syncp;
+};
+
+/**
* struct edma_ring_info - EDMA ring data structure.
* @max_rings: Maximum number of rings
* @ring_start: Ring start ID
@@ -107,6 +131,7 @@ struct edma_intr_info {
* @rx_rings: Rx Desc Rings, SW is consumer
* @tx_rings: Tx Descriptor Ring, SW is producer
* @txcmpl_rings: Tx complete Ring, SW is consumer
+ * @err_stats: Per CPU error statistics
* @rx_page_mode: Page mode enabled or disabled
* @rx_buf_size: Rx buffer size for Jumbo MRU
* @tx_requeue_stop: Tx requeue stop enabled or disabled
@@ -121,6 +146,7 @@ struct edma_context {
struct edma_rxdesc_ring *rx_rings;
struct edma_txdesc_ring *tx_rings;
struct edma_txcmpl_ring *txcmpl_rings;
+ struct edma_err_stats __percpu *err_stats;
u32 rx_page_mode;
u32 rx_buf_size;
bool tx_requeue_stop;
@@ -129,8 +155,12 @@ struct edma_context {
/* Global EDMA context */
extern struct edma_context *edma_ctx;
+int edma_err_stats_alloc(void);
+void edma_err_stats_free(void);
void edma_destroy(struct ppe_device *ppe_dev);
int edma_setup(struct ppe_device *ppe_dev);
+void edma_debugfs_teardown(void);
+int edma_debugfs_setup(struct ppe_device *ppe_dev);
int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
enum ppe_queue_class_type class,
int index, int queue_offset);
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* EDMA debugfs routines for display of Tx/Rx counters. */
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+
+#include "edma.h"
+
+#define EDMA_STATS_BANNER_MAX_LEN 80
+#define EDMA_RX_RING_STATS_NODE_NAME "EDMA_RX"
+#define EDMA_TX_RING_STATS_NODE_NAME "EDMA_TX"
+#define EDMA_ERR_STATS_NODE_NAME "EDMA_ERR"
+
+static struct dentry *edma_dentry;
+static struct dentry *stats_dentry;
+
+static void edma_debugfs_print_banner(struct seq_file *m, char *node)
+{
+ u32 banner_char_len, i;
+
+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+ seq_puts(m, "_");
+ banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
+ seq_puts(m, "\n\n");
+
+ for (i = 0; i < banner_char_len; i++)
+ seq_puts(m, "<");
+ seq_printf(m, " %s ", node);
+
+ for (i = 0; i < banner_char_len; i++)
+ seq_puts(m, ">");
+ seq_puts(m, "\n");
+
+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+ seq_puts(m, "_");
+ seq_puts(m, "\n\n");
+}
+
+static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct edma_ring_info *rxfill = hw_info->rxfill;
+ struct edma_rxfill_stats *rxfill_stats;
+ struct edma_rxdesc_stats *rxdesc_stats;
+ struct edma_ring_info *rx = hw_info->rx;
+ unsigned int start;
+ u32 i;
+
+ rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
+ if (!rxfill_stats)
+ return -ENOMEM;
+
+ rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
+ if (!rxdesc_stats) {
+ kfree(rxfill_stats);
+ return -ENOMEM;
+ }
+
+ /* Get stats for Rx fill rings. */
+ for (i = 0; i < rxfill->num_rings; i++) {
+ struct edma_rxfill_ring *rxfill_ring;
+ struct edma_rxfill_stats *stats;
+
+ rxfill_ring = &edma_ctx->rxfill_rings[i];
+ stats = &rxfill_ring->rxfill_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rxfill_stats[i].alloc_failed = stats->alloc_failed;
+ rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ /* Get stats for Rx Desc rings. */
+ for (i = 0; i < rx->num_rings; i++) {
+ struct edma_rxdesc_ring *rxdesc_ring;
+ struct edma_rxdesc_stats *stats;
+
+ rxdesc_ring = &edma_ctx->rx_rings[i];
+ stats = &rxdesc_ring->rxdesc_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rxdesc_stats[i].src_port_inval = stats->src_port_inval;
+ rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
+ rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
+ for (i = 0; i < rx->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
+ i + rx->ring_start, rxdesc_stats[i].src_port_inval);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
+ i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
+ i + rx->ring_start,
+ rxdesc_stats[i].src_port_inval_netdev);
+ seq_puts(m, "\n");
+ }
+
+ seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
+ for (i = 0; i < rxfill->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
+ seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
+ i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
+ seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
+ i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
+ seq_puts(m, "\n");
+ }
+
+ kfree(rxfill_stats);
+ kfree(rxdesc_stats);
+ return 0;
+}
+
+static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
+ struct edma_ring_info *tx = hw_info->tx;
+ struct edma_txcmpl_stats *txcmpl_stats;
+ struct edma_txdesc_stats *txdesc_stats;
+ unsigned int start;
+ u32 i;
+
+ txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
+ if (!txcmpl_stats)
+ return -ENOMEM;
+
+ txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
+ if (!txdesc_stats) {
+ kfree(txcmpl_stats);
+ return -ENOMEM;
+ }
+
+ /* Get stats for Tx desc rings. */
+ for (i = 0; i < tx->num_rings; i++) {
+ struct edma_txdesc_ring *txdesc_ring;
+ struct edma_txdesc_stats *stats;
+
+ txdesc_ring = &edma_ctx->tx_rings[i];
+ stats = &txdesc_ring->txdesc_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
+ txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ /* Get stats for Tx Complete rings. */
+ for (i = 0; i < txcmpl->num_rings; i++) {
+ struct edma_txcmpl_ring *txcmpl_ring;
+ struct edma_txcmpl_stats *stats;
+
+ txcmpl_ring = &edma_ctx->txcmpl_rings[i];
+ stats = &txcmpl_ring->txcmpl_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
+ txcmpl_stats[i].errors = stats->errors;
+ txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
+ txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
+ for (i = 0; i < txcmpl->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
+ seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
+ seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].errors);
+ seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
+ seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
+ seq_puts(m, "\n");
+ }
+
+ seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
+ for (i = 0; i < tx->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
+ seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
+ i + tx->ring_start, txdesc_stats[i].no_desc_avail);
+ seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
+ i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
+ seq_puts(m, "\n");
+ }
+
+ kfree(txcmpl_stats);
+ kfree(txdesc_stats);
+ return 0;
+}
+
+static int edma_debugfs_err_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_err_stats *err_stats, *pcpu_err_stats;
+ unsigned int start;
+ u32 cpu;
+
+ err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
+ if (!err_stats)
+ return -ENOMEM;
+
+ /* Get percpu EDMA miscellaneous stats. */
+ for_each_possible_cpu(cpu) {
+ pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
+ err_stats->edma_axi_read_err +=
+ pcpu_err_stats->edma_axi_read_err;
+ err_stats->edma_axi_write_err +=
+ pcpu_err_stats->edma_axi_write_err;
+ err_stats->edma_rxdesc_fifo_full +=
+ pcpu_err_stats->edma_rxdesc_fifo_full;
+ err_stats->edma_rx_buf_size_err +=
+ pcpu_err_stats->edma_rx_buf_size_err;
+ err_stats->edma_tx_sram_full +=
+ pcpu_err_stats->edma_tx_sram_full;
+ err_stats->edma_tx_data_len_err +=
+ pcpu_err_stats->edma_tx_data_len_err;
+ err_stats->edma_tx_timeout +=
+ pcpu_err_stats->edma_tx_timeout;
+ err_stats->edma_txcmpl_buf_full +=
+ pcpu_err_stats->edma_txcmpl_buf_full;
+ } while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA error stats:\n\n");
+ seq_printf(m, "\t\t axi read error = %llu\n",
+ err_stats->edma_axi_read_err);
+ seq_printf(m, "\t\t axi write error = %llu\n",
+ err_stats->edma_axi_write_err);
+ seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
+ err_stats->edma_rxdesc_fifo_full);
+ seq_printf(m, "\t\t Rx buffer size error = %llu\n",
+ err_stats->edma_rx_buf_size_err);
+ seq_printf(m, "\t\t Tx SRAM full = %llu\n",
+ err_stats->edma_tx_sram_full);
+ seq_printf(m, "\t\t Tx data length error = %llu\n",
+ err_stats->edma_tx_data_len_err);
+ seq_printf(m, "\t\t Tx timeout = %llu\n",
+ err_stats->edma_tx_timeout);
+ seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
+ err_stats->edma_txcmpl_buf_full);
+
+ kfree(err_stats);
+ return 0;
+}
+
+static int edma_debugs_rx_rings_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, edma_debugfs_rx_rings_stats_show,
+ inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_rx_rings_file_ops = {
+ .open = edma_debugs_rx_rings_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+static int edma_debugs_tx_rings_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_tx_rings_file_ops = {
+ .open = edma_debugs_tx_rings_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+static int edma_debugs_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_misc_file_ops = {
+ .open = edma_debugs_err_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/**
+ * edma_debugfs_teardown - EDMA debugfs teardown.
+ *
+ * Remove the EDMA debugfs entries and free the error statistics memory.
+ */
+void edma_debugfs_teardown(void)
+{
+ /* Free EDMA miscellaneous stats memory */
+ edma_err_stats_free();
+
+ debugfs_remove_recursive(edma_dentry);
+ edma_dentry = NULL;
+ stats_dentry = NULL;
+}
+
+/**
+ * edma_debugfs_setup - EDMA debugfs setup.
+ * @ppe_dev: PPE Device
+ *
+ * Create the EDMA debugfs directories and statistics files.
+ */
+int edma_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
+ if (!edma_dentry) {
+		pr_err("Unable to create edma directory in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ stats_dentry = debugfs_create_dir("stats", edma_dentry);
+ if (!stats_dentry) {
+		pr_err("Unable to create stats directory in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_rx_rings_file_ops)) {
+ pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_tx_rings_file_ops)) {
+ pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ /* Allocate memory for EDMA miscellaneous stats */
+ if (edma_err_stats_alloc() < 0) {
+ pr_err("Unable to allocate miscellaneous percpu stats\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("err_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_misc_file_ops)) {
+ pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ return 0;
+
+debugfs_dir_failed:
+ debugfs_remove_recursive(edma_dentry);
+ edma_dentry = NULL;
+ stats_dentry = NULL;
+ return -ENOMEM;
+}
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -7,9 +7,11 @@
#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include "edma.h"
#include "ppe.h"
#include "ppe_config.h"
#include "ppe_debugfs.h"
@@ -678,15 +680,30 @@ static const struct file_operations ppe_
void ppe_debugfs_setup(struct ppe_device *ppe_dev)
{
+ int ret;
+
ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
debugfs_create_file("packet_counters", 0444,
ppe_dev->debugfs_root,
ppe_dev,
&ppe_debugfs_packet_counter_fops);
+
+ if (!ppe_dev->debugfs_root) {
+ dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
+ return;
+ }
+
+ ret = edma_debugfs_setup(ppe_dev);
+ if (ret) {
+		dev_err(ppe_dev->dev, "EDMA debugfs setup failed, ret: %d\n", ret);
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+ }
}
void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
{
+ edma_debugfs_teardown();
debugfs_remove_recursive(ppe_dev->debugfs_root);
ppe_dev->debugfs_root = NULL;
}
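With this patch the counters appear under the PPE debugfs root created above ("ppe"), i.e. <debugfs>/ppe/edma/stats/ with the files rx_ring_stats, tx_ring_stats and err_stats.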

View file

@@ -0,0 +1,344 @@
From bd61a680fb657eb65272225f18c93fe338c700da Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Thu, 30 May 2024 20:46:36 +0530
Subject: [PATCH] net: ethernet: qualcomm: Add ethtool support for EDMA
ethtool operations can be used for EDMA netdevice configuration and statistics reporting.
Change-Id: I57fc19415dacbe51fed000520336463938220609
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Alex G: use struct ethtool_keee instead of ethtool_eee
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
drivers/net/ethernet/qualcomm/ppe/edma.h | 1 +
.../net/ethernet/qualcomm/ppe/edma_ethtool.c | 294 ++++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/edma_port.c | 1 +
4 files changed, 297 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
#EDMA
-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o edma_ethtool.o
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -161,6 +161,7 @@ void edma_destroy(struct ppe_device *ppe
int edma_setup(struct ppe_device *ppe_dev);
void edma_debugfs_teardown(void);
int edma_debugfs_setup(struct ppe_device *ppe_dev);
+void edma_set_ethtool_ops(struct net_device *netdev);
int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
enum ppe_queue_class_type class,
int index, int queue_offset);
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* ethtool support for EDMA */
+
+#include <linux/cpumask.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phylink.h>
+
+#include "edma.h"
+#include "edma_port.h"
+
+struct edma_ethtool_stats {
+ u8 stat_string[ETH_GSTRING_LEN];
+ u32 stat_offset;
+};
+
+/**
+ * struct edma_gmac_stats - Per-GMAC statistics.
+ * @rx_packets: Number of RX packets
+ * @rx_bytes: Number of RX bytes
+ * @rx_dropped: Number of RX dropped packets
+ * @rx_fraglist_packets: Number of RX fraglist packets
+ * @rx_nr_frag_packets: Number of RX nr fragment packets
+ * @rx_nr_frag_headroom_err: Number of RX nr fragment packets with headroom error
+ * @tx_packets: Number of TX packets
+ * @tx_bytes: Number of TX bytes
+ * @tx_dropped: Number of TX dropped packets
+ * @tx_nr_frag_packets: Number of TX nr fragment packets
+ * @tx_fraglist_packets: Number of TX fraglist packets
+ * @tx_fraglist_with_nr_frags_packets: Number of TX fraglist packets with nr fragments
+ * @tx_tso_packets: Number of TX TCP segmentation offload packets
+ * @tx_tso_drop_packets: Number of TX TCP segmentation dropped packets
+ * @tx_gso_packets: Number of TX SW GSO packets
+ * @tx_gso_drop_packets: Number of TX SW GSO dropped packets
+ * @tx_queue_stopped: Number of times the Tx queue was stopped, per core
+ */
+struct edma_gmac_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_fraglist_packets;
+ u64 rx_nr_frag_packets;
+ u64 rx_nr_frag_headroom_err;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+ u64 tx_nr_frag_packets;
+ u64 tx_fraglist_packets;
+ u64 tx_fraglist_with_nr_frags_packets;
+ u64 tx_tso_packets;
+ u64 tx_tso_drop_packets;
+ u64 tx_gso_packets;
+ u64 tx_gso_drop_packets;
+ u64 tx_queue_stopped[EDMA_MAX_CORE];
+};
+
+#define EDMA_STAT(m) offsetof(struct edma_gmac_stats, m)
+
+static const struct edma_ethtool_stats edma_gstrings_stats[] = {
+ {"rx_bytes", EDMA_STAT(rx_bytes)},
+ {"rx_packets", EDMA_STAT(rx_packets)},
+ {"rx_dropped", EDMA_STAT(rx_dropped)},
+ {"rx_fraglist_packets", EDMA_STAT(rx_fraglist_packets)},
+ {"rx_nr_frag_packets", EDMA_STAT(rx_nr_frag_packets)},
+ {"rx_nr_frag_headroom_err", EDMA_STAT(rx_nr_frag_headroom_err)},
+ {"tx_bytes", EDMA_STAT(tx_bytes)},
+ {"tx_packets", EDMA_STAT(tx_packets)},
+ {"tx_dropped", EDMA_STAT(tx_dropped)},
+ {"tx_nr_frag_packets", EDMA_STAT(tx_nr_frag_packets)},
+ {"tx_fraglist_packets", EDMA_STAT(tx_fraglist_packets)},
+ {"tx_fraglist_nr_frags_packets", EDMA_STAT(tx_fraglist_with_nr_frags_packets)},
+ {"tx_tso_packets", EDMA_STAT(tx_tso_packets)},
+ {"tx_tso_drop_packets", EDMA_STAT(tx_tso_drop_packets)},
+ {"tx_gso_packets", EDMA_STAT(tx_gso_packets)},
+ {"tx_gso_drop_packets", EDMA_STAT(tx_gso_drop_packets)},
+ {"tx_queue_stopped_cpu0", EDMA_STAT(tx_queue_stopped[0])},
+ {"tx_queue_stopped_cpu1", EDMA_STAT(tx_queue_stopped[1])},
+ {"tx_queue_stopped_cpu2", EDMA_STAT(tx_queue_stopped[2])},
+ {"tx_queue_stopped_cpu3", EDMA_STAT(tx_queue_stopped[3])},
+};
+
+#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
+
+static void edma_port_get_stats(struct net_device *netdev,
+ struct edma_gmac_stats *stats)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct edma_port_rx_stats *pcpu_rx_stats;
+ struct edma_port_tx_stats *pcpu_tx_stats;
+ int i;
+
+	memset(stats, 0, sizeof(*stats));
+
+ for_each_possible_cpu(i) {
+ struct edma_port_rx_stats rxp;
+ struct edma_port_tx_stats txp;
+ unsigned int start;
+
+ pcpu_rx_stats = per_cpu_ptr(port_priv->pcpu_stats.rx_stats, i);
+
+ do {
+ start = u64_stats_fetch_begin(&pcpu_rx_stats->syncp);
+ memcpy(&rxp, pcpu_rx_stats, sizeof(*pcpu_rx_stats));
+ } while (u64_stats_fetch_retry(&pcpu_rx_stats->syncp, start));
+
+ stats->rx_packets += rxp.rx_pkts;
+ stats->rx_bytes += rxp.rx_bytes;
+ stats->rx_dropped += rxp.rx_drops;
+ stats->rx_nr_frag_packets += rxp.rx_nr_frag_pkts;
+ stats->rx_fraglist_packets += rxp.rx_fraglist_pkts;
+ stats->rx_nr_frag_headroom_err += rxp.rx_nr_frag_headroom_err;
+
+ pcpu_tx_stats = per_cpu_ptr(port_priv->pcpu_stats.tx_stats, i);
+
+ do {
+ start = u64_stats_fetch_begin(&pcpu_tx_stats->syncp);
+ memcpy(&txp, pcpu_tx_stats, sizeof(*pcpu_tx_stats));
+ } while (u64_stats_fetch_retry(&pcpu_tx_stats->syncp, start));
+
+ stats->tx_packets += txp.tx_pkts;
+ stats->tx_bytes += txp.tx_bytes;
+ stats->tx_dropped += txp.tx_drops;
+ stats->tx_nr_frag_packets += txp.tx_nr_frag_pkts;
+ stats->tx_fraglist_packets += txp.tx_fraglist_pkts;
+ stats->tx_fraglist_with_nr_frags_packets += txp.tx_fraglist_with_nr_frags_pkts;
+ stats->tx_tso_packets += txp.tx_tso_pkts;
+ stats->tx_tso_drop_packets += txp.tx_tso_drop_pkts;
+ stats->tx_gso_packets += txp.tx_gso_pkts;
+ stats->tx_gso_drop_packets += txp.tx_gso_drop_pkts;
+ stats->tx_queue_stopped[i] += txp.tx_queue_stopped[i];
+ }
+}
+
+static void edma_get_ethtool_stats(struct net_device *netdev,
+ __maybe_unused struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct edma_gmac_stats edma_stats;
+ u64 *mib_data;
+ int i;
+ u8 *p;
+
+ if (!port_priv)
+ return;
+
+	/* Get the EDMA driver statistics from the data plane if available. */
+ memset(&edma_stats, 0, sizeof(struct edma_gmac_stats));
+ edma_port_get_stats(netdev, &edma_stats);
+
+ /* Populate data plane statistics. */
+ for (i = 0; i < EDMA_STATS_LEN; i++) {
+ p = ((u8 *)(&edma_stats) + edma_gstrings_stats[i].stat_offset);
+ data[i] = *(u64 *)p;
+ }
+
+ /* Get the GMAC MIB statistics along with the DMA driver statistics. */
+ mib_data = &data[EDMA_STATS_LEN];
+ ppe_port_get_ethtool_stats(port_priv->ppe_port, mib_data);
+}
+
+static int edma_get_strset_count(struct net_device *netdev, int sset)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ int sset_count = 0;
+
+ if (!port_priv || sset != ETH_SS_STATS)
+ return 0;
+
+ sset_count = ppe_port_get_sset_count(port_priv->ppe_port, sset);
+
+ return (EDMA_STATS_LEN + sset_count);
+}
+
+static void edma_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ int i;
+
+ if (!port_priv || stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < EDMA_STATS_LEN; i++) {
+ memcpy(data, edma_gstrings_stats[i].stat_string,
+ strlen(edma_gstrings_stats[i].stat_string));
+ data += ETH_GSTRING_LEN;
+ }
+
+ ppe_port_get_strings(port_priv->ppe_port, stringset, data);
+}
+
+static int edma_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int edma_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void edma_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+
+ if (!port_priv)
+ return;
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int edma_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int edma_get_eee(struct net_device *netdev, struct ethtool_keee *eee)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+static int edma_set_eee(struct net_device *netdev, struct ethtool_keee *eee)
+{
+ struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+ struct ppe_port *port = port_priv->ppe_port;
+ int ret;
+
+ if (!port_priv)
+ return -EINVAL;
+
+ ret = ppe_port_set_mac_eee(port_priv->ppe_port, eee);
+ if (ret)
+ return ret;
+
+ return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
+static const struct ethtool_ops edma_ethtool_ops = {
+ .get_strings = &edma_get_strings,
+ .get_sset_count = &edma_get_strset_count,
+ .get_ethtool_stats = &edma_get_ethtool_stats,
+ .get_link = &ethtool_op_get_link,
+ .get_link_ksettings = edma_get_link_ksettings,
+ .set_link_ksettings = edma_set_link_ksettings,
+ .get_pauseparam = &edma_get_pauseparam,
+ .set_pauseparam = &edma_set_pauseparam,
+ .get_eee = &edma_get_eee,
+ .set_eee = &edma_set_eee,
+};
+
+/**
+ * edma_set_ethtool_ops - Set ethtool operations
+ * @netdev: Netdevice
+ *
+ * Set ethtool operations.
+ */
+void edma_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &edma_ethtool_ops;
+}
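Once these ops are attached (see the edma_port.c hunk below), ethtool -S on an EDMA netdevice reports the per-GMAC driver counters from edma_gstrings_stats followed by the PPE port MIB counters appended by ppe_port_get_ethtool_stats().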
--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
@@ -380,6 +380,7 @@ int edma_port_setup(struct ppe_port *por
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->netdev_ops = &edma_port_netdev_ops;
netdev->gso_max_segs = GSO_MAX_SEGS;
+ edma_set_ethtool_ops(netdev);
maddr = mac_addr;
if (of_get_mac_address(np, maddr))

View file

@@ -0,0 +1,286 @@
From 2ecec7e47e269e05cdd393c34aae51d4866070c6 Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Tue, 11 Jun 2024 00:00:46 +0530
Subject: [PATCH] net: ethernet: qualcomm: Add module parameters for driver
tunings
Add module parameters, and the corresponding functionality, for the Tx/Rx
mitigation timer and packet count, the NAPI budget and the Tx requeue stop
behaviour.
Change-Id: I1717559c931bba4f355ee06ab89f289818400ca2
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
---
drivers/net/ethernet/qualcomm/ppe/edma.c | 35 +++++++++++++++++++
.../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 29 +++++++++++++--
.../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 21 +++++++++++
.../net/ethernet/qualcomm/ppe/edma_cfg_tx.c | 29 +++++++++++++--
.../net/ethernet/qualcomm/ppe/edma_cfg_tx.h | 16 +++++++++
drivers/net/ethernet/qualcomm/ppe/edma_rx.h | 4 +++
drivers/net/ethernet/qualcomm/ppe/edma_tx.h | 4 +++
7 files changed, 134 insertions(+), 4 deletions(-)
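A usage note, not from the patch: parameters declared with module_param() show up under /sys/module/<module>/parameters/ (likely qcom_ppe for this driver). The 0444 parameters can only be set at load time or on the kernel command line, while the 0640 ones (rx_buff_size, tx_requeue_stop) are also writable by root at runtime, although the driver only reads them during edma_setup().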
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -38,6 +38,38 @@ static int rx_buff_size;
module_param(rx_buff_size, int, 0640);
MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
+int edma_rx_napi_budget = EDMA_RX_NAPI_WORK_DEF;
+module_param(edma_rx_napi_budget, int, 0444);
+MODULE_PARM_DESC(edma_rx_napi_budget, "Rx NAPI budget (default:128, min:16, max:512)");
+
+int edma_tx_napi_budget = EDMA_TX_NAPI_WORK_DEF;
+module_param(edma_tx_napi_budget, int, 0444);
+MODULE_PARM_DESC(edma_tx_napi_budget, "Tx NAPI budget (default:512 for ipq95xx, min:16, max:512)");
+
+int edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
+module_param(edma_rx_mitigation_pkt_cnt, int, 0444);
+MODULE_PARM_DESC(edma_rx_mitigation_pkt_cnt,
+ "Rx mitigation packet count value (default:16, min:0, max: 256)");
+
+s32 edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
+module_param(edma_rx_mitigation_timer, int, 0444);
+MODULE_PARM_DESC(edma_rx_mitigation_timer,
+ "Rx mitigation timer value in microseconds (default:25, min:0, max: 1000)");
+
+int edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
+module_param(edma_tx_mitigation_timer, int, 0444);
+MODULE_PARM_DESC(edma_tx_mitigation_timer,
+ "Tx mitigation timer value in microseconds (default:250, min:0, max: 1000)");
+
+int edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
+module_param(edma_tx_mitigation_pkt_cnt, int, 0444);
+MODULE_PARM_DESC(edma_tx_mitigation_pkt_cnt,
+ "Tx mitigation packet count value (default:16, min:0, max: 256)");
+
+static int tx_requeue_stop;
+module_param(tx_requeue_stop, int, 0640);
+MODULE_PARM_DESC(tx_requeue_stop, "Disable Tx requeue function (default:0)");
+
/* Priority to multi-queue mapping. */
static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
@@ -828,7 +860,10 @@ int edma_setup(struct ppe_device *ppe_de
edma_ctx->hw_info = &ipq9574_hw_info;
edma_ctx->ppe_dev = ppe_dev;
edma_ctx->rx_buf_size = rx_buff_size;
+
edma_ctx->tx_requeue_stop = false;
+ if (tx_requeue_stop != 0)
+ edma_ctx->tx_requeue_stop = true;
/* Configure the EDMA common clocks. */
ret = edma_clock_init();
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
@@ -166,6 +166,24 @@ static void edma_cfg_rx_desc_ring_config
reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
regmap_write(regmap, reg, data);
+ /* Validate mitigation timer value */
+ if (edma_rx_mitigation_timer < EDMA_RX_MITIGATION_TIMER_MIN ||
+ edma_rx_mitigation_timer > EDMA_RX_MITIGATION_TIMER_MAX) {
+ pr_err("Invalid Rx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
+ edma_rx_mitigation_timer, rxdesc_ring->ring_id,
+ EDMA_RX_MITIGATION_TIMER_DEF);
+ edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
+ }
+
+ /* Validate mitigation packet count value */
+ if (edma_rx_mitigation_pkt_cnt < EDMA_RX_MITIGATION_PKT_CNT_MIN ||
+ edma_rx_mitigation_pkt_cnt > EDMA_RX_MITIGATION_PKT_CNT_MAX) {
+ pr_err("Invalid Rx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
+ edma_rx_mitigation_pkt_cnt, rxdesc_ring->ring_id,
+ EDMA_RX_MITIGATION_PKT_CNT_DEF);
+ edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
+ }
+
/* Configure the Mitigation timer */
data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
ppe_dev->clk_rate / MHZ);
@@ -176,7 +194,7 @@ static void edma_cfg_rx_desc_ring_config
regmap_write(regmap, reg, data);
/* Configure the Mitigation packet count */
- data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
+ data = (edma_rx_mitigation_pkt_cnt & EDMA_RXDESC_LOW_THRE_MASK)
<< EDMA_RXDESC_LOW_THRE_SHIFT;
pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
@@ -915,6 +933,13 @@ void edma_cfg_rx_napi_add(void)
struct edma_ring_info *rx = hw_info->rx;
u32 i;
+ if (edma_rx_napi_budget < EDMA_RX_NAPI_WORK_MIN ||
+ edma_rx_napi_budget > EDMA_RX_NAPI_WORK_MAX) {
+ pr_err("Incorrect Rx NAPI budget: %d, setting to default: %d",
+ edma_rx_napi_budget, hw_info->napi_budget_rx);
+ edma_rx_napi_budget = hw_info->napi_budget_rx;
+ }
+
for (i = 0; i < rx->num_rings; i++) {
struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
@@ -923,7 +948,7 @@ void edma_cfg_rx_napi_add(void)
rxdesc_ring->napi_added = true;
}
- netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
+ netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", edma_rx_napi_budget);
}
/**
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
@@ -5,6 +5,15 @@
#ifndef __EDMA_CFG_RX__
#define __EDMA_CFG_RX__
+/* Rx default NAPI budget */
+#define EDMA_RX_NAPI_WORK_DEF 128
+
+/* RX minimum NAPI budget */
+#define EDMA_RX_NAPI_WORK_MIN 16
+
+/* Rx maximum NAPI budget */
+#define EDMA_RX_NAPI_WORK_MAX 512
+
/* SKB payload size used in page mode */
#define EDMA_RX_PAGE_MODE_SKB_SIZE 256
@@ -22,9 +31,21 @@
/* Rx mitigation timer's default value in microseconds */
#define EDMA_RX_MITIGATION_TIMER_DEF 25
+/* Rx mitigation timer's minimum value in microseconds */
+#define EDMA_RX_MITIGATION_TIMER_MIN 0
+
+/* Rx mitigation timer's maximum value in microseconds */
+#define EDMA_RX_MITIGATION_TIMER_MAX 1000
+
/* Rx mitigation packet count's default value */
#define EDMA_RX_MITIGATION_PKT_CNT_DEF 16
+/* Rx mitigation packet count's minimum value */
+#define EDMA_RX_MITIGATION_PKT_CNT_MIN 0
+
+/* Rx mitigation packet count's maximum value */
+#define EDMA_RX_MITIGATION_PKT_CNT_MAX 256
+
/* Default bitmap of cores for RPS to ARM cores */
#define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
@@ -170,6 +170,24 @@ static void edma_cfg_txcmpl_ring_configu
reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id);
regmap_write(regmap, reg, EDMA_TXCMPL_RETMODE_OPAQUE);
+ /* Validate mitigation timer value */
+ if (edma_tx_mitigation_timer < EDMA_TX_MITIGATION_TIMER_MIN ||
+ edma_tx_mitigation_timer > EDMA_TX_MITIGATION_TIMER_MAX) {
+ pr_err("Invalid Tx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
+ edma_tx_mitigation_timer, txcmpl_ring->id,
+ EDMA_TX_MITIGATION_TIMER_DEF);
+ edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
+ }
+
+ /* Validate mitigation packet count value */
+ if (edma_tx_mitigation_pkt_cnt < EDMA_TX_MITIGATION_PKT_CNT_MIN ||
+ edma_tx_mitigation_pkt_cnt > EDMA_TX_MITIGATION_PKT_CNT_MAX) {
+ pr_err("Invalid Tx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
+ edma_tx_mitigation_pkt_cnt, txcmpl_ring->id,
+ EDMA_TX_MITIGATION_PKT_CNT_DEF);
+ edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
+ }
+
/* Configure the Mitigation timer. */
data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_TX_MITIGATION_TIMER_DEF,
ppe_dev->clk_rate / MHZ);
@@ -180,7 +198,7 @@ static void edma_cfg_txcmpl_ring_configu
regmap_write(regmap, reg, data);
/* Configure the Mitigation packet count. */
- data = (EDMA_TX_MITIGATION_PKT_CNT_DEF & EDMA_TXCMPL_LOW_THRE_MASK)
+ data = (edma_tx_mitigation_pkt_cnt & EDMA_TXCMPL_LOW_THRE_MASK)
<< EDMA_TXCMPL_LOW_THRE_SHIFT;
pr_debug("EDMA Tx mitigation packet count value: %d\n", data);
reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_UGT_THRE(txcmpl_ring->id);
@@ -634,6 +652,13 @@ void edma_cfg_tx_napi_add(struct net_dev
struct edma_txcmpl_ring *txcmpl_ring;
u32 i, ring_idx;
+ if (edma_tx_napi_budget < EDMA_TX_NAPI_WORK_MIN ||
+ edma_tx_napi_budget > EDMA_TX_NAPI_WORK_MAX) {
+ pr_err("Incorrect Tx NAPI budget: %d, setting to default: %d",
+ edma_tx_napi_budget, hw_info->napi_budget_tx);
+ edma_tx_napi_budget = hw_info->napi_budget_tx;
+ }
+
/* Adding tx napi for a interface with each queue. */
for_each_possible_cpu(i) {
ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
@@ -644,5 +669,5 @@ void edma_cfg_tx_napi_add(struct net_dev
netdev_dbg(netdev, "Napi added for txcmpl ring: %u\n", txcmpl_ring->id);
}
- netdev_dbg(netdev, "Tx NAPI budget: %d\n", hw_info->napi_budget_tx);
+ netdev_dbg(netdev, "Tx NAPI budget: %d\n", edma_tx_napi_budget);
}
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
@@ -5,12 +5,28 @@
#ifndef __EDMA_CFG_TX__
#define __EDMA_CFG_TX__
+#define EDMA_TX_NAPI_WORK_DEF 512
+#define EDMA_TX_NAPI_WORK_MIN 16
+#define EDMA_TX_NAPI_WORK_MAX 512
+
/* Tx mitigation timer's default value. */
#define EDMA_TX_MITIGATION_TIMER_DEF 250
+/* Tx mitigation timer's minimum value in microseconds */
+#define EDMA_TX_MITIGATION_TIMER_MIN 0
+
+/* Tx mitigation timer's maximum value in microseconds */
+#define EDMA_TX_MITIGATION_TIMER_MAX 1000
+
/* Tx mitigation packet count default value. */
#define EDMA_TX_MITIGATION_PKT_CNT_DEF 16
+/* Tx mitigation packet count's minimum value */
+#define EDMA_TX_MITIGATION_PKT_CNT_MIN 0
+
+/* Tx mitigation packet count's maximum value */
+#define EDMA_TX_MITIGATION_PKT_CNT_MAX 256
+
void edma_cfg_tx_rings(void);
int edma_cfg_tx_rings_alloc(void);
void edma_cfg_tx_rings_cleanup(void);
--- a/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
@@ -281,6 +281,10 @@ struct edma_rxdesc_ring {
struct sk_buff *last;
};
+extern int edma_rx_napi_budget;
+extern int edma_rx_mitigation_timer;
+extern int edma_rx_mitigation_pkt_cnt;
+
irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
int edma_rx_napi_poll(struct napi_struct *napi, int budget);
--- a/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
@@ -288,6 +288,10 @@ struct edma_txcmpl_ring {
bool napi_added;
};
+extern int edma_tx_napi_budget;
+extern int edma_tx_mitigation_timer;
+extern int edma_tx_mitigation_pkt_cnt;
+
enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
struct sk_buff *skb,
struct edma_txdesc_ring *txdesc_ring,

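The same range check is repeated for each tunable added above. As a reference, a minimal sketch of that clamp-to-default pattern in kernel C (the helper name and its use below are illustrative only, not part of the driver; pr_err() comes from <linux/printk.h>):

	/* Validate a tunable against [min, max] and fall back to its default. */
	static int edma_param_clamp_to_default(int val, int min, int max,
					       int def, const char *name)
	{
		if (val < min || val > max) {
			pr_err("Invalid %s configured:%d. Using the default value:%d\n",
			       name, val, def);
			return def;
		}

		return val;
	}

With such a helper, each validation block in edma_cfg_rx.c and edma_cfg_tx.c would reduce to a single assignment, e.g.:

	edma_rx_mitigation_pkt_cnt =
		edma_param_clamp_to_default(edma_rx_mitigation_pkt_cnt,
					    EDMA_RX_MITIGATION_PKT_CNT_MIN,
					    EDMA_RX_MITIGATION_PKT_CNT_MAX,
					    EDMA_RX_MITIGATION_PKT_CNT_DEF,
					    "Rx mitigation packet count");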
View file

@ -0,0 +1,145 @@
From dcac735a715c13a817d65ae371564cf2793330b2 Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Tue, 11 Jun 2024 01:43:22 +0530
Subject: [PATCH] net: ethernet: qualcomm: Add sysctl for RPS bitmap
Add sysctl to configure RPS bitmap for EDMA receive.
This bitmap is used to configure the set of ARM cores
used to receive packets from EDMA.
Change-Id: Ie0e7d5971db93ea1494608a9e79c4abb13ce69b6
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Alex G: Use **const** ctl_table argument for .proc_handler
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
drivers/net/ethernet/qualcomm/ppe/edma.c | 23 ++++++++++++++++
drivers/net/ethernet/qualcomm/ppe/edma.h | 2 ++
.../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 27 +++++++++++++++++++
.../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 6 ++++-
4 files changed, 57 insertions(+), 1 deletion(-)
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -797,6 +797,11 @@ void edma_destroy(struct ppe_device *ppe
struct edma_ring_info *rx = hw_info->rx;
u32 i;
+ if (edma_ctx->rx_rps_ctl_table_hdr) {
+ unregister_sysctl_table(edma_ctx->rx_rps_ctl_table_hdr);
+ edma_ctx->rx_rps_ctl_table_hdr = NULL;
+ }
+
/* Disable interrupts. */
for (i = 1; i <= hw_info->max_ports; i++)
edma_cfg_tx_disable_interrupts(i);
@@ -840,6 +845,17 @@ void edma_destroy(struct ppe_device *ppe
kfree(edma_ctx->netdev_arr);
}
+/* EDMA Rx RPS core sysctl table */
+static struct ctl_table edma_rx_rps_core_table[] = {
+ {
+ .procname = "rps_bitmap_cores",
+ .data = &edma_cfg_rx_rps_bitmap_cores,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = edma_cfg_rx_rps_bitmap
+ },
+};
+
/**
* edma_setup - EDMA Setup.
* @ppe_dev: PPE device
@@ -865,6 +881,13 @@ int edma_setup(struct ppe_device *ppe_de
if (tx_requeue_stop != 0)
edma_ctx->tx_requeue_stop = true;
+ edma_ctx->rx_rps_ctl_table_hdr = register_sysctl("net/edma",
+ edma_rx_rps_core_table);
+ if (!edma_ctx->rx_rps_ctl_table_hdr) {
+ pr_err("Rx rps sysctl table configuration failed\n");
+ return -EINVAL;
+ }
+
/* Configure the EDMA common clocks. */
ret = edma_clock_init();
if (ret) {
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -132,6 +132,7 @@ struct edma_intr_info {
* @tx_rings: Tx Descriptor Ring, SW is producer
* @txcmpl_rings: Tx complete Ring, SW is consumer
* @err_stats: Per CPU error statistics
+ * @rx_rps_ctl_table_hdr: Rx RPS sysctl table
* @rx_page_mode: Page mode enabled or disabled
* @rx_buf_size: Rx buffer size for Jumbo MRU
* @tx_requeue_stop: Tx requeue stop enabled or disabled
@@ -147,6 +148,7 @@ struct edma_context {
struct edma_txdesc_ring *tx_rings;
struct edma_txcmpl_ring *txcmpl_rings;
struct edma_err_stats __percpu *err_stats;
+ struct ctl_table_header *rx_rps_ctl_table_hdr;
u32 rx_page_mode;
u32 rx_buf_size;
bool tx_requeue_stop;
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
@@ -43,6 +43,8 @@ static u32 edma_rx_ring_queue_map[][EDMA
{ 6, 14, 22, 30 },
{ 7, 15, 23, 31 }};
+u32 edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
+
static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
{
struct edma_hw_info *hw_info = edma_ctx->hw_info;
@@ -987,3 +989,28 @@ int edma_cfg_rx_rps_hash_map(void)
return 0;
}
+
+/* Configure RPS hash mapping based on bitmap */
+int edma_cfg_rx_rps_bitmap(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+ if (!write)
+ return ret;
+
+ if (!edma_cfg_rx_rps_bitmap_cores ||
+ edma_cfg_rx_rps_bitmap_cores > EDMA_RX_DEFAULT_BITMAP) {
+ pr_warn("Incorrect CPU bitmap: %x. Setting it to default value: %d",
+ edma_cfg_rx_rps_bitmap_cores, EDMA_RX_DEFAULT_BITMAP);
+ edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
+ }
+
+ ret = edma_cfg_rx_rps_hash_map();
+
+ pr_info("EDMA RPS bitmap value: %d\n", edma_cfg_rx_rps_bitmap_cores);
+
+ return ret;
+}
--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
@@ -49,6 +49,8 @@
/* Default bitmap of cores for RPS to ARM cores */
#define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
+extern u32 edma_cfg_rx_rps_bitmap_cores;
+
int edma_cfg_rx_rings(void);
int edma_cfg_rx_rings_alloc(void);
void edma_cfg_rx_ring_mappings(void);
@@ -64,6 +66,8 @@ void edma_cfg_rx_rings_enable(void);
void edma_cfg_rx_rings_disable(void);
void edma_cfg_rx_buff_size_setup(void);
int edma_cfg_rx_rps_hash_map(void);
-int edma_cfg_rx_rps(struct ctl_table *table, int write,
+int edma_cfg_rx_rps(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
+int edma_cfg_rx_rps_bitmap(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
#endif
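Once this table is registered, the knob is exposed as /proc/sys/net/edma/rps_bitmap_cores, and every accepted write ends in edma_cfg_rx_rps_hash_map(), which is not part of this patch. Purely as an assumption of how such a core bitmap is commonly consumed (the function below is illustrative, not the driver's implementation):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	/* Walk every ARM core selected by the sysctl bitmap. */
	static void example_walk_rps_bitmap(unsigned long bitmap,
					    unsigned int max_core)
	{
		unsigned int core;

		for_each_set_bit(core, &bitmap, max_core)
			pr_info("RPS maps Rx flows to core %u\n", core);
	}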

View file

@ -0,0 +1,48 @@
From a809433c9b6a418dd886f12a5dcb3376f73bf2a7 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Wed, 4 Dec 2024 01:37:05 +0100
Subject: [PATCH] net: ethernet: qualcomm: Add support for label property for
EDMA port
Add support for the label property for EDMA port. This is useful to define
a custom name in DTS for a specific ethernet port instead of assigning a
dynamic name at runtime.
This also improves the log output by using modern APIs.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
drivers/net/ethernet/qualcomm/ppe/edma_port.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
@@ -355,13 +355,25 @@ int edma_port_setup(struct ppe_port *por
int port_id = port->port_id;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
+ const char *name;
+ int assign_type;
int ret = 0;
u8 *maddr;
- netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
- EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
+ name = of_get_property(np, "label", NULL);
+ if (name) {
+ assign_type = NET_NAME_PREDICTABLE;
+ } else {
+ name = "eth%d";
+ assign_type = NET_NAME_ENUM;
+ }
+
+ netdev = alloc_netdev_mqs(sizeof(struct edma_port_priv),
+ name, assign_type,
+ ether_setup,
+ EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
if (!netdev) {
- pr_err("alloc_etherdev() failed\n");
+ dev_err(ppe_dev->dev, "alloc_netdev_mqs() failed\n");
return -ENOMEM;
}

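For illustration, the label is read from the port's OF node, so it would be set in the board DTS. A hypothetical port entry (node name, reg value and PHY phandle are placeholders; the actual binding is defined elsewhere in this series):

	port@1 {
		reg = <1>;
		label = "wan";
		phy-handle = <&phy0>;
	};

When label is present the netdev is registered under that fixed name with NET_NAME_PREDICTABLE; otherwise the driver falls back to the dynamic "eth%d" template with NET_NAME_ENUM.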
View file

@ -0,0 +1,30 @@
From 9c4ad75f17788a64c1e37d0b9e19ca157e01c80a Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Mon, 9 Dec 2024 18:19:06 +0100
Subject: [PATCH] net: ethernet: qualcomm: ppe: Fix unmet dependency with
QCOM_PPE
Fix unmet dependency with QCOM_PPE on selecting SFP.
WARNING: unmet direct dependencies detected for SFP
Depends on [m]: NETDEVICES [=y] && PHYLIB [=y] && I2C [=y] && PHYLINK [=y] && (HWMON [=m] || HWMON [=m]=n [=n])
Selected by [y]:
- QCOM_PPE [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_QUALCOMM [=y] && HAS_IOMEM [=y] && OF [=y] && COMMON_CLK [=y]
This permits correct compilation of the modules with SFP enabled.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
drivers/net/ethernet/qualcomm/Kconfig | 1 -
1 file changed, 1 deletion(-)
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -68,7 +68,6 @@ config QCOM_PPE
select REGMAP_MMIO
select PHYLINK
select PCS_QCOM_IPQ_UNIPHY
- select SFP
help
This driver supports the Qualcomm Technologies, Inc. packet
process engine (PPE) available with IPQ SoC. The PPE includes

View file

@ -0,0 +1,24 @@
From ac41b401d274a4004027fa4000d801cd28c51f4c Mon Sep 17 00:00:00 2001
From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Date: Tue, 13 May 2025 13:41:37 -0500
Subject: [PATCH] net: ethernet: qualcomm: ppe: select correct PCS dependency
The config symbol for the PCS driver has changed to PCS_QCOM_IPQ9574
since the original submission. Update Kconfig accordingly.
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
drivers/net/ethernet/qualcomm/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -67,7 +67,7 @@ config QCOM_PPE
depends on COMMON_CLK
select REGMAP_MMIO
select PHYLINK
- select PCS_QCOM_IPQ_UNIPHY
+ select PCS_QCOM_IPQ9574
help
This driver supports the Qualcomm Technologies, Inc. packet
process engine (PPE) available with IPQ SoC. The PPE includes

View file

@ -0,0 +1,72 @@
From bbf706ecfd4295d73c8217d5220573dd51d7a081 Mon Sep 17 00:00:00 2001
From: Luo Jie <quic_luoj@quicinc.com>
Date: Fri, 1 Mar 2024 14:46:45 +0800
Subject: [PATCH] arm64: dts: qcom: Add IPQ9574 PPE base device node
PPE is the packet process engine on the Qualcomm IPQ platform,
which is connected with the external switch or PHY device via
the UNIPHY (PCS).
Change-Id: I254bd48c218aa4eab54f697a2ad149f5a93b682c
Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
Alex G: Add "qcom_ppe" label to PPE node
Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
arch/arm64/boot/dts/qcom/ipq9574.dtsi | 39 +++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/interconnect/qcom,ipq9574.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -1269,6 +1270,44 @@
#interconnect-cells = <1>;
};
+ qcom_ppe: ethernet@3a000000 {
+ compatible = "qcom,ipq9574-ppe";
+ reg = <0x3a000000 0xbef800>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&nsscc NSS_CC_PPE_SWITCH_CLK>,
+ <&nsscc NSS_CC_PPE_SWITCH_CFG_CLK>,
+ <&nsscc NSS_CC_PPE_SWITCH_IPE_CLK>,
+ <&nsscc NSS_CC_PPE_SWITCH_BTQ_CLK>;
+ clock-names = "ppe",
+ "ppe_cfg",
+ "ppe_ipe",
+ "ppe_btq";
+ resets = <&nsscc PPE_FULL_RESET>;
+ interconnects = <&nsscc MASTER_NSSNOC_PPE
+ &nsscc SLAVE_NSSNOC_PPE>,
+ <&nsscc MASTER_NSSNOC_PPE_CFG
+ &nsscc SLAVE_NSSNOC_PPE_CFG>,
+ <&gcc MASTER_NSSNOC_QOSGEN_REF
+ &gcc SLAVE_NSSNOC_QOSGEN_REF>,
+ <&gcc MASTER_NSSNOC_TIMEOUT_REF
+ &gcc SLAVE_NSSNOC_TIMEOUT_REF>,
+ <&gcc MASTER_MEM_NOC_NSSNOC
+ &gcc SLAVE_MEM_NOC_NSSNOC>,
+ <&gcc MASTER_NSSNOC_MEMNOC
+ &gcc SLAVE_NSSNOC_MEMNOC>,
+ <&gcc MASTER_NSSNOC_MEM_NOC_1
+ &gcc SLAVE_NSSNOC_MEM_NOC_1>;
+ interconnect-names = "ppe",
+ "ppe_cfg",
+ "qos_gen",
+ "timeout_ref",
+ "nssnoc_memnoc",
+ "memnoc_nssnoc",
+ "memnoc_nssnoc_1";
+ };
+
pcs_uniphy0: ethernet-pcs@7a00000 {
compatible = "qcom,ipq9574-pcs";
reg = <0x7a00000 0x10000>;

View file

@ -0,0 +1,91 @@
From bd50babc7db2a35d98236a0386173dccd6c6374b Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Wed, 6 Mar 2024 22:29:41 +0530
Subject: [PATCH] arm64: dts: qcom: Add EDMA node for IPQ9574
Add EDMA (Ethernet DMA) device tree node for IPQ9574 to
enable ethernet support.
Change-Id: I87d7c50f2485c8670948dce305000337f6499f8b
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
---
arch/arm64/boot/dts/qcom/ipq9574.dtsi | 68 +++++++++++++++++++++++++++
1 file changed, 68 insertions(+)
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -1306,6 +1306,74 @@
"nssnoc_memnoc",
"memnoc_nssnoc",
"memnoc_nssnoc_1";
+
+ edma {
+ compatible = "qcom,ipq9574-edma";
+ clocks = <&nsscc NSS_CC_PPE_EDMA_CLK>,
+ <&nsscc NSS_CC_PPE_EDMA_CFG_CLK>;
+ clock-names = "edma",
+ "edma-cfg";
+ resets = <&nsscc EDMA_HW_RESET>;
+ reset-names = "edma_rst";
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma_txcmpl_8",
+ "edma_txcmpl_9",
+ "edma_txcmpl_10",
+ "edma_txcmpl_11",
+ "edma_txcmpl_12",
+ "edma_txcmpl_13",
+ "edma_txcmpl_14",
+ "edma_txcmpl_15",
+ "edma_txcmpl_16",
+ "edma_txcmpl_17",
+ "edma_txcmpl_18",
+ "edma_txcmpl_19",
+ "edma_txcmpl_20",
+ "edma_txcmpl_21",
+ "edma_txcmpl_22",
+ "edma_txcmpl_23",
+ "edma_txcmpl_24",
+ "edma_txcmpl_25",
+ "edma_txcmpl_26",
+ "edma_txcmpl_27",
+ "edma_txcmpl_28",
+ "edma_txcmpl_29",
+ "edma_txcmpl_30",
+ "edma_txcmpl_31",
+ "edma_rxdesc_20",
+ "edma_rxdesc_21",
+ "edma_rxdesc_22",
+ "edma_rxdesc_23",
+ "edma_misc";
+ };
};
pcs_uniphy0: ethernet-pcs@7a00000 {