
Revert "ipq40xx: refresh ess driver and phy with new api"

This reverts commit b84ccce33a.
suyuan168 2021-12-29 01:43:58 +08:00
parent c840059bca
commit 187c5e1123
7 changed files with 0 additions and 5591 deletions

View file

@@ -1,9 +0,0 @@
#
## Makefile for the Qualcomm Atheros ethernet edma driver
#
obj-$(CONFIG_ESSEDMA) += essedma.o
essedma-objs := edma_axi.o edma.o edma_ethtool.o

View file

@@ -1,455 +0,0 @@
/*
* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _EDMA_H_
#define _EDMA_H_
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysctl.h>
#include <linux/phy.h>
#include <linux/of_net.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <asm-generic/bug.h>
#include "ess_edma.h"
#define EDMA_CPU_CORES_SUPPORTED 4
#define EDMA_MAX_PORTID_SUPPORTED 5
#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED
#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */
#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */
#define EDMA_MAX_RECEIVE_QUEUE 8
#define EDMA_MAX_TRANSMIT_QUEUE 16
/* WAN/LAN adapter number */
#define EDMA_WAN 0
#define EDMA_LAN 1
/* VLAN tag */
#define EDMA_LAN_DEFAULT_VLAN 1
#define EDMA_WAN_DEFAULT_VLAN 2
#define EDMA_DEFAULT_GROUP1_VLAN 1
#define EDMA_DEFAULT_GROUP2_VLAN 2
#define EDMA_DEFAULT_GROUP3_VLAN 3
#define EDMA_DEFAULT_GROUP4_VLAN 4
#define EDMA_DEFAULT_GROUP5_VLAN 5
/* Queues exposed to linux kernel */
#define EDMA_NETDEV_TX_QUEUE 4
#define EDMA_NETDEV_RX_QUEUE 4
/* Number of queues per core */
#define EDMA_NUM_TXQ_PER_CORE 4
#define EDMA_NUM_RXQ_PER_CORE 2
#define EDMA_TPD_EOP_SHIFT 31
#define EDMA_PORT_ID_SHIFT 12
#define EDMA_PORT_ID_MASK 0x7
/* tpd word 3 bit 18-28 */
#define EDMA_TPD_PORT_BITMAP_SHIFT 18
#define EDMA_TPD_FROM_CPU_SHIFT 25
#define EDMA_FROM_CPU_MASK 0x80
#define EDMA_SKB_PRIORITY_MASK 0x38
/* TX/RX descriptor ring count */
/* should be a power of 2 */
#define EDMA_RX_RING_SIZE 128
#define EDMA_TX_RING_SIZE 128
/* Flags used in paged/non paged mode */
#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
#define EDMA_RX_HEAD_BUFF_SIZE 1540
/* MAX frame size supported by switch */
#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
/* Configurations */
#define EDMA_INTR_CLEAR_TYPE 0
#define EDMA_INTR_SW_IDX_W_TYPE 0
#define EDMA_FIFO_THRESH_TYPE 0
#define EDMA_RSS_TYPE 0
#define EDMA_RX_IMT 0x0020
#define EDMA_TX_IMT 0x0050
#define EDMA_TPD_BURST 5
#define EDMA_TXF_BURST 0x100
#define EDMA_RFD_BURST 8
#define EDMA_RFD_THR 16
#define EDMA_RFD_LTHR 0
/* RX/TX per CPU based mask/shift */
#define EDMA_TX_PER_CPU_MASK 0xF
#define EDMA_RX_PER_CPU_MASK 0x3
#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
#define EDMA_TX_CPU_START_SHIFT 0x2
#define EDMA_RX_CPU_START_SHIFT 0x1
/* Flags used in transmit direction */
#define EDMA_HW_CHECKSUM 0x00000001
#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
#define EDMA_SW_DESC_FLAG_LAST 0x1
#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
/* Ethtool specific list of EDMA supported features */
#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
| SUPPORTED_100baseT_Half \
| SUPPORTED_100baseT_Full \
| SUPPORTED_1000baseT_Full)
/* Receive side atheros Header */
#define EDMA_RX_ATH_HDR_VERSION 0x2
#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
/* Transmit side atheros Header */
#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
#define EDMA_TXQ_START_CORE0 8
#define EDMA_TXQ_START_CORE1 12
#define EDMA_TXQ_START_CORE2 0
#define EDMA_TXQ_START_CORE3 4
#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
#define EDMA_ETH_HDR_LEN 12
#define EDMA_ETH_TYPE_MASK 0xFFFF
#define EDMA_RX_BUFFER_WRITE 16
#define EDMA_RFD_AVAIL_THR 80
#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR
extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
__be16 sport, __be16 dport,
uint8_t proto, u16 loadbalance, bool action);
struct edma_ethtool_statistics {
u32 tx_q0_pkt;
u32 tx_q1_pkt;
u32 tx_q2_pkt;
u32 tx_q3_pkt;
u32 tx_q4_pkt;
u32 tx_q5_pkt;
u32 tx_q6_pkt;
u32 tx_q7_pkt;
u32 tx_q8_pkt;
u32 tx_q9_pkt;
u32 tx_q10_pkt;
u32 tx_q11_pkt;
u32 tx_q12_pkt;
u32 tx_q13_pkt;
u32 tx_q14_pkt;
u32 tx_q15_pkt;
u32 tx_q0_byte;
u32 tx_q1_byte;
u32 tx_q2_byte;
u32 tx_q3_byte;
u32 tx_q4_byte;
u32 tx_q5_byte;
u32 tx_q6_byte;
u32 tx_q7_byte;
u32 tx_q8_byte;
u32 tx_q9_byte;
u32 tx_q10_byte;
u32 tx_q11_byte;
u32 tx_q12_byte;
u32 tx_q13_byte;
u32 tx_q14_byte;
u32 tx_q15_byte;
u32 rx_q0_pkt;
u32 rx_q1_pkt;
u32 rx_q2_pkt;
u32 rx_q3_pkt;
u32 rx_q4_pkt;
u32 rx_q5_pkt;
u32 rx_q6_pkt;
u32 rx_q7_pkt;
u32 rx_q0_byte;
u32 rx_q1_byte;
u32 rx_q2_byte;
u32 rx_q3_byte;
u32 rx_q4_byte;
u32 rx_q5_byte;
u32 rx_q6_byte;
u32 rx_q7_byte;
u32 tx_desc_error;
u32 rx_alloc_fail_ctr;
};
struct edma_mdio_data {
struct mii_bus *mii_bus;
void __iomem *membase;
int phy_irq[PHY_MAX_ADDR];
};
/* EDMA LINK state */
enum edma_link_state {
__EDMA_LINKUP, /* Indicate link is UP */
__EDMA_LINKDOWN /* Indicate link is down */
};
/* EDMA GMAC state */
enum edma_gmac_state {
__EDMA_UP /* use to indicate GMAC is up */
};
/* edma transmit descriptor */
struct edma_tx_desc {
__le16 len; /* full packet including CRC */
__le16 svlan_tag; /* vlan tag */
__le32 word1; /* byte 4-7 */
__le32 addr; /* address of buffer */
__le32 word3; /* byte 12 */
};
/* edma receive return descriptor */
struct edma_rx_return_desc {
u16 rrd0;
u16 rrd1;
u16 rrd2;
u16 rrd3;
u16 rrd4;
u16 rrd5;
u16 rrd6;
u16 rrd7;
};
/* RFD descriptor */
struct edma_rx_free_desc {
__le32 buffer_addr; /* buffer address */
};
/* edma hw specific data */
struct edma_hw {
u32 __iomem *hw_addr; /* inner register address */
struct edma_adapter *adapter; /* netdevice adapter */
u32 rx_intr_mask; /* rx interrupt mask */
u32 tx_intr_mask; /* tx interrupt mask */
u32 misc_intr_mask; /* misc interrupt mask */
u32 wol_intr_mask; /* wake on lan interrupt mask */
bool intr_clear_type; /* interrupt clear */
bool intr_sw_idx_w; /* interrupt software index */
u32 rx_head_buff_size; /* Rx buffer size */
u8 rss_type; /* rss protocol type */
};
/* edma_sw_desc stores software descriptor
* SW descriptor has 1:1 map with HW descriptor
*/
struct edma_sw_desc {
struct sk_buff *skb;
dma_addr_t dma; /* dma address */
u16 length; /* Tx/Rx buffer length */
u32 flags;
};
/* per core related information */
struct edma_per_cpu_queues_info {
struct napi_struct napi; /* napi associated with the core */
u32 tx_mask; /* tx interrupt mask */
u32 rx_mask; /* rx interrupt mask */
u32 tx_status; /* tx interrupt status */
u32 rx_status; /* rx interrupt status */
u32 tx_start; /* tx queue start */
u32 rx_start; /* rx queue start */
struct edma_common_info *edma_cinfo; /* edma common info */
};
/* edma specific common info */
struct edma_common_info {
struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
struct platform_device *pdev; /* device structure */
struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
struct ctl_table_header *edma_ctl_table_hdr;
int num_gmac;
struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
int num_rx_queues; /* number of rx queue */
u32 num_tx_queues; /* number of tx queue */
u32 tx_irq[16]; /* number of tx irq */
u32 rx_irq[8]; /* number of rx irq */
u32 from_cpu; /* from CPU TPD field */
u32 num_rxq_per_core; /* Rx queues per core */
u32 num_txq_per_core; /* Tx queues per core */
u16 tx_ring_count; /* Tx ring count */
u16 rx_ring_count; /* Rx ring count */
u16 rx_head_buffer_len; /* rx buffer length */
u16 rx_page_buffer_len; /* rx buffer length */
u32 page_mode; /* Jumbo frame supported flag */
u32 fraglist_mode; /* fraglist supported flag */
struct edma_hw hw; /* edma hw specific structure */
struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
spinlock_t stats_lock; /* protect edma stats area during updates */
struct timer_list edma_stats_timer;
bool is_single_phy;
void __iomem *ess_hw_addr;
struct clk *ess_clk;
};
/* transmit packet descriptor (tpd) ring */
struct edma_tx_desc_ring {
struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
/* Array of netdevs associated with the tpd ring */
void *hw_desc; /* descriptor ring virtual address */
struct edma_sw_desc *sw_desc; /* buffer associated with ring */
int netdev_bmp; /* Bitmap for per-ring netdevs */
u32 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
dma_addr_t dma; /* descriptor ring physical address */
u16 sw_next_to_fill; /* next Tx descriptor to fill */
u16 sw_next_to_clean; /* next Tx descriptor to clean */
};
/* receive free descriptor (rfd) ring */
struct edma_rfd_desc_ring {
void *hw_desc; /* descriptor ring virtual address */
struct edma_sw_desc *sw_desc; /* buffer associated with ring */
u16 size; /* bytes allocated to sw_desc */
u16 count; /* number of descriptors in the ring */
dma_addr_t dma; /* descriptor ring physical address */
u16 sw_next_to_fill; /* next descriptor to fill */
u16 sw_next_to_clean; /* next descriptor to clean */
u16 pending_fill; /* fill pending from previous iteration */
};
/* edma_rfs_filter_node - rfs filter node in hash table */
struct edma_rfs_filter_node {
struct flow_keys keys;
u32 flow_id; /* flow_id of filter provided by kernel */
u16 filter_id; /* filter id of filter returned by adaptor */
u16 rq_id; /* desired rq index */
struct hlist_node node; /* edma rfs list node */
};
/* edma_rfs_flow_table - rfs flow table */
struct edma_rfs_flow_table {
u16 max_num_filter; /* Maximum number of filters edma supports */
u16 hashtoclean; /* hash table index to clean next */
int filter_available; /* Number of free filters available */
struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
spinlock_t rfs_ftab_lock;
struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
};
/* EDMA net device structure */
struct edma_adapter {
struct net_device *netdev; /* netdevice */
struct platform_device *pdev; /* platform device */
struct edma_common_info *edma_cinfo; /* edma common info */
struct phy_device *phydev; /* Phy device */
struct edma_rfs_flow_table rfs; /* edma rfs flow table */
struct net_device_stats stats; /* netdev statistics */
set_rfs_filter_callback_t set_rfs_rule;
u32 flags;/* status flags */
unsigned long state_flags; /* GMAC up/down flags */
u32 forced_speed; /* link force speed */
u32 forced_duplex; /* link force duplex */
u32 link_state; /* phy link state */
u32 phy_mdio_addr; /* PHY device address on MII interface */
u32 poll_required; /* check if link polling is required */
u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
u32 default_vlan_tag; /* vlan tag */
u32 dp_bitmap;
uint8_t phy_id[MII_BUS_ID_SIZE + 3];
};
int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
int edma_open(struct net_device *netdev);
int edma_close(struct net_device *netdev);
void edma_free_tx_resources(struct edma_common_info *edma_c_info);
void edma_free_rx_resources(struct edma_common_info *edma_c_info);
int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
void edma_free_queues(struct edma_common_info *edma_cinfo);
void edma_irq_disable(struct edma_common_info *edma_cinfo);
int edma_reset(struct edma_common_info *edma_cinfo);
int edma_poll(struct napi_struct *napi, int budget);
netdev_tx_t edma_xmit(struct sk_buff *skb,
struct net_device *netdev);
int edma_configure(struct edma_common_info *edma_cinfo);
void edma_irq_enable(struct edma_common_info *edma_cinfo);
void edma_enable_tx_ctrl(struct edma_hw *hw);
void edma_enable_rx_ctrl(struct edma_hw *hw);
void edma_stop_rx_tx(struct edma_hw *hw);
void edma_free_irqs(struct edma_adapter *adapter);
irqreturn_t edma_interrupt(int irq, void *dev);
void edma_write_reg(u16 reg_addr, u32 reg_value);
void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
struct net_device_stats *edma_get_stats(struct net_device *netdev);
int edma_set_mac_addr(struct net_device *netdev, void *p);
int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq, u32 flow_id);
int edma_register_rfs_filter(struct net_device *netdev,
set_rfs_filter_callback_t set_filter);
void edma_flow_may_expire(struct timer_list *t);
void edma_set_ethtool_ops(struct net_device *netdev);
void edma_set_stp_rstp(bool tag);
void edma_assign_ath_hdr_type(int tag);
int edma_get_default_vlan_tag(struct net_device *netdev);
void edma_adjust_link(struct net_device *netdev);
int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
void edma_read_append_stats(struct edma_common_info *edma_cinfo);
void edma_change_tx_coalesce(int usecs);
void edma_change_rx_coalesce(int usecs);
void edma_get_tx_rx_coalesce(u32 *reg_val);
void edma_clear_irq_status(void);
void ess_set_port_status_speed(struct edma_common_info *edma_cinfo,
struct phy_device *phydev, uint8_t port_id);
#endif /* _EDMA_H_ */

View file

@@ -1,350 +0,0 @@
/*
* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/version.h>
#include "edma.h"
struct edma_ethtool_stats {
uint8_t stat_string[ETH_GSTRING_LEN];
uint32_t stat_offset;
};
#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m)
#define DRVINFO_LEN 32
/* Array of strings describing statistics
*/
static const struct edma_ethtool_stats edma_gstrings_stats[] = {
{"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
{"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
{"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
{"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
{"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
{"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
{"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
{"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
{"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
{"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
{"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
{"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
{"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
{"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
{"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
{"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
{"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
{"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
{"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
{"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
{"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
{"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
{"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
{"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
{"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
{"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
{"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
{"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
{"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
{"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
{"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
{"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
{"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
{"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
{"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
{"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
{"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
{"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
{"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
{"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
{"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
{"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
{"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
{"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
{"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
{"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
{"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
{"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
{"tx_desc_error", EDMA_STAT(tx_desc_error)},
{"rx_alloc_fail_ctr", EDMA_STAT(rx_alloc_fail_ctr)},
};
#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
/* edma_get_strset_count()
* Get strset count
*/
static int edma_get_strset_count(struct net_device *netdev,
int sset)
{
switch (sset) {
case ETH_SS_STATS:
return EDMA_STATS_LEN;
default:
netdev_dbg(netdev, "%s: Invalid string set", __func__);
return -EOPNOTSUPP;
}
}
/* edma_get_strings()
* get stats string
*/
static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
uint8_t *p = data;
uint32_t i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < EDMA_STATS_LEN; i++) {
memcpy(p, edma_gstrings_stats[i].stat_string,
min((size_t)ETH_GSTRING_LEN,
strlen(edma_gstrings_stats[i].stat_string)
+ 1));
p += ETH_GSTRING_LEN;
}
break;
}
}
/* edma_get_ethtool_stats()
* Get ethtool statistics
*/
static void edma_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct edma_adapter *adapter = netdev_priv(netdev);
struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
int i;
uint8_t *p = NULL;
edma_read_append_stats(edma_cinfo);
for(i = 0; i < EDMA_STATS_LEN; i++) {
p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
edma_gstrings_stats[i].stat_offset;
data[i] = *(uint32_t *)p;
}
}
/* edma_get_drvinfo()
* get edma driver info
*/
static void edma_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
}
/* edma_nway_reset()
* Reset the phy, if available.
*/
static int edma_nway_reset(struct net_device *netdev)
{
return -EINVAL;
}
/* edma_get_wol()
* get wake on lan info
*/
static void edma_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
}
/* edma_get_msglevel()
* get message level.
*/
static uint32_t edma_get_msglevel(struct net_device *netdev)
{
return 0;
}
/* edma_get_settings()
* Get edma settings
*/
static int edma_get_settings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct edma_adapter *adapter = netdev_priv(netdev);
if (adapter->poll_required) {
if ((adapter->forced_speed != SPEED_UNKNOWN)
&& !(adapter->poll_required))
return -EPERM;
phy_ethtool_ksettings_get(adapter->phydev, cmd);
if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, adapter->phydev->advertising))
cmd->base.port = PORT_FIBRE;
else
cmd->base.port = PORT_TP;
} else {
/* If the speed/duplex for this GMAC is forced and we
* are not polling for link state changes, return the
* values as specified by platform. This will be true
* for GMACs connected to switch, and interfaces that
* do not use a PHY.
*/
if (!(adapter->poll_required)) {
if (adapter->forced_speed != SPEED_UNKNOWN) {
/* set speed and duplex */
cmd->base.speed = SPEED_1000;
cmd->base.duplex = DUPLEX_FULL;
/* Populate capabilities advertised by self */
linkmode_zero(cmd->link_modes.advertising);
cmd->base.autoneg = 0;
cmd->base.port = PORT_TP;
cmd->base.transceiver = XCVR_EXTERNAL;
} else {
/* non link polled and non
* forced speed/duplex interface
*/
return -EIO;
}
}
}
return 0;
}
/* edma_set_settings()
* Set EDMA settings
*/
static int edma_set_settings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct edma_adapter *adapter = netdev_priv(netdev);
if ((adapter->forced_speed != SPEED_UNKNOWN) &&
!adapter->poll_required)
return -EPERM;
return phy_ethtool_ksettings_set(adapter->phydev, cmd);
}
/* edma_get_coalesce
* get interrupt mitigation
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
static int edma_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
#else
static int edma_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
#endif
{
u32 reg_val;
edma_get_tx_rx_coalesce(&reg_val);
/* We read the Interrupt Moderation Timer (IMT) register value;
* the lower 16 bits are used for Rx and the upper 16 bits for Tx.
* We shift the value left by 1 because the IMT timer resolution is
* 2 usecs, so the value read from the register is multiplied by 2
* to get the actual time in usecs.
*/
ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
return 0;
}
/* edma_set_coalesce
* set interrupt mitigation
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
static int edma_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
#else
static int edma_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
#endif
{
if (ec->tx_coalesce_usecs)
edma_change_tx_coalesce(ec->tx_coalesce_usecs);
if (ec->rx_coalesce_usecs)
edma_change_rx_coalesce(ec->rx_coalesce_usecs);
return 0;
}
/* edma_set_priv_flags()
* Set EDMA private flags
*/
static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
{
return 0;
}
/* edma_get_priv_flags()
* get edma driver flags
*/
static u32 edma_get_priv_flags(struct net_device *netdev)
{
return 0;
}
/* edma_get_ringparam()
* get ring size
*/
static void edma_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct edma_adapter *adapter = netdev_priv(netdev);
struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
ring->tx_max_pending = edma_cinfo->tx_ring_count;
ring->rx_max_pending = edma_cinfo->rx_ring_count;
}
/* Ethtool operations
*/
static const struct ethtool_ops edma_ethtool_ops = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
#endif
.get_drvinfo = &edma_get_drvinfo,
.get_link = &ethtool_op_get_link,
.get_msglevel = &edma_get_msglevel,
.nway_reset = &edma_nway_reset,
.get_wol = &edma_get_wol,
.get_link_ksettings = &edma_get_settings,
.set_link_ksettings = &edma_set_settings,
.get_strings = &edma_get_strings,
.get_sset_count = &edma_get_strset_count,
.get_ethtool_stats = &edma_get_ethtool_stats,
.get_coalesce = &edma_get_coalesce,
.set_coalesce = &edma_set_coalesce,
.get_priv_flags = edma_get_priv_flags,
.set_priv_flags = edma_set_priv_flags,
.get_ringparam = edma_get_ringparam,
};
/* edma_set_ethtool_ops
* Set ethtool operations
*/
void edma_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &edma_ethtool_ops;
}

View file

@@ -1,389 +0,0 @@
/*
* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _ESS_EDMA_H_
#define _ESS_EDMA_H_
#include <linux/types.h>
struct edma_adapter;
struct edma_hw;
/* register definition */
#define EDMA_REG_MAS_CTRL 0x0
#define EDMA_REG_TIMEOUT_CTRL 0x004
#define EDMA_REG_DBG0 0x008
#define EDMA_REG_DBG1 0x00C
#define EDMA_REG_SW_CTRL0 0x100
#define EDMA_REG_SW_CTRL1 0x104
/* Interrupt Status Register */
#define EDMA_REG_RX_ISR 0x200
#define EDMA_REG_TX_ISR 0x208
#define EDMA_REG_MISC_ISR 0x210
#define EDMA_REG_WOL_ISR 0x218
#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x)
#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
#define EDMA_WOL_ISR 0x00000001
/* Interrupt Mask Register */
#define EDMA_REG_MISC_IMR 0x214
#define EDMA_REG_WOL_IMR 0x218
#define EDMA_RX_IMR_NORMAL_MASK 0x1
#define EDMA_TX_IMR_NORMAL_MASK 0x1
#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
#define EDMA_WOL_IMR_NORMAL_MASK 0x1
/* Edma receive consumer index */
#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
/* Edma transmit consumer index */
#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
/* IRQ Moderator Initial Timer Register */
#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
/* Interrupt Control Register */
#define EDMA_REG_INTR_CTRL 0x284
#define EDMA_INTR_CLR_TYP_SHIFT 0
#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
#define EDMA_INTR_CLEAR_TYPE_W1 0
#define EDMA_INTR_CLEAR_TYPE_R 1
/* RX Interrupt Mask Register */
#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
/* TX Interrupt mask register */
#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
/* Load Ptr Register
* Software sets this bit after the initialization of the head and tail
*/
#define EDMA_REG_TX_SRAM_PART 0x400
#define EDMA_LOAD_PTR_SHIFT 16
/* TXQ Control Register */
#define EDMA_REG_TXQ_CTRL 0x404
#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
#define EDMA_TXQ_CTRL_TXQ_EN 0x20
#define EDMA_TXQ_CTRL_ENH_MODE 0x40
#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
/* WRR Control Register */
#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
/* Weighted round robin (WRR): takes the queue as input and computes
* the starting bit position at which the weight for that queue is
* written
*/
#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
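/* e.g. for queue 5 the weight field starts at bit (5 * 5) % 20 = 5 of EDMA_REG_WRR_CTRL_Q4_Q7 */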
/* Tx Descriptor Control Register */
#define EDMA_REG_TPD_RING_SIZE 0x41C
#define EDMA_TPD_RING_SIZE_SHIFT 0
#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
/* Transmit descriptor base address */
#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
/* TPD Index Register */
#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
#define EDMA_TPD_PROD_IDX_SHIFT 0
#define EDMA_TPD_CONS_IDX_SHIFT 16
/* TX Virtual Queue Mapping Control Register */
#define EDMA_REG_VQ_CTRL0 0x4A0
#define EDMA_REG_VQ_CTRL1 0x4A4
/* Virtual QID shift: takes the queue as input and computes the
* virtual QID field position in the virtual QID control register
*/
#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
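/* e.g. for queue 9 the virtual QID field starts at bit (9 * 3) % 24 = 3 */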
/* Virtual Queue Default Value */
#define EDMA_VQ_REG_VALUE 0x240240
/* Tx side Port Interface Control Register */
#define EDMA_REG_PORT_CTRL 0x4A8
#define EDMA_PAD_EN_SHIFT 15
/* Tx side VLAN Configuration Register */
#define EDMA_REG_VLAN_CFG 0x4AC
#define EDMA_TX_CVLAN 16
#define EDMA_TX_INS_CVLAN 17
#define EDMA_TX_CVLAN_TAG_SHIFT 0
#define EDMA_TX_SVLAN 14
#define EDMA_TX_INS_SVLAN 15
#define EDMA_TX_SVLAN_TAG_SHIFT 16
/* Tx Queue Packet Statistic Register */
#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
/* Tx Queue Byte Statistic Register */
#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
/* Load Balance Based Ring Offset Register */
#define EDMA_REG_LB_RING 0x800
#define EDMA_LB_RING_ENTRY_MASK 0xff
#define EDMA_LB_RING_ID_MASK 0x7
#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
#define EDMA_LB_RING_ID_OFFSET 0
#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
#define EDMA_LB_REG_VALUE 0x6040200
/* Load Balance Priority Mapping Register */
#define EDMA_REG_LB_PRI_START 0x804
#define EDMA_REG_LB_PRI_END 0x810
#define EDMA_LB_PRI_REG_INC 4
#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
#define EDMA_LB_PRI_ENTRY_MASK 0xf
/* RSS Priority Mapping Register */
#define EDMA_REG_RSS_PRI 0x820
#define EDMA_RSS_PRI_ENTRY_MASK 0xf
#define EDMA_RSS_RING_ID_MASK 0x7
#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
/* RSS Indirection Register */
#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
#define EDMA_NUM_IDT 16
#define EDMA_RSS_IDT_VALUE 0x64206420
/* Default RSS Ring Register */
#define EDMA_REG_DEF_RSS 0x890
#define EDMA_DEF_RSS_MASK 0x7
/* RSS Hash Function Type Register */
#define EDMA_REG_RSS_TYPE 0x894
#define EDMA_RSS_TYPE_NONE 0x01
#define EDMA_RSS_TYPE_IPV4TCP 0x02
#define EDMA_RSS_TYPE_IPV6_TCP 0x04
#define EDMA_RSS_TYPE_IPV4_UDP 0x08
#define EDMA_RSS_TYPE_IPV6UDP 0x10
#define EDMA_RSS_TYPE_IPV4 0x20
#define EDMA_RSS_TYPE_IPV6 0x40
#define EDMA_RSS_HASH_MODE_MASK 0x7f
#define EDMA_REG_RSS_HASH_VALUE 0x8C0
#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
#define EDMA_HASH_TYPE_START 0
#define EDMA_HASH_TYPE_END 5
#define EDMA_HASH_TYPE_SHIFT 12
#define EDMA_RFS_FLOW_ENTRIES 1024
#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
/* RFD Base Address Register */
#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
/* RFD Index Register */
#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
#define EDMA_RFD_PROD_IDX_MASK 0xFFF
#define EDMA_RFD_CONS_IDX_MASK 0xFFF
#define EDMA_RFD_PROD_IDX_SHIFT 0
#define EDMA_RFD_CONS_IDX_SHIFT 16
/* Rx Descriptor Control Register */
#define EDMA_REG_RX_DESC0 0xA10
#define EDMA_RFD_RING_SIZE_MASK 0xFFF
#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
#define EDMA_RFD_RING_SIZE_SHIFT 0
#define EDMA_RX_BUF_SIZE_SHIFT 16
#define EDMA_REG_RX_DESC1 0xA14
#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
/* RXQ Control Register */
#define EDMA_REG_RXQ_CTRL 0xA18
#define EDMA_FIFO_THRESH_TYPE_SHIF 0
#define EDMA_FIFO_THRESH_128_BYTE 0x0
#define EDMA_FIFO_THRESH_64_BYTE 0x1
#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
#define EDMA_RXQ_CTRL_EN 0x0000FF00
/* AXI Burst Size Config */
#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
/* Rx Statistics Register */
#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
/* WoL Pattern Length Register */
#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
#define EDMA_WOL_PT_LEN_MASK 0xFF
#define EDMA_WOL_PT0_LEN_SHIFT 0
#define EDMA_WOL_PT1_LEN_SHIFT 8
#define EDMA_WOL_PT2_LEN_SHIFT 16
#define EDMA_WOL_PT3_LEN_SHIFT 24
#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
#define EDMA_WOL_PT4_LEN_SHIFT 0
#define EDMA_WOL_PT5_LEN_SHIFT 8
#define EDMA_WOL_PT6_LEN_SHIFT 16
/* WoL Control Register */
#define EDMA_REG_WOL_CTRL 0xC08
#define EDMA_WOL_WK_EN 0x00000001
#define EDMA_WOL_MG_EN 0x00000002
#define EDMA_WOL_PT0_EN 0x00000004
#define EDMA_WOL_PT1_EN 0x00000008
#define EDMA_WOL_PT2_EN 0x00000010
#define EDMA_WOL_PT3_EN 0x00000020
#define EDMA_WOL_PT4_EN 0x00000040
#define EDMA_WOL_PT5_EN 0x00000080
#define EDMA_WOL_PT6_EN 0x00000100
/* MAC Control Register */
#define EDMA_REG_MAC_CTRL0 0xC20
#define EDMA_REG_MAC_CTRL1 0xC24
/* WoL Pattern Register */
#define EDMA_REG_WOL_PATTERN_START 0x5000
#define EDMA_PATTERN_PART_REG_OFFSET 0x40
/* TX descriptor fields */
#define EDMA_TPD_HDR_SHIFT 0
#define EDMA_TPD_PPPOE_EN 0x00000100
#define EDMA_TPD_IP_CSUM_EN 0x00000200
#define EDMA_TPD_TCP_CSUM_EN 0x0000400
#define EDMA_TPD_UDP_CSUM_EN 0x00000800
#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
#define EDMA_TPD_LSO_EN 0x00001000
#define EDMA_TPD_LSO_V2_EN 0x00002000
#define EDMA_TPD_IPV4_EN 0x00010000
#define EDMA_TPD_MSS_MASK 0x1FFF
#define EDMA_TPD_MSS_SHIFT 18
#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
/* RRD descriptor fields */
#define EDMA_RRD_NUM_RFD_MASK 0x000F
#define EDMA_RRD_SVLAN 0x8000
#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
#define EDMA_RRD_CVLAN 0x0001
#define EDMA_RRD_DESC_VALID 0x8000
#define EDMA_RRD_PRIORITY_SHIFT 4
#define EDMA_RRD_PRIORITY_MASK 0x7
#define EDMA_RRD_PORT_TYPE_SHIFT 7
#define EDMA_RRD_PORT_TYPE_MASK 0x1F
#define ESS_RGMII_CTRL 0x0004
/* Port status registers */
#define ESS_PORT0_STATUS 0x007C
#define ESS_PORT1_STATUS 0x0080
#define ESS_PORT2_STATUS 0x0084
#define ESS_PORT3_STATUS 0x0088
#define ESS_PORT4_STATUS 0x008C
#define ESS_PORT5_STATUS 0x0090
#define ESS_PORT_STATUS_HDX_FLOW_CTL 0x80
#define ESS_PORT_STATUS_DUPLEX_MODE 0x40
#define ESS_PORT_STATUS_RX_FLOW_EN 0x20
#define ESS_PORT_STATUS_TX_FLOW_EN 0x10
#define ESS_PORT_STATUS_RX_MAC_EN 0x08
#define ESS_PORT_STATUS_TX_MAC_EN 0x04
#define ESS_PORT_STATUS_SPEED_INV 0x03
#define ESS_PORT_STATUS_SPEED_1000 0x02
#define ESS_PORT_STATUS_SPEED_100 0x01
#define ESS_PORT_STATUS_SPEED_10 0x00
#define ESS_PORT_1G_FDX (ESS_PORT_STATUS_DUPLEX_MODE | ESS_PORT_STATUS_RX_FLOW_EN | \
ESS_PORT_STATUS_TX_FLOW_EN | ESS_PORT_STATUS_RX_MAC_EN | \
ESS_PORT_STATUS_TX_MAC_EN | ESS_PORT_STATUS_SPEED_1000)
#define PHY_STATUS_REG 0x11
#define PHY_STATUS_SPEED 0xC000
#define PHY_STATUS_SPEED_SHIFT 14
#define PHY_STATUS_DUPLEX 0x2000
#define PHY_STATUS_DUPLEX_SHIFT 13
#define PHY_STATUS_SPEED_DUPLEX_RESOLVED 0x0800
#define PHY_STATUS_CARRIER 0x0400
#define PHY_STATUS_CARRIER_SHIFT 10
/* Port lookup control registers */
#define ESS_PORT0_LOOKUP_CTRL 0x0660
#define ESS_PORT1_LOOKUP_CTRL 0x066C
#define ESS_PORT2_LOOKUP_CTRL 0x0678
#define ESS_PORT3_LOOKUP_CTRL 0x0684
#define ESS_PORT4_LOOKUP_CTRL 0x0690
#define ESS_PORT5_LOOKUP_CTRL 0x069C
#define ESS_PORT0_HEADER_CTRL 0x009C
#define ESS_PORTS_ALL 0x3f
#define ESS_FWD_CTRL1 0x0624
#define ESS_FWD_CTRL1_UC_FLOOD BITS(0, 7)
#define ESS_FWD_CTRL1_UC_FLOOD_S 0
#define ESS_FWD_CTRL1_MC_FLOOD BITS(8, 7)
#define ESS_FWD_CTRL1_MC_FLOOD_S 8
#define ESS_FWD_CTRL1_BC_FLOOD BITS(16, 7)
#define ESS_FWD_CTRL1_BC_FLOOD_S 16
#define ESS_FWD_CTRL1_IGMP BITS(24, 7)
#define ESS_FWD_CTRL1_IGMP_S 24
#endif /* _ESS_EDMA_H_ */

View file

@@ -1,865 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2020 Sartura Ltd.
*
* Author: Robert Marko <robert.marko@sartura.hr>
*
* Qualcomm QCA8072 and QCA8075 PHY driver
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/bitfield.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
#include <linux/ethtool_netlink.h>
#endif
#include <linux/gpio.h>
#include <linux/sfp.h>
#include <dt-bindings/net/qcom-qca807x.h>
#define PHY_ID_QCA8072 0x004dd0b2
#define PHY_ID_QCA8075 0x004dd0b1
#define PHY_ID_QCA807X_PSGMII 0x06820805
/* Downshift */
#define QCA807X_SMARTSPEED_EN BIT(5)
#define QCA807X_SMARTSPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
#define QCA807X_SMARTSPEED_RETRY_LIMIT_DEFAULT 5
#define QCA807X_SMARTSPEED_RETRY_LIMIT_MIN 2
#define QCA807X_SMARTSPEED_RETRY_LIMIT_MAX 9
/* Cable diagnostic test (CDT) */
#define QCA807X_CDT 0x16
#define QCA807X_CDT_ENABLE BIT(15)
#define QCA807X_CDT_ENABLE_INTER_PAIR_SHORT BIT(13)
#define QCA807X_CDT_STATUS BIT(11)
#define QCA807X_CDT_MMD3_STATUS 0x8064
#define QCA807X_CDT_MDI0_STATUS_MASK GENMASK(15, 12)
#define QCA807X_CDT_MDI1_STATUS_MASK GENMASK(11, 8)
#define QCA807X_CDT_MDI2_STATUS_MASK GENMASK(7, 4)
#define QCA807X_CDT_MDI3_STATUS_MASK GENMASK(3, 0)
#define QCA807X_CDT_RESULTS_INVALID 0x0
#define QCA807X_CDT_RESULTS_OK 0x1
#define QCA807X_CDT_RESULTS_OPEN 0x2
#define QCA807X_CDT_RESULTS_SAME_SHORT 0x3
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK 0x4
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK 0x8
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK 0xc
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN 0x6
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN 0xa
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN 0xe
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT 0x7
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT 0xb
#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT 0xf
#define QCA807X_CDT_RESULTS_BUSY 0x9
#define QCA807X_CDT_MMD3_MDI0_LENGTH 0x8065
#define QCA807X_CDT_MMD3_MDI1_LENGTH 0x8066
#define QCA807X_CDT_MMD3_MDI2_LENGTH 0x8067
#define QCA807X_CDT_MMD3_MDI3_LENGTH 0x8068
#define QCA807X_CDT_SAME_SHORT_LENGTH_MASK GENMASK(15, 8)
#define QCA807X_CDT_CROSS_SHORT_LENGTH_MASK GENMASK(7, 0)
#define QCA807X_CHIP_CONFIGURATION 0x1f
#define QCA807X_BT_BX_REG_SEL BIT(15)
#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK GENMASK(3, 0)
#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII 4
#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER 3
#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER 0
#define QCA807X_MEDIA_SELECT_STATUS 0x1a
#define QCA807X_MEDIA_DETECTED_COPPER BIT(5)
#define QCA807X_MEDIA_DETECTED_1000_BASE_X BIT(4)
#define QCA807X_MEDIA_DETECTED_100_BASE_FX BIT(3)
#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION 0x807e
#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN BIT(0)
#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH 0x801a
#define QCA807X_CONTROL_DAC_MASK GENMASK(2, 0)
#define QCA807X_MMD7_LED_100N_1 0x8074
#define QCA807X_MMD7_LED_100N_2 0x8075
#define QCA807X_MMD7_LED_1000N_1 0x8076
#define QCA807X_MMD7_LED_1000N_2 0x8077
#define QCA807X_LED_TXACT_BLK_EN_2 BIT(10)
#define QCA807X_LED_RXACT_BLK_EN_2 BIT(9)
#define QCA807X_LED_GT_ON_EN_2 BIT(6)
#define QCA807X_LED_HT_ON_EN_2 BIT(5)
#define QCA807X_LED_BT_ON_EN_2 BIT(4)
#define QCA807X_GPIO_FORCE_EN BIT(15)
#define QCA807X_GPIO_FORCE_MODE_MASK GENMASK(14, 13)
#define QCA807X_INTR_ENABLE 0x12
#define QCA807X_INTR_STATUS 0x13
#define QCA807X_INTR_ENABLE_AUTONEG_ERR BIT(15)
#define QCA807X_INTR_ENABLE_SPEED_CHANGED BIT(14)
#define QCA807X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
#define QCA807X_INTR_ENABLE_LINK_FAIL BIT(11)
#define QCA807X_INTR_ENABLE_LINK_SUCCESS BIT(10)
#define QCA807X_FUNCTION_CONTROL 0x10
#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5)
#define QCA807X_FC_MDI_CROSSOVER_AUTO 3
#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX 1
#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI 0
#define QCA807X_PHY_SPECIFIC_STATUS 0x11
#define QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED BIT(11)
#define QCA807X_SS_SPEED_MASK GENMASK(15, 14)
#define QCA807X_SS_SPEED_1000 2
#define QCA807X_SS_SPEED_100 1
#define QCA807X_SS_SPEED_10 0
#define QCA807X_SS_DUPLEX BIT(13)
#define QCA807X_SS_MDIX BIT(6)
/* PSGMII PHY specific */
#define PSGMII_QSGMII_DRIVE_CONTROL_1 0xb
#define PSGMII_QSGMII_TX_DRIVER_MASK GENMASK(7, 4)
#define PSGMII_MODE_CTRL 0x6d
#define PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK GENMASK(3, 0)
#define PSGMII_MMD3_SERDES_CONTROL 0x805a
struct qca807x_gpio_priv {
struct phy_device *phy;
};
static int qca807x_get_downshift(struct phy_device *phydev, u8 *data)
{
int val, cnt, enable;
val = phy_read(phydev, MII_NWAYTEST);
if (val < 0)
return val;
enable = FIELD_GET(QCA807X_SMARTSPEED_EN, val);
cnt = FIELD_GET(QCA807X_SMARTSPEED_RETRY_LIMIT_MASK, val) + 2;
*data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
return 0;
}
static int qca807x_set_downshift(struct phy_device *phydev, u8 cnt)
{
int ret, val;
if (cnt > QCA807X_SMARTSPEED_RETRY_LIMIT_MAX ||
(cnt < QCA807X_SMARTSPEED_RETRY_LIMIT_MIN && cnt != DOWNSHIFT_DEV_DISABLE))
return -EINVAL;
if (!cnt) {
ret = phy_clear_bits(phydev, MII_NWAYTEST, QCA807X_SMARTSPEED_EN);
} else {
val = QCA807X_SMARTSPEED_EN;
val |= FIELD_PREP(QCA807X_SMARTSPEED_RETRY_LIMIT_MASK, cnt - 2);
phy_modify(phydev, MII_NWAYTEST,
QCA807X_SMARTSPEED_EN |
QCA807X_SMARTSPEED_RETRY_LIMIT_MASK,
val);
}
ret = genphy_soft_reset(phydev);
return ret;
}
static int qca807x_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
return qca807x_get_downshift(phydev, data);
default:
return -EOPNOTSUPP;
}
}
static int qca807x_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
return qca807x_set_downshift(phydev, *(const u8 *)data);
default:
return -EOPNOTSUPP;
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
static bool qca807x_distance_valid(int result)
{
switch (result) {
case QCA807X_CDT_RESULTS_OPEN:
case QCA807X_CDT_RESULTS_SAME_SHORT:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
return true;
}
return false;
}
static int qca807x_report_length(struct phy_device *phydev,
int pair, int result)
{
int length;
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA807X_CDT_MMD3_MDI0_LENGTH + pair);
if (ret < 0)
return ret;
switch (result) {
case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
length = (FIELD_GET(QCA807X_CDT_SAME_SHORT_LENGTH_MASK, ret) * 800) / 10;
break;
case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
length = (FIELD_GET(QCA807X_CDT_CROSS_SHORT_LENGTH_MASK, ret) * 800) / 10;
break;
}
ethnl_cable_test_fault_length(phydev, pair, length);
return 0;
}
static int qca807x_cable_test_report_trans(int result)
{
switch (result) {
case QCA807X_CDT_RESULTS_OK:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
case QCA807X_CDT_RESULTS_OPEN:
return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
case QCA807X_CDT_RESULTS_SAME_SHORT:
return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
default:
return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
}
}
static int qca807x_cable_test_report(struct phy_device *phydev)
{
int pair0, pair1, pair2, pair3;
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA807X_CDT_MMD3_STATUS);
if (ret < 0)
return ret;
pair0 = FIELD_GET(QCA807X_CDT_MDI0_STATUS_MASK, ret);
pair1 = FIELD_GET(QCA807X_CDT_MDI1_STATUS_MASK, ret);
pair2 = FIELD_GET(QCA807X_CDT_MDI2_STATUS_MASK, ret);
pair3 = FIELD_GET(QCA807X_CDT_MDI3_STATUS_MASK, ret);
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
qca807x_cable_test_report_trans(pair0));
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
qca807x_cable_test_report_trans(pair1));
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
qca807x_cable_test_report_trans(pair2));
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
qca807x_cable_test_report_trans(pair3));
if (qca807x_distance_valid(pair0))
qca807x_report_length(phydev, 0, qca807x_cable_test_report_trans(pair0));
if (qca807x_distance_valid(pair1))
qca807x_report_length(phydev, 1, qca807x_cable_test_report_trans(pair1));
if (qca807x_distance_valid(pair2))
qca807x_report_length(phydev, 2, qca807x_cable_test_report_trans(pair2));
if (qca807x_distance_valid(pair3))
qca807x_report_length(phydev, 3, qca807x_cable_test_report_trans(pair3));
return 0;
}
static int qca807x_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
int val;
*finished = false;
val = phy_read(phydev, QCA807X_CDT);
if (!((val & QCA807X_CDT_ENABLE) && (val & QCA807X_CDT_STATUS))) {
*finished = true;
return qca807x_cable_test_report(phydev);
}
return 0;
}
static int qca807x_cable_test_start(struct phy_device *phydev)
{
int val, ret;
val = phy_read(phydev, QCA807X_CDT);
/* Enable inter-pair short check as well */
val &= ~QCA807X_CDT_ENABLE_INTER_PAIR_SHORT;
val |= QCA807X_CDT_ENABLE;
ret = phy_write(phydev, QCA807X_CDT, val);
return ret;
}
#endif
#ifdef CONFIG_GPIOLIB
static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)
return GPIO_LINE_DIRECTION_OUT;
#else
return GPIOF_DIR_OUT;
#endif
}
static int qca807x_gpio_get_reg(unsigned int offset)
{
return QCA807X_MMD7_LED_100N_2 + (offset % 2) * 2;
}
static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
int val;
val = phy_read_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset));
return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
}
static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
int val;
val = phy_read_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset));
val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
val |= QCA807X_GPIO_FORCE_EN;
val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
phy_write_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset), val);
}
static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
{
qca807x_gpio_set(gc, offset, value);
return 0;
}
static int qca807x_gpio(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct qca807x_gpio_priv *priv;
struct gpio_chip *gc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->phy = phydev;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
gc->label = dev_name(dev);
gc->base = -1;
gc->ngpio = 2;
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->can_sleep = true;
gc->get_direction = qca807x_gpio_get_direction;
gc->direction_output = qca807x_gpio_dir_out;
gc->get = qca807x_gpio_get;
gc->set = qca807x_gpio_set;
return devm_gpiochip_add_data(dev, gc, priv);
}
#endif
static int qca807x_read_copper_status(struct phy_device *phydev, bool combo_port)
{
int ss, err, page, old_link = phydev->link;
/* Only combo port has dual pages */
if (combo_port) {
/* Check whether copper page is set and set if needed */
page = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
if (!(page & QCA807X_BT_BX_REG_SEL)) {
page |= QCA807X_BT_BX_REG_SEL;
phy_write(phydev, QCA807X_CHIP_CONFIGURATION, page);
}
}
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
/* why bother the PHY if nothing can have changed */
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
err = genphy_read_lpa(phydev);
if (err < 0)
return err;
/* Read the QCA807x PHY-Specific Status register copper page,
* which indicates the speed and duplex that the PHY is actually
* using, irrespective of whether we are in autoneg mode or not.
*/
ss = phy_read(phydev, QCA807X_PHY_SPECIFIC_STATUS);
if (ss < 0)
return ss;
if (ss & QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED) {
int sfc;
sfc = phy_read(phydev, QCA807X_FUNCTION_CONTROL);
if (sfc < 0)
return sfc;
switch (FIELD_GET(QCA807X_SS_SPEED_MASK, ss)) {
case QCA807X_SS_SPEED_10:
phydev->speed = SPEED_10;
break;
case QCA807X_SS_SPEED_100:
phydev->speed = SPEED_100;
break;
case QCA807X_SS_SPEED_1000:
phydev->speed = SPEED_1000;
break;
}
if (ss & QCA807X_SS_DUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
if (ss & QCA807X_SS_MDIX)
phydev->mdix = ETH_TP_MDI_X;
else
phydev->mdix = ETH_TP_MDI;
switch (FIELD_GET(QCA807X_FC_MDI_CROSSOVER_MODE_MASK, sfc)) {
case QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI:
phydev->mdix_ctrl = ETH_TP_MDI;
break;
case QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX:
phydev->mdix_ctrl = ETH_TP_MDI_X;
break;
case QCA807X_FC_MDI_CROSSOVER_AUTO:
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
break;
}
}
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
phy_resolve_aneg_pause(phydev);
return 0;
}
static int qca807x_read_fiber_status(struct phy_device *phydev, bool combo_port)
{
int ss, err, page, lpa, old_link = phydev->link;
/* Check whether fiber page is set and set if needed */
page = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
if (page & QCA807X_BT_BX_REG_SEL) {
page &= ~QCA807X_BT_BX_REG_SEL;
phy_write(phydev, QCA807X_CHIP_CONFIGURATION, page);
}
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
/* why bother the PHY if nothing can have changed */
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
lpa = phy_read(phydev, MII_LPA);
if (lpa < 0)
return lpa;
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
phydev->lp_advertising, lpa & LPA_LPACK);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
phydev->lp_advertising, lpa & LPA_1000XFULL);
linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->lp_advertising, lpa & LPA_1000XPAUSE);
linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->lp_advertising,
lpa & LPA_1000XPAUSE_ASYM);
phy_resolve_aneg_linkmode(phydev);
}
/* Read the QCA807x PHY-Specific Status register fiber page,
* which indicates the speed and duplex that the PHY is actually
* using, irrespective of whether we are in autoneg mode or not.
*/
ss = phy_read(phydev, QCA807X_PHY_SPECIFIC_STATUS);
if (ss < 0)
return ss;
if (ss & QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED) {
switch (FIELD_GET(QCA807X_SS_SPEED_MASK, ss)) {
case QCA807X_SS_SPEED_100:
phydev->speed = SPEED_100;
break;
case QCA807X_SS_SPEED_1000:
phydev->speed = SPEED_1000;
break;
}
if (ss & QCA807X_SS_DUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
}
return 0;
}
static int qca807x_read_status(struct phy_device *phydev)
{
int val;
/* Check for Combo port */
if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
/* Check for fiber mode first */
if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
/* Check for actual detected media */
val = phy_read(phydev, QCA807X_MEDIA_SELECT_STATUS);
if (val & QCA807X_MEDIA_DETECTED_COPPER) {
qca807x_read_copper_status(phydev, true);
} else if ((val & QCA807X_MEDIA_DETECTED_1000_BASE_X) ||
(val & QCA807X_MEDIA_DETECTED_100_BASE_FX)) {
qca807x_read_fiber_status(phydev, true);
}
} else {
qca807x_read_copper_status(phydev, true);
}
} else {
qca807x_read_copper_status(phydev, false);
}
return 0;
}
static int qca807x_config_intr(struct phy_device *phydev)
{
int ret, val;
val = phy_read(phydev, QCA807X_INTR_ENABLE);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* Check for combo port as it has fewer interrupts */
if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
val |= QCA807X_INTR_ENABLE_SPEED_CHANGED;
val |= QCA807X_INTR_ENABLE_LINK_FAIL;
val |= QCA807X_INTR_ENABLE_LINK_SUCCESS;
} else {
val |= QCA807X_INTR_ENABLE_AUTONEG_ERR;
val |= QCA807X_INTR_ENABLE_SPEED_CHANGED;
val |= QCA807X_INTR_ENABLE_DUPLEX_CHANGED;
val |= QCA807X_INTR_ENABLE_LINK_FAIL;
val |= QCA807X_INTR_ENABLE_LINK_SUCCESS;
}
ret = phy_write(phydev, QCA807X_INTR_ENABLE, val);
} else {
ret = phy_write(phydev, QCA807X_INTR_ENABLE, 0);
}
return ret;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)
static int qca807x_ack_intr(struct phy_device *phydev)
{
int ret;
ret = phy_read(phydev, QCA807X_INTR_STATUS);
return (ret < 0) ? ret : 0;
}
#else
static irqreturn_t qca807x_handle_interrupt(struct phy_device *phydev)
{
int irq_status, int_enabled;
irq_status = phy_read(phydev, QCA807X_INTR_STATUS);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
/* Read the current enabled interrupts */
int_enabled = phy_read(phydev, QCA807X_INTR_ENABLE);
if (int_enabled < 0) {
phy_error(phydev);
return IRQ_NONE;
}
/* See if this was one of our enabled interrupts */
if (!(irq_status & int_enabled))
return IRQ_NONE;
phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
#endif
static int qca807x_led_config(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
bool led_config = false;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_LED_1000N_1);
if (val < 0)
return val;
if (of_property_read_bool(node, "qcom,single-led-1000")) {
val |= QCA807X_LED_TXACT_BLK_EN_2;
val |= QCA807X_LED_RXACT_BLK_EN_2;
val |= QCA807X_LED_GT_ON_EN_2;
led_config = true;
}
if (of_property_read_bool(node, "qcom,single-led-100")) {
val |= QCA807X_LED_HT_ON_EN_2;
led_config = true;
}
if (of_property_read_bool(node, "qcom,single-led-10")) {
val |= QCA807X_LED_BT_ON_EN_2;
led_config = true;
}
if (led_config)
return phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_LED_1000N_1, val);
else
return 0;
}
static const struct sfp_upstream_ops qca807x_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
};
static int qca807x_config(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
int control_dac, ret = 0;
u32 of_control_dac;
/* Check for Combo port */
if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
int fiber_mode_autodect;
int psgmii_serdes;
int chip_config;
if (of_property_read_bool(node, "qcom,fiber-enable")) {
/* Enable fiber mode autodetection (1000Base-X or 100Base-FX) */
fiber_mode_autodect = phy_read_mmd(phydev, MDIO_MMD_AN,
QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION);
fiber_mode_autodect |= QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN;
phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
fiber_mode_autodect);
/* Enable 4 copper + combo port mode */
chip_config = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
chip_config &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
chip_config |= FIELD_PREP(QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
phy_write(phydev, QCA807X_CHIP_CONFIGURATION, chip_config);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
}
/* Prevent PSGMII going into hibernation via PSGMII self test */
psgmii_serdes = phy_read_mmd(phydev, MDIO_MMD_PCS, PSGMII_MMD3_SERDES_CONTROL);
psgmii_serdes &= ~BIT(1);
ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
PSGMII_MMD3_SERDES_CONTROL,
psgmii_serdes);
}
if (!of_property_read_u32(node, "qcom,control-dac", &of_control_dac)) {
control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
control_dac |= FIELD_PREP(QCA807X_CONTROL_DAC_MASK, of_control_dac);
ret = phy_write_mmd(phydev, MDIO_MMD_AN,
QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
control_dac);
}
/* Optionally configure LEDs */
if (IS_ENABLED(CONFIG_GPIOLIB)) {
/* Check whether the PHY's pins are used as GPIOs */
if (!of_property_read_bool(node, "gpio-controller"))
ret = qca807x_led_config(phydev);
} else {
ret = qca807x_led_config(phydev);
}
return ret;
}
static int qca807x_probe(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
int ret = 0;
if (IS_ENABLED(CONFIG_GPIOLIB)) {
/* Do not register a GPIO controller unless flagged for it */
if (of_property_read_bool(node, "gpio-controller"))
ret = qca807x_gpio(phydev);
}
/* Attach SFP bus on combo port */
if (of_property_read_bool(node, "qcom,fiber-enable")) {
if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION))
ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
}
return ret;
}
static int qca807x_psgmii_config(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
int psgmii_az, tx_amp, ret = 0;
u32 tx_driver_strength;
/* Workaround to enable AZ transmitting ability */
if (of_property_read_bool(node, "qcom,psgmii-az")) {
psgmii_az = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL);
psgmii_az &= ~PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
psgmii_az |= FIELD_PREP(PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK, 0xc);
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL, psgmii_az);
psgmii_az = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL);
}
/* PSGMII/QSGMII TX amp set to DT defined value instead of default 600mV */
if (!of_property_read_u32(node, "qcom,tx-driver-strength", &tx_driver_strength)) {
tx_amp = phy_read(phydev, PSGMII_QSGMII_DRIVE_CONTROL_1);
tx_amp &= ~PSGMII_QSGMII_TX_DRIVER_MASK;
tx_amp |= FIELD_PREP(PSGMII_QSGMII_TX_DRIVER_MASK, tx_driver_strength);
ret = phy_write(phydev, PSGMII_QSGMII_DRIVE_CONTROL_1, tx_amp);
}
return ret;
}
static struct phy_driver qca807x_drivers[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
.name = "Qualcomm QCA8072",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
.flags = PHY_POLL_CABLE_TEST,
#endif
/* PHY_GBIT_FEATURES */
.probe = qca807x_probe,
.config_init = qca807x_config,
.read_status = qca807x_read_status,
.config_intr = qca807x_config_intr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)
.ack_interrupt = qca807x_ack_intr,
#else
.handle_interrupt = qca807x_handle_interrupt,
#endif
.soft_reset = genphy_soft_reset,
.get_tunable = qca807x_get_tunable,
.set_tunable = qca807x_set_tunable,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
.cable_test_start = qca807x_cable_test_start,
.cable_test_get_status = qca807x_cable_test_get_status,
#endif
},
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
.name = "Qualcomm QCA8075",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
.flags = PHY_POLL_CABLE_TEST,
#endif
/* PHY_GBIT_FEATURES */
.probe = qca807x_probe,
.config_init = qca807x_config,
.read_status = qca807x_read_status,
.config_intr = qca807x_config_intr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)
.ack_interrupt = qca807x_ack_intr,
#else
.handle_interrupt = qca807x_handle_interrupt,
#endif
.soft_reset = genphy_soft_reset,
.get_tunable = qca807x_get_tunable,
.set_tunable = qca807x_set_tunable,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
.cable_test_start = qca807x_cable_test_start,
.cable_test_get_status = qca807x_cable_test_get_status,
#endif
},
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA807X_PSGMII),
.name = "Qualcomm QCA807x PSGMII",
.probe = qca807x_psgmii_config,
},
};
module_phy_driver(qca807x_drivers);
static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
{ PHY_ID_MATCH_MODEL(PHY_ID_QCA807X_PSGMII) },
{ }
};
MODULE_AUTHOR("Robert Marko");
MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
MODULE_LICENSE("GPL");