Merge branch 'wangxun-interrupts'

Jiawen Wu says:

====================
Wangxun interrupt and RxTx support

Configure interrupts, set up the Rx/Tx rings, and support receiving and
transmitting packets.

change log:
v3:
- Use upper_32_bits() to avoid a compile warning.
- Remove useless code.
v2:
- Andrew Lunn: https://lore.kernel.org/netdev/Y86kDphvyHj21IxK@lunn.ch/
- Add a check when allocating DMA for the descriptor ring.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 149e8fb06c
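For orientation, here is a minimal sketch of the datapath bring-up order this series establishes, condensed from the ngbe_open()/ngbe_up() hunks later in the diff (a hypothetical wrapper, not part of the patch; error unwinding is elided):

	/* sketch: allocate, configure, wire up IRQs, then enable the datapath */
	static int example_open(struct wx *wx)
	{
		int err;

		err = wx_setup_resources(wx);	/* ISB + Rx/Tx descriptor rings */
		if (err)
			return err;
		wx_configure(wx);		/* program MAC, rings, ISB address */
		err = ngbe_request_irq(wx);	/* MSI-X, MSI, or shared INTx */
		if (err)
			return err;
		ngbe_up(wx);			/* NAPI on, queues on, irqs unmasked */
		return 0;
	}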
drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
	tristate
	select PAGE_POOL
	help
	  Common library for Wangxun(R) Ethernet drivers.
drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o

-libwx-objs := wx_hw.o
+libwx-objs := wx_hw.o wx_lib.o
drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -8,13 +8,14 @@
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"

static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;

-	mask = (qmask & 0xFFFFFFFF);
+	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMS(0), mask);

@@ -25,6 +26,45 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
	}
}

void wx_intr_enable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMC(0), mask);
	if (wx->mac.type == wx_mac_sp) {
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMC(1), mask);
	}
}
EXPORT_SYMBOL(wx_intr_enable);
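As a usage sketch (not from the patch itself): callers build the 64-bit queue mask with the WX_INTR_ALL and WX_INTR_Q() helpers added to wx_type.h later in this diff, for example:

	wx_intr_enable(wx, WX_INTR_Q(0));	/* unmask queue vector 0 */
	wx_intr_enable(wx, WX_INTR_ALL);	/* unmask every vector */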
/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_entries[vector].vector);

		/* vector == num_q_vectors here, i.e. the misc vector */
		synchronize_irq(wx->msix_entries[vector].vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);

/* cmd_addr is used for some special command:
 * 1. to be sector address, when implemented erase sector command
 * 2. to be flash address when implemented read, write flash address
@@ -765,6 +805,37 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);

static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
				if (wx->mac_table[i].pools != (1ULL << pool)) {
					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
					wx->mac_table[i].pools |= (1ULL << pool);
					wx_sync_mac_table(wx);
					return i;
				}
			}
		}

		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
			continue;
		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
					   WX_MAC_STATE_IN_USE);
		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
		wx->mac_table[i].pools |= (1ULL << pool);
		wx_sync_mac_table(wx);
		return i;
	}
	return -ENOMEM;
}

static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;
@@ -789,6 +860,184 @@ static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
	return -ENOMEM;
}

static int wx_available_rars(struct wx *wx)
{
	u32 i, count = 0;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state == 0)
			count++;
	}

	return count;
}

/**
 * wx_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @pool: index for mac table
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > wx_available_rars(wx))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			wx_del_mac_filter(wx, ha->addr, pool);
			wx_add_mac_filter(wx, ha->addr, pool);
			count++;
		}
	}
	return count;
}

/**
 * wx_mta_vector - Determines bit-vector in multicast table to set
 * @wx: pointer to private structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
	u32 vector = 0;

	switch (wx->mac.mc_filter_type) {
	case 0: /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1: /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2: /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3: /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default: /* Invalid mc_filter_type */
		wx_err(wx, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * wx_set_mta - Set bit-vector in multicast table
 * @wx: pointer to private structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table.
 **/
static void wx_set_mta(struct wx *wx, u8 *mc_addr)
{
	u32 vector, vector_bit, vector_reg;

	wx->addr_ctrl.mta_in_use++;

	vector = wx_mta_vector(wx, mc_addr);
	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);

	/* The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
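A worked example of the hash above, for a hypothetical input and assuming mc_filter_type == 0 (bits [47:36]): for the multicast address 01:00:5e:00:00:fb, mc_addr[4] = 0x00 and mc_addr[5] = 0xfb, so vector = (0x00 >> 4) | (0xfb << 4) = 0xfb0; wx_set_mta() then sets bit (0xfb0 & 0x1f) of mta_shadow[0xfb0 >> 5], i.e. register 0x7d, bit 0x10.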

/**
 * wx_update_mc_addr_list - Updates MAC list of multicast addresses
 * @wx: pointer to private structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i, psrctl;

	/* Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	wx->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	wx_dbg(wx, " Clearing MTA\n");
	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));

	/* Update mta_shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		wx_dbg(wx, " Adding the multicast addresses:\n");
		wx_set_mta(wx, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32a(wx, WX_PSR_MC_TBL(0), i,
		      wx->mac.mta_shadow[i]);

	if (wx->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
		psrctl |= WX_PSR_CTL_MFE |
			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_dbg(wx, "Update mc addr list Complete\n");
}

/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	return netdev_mc_count(netdev);
}

/**
 * wx_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
@@ -844,6 +1093,430 @@ void wx_disable_rx(struct wx *wx)
}
EXPORT_SYMBOL(wx_disable_rx);

static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}

/**
 * wx_set_rxpba - Initialize Rx packet buffer
 * @wx: pointer to private structure
 **/
static void wx_set_rxpba(struct wx *wx)
{
	u32 rxpktsize, txpktsize, txpbthresh;

	rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = wx->mac.tx_pb_size;
	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}

/**
 * wx_disable_sec_rx_path - Stops the receive data path
 * @wx: pointer to private structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
static int wx_disable_sec_rx_path(struct wx *wx)
{
	u32 secrx;

	wr32m(wx, WX_RSC_CTL,
	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);

	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
				 1000, 40000, false, wx, WX_RSC_ST);
}

/**
 * wx_enable_sec_rx_path - Enables the receive data path
 * @wx: pointer to private structure
 *
 * Enables the receive data path.
 **/
static void wx_enable_sec_rx_path(struct wx *wx)
{
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
	WX_WRITE_FLUSH(wx);
}

void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	u32 fctrl, vmolr, vlnctrl;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* the PF does not want packets routed to VFs, so leave UPE clear */
		vmolr |= WX_PSR_VM_L2CTL_MPE;
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= WX_PSR_CTL_MPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	if (netdev->features & NETIF_F_RXALL) {
		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		/* receive bad packets */
		wr32m(wx, WX_RSC_CTL,
		      WX_RSC_CTL_SAVE_MAC_ERR,
		      WX_RSC_CTL_SAVE_MAC_ERR);
	} else {
		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = wx_write_uc_addr_list(netdev, 0);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
		vmolr |= WX_PSR_VM_L2CTL_UPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = wx_write_mc_addr_list(netdev);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	wr32(wx, WX_PSR_CTL, fctrl);
	wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
}
EXPORT_SYMBOL(wx_set_rx_mode);
static void wx_set_rx_buffer_len(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 mhadd, max_frame;

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = rd32(wx, WX_PSR_MAX_SZ);
	if (max_frame != mhadd)
		wr32(wx, WX_PSR_MAX_SZ, max_frame);
}

/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	/* write value back with RRCFG.EN bit cleared */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, 0);

	/* the hardware may take up to 100us to really disable the rx queue */
	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
		       reg_idx);
	}
}
EXPORT_SYMBOL(wx_disable_rx_queue);

static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
		       reg_idx);
	}
}

static void wx_configure_srrctl(struct wx *wx,
				struct wx_ring *rx_ring)
{
	u16 reg_idx = rx_ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
		    WX_PX_RR_CFG_RR_BUF_SZ |
		    WX_PX_RR_CFG_SPLIT_MODE);
	/* configure header buffer length, needed for RSC */
	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

static void wx_configure_tx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u32 txdctl = WX_PX_TR_CFG_ENABLE;
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	WX_WRITE_FLUSH(wx);

	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));

	/* reset head and tail pointers */
	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);

	if (ring->count < WX_MAX_TXD)
		txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	/* enable queue */
	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

static void wx_configure_rx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));

	if (ring->count == WX_MAX_RXD)
		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
	else
		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;

	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);

	/* reset head and tail pointers */
	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);

	wx_configure_srrctl(wx, ring);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor ring */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);

	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}

/**
 * wx_configure_tx - Configure Transmit Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void wx_configure_tx(struct wx *wx)
{
	u32 i;

	/* TDM_CTL.TE must be before Tx queues are enabled */
	wr32m(wx, WX_TDM_CTL,
	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring(wx, wx->tx_ring[i]);

	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);

	if (wx->mac.type == wx_mac_em)
		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);

	/* enable mac transmitter */
	wr32m(wx, WX_MAC_TX_CFG,
	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
}

/**
 * wx_configure_rx - Configure Receive Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void wx_configure_rx(struct wx *wx)
{
	u32 psrtype, i;
	int ret;

	wx_disable_rx(wx);

	psrtype = WX_RDB_PL_CFG_L4HDR |
		  WX_RDB_PL_CFG_L3HDR |
		  WX_RDB_PL_CFG_L2HDR |
		  WX_RDB_PL_CFG_TUN_TUNHDR;
	wr32(wx, WX_RDB_PL_CFG(0), psrtype);

	/* enable hw crc stripping */
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);

	if (wx->mac.type == wx_mac_sp) {
		u32 psrctl;

		/* RSC Setup */
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
		psrctl |= WX_PSR_CTL_RSC_DIS;
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	/* set_rx_buffer_len must be called before ring initialization */
	wx_set_rx_buffer_len(wx);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < wx->num_rx_queues; i++)
		wx_configure_rx_ring(wx, wx->rx_ring[i]);

	/* Disable the security engine while traffic is blocked, then enable all receives */
	ret = wx_disable_sec_rx_path(wx);
	if (ret < 0)
		wx_err(wx, "The register status is abnormal, please check device.");

	wx_enable_rx(wx);
	wx_enable_sec_rx_path(wx);
}

static void wx_configure_isb(struct wx *wx)
{
	/* set ISB Address */
	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
}

void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);

	wx_enable_sec_rx_path(wx);

	wx_configure_tx(wx);
	wx_configure_rx(wx);
	wx_configure_isb(wx);
}
EXPORT_SYMBOL(wx_configure);

/**
 * wx_disable_pcie_master - Disable PCI-express master access
 * @wx: pointer to hardware structure
drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,6 +4,8 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_

void wx_intr_enable(struct wx *wx, u64 qmask);
void wx_irq_disable(struct wx *wx);
int wx_check_flash_load(struct wx *wx, u32 check_bit);
void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);

@@ -20,6 +22,9 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_configure(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
void wx_reset_misc(struct wx *wx);
drivers/net/ethernet/wangxun/libwx/wx_lib.c (new file; diff suppressed because it is too large)
drivers/net/ethernet/wangxun/libwx/wx_lib.h (new file)
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * WangXun Gigabit PCI Express Linux driver
 * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
 */

#ifndef _WX_LIB_H_
#define _WX_LIB_H_

void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
u16 wx_desc_unused(struct wx_ring *ring);
netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
			  struct net_device *netdev);
void wx_napi_enable_all(struct wx *wx);
void wx_napi_disable_all(struct wx *wx);
void wx_reset_interrupt_capability(struct wx *wx);
void wx_clear_interrupt_scheme(struct wx *wx);
int wx_init_interrupt_scheme(struct wx *wx);
irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data);
void wx_free_irq(struct wx *wx);
int wx_setup_isb_resources(struct wx *wx);
void wx_free_isb_resources(struct wx *wx);
u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
void wx_configure_vectors(struct wx *wx);
void wx_clean_all_rx_rings(struct wx *wx);
void wx_clean_all_tx_rings(struct wx *wx);
void wx_free_resources(struct wx *wx);
int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
		    struct rtnl_link_stats64 *stats);

#endif /* _WX_LIB_H_ */
drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -5,6 +5,7 @@
#define _WX_TYPE_H_

#include <linux/bitfield.h>
#include <linux/netdevice.h>

/* Vendor ID */
#ifndef PCI_VENDOR_ID_WANGXUN

@@ -65,21 +66,50 @@
/* port cfg Registers */
#define WX_CFG_PORT_CTL              0x14400
#define WX_CFG_PORT_CTL_DRV_LOAD     BIT(3)
#define WX_CFG_PORT_CTL_QINQ         BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN       BIT(0) /* double vlan */
#define WX_CFG_TAG_TPID(_i)          (0x14430 + ((_i) * 4))

/* GPIO Registers */
#define WX_GPIO_DR                   0x14800
#define WX_GPIO_DR_0                 BIT(0) /* SDP0 Data Value */
#define WX_GPIO_DR_1                 BIT(1) /* SDP1 Data Value */
#define WX_GPIO_DDR                  0x14804
#define WX_GPIO_DDR_0                BIT(0) /* SDP0 IO direction */
#define WX_GPIO_DDR_1                BIT(1) /* SDP1 IO direction */
#define WX_GPIO_CTL                  0x14808
#define WX_GPIO_INTEN                0x14830
#define WX_GPIO_INTEN_0              BIT(0)
#define WX_GPIO_INTEN_1              BIT(1)
#define WX_GPIO_INTMASK              0x14834
#define WX_GPIO_INTTYPE_LEVEL        0x14838
#define WX_GPIO_POLARITY             0x1483C
#define WX_GPIO_EOI                  0x1484C

/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL                   0x18000
/* TDM CTL BIT */
#define WX_TDM_CTL_TE                BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i)           (0x18020 + ((_i) * 4))

/***************************** RDB registers *********************************/
/* receive packet buffer */
#define WX_RDB_PB_CTL                0x19000
#define WX_RDB_PB_CTL_RXEN           BIT(31) /* Enable Receiver */
#define WX_RDB_PB_CTL_DISABLED       BIT(0)
#define WX_RDB_PB_SZ(_i)             (0x19020 + ((_i) * 4))
#define WX_RDB_PB_SZ_SHIFT           10
/* statistic */
#define WX_RDB_PFCMACDAL             0x19210
#define WX_RDB_PFCMACDAH             0x19214
/* ring assignment */
#define WX_RDB_PL_CFG(_i)            (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR          BIT(1)
#define WX_RDB_PL_CFG_L3HDR          BIT(2)
#define WX_RDB_PL_CFG_L2HDR          BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR     BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR   BIT(5)

/******************************* PSR Registers *******************************/
/* psr control */
@@ -97,10 +127,24 @@
#define WX_PSR_CTL_MO_SHIFT          5
#define WX_PSR_CTL_MO                (0x3 << WX_PSR_CTL_MO_SHIFT)
#define WX_PSR_CTL_TPE               BIT(4)
#define WX_PSR_MAX_SZ                0x15020
#define WX_PSR_VLAN_CTL              0x15088
#define WX_PSR_VLAN_CTL_CFIEN        BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE          BIT(30) /* bit 30 */
/* mcast/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i)            (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i)            (0x15400 + ((_i) * 4))

/* VM L2 control */
#define WX_PSR_VM_L2CTL(_i)          (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE          BIT(4)  /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC         BIT(6)  /* accept nomatched vlan */
#define WX_PSR_VM_L2CTL_AUPE         BIT(8)  /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE        BIT(9)  /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE         BIT(10) /* accept packets in UC tbl */
#define WX_PSR_VM_L2CTL_BAM          BIT(11) /* accept broadcast packets */
#define WX_PSR_VM_L2CTL_MPE          BIT(12) /* multicast promiscuous */

/* Management */
#define WX_PSR_MNG_FLEX_SEL          0x1582C
#define WX_PSR_MNG_FLEX_DW_L(_i)     (0x15A00 + ((_i) * 16))

@@ -122,6 +166,27 @@
#define WX_PSR_MAC_SWC_IDX           0x16210
#define WX_CLEAR_VMDQ_ALL            0xFFFFFFFFU

/********************************* RSEC **************************************/
/* general rsec */
#define WX_RSC_CTL                   0x17000
#define WX_RSC_CTL_SAVE_MAC_ERR      BIT(6)
#define WX_RSC_CTL_CRC_STRIP         BIT(2)
#define WX_RSC_CTL_RX_DIS            BIT(1)
#define WX_RSC_ST                    0x17004
#define WX_RSC_ST_RSEC_RDY           BIT(0)

/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i)             (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX            0xA /* Max Tx Packet size */

/****************************** TSEC *****************************************/
/* Security Control Registers */
#define WX_TSC_CTL                   0x1D000
#define WX_TSC_CTL_TX_DIS            BIT(1)
#define WX_TSC_CTL_TSEC_DIS          BIT(0)
#define WX_TSC_BUF_AE                0x1D00C
#define WX_TSC_BUF_AE_THR            GENMASK(9, 0)

/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC             0x1E008
#define WX_MNG_SWFW_SYNC_SW_MB       BIT(2)

@@ -135,6 +200,7 @@
#define WX_MAC_TX_CFG                0x11000
#define WX_MAC_TX_CFG_TE             BIT(0)
#define WX_MAC_TX_CFG_SPEED_MASK     GENMASK(30, 29)
#define WX_MAC_TX_CFG_SPEED_10G      FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0)
#define WX_MAC_TX_CFG_SPEED_1G       FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3)
#define WX_MAC_RX_CFG                0x11004
#define WX_MAC_RX_CFG_RE             BIT(0)

@@ -151,10 +217,34 @@
/* Interrupt Registers */
#define WX_BME_CTL                   0x12020
#define WX_PX_MISC_IC                0x100
#define WX_PX_MISC_ICS               0x104
#define WX_PX_MISC_IEN               0x108
#define WX_PX_INTA                   0x110
#define WX_PX_GPIE                   0x118
#define WX_PX_GPIE_MODEL             BIT(0)
#define WX_PX_IC                     0x120
#define WX_PX_IMS(_i)                (0x140 + (_i) * 4)
#define WX_PX_IMC(_i)                (0x150 + (_i) * 4)
#define WX_PX_ISB_ADDR_L             0x160
#define WX_PX_ISB_ADDR_H             0x164
#define WX_PX_TRANSACTION_PENDING    0x168
#define WX_PX_ITRSEL                 0x180
#define WX_PX_ITR(_i)                (0x200 + (_i) * 4)
#define WX_PX_ITR_CNT_WDIS           BIT(31)
#define WX_PX_MISC_IVAR              0x4FC
#define WX_PX_IVAR(_i)               (0x500 + (_i) * 4)

#define WX_PX_IVAR_ALLOC_VAL         0x80 /* Interrupt Allocation valid */
#define WX_7K_ITR                    595
#define WX_12K_ITR                   336
#define WX_SP_MAX_EITR               0x00000FF8U
#define WX_EM_MAX_EITR               0x00007FFCU

/* transmit DMA Registers */
#define WX_PX_TR_BAL(_i)             (0x03000 + ((_i) * 0x40))
#define WX_PX_TR_BAH(_i)             (0x03004 + ((_i) * 0x40))
#define WX_PX_TR_WP(_i)              (0x03008 + ((_i) * 0x40))
#define WX_PX_TR_RP(_i)              (0x0300C + ((_i) * 0x40))
#define WX_PX_TR_CFG(_i)             (0x03010 + ((_i) * 0x40))
/* Transmit Config masks */
#define WX_PX_TR_CFG_ENABLE          BIT(0) /* Ena specific Tx Queue */

@@ -164,8 +254,22 @@
#define WX_PX_TR_CFG_THRE_SHIFT      8

/* Receive DMA Registers */
#define WX_PX_RR_BAL(_i)             (0x01000 + ((_i) * 0x40))
#define WX_PX_RR_BAH(_i)             (0x01004 + ((_i) * 0x40))
#define WX_PX_RR_WP(_i)              (0x01008 + ((_i) * 0x40))
#define WX_PX_RR_RP(_i)              (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i)             (0x01010 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_SPLIT_MODE      BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT   16
#define WX_PX_RR_CFG_RR_HDR_SZ       GENMASK(15, 12)
#define WX_PX_RR_CFG_RR_BUF_SZ       GENMASK(11, 8)
#define WX_PX_RR_CFG_BHDRSIZE_SHIFT  6 /* 64byte resolution (>> 6)
					* + at bit 8 offset (<< 12)
					* = (<< 6)
					*/
#define WX_PX_RR_CFG_BSIZEPKT_SHIFT  2 /* so many KBs */
#define WX_PX_RR_CFG_RR_SIZE_SHIFT   1
#define WX_PX_RR_CFG_RR_EN           BIT(0)

/* Number of 80 microseconds we wait for PCI Express master disable */
@@ -193,8 +297,46 @@
#define WX_MAC_STATE_MODIFIED        0x2
#define WX_MAC_STATE_IN_USE          0x4

#define WX_MAX_RXD                   8192
#define WX_MAX_TXD                   8192

/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256      256    /* Used for skb receive header */
#define WX_RXBUFFER_2K       2048
#define WX_MAX_RXBUFFER      16384  /* largest size for single descriptor */

#if MAX_SKB_FRAGS < 8
#define WX_RX_BUFSZ          ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
#else
#define WX_RX_BUFSZ          WX_RXBUFFER_2K
#endif

#define WX_RX_BUFFER_WRITE   16 /* Must be power of 2 */

#define WX_MAX_DATA_PER_TXD  BIT(14)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)     DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED          (MAX_SKB_FRAGS + 4)
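A worked example of the descriptor math (hypothetical sizes): with WX_MAX_DATA_PER_TXD = BIT(14) = 16384 bytes, a 60000-byte fragment needs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4 data descriptors, and DESC_NEEDED reserves MAX_SKB_FRAGS + 4 ring slots as the per-packet worst case before the driver stops the queue.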

/* Ether Types */
#define WX_ETH_P_CNM         0x22E7

#define WX_CFG_PORT_ST       0x14404

/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD       BIT(0)  /* Done */
#define WX_RXD_STAT_EOP      BIT(1)  /* End of Packet */

#define WX_RXD_ERR_RXE       BIT(29) /* Any MAC Error */

/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD       BIT(0)  /* Descriptor Done */
#define WX_TXD_DTYP_DATA     0       /* Adv Data Descriptor */
#define WX_TXD_PAYLEN_SHIFT  13      /* Desc PAYLEN shift */
#define WX_TXD_EOP           BIT(24) /* End of Packet */
#define WX_TXD_IFCS          BIT(25) /* Insert FCS */
#define WX_TXD_RS            BIT(27) /* Report Status */

/* Host Interface Command Structures */
struct wx_hic_hdr {
	u8 cmd;
@@ -270,9 +412,12 @@ struct wx_mac_info {
	bool set_lben;
	u8 addr[ETH_ALEN];
	u8 perm_addr[ETH_ALEN];
	u32 mta_shadow[128];
	s32 mc_filter_type;
	u32 mcft_size;
	u32 num_rar_entries;
	u32 rx_pb_size;
	u32 tx_pb_size;
	u32 max_tx_queues;
	u32 max_rx_queues;
@@ -312,6 +457,161 @@ enum wx_reset_type {
	WX_GLOBAL_RESET
};

struct wx_cb {
	dma_addr_t dma;
	u16 append_cnt;      /* number of skb's appended */
	bool page_released;
	bool dma_released;
};

#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)

/* Transmit Descriptor */
union wx_tx_desc {
	struct {
		__le64 buffer_addr; /* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd; /* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};

/* Receive Descriptor */
union wx_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
	} read;
	struct {
		struct {
			union {
				__le32 data;
				struct {
					__le16 pkt_info; /* RSS, Pkt type */
					__le16 hdr_info; /* Splithdr, hdrlen */
				} hs_rss;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				struct {
					__le16 ip_id; /* IP id */
					__le16 csum; /* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error; /* ext status/error */
			__le16 length; /* Packet length */
			__le16 vlan; /* VLAN tag */
		} upper;
	} wb; /* writeback */
};

#define WX_RX_DESC(R, i)     \
	(&(((union wx_rx_desc *)((R)->desc))[i]))
#define WX_TX_DESC(R, i)     \
	(&(((union wx_tx_desc *)((R)->desc))[i]))

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct wx_tx_buffer {
	union wx_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct wx_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	dma_addr_t page_dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct wx_queue_stats {
	u64 packets;
	u64 bytes;
};

/* iterator for handling rings in ring container */
#define wx_for_each_ring(posm, headm) \
	for (posm = (headm).ring; posm; posm = posm->next)
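A usage sketch for the iterator (assumed caller context, not from the patch): walk every Tx ring attached to one q_vector and total its packet count.

	struct wx_ring *ring;
	u64 packets = 0;

	wx_for_each_ring(ring, q_vector->tx)	/* follows the ->next links */
		packets += ring->stats.packets;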

struct wx_ring_container {
	struct wx_ring *ring;          /* pointer to linked list of rings */
	unsigned int total_bytes;      /* total bytes processed this int */
	unsigned int total_packets;    /* total packets processed this int */
	u8 count;                      /* total number of rings in vector */
	u8 itr;                        /* current ITR setting for ring */
};

struct wx_ring {
	struct wx_ring *next;          /* pointer to next ring in q_vector */
	struct wx_q_vector *q_vector;  /* backpointer to host q_vector */
	struct net_device *netdev;     /* netdev ring belongs to */
	struct device *dev;            /* device for DMA mapping */
	struct page_pool *page_pool;
	void *desc;                    /* descriptor ring memory */
	union {
		struct wx_tx_buffer *tx_buffer_info;
		struct wx_rx_buffer *rx_buffer_info;
	};
	u8 __iomem *tail;
	dma_addr_t dma;                /* phys. address of descriptor ring */
	unsigned int size;             /* length in bytes */

	u16 count;                     /* amount of descriptors */

	u8 queue_index;                /* needed for multiqueue queue management */
	u8 reg_idx;                    /* holds the special value that gets
					* the hardware register offset
					* associated with this ring, which is
					* different for DCB and RSS modes
					*/
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	struct wx_queue_stats stats;
	struct u64_stats_sync syncp;
} ____cacheline_internodealigned_in_smp;

struct wx_q_vector {
	struct wx *wx;
	int cpu;        /* CPU for DCA */
	int numa_node;
	u16 v_idx;      /* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring
			 */
	u16 itr;        /* Interrupt throttle rate written to EITR */
	struct wx_ring_container rx, tx;
	struct napi_struct napi;
	struct rcu_head rcu;    /* to avoid race with update stats on free */

	char name[IFNAMSIZ + 17];

	/* for dynamic allocation of rings associated with this q_vector */
	struct wx_ring ring[0] ____cacheline_internodealigned_in_smp;
};

enum wx_isb_idx {
	WX_ISB_HEADER,
	WX_ISB_MISC,
	WX_ISB_VEC0,
	WX_ISB_VEC1,
	WX_ISB_MAX
};

struct wx {
	u8 __iomem *hw_addr;
	struct pci_dev *pdev;
@@ -331,6 +631,7 @@ struct wx {
	u16 oem_svid;
	u16 msg_enable;
	bool adapter_stopped;
	u16 tpid[8];
	char eeprom_id[32];
	enum wx_reset_type reset_type;

@@ -360,6 +661,18 @@ struct wx {
	u32 tx_ring_count;
	u32 rx_ring_count;

	struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
	struct wx_ring *rx_ring[64];
	struct wx_q_vector *q_vector[64];

	unsigned int queues_per_pool;
	struct msix_entry *msix_entries;

	/* misc interrupt status block */
	dma_addr_t isb_dma;
	u32 *isb_mem;
	u32 isb_tag[WX_ISB_MAX];

#define WX_MAX_RETA_ENTRIES 128
	u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];

@@ -371,6 +684,7 @@ struct wx {
};

#define WX_INTR_ALL (~0ULL)
#define WX_INTR_Q(i) BIT(i)

/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -13,6 +13,7 @@

#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"

@@ -112,6 +113,9 @@ static int ngbe_sw_init(struct wx *wx)
	wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
	wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
	wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
	wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
	wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
	wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);
@@ -148,27 +152,211 @@ static int ngbe_sw_init(struct wx *wx)
	return 0;
}

/**
 * ngbe_irq_enable - Enable default interrupt generation settings
 * @wx: board private structure
 * @queues: enable all queues interrupts
 **/
static void ngbe_irq_enable(struct wx *wx, bool queues)
{
	u32 mask;

	/* enable misc interrupt */
	mask = NGBE_PX_MISC_IEN_MASK;

	wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0);
	wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1);
	wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0);
	wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3);

	wr32(wx, WX_PX_MISC_IEN, mask);

	/* unmask interrupt */
	if (queues)
		wx_intr_enable(wx, NGBE_INTR_ALL);
	else
		wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}

/**
 * ngbe_intr - msi/legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct wx *wx = data;
	struct pci_dev *pdev;
	u32 eicr;

	q_vector = wx->q_vector[0];
	pdev = wx->pdev;

	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
	if (!eicr) {
		/* shared interrupt alert!
		 * re-enable the interrupt that was masked before the EICR read.
		 */
		if (netif_running(wx->netdev))
			ngbe_irq_enable(wx, true);
		return IRQ_NONE;        /* Not our interrupt */
	}
	wx->isb_mem[WX_ISB_VEC0] = 0;
	if (!(pdev->msi_enabled))
		wr32(wx, WX_PX_INTA, 1);

	wx->isb_mem[WX_ISB_MISC] = 0;
	/* would disable interrupts here but it is auto disabled */
	napi_schedule_irqoff(&q_vector->napi);

	if (netif_running(wx->netdev))
		ngbe_irq_enable(wx, false);

	return IRQ_HANDLED;
}

static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
{
	struct wx *wx = data;

	/* re-enable the original interrupt state, no lsc, no queues */
	if (netif_running(wx->netdev))
		ngbe_irq_enable(wx, false);

	return IRQ_HANDLED;
}

/**
 * ngbe_request_msix_irqs - Initialize MSI-X interrupts
 * @wx: board private structure
 *
 * ngbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ngbe_request_msix_irqs(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int vector, err;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring)
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", netdev->name, entry->entry);
		else
			/* skip this unused q_vector */
			continue;

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	/* vector == num_q_vectors here: the last entry is the misc vector */
	err = request_irq(wx->msix_entries[vector].vector,
			  ngbe_msix_other, 0, netdev->name, wx);

	if (err) {
		wx_err(wx, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(wx->msix_entries[vector].vector,
			 wx->q_vector[vector]);
	}
	wx_reset_interrupt_capability(wx);
	return err;
}

/**
 * ngbe_request_irq - initialize interrupts
 * @wx: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ngbe_request_irq(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	if (pdev->msix_enabled)
		err = ngbe_request_msix_irqs(wx);
	else if (pdev->msi_enabled)
		err = request_irq(pdev->irq, ngbe_intr, 0,
				  netdev->name, wx);
	else
		err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED,
				  netdev->name, wx);

	if (err)
		wx_err(wx, "request_irq failed, Error %d\n", err);

	return err;
}

static void ngbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);
	/* disable receives */
	wx_disable_rx(wx);
	wx_napi_disable_all(wx);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);
	if (wx->gpio_ctrl)
		ngbe_sfp_modules_txrx_powerctl(wx, false);
	wx_irq_disable(wx);
	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}
}

static void ngbe_down(struct wx *wx)
{
	phy_stop(wx->phydev);
	ngbe_disable_device(wx);
	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}

static void ngbe_up(struct wx *wx)
{
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);
	/* enable transmits */
	netif_tx_start_all_queues(wx->netdev);

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC);
	rd32(wx, WX_PX_MISC_IC);
	ngbe_irq_enable(wx, true);
	if (wx->gpio_ctrl)
		ngbe_sfp_modules_txrx_powerctl(wx, true);

	phy_start(wx->phydev);
}
@@ -187,12 +375,39 @@ static int ngbe_open(struct net_device *netdev)
	int err;

	wx_control_hw(wx, true);
-	err = ngbe_phy_connect(wx);

	err = wx_setup_resources(wx);
	if (err)
		return err;

	wx_configure(wx);

	err = ngbe_request_irq(wx);
	if (err)
		goto err_free_resources;

+	err = ngbe_phy_connect(wx);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_dis_phy;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_dis_phy;

	ngbe_up(wx);

	return 0;
err_dis_phy:
	phy_disconnect(wx->phydev);
err_free_irq:
	wx_free_irq(wx);
err_free_resources:
	wx_free_resources(wx);
	return err;
}

/**

@@ -211,18 +426,14 @@ static int ngbe_close(struct net_device *netdev)
	struct wx *wx = netdev_priv(netdev);

	ngbe_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);
	phy_disconnect(wx->phydev);
	wx_control_hw(wx, false);

	return 0;
}

-static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb,
-				   struct net_device *netdev)
-{
-	return NETDEV_TX_OK;
-}
-
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct wx *wx = pci_get_drvdata(pdev);

@@ -258,9 +469,11 @@ static void ngbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops ngbe_netdev_ops = {
	.ndo_open               = ngbe_open,
	.ndo_stop               = ngbe_close,
-	.ndo_start_xmit         = ngbe_xmit_frame,
+	.ndo_start_xmit         = wx_xmit_frame,
	.ndo_set_rx_mode        = wx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = wx_set_mac,
	.ndo_get_stats64        = wx_get_stats64,
};

/**

@@ -336,6 +549,17 @@ static int ngbe_probe(struct pci_dev *pdev,
	netdev->netdev_ops = &ngbe_netdev_ops;

-	netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features = NETIF_F_SG;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_RXALL;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	wx->bd_number = func_nums;
	/* setup the private structure */

@@ -411,10 +635,14 @@ static int ngbe_probe(struct pci_dev *pdev,
	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_mac_table;

	/* phy Interface Configuration */
	err = ngbe_mdio_init(wx);
	if (err)
-		goto err_free_mac_table;
+		goto err_clear_interrupt_scheme;

	err = register_netdev(netdev);
	if (err)

@@ -431,6 +659,8 @@ static int ngbe_probe(struct pci_dev *pdev,

err_register:
	wx_control_hw(wx, false);
err_clear_interrupt_scheme:
	wx_clear_interrupt_scheme(wx);
err_free_mac_table:
	kfree(wx->mac_table);
err_pci_release_regions:

@@ -462,6 +692,7 @@ static void ngbe_remove(struct pci_dev *pdev)
			    pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -90,6 +90,20 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_GPIO_DDR_0              BIT(0) /* SDP0 IO direction */
#define NGBE_GPIO_DDR_1              BIT(1) /* SDP1 IO direction */

/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST     BIT(10)
#define NGBE_PX_MISC_IEN_ETH_LK      BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR     BIT(20)
#define NGBE_PX_MISC_IEN_GPIO        BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
				NGBE_PX_MISC_IEN_DEV_RST | \
				NGBE_PX_MISC_IEN_ETH_LK | \
				NGBE_PX_MISC_IEN_INT_ERR | \
				NGBE_PX_MISC_IEN_GPIO)

#define NGBE_INTR_ALL                0x1FF
#define NGBE_INTR_MISC(A)            BIT((A)->num_q_vectors)
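For example (illustrative, assuming one queue vector): with num_q_vectors == 1, NGBE_INTR_MISC() evaluates to BIT(1), placing the misc interrupt in the first bit after the queue vectors; this matches ngbe_request_msix_irqs() above, which requests the misc handler on msix_entries[num_q_vectors].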

#define NGBE_PHY_CONFIG(reg_offset)  (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED           0x14440
#define NGBE_CFG_PORT_ST             0x14404

@@ -120,6 +134,10 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_ETH_LENGTH_OF_ADDRESS   6
#define NGBE_MAX_MSIX_VECTORS        0x09
#define NGBE_RAR_ENTRIES             32
#define NGBE_RX_PB_SIZE              42
#define NGBE_MC_TBL_SIZE             128
#define NGBE_TDB_PB_SZ               (20 * 1024) /* 20KB Packet Buffer */
#define NGBE_MAX_JUMBO_FRAME_SIZE    9432 /* max payload 9414 */

/* TX/RX descriptor defines */
#define NGBE_DEFAULT_TXD             512 /* default ring size */
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -11,6 +11,7 @@
#include <net/ip.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
@ -72,9 +73,177 @@ static int txgbe_enumerate_functions(struct wx *wx)
|
|||
return physfns;
|
||||
}
|
||||
|
||||
/**
|
||||
* txgbe_irq_enable - Enable default interrupt generation settings
|
||||
* @wx: pointer to private structure
|
||||
* @queues: enable irqs for queues
|
||||
**/
|
||||
static void txgbe_irq_enable(struct wx *wx, bool queues)
|
||||
{
|
||||
/* unmask interrupt */
|
||||
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
|
||||
if (queues)
|
||||
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
|
||||
}
|
||||
|
||||
/**
|
||||
* txgbe_intr - msi/legacy mode Interrupt Handler
|
||||
* @irq: interrupt number
|
||||
* @data: pointer to a network interface device structure
|
||||
**/
|
||||
static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
|
||||
{
|
||||
struct wx_q_vector *q_vector;
|
||||
struct wx *wx = data;
|
||||
struct pci_dev *pdev;
|
||||
u32 eicr;
|
||||
|
||||
q_vector = wx->q_vector[0];
|
||||
pdev = wx->pdev;
|
||||
|
||||
eicr = wx_misc_isb(wx, WX_ISB_VEC0);
|
||||
if (!eicr) {
|
||||
/* shared interrupt alert!
|
||||
* the interrupt that we masked before the ICR read.
|
||||
*/
|
||||
if (netif_running(wx->netdev))
|
||||
txgbe_irq_enable(wx, true);
|
||||
return IRQ_NONE; /* Not our interrupt */
|
||||
}
|
||||
wx->isb_mem[WX_ISB_VEC0] = 0;
|
||||
if (!(pdev->msi_enabled))
|
||||
wr32(wx, WX_PX_INTA, 1);
|
||||
|
||||
wx->isb_mem[WX_ISB_MISC] = 0;
|
||||
/* would disable interrupts here but it is auto disabled */
|
||||
napi_schedule_irqoff(&q_vector->napi);
|
||||
|
||||
/* re-enable link(maybe) and non-queue interrupts, no flush.
|
||||
* txgbe_poll will re-enable the queue interrupts
|
||||
*/
|
||||
if (netif_running(wx->netdev))
|
||||
txgbe_irq_enable(wx, false);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
|
||||
{
|
||||
struct wx *wx = data;
|
||||
|
||||
/* re-enable the original interrupt state */
|
||||
if (netif_running(wx->netdev))
|
||||
txgbe_irq_enable(wx, false);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}

/**
 * txgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @wx: board private structure
 *
 * Allocate MSI-X vectors and request interrupts from the kernel.
 **/
static int txgbe_request_msix_irqs(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int vector, err;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring)
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", netdev->name, entry->entry);
		else
			/* skip this unused q_vector */
			continue;

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(wx->msix_entries[vector].vector,
			  txgbe_msix_other, 0, netdev->name, wx);
	if (err) {
		wx_err(wx, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(wx->msix_entries[vector].vector,
			 wx->q_vector[vector]);
	}
	wx_reset_interrupt_capability(wx);
	return err;
}
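The free_queue_irqs unwind above only releases the vectors requested so far; a full teardown must also release the trailing misc vector. Roughly, the matching free path has the shape below (the series implements this in libwx as wx_free_irq(); this body is an approximation for illustration, not the actual code):

static void example_free_msix_irqs(struct wx *wx)
{
	int vector;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];

		/* mirror the request path: unused q_vectors were skipped */
		if (q_vector->tx.ring && q_vector->rx.ring)
			free_irq(wx->msix_entries[vector].vector, q_vector);
	}

	/* the entry past the queue vectors carries the misc/other irq */
	free_irq(wx->msix_entries[vector].vector, wx);
}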
|
||||
|
||||
/**
|
||||
* txgbe_request_irq - initialize interrupts
|
||||
* @wx: board private structure
|
||||
*
|
||||
* Attempt to configure interrupts using the best available
|
||||
* capabilities of the hardware and kernel.
|
||||
**/
|
||||
static int txgbe_request_irq(struct wx *wx)
|
||||
{
|
||||
struct net_device *netdev = wx->netdev;
|
||||
struct pci_dev *pdev = wx->pdev;
|
||||
int err;
|
||||
|
||||
if (pdev->msix_enabled)
|
||||
err = txgbe_request_msix_irqs(wx);
|
||||
else if (pdev->msi_enabled)
|
||||
err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
|
||||
netdev->name, wx);
|
||||
else
|
||||
err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
|
||||
netdev->name, wx);
|
||||
|
||||
if (err)
|
||||
wx_err(wx, "request_irq failed, Error %d\n", err);
|
||||
|
||||
return err;
|
||||
}

static void txgbe_up_complete(struct wx *wx)
{
	u32 reg;

	wx_control_hw(wx, true);
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC);
	rd32(wx, WX_PX_MISC_IC);
	txgbe_irq_enable(wx, true);

	/* Configure MAC Rx and Tx when link is up */
	reg = rd32(wx, WX_MAC_RX_CFG);
	wr32(wx, WX_MAC_RX_CFG, reg);
	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
	reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
	wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
	reg = rd32(wx, WX_MAC_TX_CFG);
	wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);

	/* enable transmits */
	netif_tx_start_all_queues(wx->netdev);
	netif_carrier_on(wx->netdev);
}

static void txgbe_reset(struct wx *wx)

@@ -96,14 +265,24 @@ static void txgbe_reset(struct wx *wx)
static void txgbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	wx_disable_pcie_master(wx);
	/* disable receives */
	wx_disable_rx(wx);

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	wx_irq_disable(wx);
	wx_napi_disable_all(wx);

	if (wx->bus.func < 2)
		wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
	else

@@ -116,6 +295,13 @@ static void txgbe_disable_device(struct wx *wx)
		wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the Tx DMA engine */
	wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
}

@@ -124,6 +310,9 @@ static void txgbe_down(struct wx *wx)
{
	txgbe_disable_device(wx);
	txgbe_reset(wx);

	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}

/**

@@ -132,12 +321,15 @@ static void txgbe_down(struct wx *wx)
 **/
static int txgbe_sw_init(struct wx *wx)
{
	u16 msix_count = 0;
	int err;

	wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
	wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
	wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
	wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
	wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);

@@ -156,6 +348,25 @@ static int txgbe_sw_init(struct wx *wx)
		break;
	}

	/* Set common capability flags and settings */
	wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
	err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
	if (err)
		wx_err(wx, "MSI-X not supported\n");
	wx->mac.max_msix_vectors = msix_count;

	/* enable itr by default in dynamic mode */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;

	/* set default ring sizes */
	wx->tx_ring_count = TXGBE_DEFAULT_TXD;
	wx->rx_ring_count = TXGBE_DEFAULT_RXD;

	/* set default work limits */
	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;

	return 0;
}
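For context on the wx_get_pcie_msix_counts() call above: the vector count advertised by the device's MSI-X capability in PCI config space is read and clamped to the driver limit. A rough sketch of that shape (an assumption about the libwx helper inferred from its use here, not its actual body):

static int example_get_msix_count(struct pci_dev *pdev, u16 *count, u16 limit)
{
	int vecs = pci_msix_vec_count(pdev);	/* from the MSI-X capability */

	if (vecs < 0)
		return vecs;			/* no MSI-X capability */

	*count = min_t(u16, vecs, limit);
	return 0;
}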

@@ -171,10 +382,39 @@ static int txgbe_sw_init(struct wx *wx)
static int txgbe_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	err = wx_setup_resources(wx);
	if (err)
		goto err_reset;

	wx_configure(wx);

	err = txgbe_request_irq(wx);
	if (err)
		goto err_free_isb;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_free_irq;

	txgbe_up_complete(wx);

	return 0;

err_free_irq:
	wx_free_irq(wx);
err_free_isb:
	wx_free_isb_resources(wx);
err_reset:
	txgbe_reset(wx);

	return err;
}

/**

@@ -187,6 +427,7 @@ static int txgbe_open(struct net_device *netdev)
static void txgbe_close_suspend(struct wx *wx)
{
	txgbe_disable_device(wx);
	wx_free_resources(wx);
}

/**

@@ -205,6 +446,8 @@ static int txgbe_close(struct net_device *netdev)
	struct wx *wx = netdev_priv(netdev);

	txgbe_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);
	wx_control_hw(wx, false);

	return 0;

@@ -240,18 +483,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
	}
}

static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	return NETDEV_TX_OK;
}

static const struct net_device_ops txgbe_netdev_ops = {
	.ndo_open = txgbe_open,
	.ndo_stop = txgbe_close,
	.ndo_start_xmit = txgbe_xmit_frame,
	.ndo_start_xmit = wx_xmit_frame,
	.ndo_set_rx_mode = wx_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = wx_set_mac,
	.ndo_get_stats64 = wx_get_stats64,
};

/**

@@ -354,6 +593,16 @@ static int txgbe_probe(struct pci_dev *pdev,
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features = NETIF_F_SG;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features | NETIF_F_RXALL;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	/* make sure the EEPROM is good */
	err = txgbe_validate_eeprom_checksum(wx, NULL);
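A quick check of the max_mtu arithmetic above: with ETH_HLEN = 14 and ETH_FCS_LEN = 4, TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN) = 9432 - 18 = 9414, which matches the "max payload 9414" note on the define.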

@@ -367,6 +616,10 @@ static int txgbe_probe(struct pci_dev *pdev,
	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_mac_table;

	/* Save off EEPROM version number and Option Rom version which
	 * together make a unique identifier for the eeprom
	 */

@@ -411,6 +664,8 @@ static int txgbe_probe(struct pci_dev *pdev,

	pci_set_drvdata(pdev, wx);

	netif_tx_stop_all_queues(netdev);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these

@@ -435,6 +690,7 @@ static int txgbe_probe(struct pci_dev *pdev,
	return 0;

err_release_hw:
	wx_clear_interrupt_scheme(wx);
	wx_control_hw(wx, false);
err_free_mac_table:
	kfree(wx->mac_table);

@@ -468,6 +724,7 @@ static void txgbe_remove(struct pci_dev *pdev)
			    pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);

	pci_disable_pcie_error_reporting(pdev);

@@ -67,6 +67,7 @@
#define TXGBE_PBANUM1_PTR 0x06
#define TXGBE_PBANUM_PTR_GUARD 0xFAFA

#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63

#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)

@@ -76,6 +77,26 @@
#define TXGBE_SP_MAX_RX_QUEUES 128
#define TXGBE_SP_RAR_ENTRIES 128
#define TXGBE_SP_MC_TBL_SIZE 128
#define TXGBE_SP_RX_PB_SIZE 512
#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */

/* TX/RX descriptor defines */
#define TXGBE_DEFAULT_TXD 512
#define TXGBE_DEFAULT_TX_WORK 256

#if (PAGE_SIZE < 8192)
#define TXGBE_DEFAULT_RXD 512
#define TXGBE_DEFAULT_RX_WORK 256
#else
#define TXGBE_DEFAULT_RXD 256
#define TXGBE_DEFAULT_RX_WORK 128
#endif
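The PAGE_SIZE split above keeps the default Rx memory footprint roughly level: Rx buffers are carved from pages (the library selects PAGE_POOL), so on 8 KiB-and-larger pages each descriptor pins more memory, and the defaults halve accordingly. Worked out: with 4 KiB pages, TXGBE_DEFAULT_RXD = 512 and TXGBE_DEFAULT_RX_WORK = 256; with 64 KiB pages, 256 and 128.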

#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
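A worked example of the two mask macros, matching their use in txgbe_irq_enable():

/* With wx->num_q_vectors == 3:
 *   TXGBE_INTR_MISC(wx) == BIT(3)     == 0x8  (misc/other vector)
 *   TXGBE_INTR_QALL(wx) == BIT(3) - 1 == 0x7  (queue vectors 0-2)
 * so txgbe_irq_enable(wx, true) unmasks 0x8 and then 0x7.
 */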

#define TXGBE_MAX_EITR GENMASK(11, 3)

extern char txgbe_driver_name[];