Commit 3a2e15df authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e only.

Anjali provides two cleanups to remove unnecessary code and a fix to
resolve debugfs dumping only half of the NVM.  She then provides a fix
for ethtool NVM reads, which were being served from the shadow RAM copy
instead of performing actual NVM reads.
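
For reference, the new ethtool path walks the flash in 4 KB sectors through
the admin queue instead of word-reading the shadow RAM; a condensed sketch of
the loop from the i40e_get_eeprom() hunk below (NVM locking and error
reporting trimmed for brevity):

	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		if (i == (sectors - 1)) {
			/* final sector may be short */
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		ret_val = i40e_aq_read_nvm(hw, 0x0,
				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
				len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val)
			break;	/* the real code logs and releases the NVM */
	}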

Jesse provides a couple of fixes: the first removes custom i40e functions
which duplicate existing kernel functionality, and the second fixes
constant cast issues by replacing __constant_htons() with htons().
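
The htons() change is purely mechanical; htons() already folds to a
compile-time constant when its argument is constant, so the __constant_
variant buys nothing, e.g.:

	/* before */
	if (protocol == __constant_htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	/* after */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;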

Mitch provides several fixes for the VF interfaces in i40e.  The first
guards against VF message races which can cause a panic.  The second
reinitializes the buffer size each time we clean the ARQ, since otherwise
subsequent messages can be truncated.  Lastly, he adds functionality to
enable/disable ICR 0 dynamically.
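
Condensed from the i40e_vc_process_vf_msg() and i40e_clean_adminq_subtask()
hunks below, the first two fixes boil down to a bounds check before indexing
the VF array and a per-pass reset of the ARQ message size:

	/* reject messages for VFs that are not (or no longer) allocated */
	if (vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &pf->vf[vf_id];

	/* msg_size is overwritten with the size of each received message,
	 * so restore the full buffer size before every clean or later
	 * messages may be truncated
	 */
	do {
		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
		ret = i40e_clean_arq_element(hw, &event, &pending);
		/* ... dispatch the event ... */
	} while (pending);	/* loop condition condensed */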

Vasu adds a simple guard against multiple includes of the i40e_txrx.h
file.
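
The guard is the standard pattern shown in the i40e_txrx.h hunk below; with
it in place, a second #include of the header compiles to nothing:

	#ifndef _I40E_TXRX_H_
	#define _I40E_TXRX_H_
	/* ... existing contents of i40e_txrx.h ... */
	#endif /* _I40E_TXRX_H_ */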

Shannon provides a couple of fixes: the first swaps a couple of lines
around in the error handling when allocation of the VSI array fails.
The second fixes an issue where we try to free a q_vector that has not
been set up, which can panic the kernel.
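
The q_vector fix is a NULL test before the dereference in
i40e_vsi_free_irq(), as in the hunk below:

	/* free only the irqs that were actually requested */
	if (!vsi->q_vectors[i] || !vsi->q_vectors[i]->num_ringpairs)
		continue;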

David provides a patch to save off the pointer to memory and the length
of two structs used in the admin queue, in order to store all of the
information about allocated kernel memory.
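
Concretely, the admin queue ring now carries its own memory bookkeeping
instead of bare pointers plus a separate dma_addr, per the i40e_adminq.h
hunk below:

	struct i40e_adminq_ring {
		struct i40e_virt_mem dma_head;	/* space for dma structures */
		struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
		struct i40e_virt_mem cmd_buf;	/* command buffer memory */
		/* ... remaining fields unchanged ... */
	};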

Neerav fixes ring allocation so that allocation and clearing of rings
for a VSI use alloc_queue_pairs rather than num_queue_pairs.  He also
removes the unused define for multi-queue enabled.
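
The ring fix is a one-word change in the allocation and clearing loops,
e.g. in i40e_alloc_rings() as shown in the hunk below:

	/* before */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
	/* after */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
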
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1e85c9b6 c3f0c4fe
@@ -65,7 +65,7 @@
 #define I40E_MAX_NPAR_QPS 32
 #define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
 #define I40E_DEFAULT_NUM_DESCRIPTORS 512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32
 #define I40E_MIN_NUM_DESCRIPTORS 64
@@ -230,28 +230,24 @@ struct i40e_pf {
 #define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
 #define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
 #define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 22)
 #define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
 #ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
 #endif
-	u16 num_tx_queues;
-	u16 num_rx_queues;
 	bool stat_offsets_loaded;
 	struct i40e_hw_port_stats stats;
 	struct i40e_hw_port_stats stats_offsets;
@@ -521,13 +517,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
 				    bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
-			  struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
-			    struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
-			     struct i40e_ring *tx_ring);
 int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
 			     struct i40e_pf *pf, bool add);
@@ -565,6 +554,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
......
@@ -66,9 +66,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
 					 (hw->aq.num_asq_entries *
 					 sizeof(struct i40e_aq_desc)),
@@ -76,21 +75,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 	if (ret_code)
 		return ret_code;
-	hw->aq.asq.desc = hw->aq.asq_mem.va;
-	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-	ret_code = i40e_allocate_virt_mem(hw, &mem,
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
 					  (hw->aq.num_asq_entries *
 					  sizeof(struct i40e_asq_cmd_details)));
 	if (ret_code) {
-		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-		hw->aq.asq_mem.va = NULL;
-		hw->aq.asq_mem.pa = 0;
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 		return ret_code;
 	}
-	hw->aq.asq.details = mem.va;
 	return ret_code;
 }
@@ -102,16 +94,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
 					 (hw->aq.num_arq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
-	if (ret_code)
-		return ret_code;
-	hw->aq.arq.desc = hw->aq.arq_mem.va;
-	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
 	return ret_code;
 }
@@ -125,14 +112,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 **/
 static void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
-	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-	hw->aq.asq_mem.va = NULL;
-	hw->aq.asq_mem.pa = 0;
-	mem.va = hw->aq.asq.details;
-	i40e_free_virt_mem(hw, &mem);
-	hw->aq.asq.details = NULL;
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
@@ -144,9 +124,7 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
 **/
 static void i40e_free_adminq_arq(struct i40e_hw *hw)
 {
-	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
-	hw->aq.arq_mem.va = NULL;
-	hw->aq.arq_mem.pa = 0;
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 }
@@ -157,7 +135,6 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
 	struct i40e_aq_desc *desc;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
@@ -166,11 +143,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 	 */
 	/* buffer_info structures do not need alignment */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_arq_bufs;
-	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -212,8 +189,7 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 	return ret_code;
 }
@@ -225,16 +201,15 @@ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 	/* No mapped memory needed yet, just the buffer info structures */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_asq_bufs;
-	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -254,8 +229,7 @@ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 	return ret_code;
 }
@@ -266,14 +240,17 @@ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 **/
 static void i40e_free_arq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
+	/* free descriptors */
 	for (i = 0; i < hw->aq.num_arq_entries; i++)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 }
@@ -282,7 +259,6 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw)
 **/
 static void i40e_free_asq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 	/* only unmap if the address is non-NULL */
@@ -290,9 +266,14 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
 		if (hw->aq.asq.r.asq_bi[i].pa)
 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	/* now free the buffer info list */
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 }
@@ -305,14 +286,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQBAH1,
+		     upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQBAL1,
+		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	} else {
 		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQBAH,
+		     upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQBAL,
+		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
 	}
@@ -328,14 +313,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQBAH1,
+		     upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQBAL1,
+		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	} else {
 		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQBAH,
+		     upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQBAL,
+		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
 	}
@@ -483,8 +472,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_asq(hw);
 	mutex_unlock(&hw->aq.asq_mutex);
@@ -516,8 +503,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_arq(hw);
 	mutex_unlock(&hw->aq.arq_mutex);
......
@@ -32,20 +32,20 @@
 #include "i40e_adminq_cmd.h"
 #define I40E_ADMINQ_DESC(R, i) \
-	(&(((struct i40e_aq_desc *)((R).desc))[i]))
+	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
 #define I40E_ADMINQ_DESC_ALIGNMENT 4096
 struct i40e_adminq_ring {
-	void *desc;		/* Descriptor ring memory */
-	void *details;		/* ASQ details */
+	struct i40e_virt_mem dma_head;	/* space for dma structures */
+	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
+	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
 	union {
 		struct i40e_dma_mem *asq_bi;
 		struct i40e_dma_mem *arq_bi;
 	} r;
-	u64 dma_addr;		/* Physical address of the ring */
 	u16 count;		/* Number of descriptors */
 	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
@@ -70,7 +70,7 @@ struct i40e_asq_cmd_details {
 };
 #define I40E_ADMINQ_DETAILS(R, i) \
-	(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
 /* ARQ event information */
 struct i40e_arq_event_info {
@@ -95,9 +95,6 @@ struct i40e_adminq_info {
 	struct mutex asq_mutex;	/* Send queue lock */
 	struct mutex arq_mutex;	/* Receive queue lock */
-	struct i40e_dma_mem asq_mem;	/* send queue dynamic memory */
-	struct i40e_dma_mem arq_mem;	/* receive queue dynamic memory */
 	/* last status values on send and receive queues */
 	enum i40e_admin_queue_err asq_last_status;
 	enum i40e_admin_queue_err arq_last_status;
......
@@ -239,33 +239,6 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 	return status;
 }
-/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
-{
-	i40e_status status = 0;
-	/* Make sure it is not a multicast address */
-	if (I40E_IS_MULTICAST(mac_addr)) {
-		hw_dbg(hw, "MAC address is multicast\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
-	/* Not a broadcast address */
-	} else if (I40E_IS_BROADCAST(mac_addr)) {
-		hw_dbg(hw, "MAC address is broadcast\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
-	/* Reject the zero address */
-	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
-		hw_dbg(hw, "MAC address is all zeros\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
-	}
-	return status;
-}
 /**
  * i40e_get_media_type - Gets media type
  * @hw: pointer to the hardware structure
......
@@ -192,12 +192,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
 		len = (sizeof(struct i40e_aq_desc)
 		       * pf->hw.aq.num_asq_entries);
-		memcpy(p, pf->hw.aq.asq.desc, len);
+		memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
 		p += len;
 		len = (sizeof(struct i40e_aq_desc)
 		       * pf->hw.aq.num_arq_entries);
-		memcpy(p, pf->hw.aq.arq.desc, len);
+		memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
 		p += len;
 		i40e_dbg_dump_data_len = buflen;
@@ -1740,10 +1740,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			dev_info(&pf->pdev->dev,
 				 "Read NVM module=0x%x offset=0x%x words=%d\n",
 				 module, offset, buffer_len);
-			if (buffer_len)
+			if (bytes)
 				print_hex_dump(KERN_INFO, "NVM Dump: ",
 					       DUMP_PREFIX_OFFSET, 16, 2,
-					       buff, buffer_len, true);
+					       buff, bytes, true);
 		}
 		kfree(buff);
 		buff = NULL;
......
@@ -351,38 +351,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
-	int first_word, last_word;
-	u16 i, eeprom_len;
-	u16 *eeprom_buff;
-	int ret_val = 0;
+	struct i40e_pf *pf = np->vsi->back;
+	int ret_val = 0, len;
+	u8 *eeprom_buff;
+	u16 i, sectors;
+	bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
 	if (eeprom->len == 0)
 		return -EINVAL;
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
-	first_word = eeprom->offset >> 1;
-	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-	eeprom_len = last_word - first_word + 1;
-	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
 	if (!eeprom_buff)
 		return -ENOMEM;
-	ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
-				       eeprom_buff);
-	if (eeprom_len == 0) {
-		kfree(eeprom_buff);
-		return -EACCES;
+	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret_val) {
+		dev_info(&pf->pdev->dev,
+			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+			 ret_val, hw->aq.asq_last_status);
+		goto free_buff;
+	}
+	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+	len = I40E_NVM_SECTOR_SIZE;
+	last = false;
+	for (i = 0; i < sectors; i++) {
+		if (i == (sectors - 1)) {
+			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+			last = true;
+		}
+		ret_val = i40e_aq_read_nvm(hw, 0x0,
+				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+				len,
+				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+				last, NULL);
+		if (ret_val) {
+			dev_info(&pf->pdev->dev,
+				 "read NVM failed err=%d status=0x%x\n",
+				 ret_val, hw->aq.asq_last_status);
+			goto release_nvm;
+		}
 	}
-	/* Device's eeprom is always little-endian, word addressable */
-	for (i = 0; i < eeprom_len; i++)
-		le16_to_cpus(&eeprom_buff[i]);
-	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+	i40e_release_nvm(hw);
+	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
 	kfree(eeprom_buff);
 	return ret_val;
 }
@@ -390,8 +408,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
+	u32 val;
-	return hw->nvm.sr_size * 2;
+	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+	/* register returns value in power of 2, 64Kbyte chunks. */
+	val = (64 * 1024) * (1 << val);
+	return val;
 }
 static void i40e_get_drvinfo(struct net_device *netdev,
......
@@ -2542,6 +2542,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 	i40e_flush(hw);
 }
+/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	i40e_flush(hw);
+}
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
@@ -2756,10 +2769,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 	icr0 = rd32(hw, I40E_PFINT_ICR0);
-	val = rd32(hw, I40E_PFINT_DYN_CTL0);
-	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
-	wr32(hw, I40E_PFINT_DYN_CTL0, val);
 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
 		return IRQ_NONE;
@@ -3140,7 +3149,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 			u16 vector = i + base;
 			/* free only the irqs that were actually requested */
-			if (vsi->q_vectors[i]->num_ringpairs == 0)
+			if (!vsi->q_vectors[i] ||
+			    !vsi->q_vectors[i]->num_ringpairs)
 				continue;
 			/* clear the affinity_mask in the IRQ descriptor */
@@ -3967,11 +3977,11 @@ static int i40e_open(struct net_device *netdev)
 		goto err_setup_rx;
 	/* Notify the stack of the actual queue counts. */
-	err = netif_set_real_num_tx_queues(netdev, pf->num_tx_queues);
+	err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
 	if (err)
 		goto err_set_queues;
-	err = netif_set_real_num_rx_queues(netdev, pf->num_rx_queues);
+	err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
 	if (err)
 		goto err_set_queues;
@@ -4480,12 +4490,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
 		return;
-	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
 	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 	if (!event.msg_buf)
 		return;
 	do {
+		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
 		ret = i40e_clean_arq_element(hw, &event, &pending);
 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
 			dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -5350,7 +5360,7 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 	int i;
 	if (vsi->tx_rings[0]) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
+		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
 			kfree_rcu(vsi->tx_rings[i], rcu);
 			vsi->tx_rings[i] = NULL;
 			vsi->rx_rings[i] = NULL;
@@ -5368,7 +5378,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 	int i;
 	/* Set basic values in the rings to be used later during open() */
-	for (i = 0; i < vsi->num_queue_pairs; i++) {
+	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
 		struct i40e_ring *tx_ring;
 		struct i40e_ring *rx_ring;
@@ -5629,7 +5639,6 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	if (err) {
 		pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
 			       I40E_FLAG_RSS_ENABLED |
-			       I40E_FLAG_MQ_ENABLED |
 			       I40E_FLAG_DCB_ENABLED |
 			       I40E_FLAG_SRIOV_ENABLED |
 			       I40E_FLAG_FDIR_ENABLED |
@@ -5812,7 +5821,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		    I40E_FLAG_MSI_ENABLED |
 		    I40E_FLAG_MSIX_ENABLED |
 		    I40E_FLAG_RX_PS_ENABLED |
-		    I40E_FLAG_MQ_ENABLED |
 		    I40E_FLAG_RX_1BUF_ENABLED;
 	/* Depending on PF configurations, it is possible that the RSS
@@ -7211,12 +7219,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 			i40e_fdir_teardown(pf);
 			return -EAGAIN;
 		}
-		/* accommodate kcompat by copying the main VSI queue count
-		 * into the pf, since this newer code pushes the pf queue
-		 * info down a level into a VSI
-		 */
-		pf->num_rx_queues = vsi->num_queue_pairs;
-		pf->num_tx_queues = vsi->num_queue_pairs;
 	} else {
 		/* force a reset of TC and queue layout configurations */
 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -7341,8 +7343,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 	 */
 	queues_left = pf->hw.func_caps.num_tx_qp;
-	if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
-	      (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
 	    !(pf->flags & (I40E_FLAG_RSS_ENABLED |
 			   I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
 	    (queues_left == 1)) {
@@ -7353,7 +7354,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		/* make sure all the fancies are disabled */
 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
-			       I40E_FLAG_MQ_ENABLED |
			       I40E_FLAG_FDIR_ENABLED |
 			       I40E_FLAG_FDIR_ATR_ENABLED |
 			       I40E_FLAG_DCB_ENABLED |
@@ -7644,7 +7644,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	i40e_get_mac_addr(hw, hw->mac.addr);
-	if (i40e_validate_mac_addr(hw->mac.addr)) {
+	if (!is_valid_ether_addr(hw->mac.addr)) {
 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
 		err = -EIO;
 		goto err_mac_addr;
@@ -7762,9 +7762,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Unwind what we've done if something failed in the setup */
 err_vsis:
 	set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
 	i40e_clear_interrupt_scheme(pf);
 	kfree(pf->vsi);
+err_switch_setup:
+	i40e_reset_interrupt_capability(pf);
 	del_timer_sync(&pf->service_timer);
 err_mac_addr:
 err_configure_lan_hmc:
......
@@ -205,7 +205,6 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
 						u8 *mac_addr);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
 			       struct i40e_lldp_variables *lldp_cfg);
 /* prototype for functions used for NVM access */
......
@@ -1317,7 +1317,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
-	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
@@ -1382,7 +1382,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		return err;
 	}
-	if (protocol == __constant_htons(ETH_P_IP)) {
+	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		iph->tot_len = 0;
@@ -1805,9 +1805,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
 	/* setup IPv4/IPv6 offloads */
-	if (protocol == __constant_htons(ETH_P_IP))
+	if (protocol == htons(ETH_P_IP))
 		tx_flags |= I40E_TX_FLAGS_IPV4;
-	else if (protocol == __constant_htons(ETH_P_IPV6))
+	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
......
@@ -25,6 +25,9 @@
  *
  ******************************************************************************/
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
 /* Interrupt Throttling and Rate Limiting (storm control) Goodies */
 #define I40E_MAX_ITR 0x07FF
@@ -295,3 +298,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
@@ -719,18 +719,19 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 **/
 void i40e_free_vfs(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	int i;
+	int i, tmp;
 	if (!pf->vf)
 		return;
 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
-	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
-	i40e_flush(hw);
+	i40e_irq_dynamic_disable_icr0(pf);
+	mdelay(10); /* let any messages in transit get finished up */
 	/* free up vf resources */
-	for (i = 0; i < pf->num_alloc_vfs; i++) {
+	tmp = pf->num_alloc_vfs;
+	pf->num_alloc_vfs = 0;
+	for (i = 0; i < tmp; i++) {
 		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 			i40e_free_vf_res(&pf->vf[i]);
 		/* disable qp mappings */
@@ -739,7 +740,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
 	kfree(pf->vf);
 	pf->vf = NULL;
-	pf->num_alloc_vfs = 0;
 	if (!i40e_vfs_are_assigned(pf))
 		pci_disable_sriov(pf->pdev);
@@ -748,11 +748,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
 			 "unable to disable SR-IOV because VFs are assigned.\n");
 	/* Re-enable interrupt 0. */
-	wr32(hw, I40E_PFINT_DYN_CTL0,
-	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
-	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
-	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
-	i40e_flush(hw);
+	i40e_irq_dynamic_enable_icr0(pf);
 }
 #ifdef CONFIG_PCI_IOV
@@ -768,6 +764,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 	struct i40e_vf *vfs;
 	int i, ret = 0;
+	/* Disable interrupt 0 so we don't try to handle the VFLR. */
+	i40e_irq_dynamic_disable_icr0(pf);
 	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 	if (ret) {
 		dev_err(&pf->pdev->dev,
@@ -804,6 +803,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 	if (ret)
 		i40e_free_vfs(pf);
 err_iov:
+	/* Re-enable interrupt 0. */
+	i40e_irq_dynamic_enable_icr0(pf);
 	return ret;
 }
@@ -1644,11 +1645,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen)
 {
-	struct i40e_vf *vf = &(pf->vf[vf_id]);
 	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
 	int ret;
 	pf->vf_aq_requests++;
+	if (vf_id >= pf->num_alloc_vfs)
+		return -EINVAL;
+	vf = &(pf->vf[vf_id]);
 	/* perform basic checks on the msg */
 	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
......