Commit a74e344a authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-08-27

This series contains updates to i40e and i40evf only.

Sudheer updates code comments and a state variable so that adminq_subtask
has accurate information whenever it gets scheduled.

Mariusz stores information about FEC modes to be used when printing link
state information, so that we do not need to call the admin queue when
reporting link status.  He also adds VF support for controlling VLAN tag
stripping via ethtool (see the usage sketch below).
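
As a usage sketch, VLAN tag stripping maps onto the generic rxvlan
offload flag in ethtool; the interface name below is hypothetical:

    # query current offload state (look for rx-vlan-offload)
    ethtool -k enp3s0f0v0

    # disable / enable VLAN tag stripping on the VF
    ethtool -K enp3s0f0v0 rxvlan off
    ethtool -K enp3s0f0v0 rxvlan on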

Jake provides the majority of changes in this series, starting with
increasing the size of the prefix buffer so that it can hold enough
characters for every possible input, which prevents snprintf truncation
(a minimal sketch of the pattern follows this paragraph).  He also fixed
other string truncation errors/warnings produced by GCC 7.x, and removed
an unnecessary workaround for resetting XPS.  He fixed a mismatched
affinity mask value by initializing the mask to cpu_possible_mask and
inverting the logic for checking incorrect CPU vs IRQ affinity, so that
the exceptional case is handled at the check.  Finally, he removed the
ULTRA latency mode due to several issues found with it; a better
solution for small packet workloads will be investigated.
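
A minimal sketch (not driver code) of the snprintf() pattern: size the
buffer for the worst-case expansion and pass sizeof() as the bound, so
the output can never be truncated and GCC 7.x's -Wformat-truncation
stays quiet.  The field values below are illustrative:

    #include <stdio.h>

    int main(void)
    {
        /* sized for the worst-case "i40e %02x:%02x.%x: \t0x" expansion */
        char prefix[27];
        unsigned int bus = 0xff, dev = 0xff, fn = 0x7;

        snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x",
                 bus, dev, fn);
        puts(prefix);
        return 0;
    }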

Akeem fixes an issue where the wrong flag (allmulti instead of alluni)
was passed when setting unicast promiscuous mode, so promiscuous mode
was being enabled only for multicast instead of unicast.

Carolyn fixes an issue where an error return value was set but then
overwritten before the function returned.  She removes the dead
error-code assignment and adds code comments explaining why we do not
need to set and return the error (a sketch of the pattern follows this
paragraph).
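
A sketch (hypothetical helpers, not driver code) of the dead-store
pattern being removed: the error code assigned after the first call is
overwritten before the function returns, so the assignment has no
effect:

    #include <errno.h>

    static int first_step(void)  { return -EIO; } /* hypothetical */
    static int second_step(void) { return 0; }    /* hypothetical */

    static int configure(void)
    {
        int ret = first_step();

        if (ret) {
            /* Non-fatal: log and continue.  Assigning ret = -ENOENT
             * here would be a dead store, because ret is overwritten
             * by the next call before the function returns.
             */
        }

        ret = second_step();
        return ret;
    }

    int main(void) { return configure() ? 1 : 0; }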
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cde66f24 742c9875
@@ -328,9 +328,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
             len = buf_len;
     /* write the full 16-byte chunks */
     if (hw->debug_mask & mask) {
-        char prefix[20];
+        char prefix[27];

-        snprintf(prefix, 20,
+        snprintf(prefix, sizeof(prefix),
              "i40e %02x:%02x.%x: \t0x",
              hw->bus.bus_id,
              hw->bus.device,
@@ -2529,6 +2529,10 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
         if (status)
             return status;

+        hw->phy.link_info.req_fec_info =
+            abilities.fec_cfg_curr_mod_ext_info &
+            (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
         memcpy(hw->phy.link_info.module_type, &abilities.module_type,
                sizeof(hw->phy.link_info.module_type));
     }
...
@@ -2874,22 +2874,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
     struct i40e_vsi *vsi = ring->vsi;
-    cpumask_var_t mask;

     if (!ring->q_vector || !ring->netdev)
         return;

-    /* Single TC mode enable XPS */
-    if (vsi->tc_config.numtc <= 1) {
-        if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+    if ((vsi->tc_config.numtc <= 1) &&
+        !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
         netif_set_xps_queue(ring->netdev,
-                    &ring->q_vector->affinity_mask,
+                    get_cpu_mask(ring->q_vector->v_idx),
                     ring->queue_index);
-    } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
-        /* Disable XPS to allow selection based on TC */
-        bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
-        netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
-        free_cpumask_var(mask);
     }

     /* schedule our worker thread which will take care of
@@ -3513,8 +3506,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
         q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
         q_vector->affinity_notify.release = i40e_irq_affinity_release;
         irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-        /* assign the mask for this irq */
-        irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+        /* get_cpu_mask returns a static constant mask with
+         * a permanent lifetime so it's ok to use here.
+         */
+        irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
     }

     vsi->irqs_ready = true;
@@ -4296,7 +4291,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
         /* clear the affinity notifier in the IRQ descriptor */
         irq_set_affinity_notifier(irq_num, NULL);
-        /* clear the affinity_mask in the IRQ descriptor */
+        /* remove our suggested affinity mask for this IRQ */
         irq_set_affinity_hint(irq_num, NULL);
         synchronize_irq(irq_num);
         free_irq(irq_num, vsi->q_vectors[i]);
@@ -5354,6 +5349,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
     char *speed = "Unknown";
     char *fc = "Unknown";
     char *fec = "";
+    char *req_fec = "";
     char *an = "";

     new_speed = vsi->back->hw.phy.link_info.link_speed;
@@ -5415,6 +5411,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
     }

     if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+        req_fec = ", Requested FEC: None";
         fec = ", FEC: None";
         an = ", Autoneg: False";
@@ -5427,10 +5424,22 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
         else if (vsi->back->hw.phy.link_info.fec_info &
              I40E_AQ_CONFIG_FEC_RS_ENA)
             fec = ", FEC: CL108 RS-FEC";
+
+        /* 'CL108 RS-FEC' should be displayed when RS is requested, or
+         * both RS and FC are requested
+         */
+        if (vsi->back->hw.phy.link_info.req_fec_info &
+            (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
+            if (vsi->back->hw.phy.link_info.req_fec_info &
+                I40E_AQ_REQUEST_FEC_RS)
+                req_fec = ", Requested FEC: CL108 RS-FEC";
+            else
+                req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
+        }
     }

-    netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
-            speed, fec, an, fc);
+    netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
+            speed, req_fec, fec, an, fc);
 }

 /**
@@ -8228,7 +8237,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
     q_vector->vsi = vsi;
     q_vector->v_idx = v_idx;
-    cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+    cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

     if (vsi->netdev)
         netif_napi_add(vsi->netdev, &q_vector->napi,
@@ -9690,8 +9699,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
         i40e_add_mac_filter(vsi, mac_addr);
         spin_unlock_bh(&vsi->mac_filter_hash_lock);
     } else {
-        /* relate the VSI_VMDQ name to the VSI_MAIN name */
-        snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+        /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
+         * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
+         * the end, which is 4 bytes long, so force truncation of the
+         * original name by IFNAMSIZ - 4
+         */
+        snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
+             IFNAMSIZ - 4,
              pf->vsi[pf->lan_vsi]->netdev->name);
         random_ether_addr(mac_addr);
@@ -9865,13 +9879,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
          */
         ret = i40e_vsi_config_tc(vsi, enabled_tc);
         if (ret) {
+            /* Single TC condition is not fatal,
+             * message and continue
+             */
             dev_info(&pf->pdev->dev,
                  "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
                  enabled_tc,
                  i40e_stat_str(&pf->hw, ret),
                  i40e_aq_str(&pf->hw,
                          pf->hw.aq.asq_last_status));
-            ret = -ENOENT;
         }
     }
     break;
...
@@ -755,7 +755,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
     /* Acquire lock to prevent race condition where adminq_task
      * can execute after i40e_nvmupd_nvm_read/write but before state
-     * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+     * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+     *
+     * During NVMUpdate, it is observed that lock could be held for
+     * ~5ms for most commands. However lock is held for ~60ms for
+     * NVMUPD_CSUM_LCB command.
      */
     mutex_lock(&hw->aq.arq_mutex);
     switch (hw->nvmupd_state) {
@@ -778,7 +782,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
          */
         if (cmd->offset == 0xffff) {
             i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
-            return 0;
+            status = 0;
+            goto exit;
         }

         status = I40E_ERR_NOT_READY;
@@ -793,6 +798,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
         *perrno = -ESRCH;
         break;
     }
+exit:
     mutex_unlock(&hw->aq.arq_mutex);
     return status;
 }
...
@@ -959,19 +959,31 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
     enum i40e_latency_range new_latency_range = rc->latency_range;
-    struct i40e_q_vector *qv = rc->ring->q_vector;
     u32 new_itr = rc->itr;
     int bytes_per_int;
-    int usecs;
+    unsigned int usecs, estimated_usecs;

     if (rc->total_packets == 0 || !rc->itr)
         return false;

+    usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+    bytes_per_int = rc->total_bytes / usecs;
+
+    /* The calculations in this algorithm depend on interrupts actually
+     * firing at the ITR rate. This may not happen if the packet rate is
+     * really low, or if we've been napi polling. Check to make sure
+     * that's not the case before we continue.
+     */
+    estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
+    if (estimated_usecs > usecs) {
+        new_latency_range = I40E_LOW_LATENCY;
+        goto reset_latency;
+    }
+
     /* simple throttlerate management
      *   0-10MB/s   lowest (50000 ints/s)
      *  10-20MB/s   low    (20000 ints/s)
      *  20-1249MB/s bulk   (18000 ints/s)
-     *  > 40000 Rx packets per second (8000 ints/s)
      *
      * The math works out because the divisor is in 10^(-6) which
      * turns the bytes/us input value into MB/s values, but
@@ -979,9 +991,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
      * are in 2 usec increments in the ITR registers, and make sure
      * to use the smoothed values that the countdown timer gives us.
      */
-    usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-    bytes_per_int = rc->total_bytes / usecs;
-
     switch (new_latency_range) {
     case I40E_LOWEST_LATENCY:
         if (bytes_per_int > 10)
@@ -994,24 +1003,13 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
             new_latency_range = I40E_LOWEST_LATENCY;
         break;
     case I40E_BULK_LATENCY:
-    case I40E_ULTRA_LATENCY:
     default:
         if (bytes_per_int <= 20)
             new_latency_range = I40E_LOW_LATENCY;
         break;
     }

-    /* this is to adjust RX more aggressively when streaming small
-     * packets.  The value of 40000 was picked as it is just beyond
-     * what the hardware can receive per second if in low latency
-     * mode.
-     */
-#define RX_ULTRA_PACKET_RATE 40000
-
-    if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
-        (&qv->rx == rc))
-        new_latency_range = I40E_ULTRA_LATENCY;
-
+reset_latency:
     rc->latency_range = new_latency_range;

     switch (new_latency_range) {
@@ -1024,21 +1022,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
     case I40E_BULK_LATENCY:
         new_itr = I40E_ITR_18K;
         break;
-    case I40E_ULTRA_LATENCY:
-        new_itr = I40E_ITR_8K;
-        break;
     default:
         break;
     }

     rc->total_bytes = 0;
     rc->total_packets = 0;
+    rc->last_itr_update = jiffies;

     if (new_itr != rc->itr) {
         rc->itr = new_itr;
         return true;
     }

     return false;
 }
@@ -2243,6 +2238,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
     int idx = q_vector->v_idx;
     int rx_itr_setting, tx_itr_setting;

+    /* If we don't have MSIX, then we only need to re-enable icr0 */
+    if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+        i40e_irq_dynamic_enable_icr0(vsi->back, false);
+        return;
+    }
+
     vector = (q_vector->v_idx + vsi->base_vector);

     /* avoid dynamic calculation if in countdown mode OR if
@@ -2363,7 +2364,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
     /* If work not completed, return budget and polling will return */
     if (!clean_complete) {
-        const cpumask_t *aff_mask = &q_vector->affinity_mask;
         int cpu_id = smp_processor_id();

         /* It is possible that the interrupt affinity has changed but,
@@ -2373,8 +2373,16 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
          * continue to poll, otherwise we must stop polling so the
          * interrupt can move to the correct cpu.
          */
-        if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
-               !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
+        if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+            /* Tell napi that we are done polling */
+            napi_complete_done(napi, work_done);
+
+            /* Force an interrupt */
+            i40e_force_wb(vsi, q_vector);
+
+            /* Return budget-1 so that polling stops */
+            return budget - 1;
+        }
+
 tx_only:
-            if (arm_wb) {
-                q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+        if (arm_wb) {
+            q_vector->tx.ring[0].tx_stats.tx_force_wb++;
@@ -2382,7 +2390,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
-            }
-            return budget;
-        }
-    }
+        }
+        return budget;
+    }

     if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
         q_vector->arm_wb_state = false;
@@ -2390,15 +2397,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
     /* Work is done so exit the polling mode and re-enable the interrupt */
     napi_complete_done(napi, work_done);

-    /* If we're prematurely stopping polling to fix the interrupt
-     * affinity we want to make sure polling starts back up so we
-     * issue a call to i40e_force_wb which triggers a SW interrupt.
-     */
-    if (!clean_complete)
-        i40e_force_wb(vsi, q_vector);
-    else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
-        i40e_irq_dynamic_enable_icr0(vsi->back, false);
-    else
-        i40e_update_enable_itr(vsi, q_vector);
+    i40e_update_enable_itr(vsi, q_vector);

     return min(work_done, budget - 1);
...
@@ -454,7 +454,6 @@ enum i40e_latency_range {
     I40E_LOWEST_LATENCY = 0,
     I40E_LOW_LATENCY = 1,
     I40E_BULK_LATENCY = 2,
-    I40E_ULTRA_LATENCY = 3,
 };

 struct i40e_ring_container {
@@ -462,6 +461,7 @@ struct i40e_ring_container {
     struct i40e_ring *ring;
     unsigned int total_bytes;   /* total bytes processed this int */
     unsigned int total_packets; /* total packets processed this int */
+    unsigned long last_itr_update;  /* jiffies of last ITR update */
     u16 count;
     enum i40e_latency_range latency_range;
     u16 itr;
...
@@ -185,6 +185,7 @@ struct i40e_link_status {
     enum i40e_aq_link_speed link_speed;
     u8 link_info;
     u8 an_info;
+    u8 req_fec_info;
     u8 fec_info;
     u8 ext_info;
     u8 loopback;
...
@@ -1758,7 +1758,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
         }
     } else {
         aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-                                 allmulti, NULL,
+                                 alluni, NULL,
                                  true);
         aq_err = pf->hw.aq.asq_last_status;
         if (aq_ret) {
@@ -2529,6 +2529,60 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
     return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
 }

+/**
+ * i40e_vc_enable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Enable vlan header stripping for the VF
+ **/
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
+                     u16 msglen)
+{
+    struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+    i40e_status aq_ret = 0;
+
+    if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+        aq_ret = I40E_ERR_PARAM;
+        goto err;
+    }
+
+    i40e_vlan_stripping_enable(vsi);
+
+    /* send the response to the VF */
+err:
+    return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+                       aq_ret);
+}
+
+/**
+ * i40e_vc_disable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Disable vlan header stripping for the VF
+ **/
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
+                      u16 msglen)
+{
+    struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+    i40e_status aq_ret = 0;
+
+    if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+        aq_ret = I40E_ERR_PARAM;
+        goto err;
+    }
+
+    i40e_vlan_stripping_disable(vsi);
+
+    /* send the response to the VF */
+err:
+    return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+                       aq_ret);
+}
+
 /**
  * i40e_vc_process_vf_msg
  * @pf: pointer to the PF structure
@@ -2648,6 +2702,12 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
     case VIRTCHNL_OP_SET_RSS_HENA:
         ret = i40e_vc_set_rss_hena(vf, msg, msglen);
         break;
+    case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+        ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
+        break;
+    case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+        ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
+        break;
     case VIRTCHNL_OP_UNKNOWN:
     default:
...
@@ -333,9 +333,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
             len = buf_len;
     /* write the full 16-byte chunks */
     if (hw->debug_mask & mask) {
-        char prefix[20];
+        char prefix[27];

-        snprintf(prefix, 20,
+        snprintf(prefix, sizeof(prefix),
              "i40evf %02x:%02x.%x: \t0x",
              hw->bus.bus_id,
              hw->bus.device,
...
@@ -357,19 +357,31 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
     enum i40e_latency_range new_latency_range = rc->latency_range;
-    struct i40e_q_vector *qv = rc->ring->q_vector;
     u32 new_itr = rc->itr;
     int bytes_per_int;
-    int usecs;
+    unsigned int usecs, estimated_usecs;

     if (rc->total_packets == 0 || !rc->itr)
         return false;

+    usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+    bytes_per_int = rc->total_bytes / usecs;
+
+    /* The calculations in this algorithm depend on interrupts actually
+     * firing at the ITR rate. This may not happen if the packet rate is
+     * really low, or if we've been napi polling. Check to make sure
+     * that's not the case before we continue.
+     */
+    estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
+    if (estimated_usecs > usecs) {
+        new_latency_range = I40E_LOW_LATENCY;
+        goto reset_latency;
+    }
+
     /* simple throttlerate management
      *   0-10MB/s   lowest (50000 ints/s)
      *  10-20MB/s   low    (20000 ints/s)
      *  20-1249MB/s bulk   (18000 ints/s)
-     *  > 40000 Rx packets per second (8000 ints/s)
      *
      * The math works out because the divisor is in 10^(-6) which
      * turns the bytes/us input value into MB/s values, but
@@ -377,9 +389,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
      * are in 2 usec increments in the ITR registers, and make sure
      * to use the smoothed values that the countdown timer gives us.
      */
-    usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-    bytes_per_int = rc->total_bytes / usecs;
-
     switch (new_latency_range) {
     case I40E_LOWEST_LATENCY:
         if (bytes_per_int > 10)
@@ -392,24 +401,13 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
             new_latency_range = I40E_LOWEST_LATENCY;
         break;
     case I40E_BULK_LATENCY:
-    case I40E_ULTRA_LATENCY:
     default:
         if (bytes_per_int <= 20)
             new_latency_range = I40E_LOW_LATENCY;
         break;
     }

-    /* this is to adjust RX more aggressively when streaming small
-     * packets.  The value of 40000 was picked as it is just beyond
-     * what the hardware can receive per second if in low latency
-     * mode.
-     */
-#define RX_ULTRA_PACKET_RATE 40000
-
-    if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
-        (&qv->rx == rc))
-        new_latency_range = I40E_ULTRA_LATENCY;
-
+reset_latency:
     rc->latency_range = new_latency_range;

     switch (new_latency_range) {
@@ -422,21 +420,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
     case I40E_BULK_LATENCY:
         new_itr = I40E_ITR_18K;
         break;
-    case I40E_ULTRA_LATENCY:
-        new_itr = I40E_ITR_8K;
-        break;
     default:
         break;
     }

     rc->total_bytes = 0;
     rc->total_packets = 0;
+    rc->last_itr_update = jiffies;

     if (new_itr != rc->itr) {
         rc->itr = new_itr;
         return true;
     }

     return false;
 }
@@ -1575,7 +1570,6 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
     /* If work not completed, return budget and polling will return */
     if (!clean_complete) {
-        const cpumask_t *aff_mask = &q_vector->affinity_mask;
         int cpu_id = smp_processor_id();

         /* It is possible that the interrupt affinity has changed but,
@@ -1585,7 +1579,16 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
          * continue to poll, otherwise we must stop polling so the
          * interrupt can move to the correct cpu.
          */
-        if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
+        if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+            /* Tell napi that we are done polling */
+            napi_complete_done(napi, work_done);
+
+            /* Force an interrupt */
+            i40evf_force_wb(vsi, q_vector);
+
+            /* Return budget-1 so that polling stops */
+            return budget - 1;
+        }
+
 tx_only:
-            if (arm_wb) {
-                q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+        if (arm_wb) {
+            q_vector->tx.ring[0].tx_stats.tx_force_wb++;
@@ -1593,7 +1596,6 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
-            }
-            return budget;
-        }
-    }
+        }
+        return budget;
+    }

     if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
         q_vector->arm_wb_state = false;
@@ -1601,13 +1603,6 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
     /* Work is done so exit the polling mode and re-enable the interrupt */
     napi_complete_done(napi, work_done);

-    /* If we're prematurely stopping polling to fix the interrupt
-     * affinity we want to make sure polling starts back up so we
-     * issue a call to i40evf_force_wb which triggers a SW interrupt.
-     */
-    if (!clean_complete)
-        i40evf_force_wb(vsi, q_vector);
-    else
-        i40e_update_enable_itr(vsi, q_vector);
+    i40e_update_enable_itr(vsi, q_vector);

     return min(work_done, budget - 1);
...
@@ -425,7 +425,6 @@ enum i40e_latency_range {
    I40E_LOWEST_LATENCY = 0,
    I40E_LOW_LATENCY = 1,
    I40E_BULK_LATENCY = 2,
-   I40E_ULTRA_LATENCY = 3,
 };

 struct i40e_ring_container {
@@ -433,6 +432,7 @@ struct i40e_ring_container {
    struct i40e_ring *ring;
    unsigned int total_bytes;   /* total bytes processed this int */
    unsigned int total_packets; /* total packets processed this int */
+   unsigned long last_itr_update;  /* jiffies of last ITR update */
    u16 count;
    enum i40e_latency_range latency_range;
    u16 itr;
...
@@ -159,6 +159,7 @@ struct i40e_link_status {
    enum i40e_aq_link_speed link_speed;
    u8 link_info;
    u8 an_info;
+   u8 req_fec_info;
    u8 fec_info;
    u8 ext_info;
    u8 loopback;
...
@@ -121,7 +121,7 @@ struct i40e_q_vector {
 #define ITR_COUNTDOWN_START 100
    u8 itr_countdown;   /* when 0 or 1 update ITR */
    int v_idx;      /* vector index in list */
-   char name[IFNAMSIZ + 9];
+   char name[IFNAMSIZ + 15];
    bool arm_wb_state;
    cpumask_t affinity_mask;
    struct irq_affinity_notify affinity_notify;
@@ -261,6 +261,8 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_RELEASE_PROMISC      BIT(16)
 #define I40EVF_FLAG_AQ_REQUEST_ALLMULTI     BIT(17)
 #define I40EVF_FLAG_AQ_RELEASE_ALLMULTI     BIT(18)
+#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING    BIT(19)
+#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING   BIT(20)

    /* OS defined structs */
    struct net_device *netdev;
@@ -358,6 +360,8 @@ void i40evf_get_hena(struct i40evf_adapter *adapter);
 void i40evf_set_hena(struct i40evf_adapter *adapter);
 void i40evf_set_rss_key(struct i40evf_adapter *adapter);
 void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter);
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                enum virtchnl_ops v_opcode,
                i40e_status v_retval, u8 *msg, u16 msglen);
...
@@ -543,9 +543,9 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
 static int
 i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
 {
-   int vector, err, q_vectors;
-   int rx_int_idx = 0, tx_int_idx = 0;
-   int irq_num;
+   unsigned int vector, q_vectors;
+   unsigned int rx_int_idx = 0, tx_int_idx = 0;
+   int irq_num, err;

    i40evf_irq_disable(adapter);
    /* Decrement for Other and TCP Timer vectors */
@@ -556,18 +556,15 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
        irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

        if (q_vector->tx.ring && q_vector->rx.ring) {
-           snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                "i40evf-%s-%s-%d", basename,
-                "TxRx", rx_int_idx++);
+           snprintf(q_vector->name, sizeof(q_vector->name),
+                "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
            tx_int_idx++;
        } else if (q_vector->rx.ring) {
-           snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                "i40evf-%s-%s-%d", basename,
-                "rx", rx_int_idx++);
+           snprintf(q_vector->name, sizeof(q_vector->name),
+                "i40evf-%s-rx-%d", basename, rx_int_idx++);
        } else if (q_vector->tx.ring) {
-           snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                "i40evf-%s-%s-%d", basename,
-                "tx", tx_int_idx++);
+           snprintf(q_vector->name, sizeof(q_vector->name),
+                "i40evf-%s-tx-%d", basename, tx_int_idx++);
        } else {
            /* skip this unused q_vector */
            continue;
@@ -587,8 +584,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
        q_vector->affinity_notify.release =
                           i40evf_irq_affinity_release;
        irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-       /* assign the mask for this irq */
-       irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+       /* get_cpu_mask returns a static constant mask with
+        * a permanent lifetime so it's ok to use here.
+        */
+       irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
    }

    return 0;
@@ -1459,6 +1458,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
        q_vector->adapter = adapter;
        q_vector->vsi = &adapter->vsi;
        q_vector->v_idx = q_idx;
+       cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
        netif_napi_add(adapter->netdev, &q_vector->napi,
                   i40evf_napi_poll, NAPI_POLL_WEIGHT);
    }
@@ -1679,6 +1679,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
        goto watchdog_done;
    }

+   if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
+       i40evf_enable_vlan_stripping(adapter);
+       goto watchdog_done;
+   }
+
+   if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
+       i40evf_disable_vlan_stripping(adapter);
+       goto watchdog_done;
+   }
+
    if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
        i40evf_configure_queues(adapter);
        goto watchdog_done;
@@ -2296,6 +2306,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
    return 0;
 }

+/**
+ * i40evf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int i40evf_set_features(struct net_device *netdev,
+                  netdev_features_t features)
+{
+   struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+   if (!VLAN_ALLOWED(adapter))
+       return -EINVAL;
+
+   if (features & NETIF_F_HW_VLAN_CTAG_RX)
+       adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+   else
+       adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+
+   return 0;
+}
+
 /**
  * i40evf_features_check - Validate encapsulated packet conforms to limits
  * @skb: skb buff
@@ -2389,6 +2421,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
    .ndo_vlan_rx_kill_vid   = i40evf_vlan_rx_kill_vid,
    .ndo_features_check = i40evf_features_check,
    .ndo_fix_features   = i40evf_fix_features,
+   .ndo_set_features   = i40evf_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = i40evf_netpoll,
 #endif
...
@@ -820,6 +820,46 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
    kfree(vrl);
 }

+/**
+ * i40evf_enable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be enabled
+ **/
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+   if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+       /* bail because we already have a command pending */
+       dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
+           adapter->current_op);
+       return;
+   }
+
+   adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+   adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+   i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+              NULL, 0);
+}
+
+/**
+ * i40evf_disable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be disabled
+ **/
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+   if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+       /* bail because we already have a command pending */
+       dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
+           adapter->current_op);
+       return;
+   }
+
+   adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+   adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+   i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+              NULL, 0);
+}
+
 /**
  * i40evf_print_link_message - print link up or down
  * @adapter: adapter structure
...
@@ -133,6 +133,8 @@ enum virtchnl_ops {
    VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
    VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
    VIRTCHNL_OP_SET_RSS_HENA = 26,
+   VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+   VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
 };

 /* This macro is used to generate a compilation error if a structure
@@ -686,6 +688,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
    case VIRTCHNL_OP_SET_RSS_HENA:
        valid_len = sizeof(struct virtchnl_rss_hena);
        break;
+   case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+   case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+       break;
    /* These are always errors coming from the VF. */
    case VIRTCHNL_OP_EVENT:
    case VIRTCHNL_OP_UNKNOWN:
...