Commit be67101f authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-03-25

This series contains updates to the ice driver only.

Victor updates the ice driver to support updating the VSI queue
configuration dynamically, by providing the ability to increase or
decrease the VSI's number of queues.

Michal fixes an issue where the VLAN switch rule was lost (i.e. not
added) when a VM starts or the VF driver is reloaded, by ensuring the
rule gets added in these cases.

Brett updates the driver to receive link events over the admin receive
queue instead of polling for link changes.
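
As a rough illustration of the enable side (not verbatim from the
patches; note the inversion, since set mask bits appear to suppress
the corresponding events rather than enable them):

    u16 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN |
                       ICE_AQ_LINK_EVENT_MEDIA_NA));

    /* tell firmware which link events to report over the ARQ */
    ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL);
    /* request link info with link status events (ena_lse) enabled */
    ice_aq_get_link_info(pi, true, NULL, NULL);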

Maciej refactors the code a bit to introduce a new function to fetch
the Rx buffer and do the DMA synchronization, reducing code
duplication.  Also adds ice_can_reuse_rx_page() to verify whether a
page can be reused, so that in the future this check can be used
elsewhere in the driver.  Additional driver optimizations allow
dropping ice_pull_tail() altogether.  Adds support for bulk updates of
the page refcount instead of doing it one by one.  Refactors the page
counting and buffer recycling so that the same code can clean up
receive buffers when there is no skb allocated, as with XDP.  Adds the
DMA_ATTR_WEAK_ORDERING and DMA_ATTR_SKIP_CPU_SYNC attributes to the
DMA API during mapping operations on the receive side, so that non-x86
platforms can sync only what is being used (2k buffers) instead of the
entire page.
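
Since the ice_txrx.c changes are collapsed below, here is a minimal
sketch of how these pieces typically fit together on the Rx path
(illustrative only; the exact ice code may differ):

    /* map the page once, skipping the implicit CPU sync */
    dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                        DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

    /* on completion, sync just the 2k buffer instead of the page */
    dma_sync_single_range_for_cpu(dev, dma, rx_buf->page_offset,
                                  ICE_RXBUF_2048, DMA_FROM_DEVICE);

    /* bulk refcount update: take a large reference up front and pay
     * it back via pagecnt_bias, instead of get_page() per frame
     */
    page_ref_add(page, USHRT_MAX - 1);
    rx_buf->pagecnt_bias = USHRT_MAX;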

Dave fixes the driver to perform the most intrusive of the requested
resets and clear the other reset request bits, so that we do not end
up performing reset after reset.

Bruce adds an iterator macro to clean up several for() loops.

Chinh modifies the packet flags to be more generic so that they can be
used for both receive and transmit.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 68cc2999 86e81794
@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
  *
  * Get Link Status (0x607). Returns the link status of the adapter.
  */
-static enum ice_status
+enum ice_status
 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 		     struct ice_link_status *link, struct ice_sq_cd *cd)
 {
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
 	 */
 	case ICE_RXDID_FLEX_NIC:
 	case ICE_RXDID_FLEX_NIC_2:
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
-				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
-				   ICE_RXFLG_FIN, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+				   ICE_FLG_FIN, idx++);
 		/* flex flag 1 is not used for flexi-flag programming, skipping
 		 * these four FLG64 bits.
 		 */
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
-				   ICE_RXFLG_EVLAN_x9100, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
-				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
-				   ICE_RXFLG_TNL0, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+				   ICE_FLG_EVLAN_x9100, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+				   ICE_FLG_TNL0, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
 		break;
 	default:
@@ -2150,6 +2150,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
 }
 
+/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+		      struct ice_sq_cd *cd)
+{
+	struct ice_aqc_set_event_mask *cmd;
+	struct ice_aq_desc desc;
+
+	cmd = &desc.params.set_event_mask;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+	cmd->lport_num = port_num;
+
+	cmd->event_mask = cpu_to_le16(mask);
+
+	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
 /**
  * ice_aq_set_port_id_led
  * @pi: pointer to the port information
@@ -2923,7 +2949,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
 	mutex_lock(&pi->sched_lock);
 
-	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+	ice_for_each_traffic_class(i) {
 		/* configuration is possible only if TC node is present */
 		if (!ice_sched_get_tc_node(pi, i))
 			continue;
...
@@ -89,6 +89,12 @@ enum ice_status
 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
 			   struct ice_sq_cd *cd);
 enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+		     struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+		      struct ice_sq_cd *cd);
+enum ice_status
 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
 		       struct ice_sq_cd *cd);
...
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
 	ICE_RX_MDID_HASH_HIGH,
 };
 
-/* Rx Flag64 packet flag bits */
-enum ice_rx_flg64_bits {
-	ICE_RXFLG_PKT_DSI	= 0,
-	ICE_RXFLG_EVLAN_x8100	= 15,
-	ICE_RXFLG_EVLAN_x9100,
-	ICE_RXFLG_VLAN_x8100,
-	ICE_RXFLG_TNL_MAC	= 22,
-	ICE_RXFLG_TNL_VLAN,
-	ICE_RXFLG_PKT_FRG,
-	ICE_RXFLG_FIN		= 32,
-	ICE_RXFLG_SYN,
-	ICE_RXFLG_RST,
-	ICE_RXFLG_TNL0		= 38,
-	ICE_RXFLG_TNL1,
-	ICE_RXFLG_TNL2,
-	ICE_RXFLG_UDP_GRE,
-	ICE_RXFLG_RSVD		= 63
+/* RX/TX Flag64 packet flag bits */
+enum ice_flg64_bits {
+	ICE_FLG_PKT_DSI		= 0,
+	ICE_FLG_EVLAN_x8100	= 15,
+	ICE_FLG_EVLAN_x9100,
+	ICE_FLG_VLAN_x8100,
+	ICE_FLG_TNL_MAC		= 22,
+	ICE_FLG_TNL_VLAN,
+	ICE_FLG_PKT_FRG,
+	ICE_FLG_FIN		= 32,
+	ICE_FLG_SYN,
+	ICE_FLG_RST,
+	ICE_FLG_TNL0		= 38,
+	ICE_FLG_TNL1,
+	ICE_FLG_TNL2,
+	ICE_FLG_UDP_GRE,
+	ICE_FLG_RSVD		= 63
 };
 
 /* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
...
@@ -856,7 +856,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	/* find the (rounded up) power-of-2 of qcount */
 	pow = order_base_2(qcount_rx);
 
-	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+	ice_for_each_traffic_class(i) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
 			/* TC is not enabled */
 			vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -1689,7 +1689,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
 	num_q_grps = 1;
 
 	/* set up and configure the Tx queues for each enabled TC */
-	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+	ice_for_each_traffic_class(tc) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
 			break;
...
@@ -478,8 +478,14 @@ static void ice_reset_subtask(struct ice_pf *pf)
 	 * for the reset now), poll for reset done, rebuild and return.
 	 */
 	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
-		clear_bit(__ICE_GLOBR_RECV, pf->state);
-		clear_bit(__ICE_CORER_RECV, pf->state);
+		/* Perform the largest reset requested */
+		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+			reset_type = ICE_RESET_CORER;
+		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+			reset_type = ICE_RESET_GLOBR;
+		/* return if no valid reset type requested */
+		if (reset_type == ICE_RESET_INVAL)
+			return;
 		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
 			ice_prepare_for_reset(pf);
@@ -698,9 +704,6 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
 
 	pf->serv_tmr_prev = jiffies;
 
-	if (ice_link_event(pf, pf->hw.port_info))
-		dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
-
 	/* Update the stats for active netdevs so the network stack
 	 * can look at updated numbers whenever it cares to
 	 */
@@ -710,6 +713,60 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
 			ice_update_vsi_stats(pf->vsi[i]);
 }
 
+/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+	u16 mask;
+
+	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+		dev_dbg(ice_hw_to_dev(pi->hw),
+			"Failed to set link event mask for port %d\n",
+			pi->lport);
+		return -EIO;
+	}
+
+	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+		dev_dbg(ice_hw_to_dev(pi->hw),
+			"Failed to enable link events for port %d\n",
+			pi->lport);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: pf that the link event is associated with
+ *
+ * Return -EINVAL if port_info is null
+ * Return status on success
+ */
+static int ice_handle_link_event(struct ice_pf *pf)
+{
+	struct ice_port_info *port_info;
+	int status;
+
+	port_info = pf->hw.port_info;
+	if (!port_info)
+		return -EINVAL;
+
+	status = ice_link_event(pf, port_info);
+	if (status)
+		dev_dbg(&pf->pdev->dev,
+			"Could not process link event, error %d\n", status);
+
+	return status;
+}
+
 /**
  * __ice_clean_ctrlq - helper function to clean controlq rings
  * @pf: ptr to struct ice_pf
@@ -813,6 +870,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 		opcode = le16_to_cpu(event.desc.opcode);
 
 		switch (opcode) {
+		case ice_aqc_opc_get_link_status:
+			if (ice_handle_link_event(pf))
+				dev_err(&pf->pdev->dev,
+					"Could not handle link event\n");
+			break;
 		case ice_mbx_opc_send_msg_to_pf:
 			ice_vc_process_vf_msg(pf, &event);
 			break;
@@ -1248,10 +1310,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 			/* skip this unused q_vector */
 			continue;
 		}
-		err = devm_request_irq(&pf->pdev->dev,
-				       pf->msix_entries[base + vector].vector,
-				       vsi->irq_handler, 0, q_vector->name,
-				       q_vector);
+		err = devm_request_irq(&pf->pdev->dev, irq_num,
+				       vsi->irq_handler, 0,
+				       q_vector->name, q_vector);
 		if (err) {
 			netdev_err(vsi->netdev,
 				   "MSIX request_irq failed, error: %d\n", err);
@@ -2268,6 +2329,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	/* since everything is good, start the service timer */
 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 
+	err = ice_init_link_events(pf->hw.port_info);
+	if (err) {
+		dev_err(dev, "ice_init_link_events failed: %d\n", err);
+		goto err_alloc_sw_unroll;
+	}
+
 	ice_verify_cacheline_size(pf);
 
 	return 0;
...
@@ -1268,42 +1268,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 	return 0;
 }
 
-/**
- * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
- * @pi: port information structure
- * @vsi_node: pointer to the VSI node
- * @num_nodes: pointer to the num nodes that needs to be removed per layer
- * @owner: node owner (lan or rdma)
- *
- * This function removes the VSI child nodes from the tree. It gets called for
- * lan and rdma separately.
- */
-static void
-ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
-			     struct ice_sched_node *vsi_node, u16 *num_nodes,
-			     u8 owner)
-{
-	struct ice_sched_node *node, *next;
-	u8 i, qgl, vsil;
-	u16 num;
-
-	qgl = ice_sched_get_qgrp_layer(pi->hw);
-	vsil = ice_sched_get_vsi_layer(pi->hw);
-	for (i = qgl; i > vsil; i--) {
-		num = num_nodes[i];
-		node = ice_sched_get_first_node(pi->hw, vsi_node, i);
-		while (node && num) {
-			next = node->sibling;
-			if (node->owner == owner && !node->num_children) {
-				ice_free_sched_node(pi, node);
-				num--;
-			}
-			node = next;
-		}
-	}
-}
-
 /**
  * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
  * @hw: pointer to the hw struct
@@ -1444,7 +1408,6 @@ static enum ice_status
 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 				 u8 tc, u16 new_numqs, u8 owner)
 {
-	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 	struct ice_sched_node *vsi_node;
 	struct ice_sched_node *tc_node;
@@ -1452,7 +1415,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 	enum ice_status status = 0;
 	struct ice_hw *hw = pi->hw;
 	u16 prev_numqs;
-	u8 i;
 
 	tc_node = ice_sched_get_tc_node(pi, tc);
 	if (!tc_node)
@@ -1471,33 +1433,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 	else
 		return ICE_ERR_PARAM;
 
-	/* num queues are not changed */
-	if (prev_numqs == new_numqs)
+	/* num queues are not changed or less than the previous number */
+	if (new_numqs <= prev_numqs)
 		return status;
 
-	/* calculate number of nodes based on prev/new number of qs */
-	if (prev_numqs)
-		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
-
 	if (new_numqs)
 		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-
-	if (prev_numqs > new_numqs) {
-		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
-			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
-
-		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
-					     owner);
-	} else {
-		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
-			new_num_nodes[i] -= prev_num_nodes[i];
-
-		status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
-						       new_num_nodes, owner);
-		if (status)
-			return status;
-	}
+	/* Keep the max number of queue configuration all the time. Update the
+	 * tree only if number of queues > previous number of queues. This may
+	 * leave some extra nodes in the tree if number of queues < previous
+	 * number but that wouldn't harm anything. Removing those extra nodes
+	 * may complicate the code if those nodes are part of SRL or
+	 * individually rate limited.
+	 */
+	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+					       new_num_nodes, owner);
+	if (status)
+		return status;
 
 	vsi_ctx->sched.max_lanq[tc] = new_numqs;
 
 	return 0;
@@ -1655,7 +1606,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 	if (!vsi_ctx)
 		goto exit_sched_rm_vsi_cfg;
 
-	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+	ice_for_each_traffic_class(i) {
 		struct ice_sched_node *vsi_node, *tc_node;
 		u8 j = 0;
...
This diff is collapsed.
@@ -47,6 +47,9 @@
 #define ICE_TX_FLAGS_VLAN_M	0xffff0000
 #define ICE_TX_FLAGS_VLAN_S	16
 
+#define ICE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 struct ice_tx_buf {
 	struct ice_tx_desc *next_to_watch;
 	struct sk_buff *skb;
@@ -73,6 +76,7 @@ struct ice_rx_buf {
 	dma_addr_t dma;
 	struct page *page;
 	unsigned int page_offset;
+	u16 pagecnt_bias;
 };
 
 struct ice_q_stats {
...
@@ -210,6 +210,9 @@ struct ice_nvm_info {
 #define ICE_MAX_TRAFFIC_CLASS	8
 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
 
+#define ice_for_each_traffic_class(_i) \
+	for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
 struct ice_sched_node {
 	struct ice_sched_node *parent;
 	struct ice_sched_node *sibling; /* next sibling in the same layer */
...
@@ -457,8 +457,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
 	vsi->hw_base_vector += 1;
 
 	/* Check if port VLAN exist before, and restore it accordingly */
-	if (vf->port_vlan_id)
+	if (vf->port_vlan_id) {
 		ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
+		ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+	}
 
 	eth_broadcast_addr(broadcast);
@@ -1925,6 +1927,9 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	 */
 	vsi->num_txq = qci->num_queue_pairs;
 	vsi->num_rxq = qci->num_queue_pairs;
+	/* All queues of VF VSI are in TC 0 */
+	vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
+	vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
 
 	if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
 		aq_ret = 0;
...