Commit b67ea97f authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
This series contains updates to fm10k only.

Alex provides two fixes for fm10k.  The first folds the fm10k_pull_tail()
call into fm10k_add_rx_frag(), so that the fragment does not have to be
modified after it is added to the skb.  The second adds the missing braces
to an if statement.
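
For illustration only, a minimal sketch (not the merged driver code) of the
copybreak pattern that results from folding the pull-tail logic into
fm10k_add_rx_frag(): small frames are copied entirely into the skb's linear
area, while larger frames have only their protocol headers pulled in and the
remaining payload attached as a page fragment.  The helper name, its signature
and SKETCH_RX_HDR_LEN are stand-ins invented for this sketch (the driver uses
FM10K_RX_HDR_LEN); the actual change is in the fm10k_add_rx_frag() hunks
further down.

/* sketch only: mirrors the shape of the fold, not the exact driver code */
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

#define SKETCH_RX_HDR_LEN 256   /* stand-in for the driver's FM10K_RX_HDR_LEN */

static void sketch_add_rx_frag(struct sk_buff *skb, struct page *page,
                               unsigned int offset, unsigned int size,
                               unsigned int truesize)
{
        unsigned char *va = page_address(page) + offset;
        unsigned int pull_len;

        if (!skb_is_nonlinear(skb)) {
                if (size <= SKETCH_RX_HDR_LEN) {
                        /* small frame: copy the whole payload into the linear
                         * area (the aligned copy assumes the skb was allocated
                         * with at least that much headroom in its data area)
                         */
                        memcpy(__skb_put(skb, size), va,
                               ALIGN(size, sizeof(long)));
                        return;
                }

                /* pull just the headers into the linear area */
                pull_len = eth_get_headlen(va, SKETCH_RX_HDR_LEN);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));
                va += pull_len;
                size -= pull_len;
        }

        /* attach whatever is left as a page fragment; since the header pull
         * happened first, the fragment never has to be modified after it has
         * been added to the skb
         */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        (unsigned long)va & ~PAGE_MASK, size, truesize);
}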

The remaining patches are from Jacob and contain improvements and fixes
for fm10k.  The first fix makes it so that invalid addresses are simply
skipped, allowing synchronization of the full list to proceed when using
the iproute2 tool.  Fix a possible kernel panic by using the correct
transmit timestamp function.  Simplify the code flow for setting the
IN_PROGRESS bit in the shinfo of an skb that we will be timestamping.
Fix a bug in the timestamping transmit enqueue code responsible for a
NULL pointer dereference and invalid access of the skb list by freeing
the clone in the cases where we did not add it to the queue.  Update the
PF code so that it resets the empty TQMAP/RQMAP registers post-VFLR to
prevent innocent VF drivers from triggering malicious driver events.
The SYSTIME_CFG.Adjust direction bit is actually supposed to indicate
that the adjustment is positive, so fix the code to align correctly with
the hardware and documentation.  Clean up a local variable that is no
longer used after a previous refactor of the code.  Fix the code flow so
that we actually clear the enabled flag as part of removing the LPORT.
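
As a concrete illustration of the timestamping fix, a hedged sketch (not the
merged driver code): once the hardware Tx timestamp is available, the driver's
own clone of the transmitted skb is reported with skb_tstamp_tx() and then
freed explicitly, instead of being passed to skb_complete_tx_timestamp(),
which is intended for clones created by skb_clone_tx_timestamp().  The helper
name below is invented, and the conversion of the raw SYSTIME value to
nanoseconds is assumed to have been done already (the driver uses
fm10k_systime_to_hwtstamp() for that, as the hunk further down shows).

/* sketch only: illustrates the corrected reporting pattern */
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void sketch_report_tx_tstamp(struct sk_buff *clone, u64 ns)
{
        struct skb_shared_hwtstamps shhwtstamps;

        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);

        /* deliver the timestamp to the owning socket's error queue */
        skb_tstamp_tx(clone, &shhwtstamps);

        /* the clone is the driver's own copy, so free it here */
        dev_kfree_skb_any(clone);
}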

v2:
 - updated patch 07 description based on feedback from Sergei Shtylyov
 - updated patch 09 & 10 to use %d in error message based on feedback
   from Sergei Shtylyov
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d42202dc 986eec43
@@ -124,7 +124,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
char *p = (char *)data;
int i;
unsigned int i;
switch (stringset) {
case ETH_SS_TEST:
@@ -143,12 +143,13 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
if (interface->hw.mac.type != fm10k_mac_vf)
if (interface->hw.mac.type != fm10k_mac_vf) {
for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
@@ -400,11 +400,31 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
struct fm10k_vf_info *vf_info)
{
struct fm10k_hw *hw = &interface->hw;
/* assigning the MAC address will send a mailbox message */
fm10k_mbx_lock(interface);
/* disable LPORT for this VF which clears switch rules */
hw->iov.ops.reset_lport(hw, vf_info);
/* assign new MAC+VLAN for this VF */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
/* re-enable the LPORT for this VF */
hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
FM10K_VF_FLAG_MULTI_CAPABLE);
fm10k_mbx_unlock(interface);
}
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
@@ -419,13 +439,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
vf_info = &iov_data->vf_info[vf_idx];
ether_addr_copy(vf_info->mac, mac);
/* assigning the MAC will send a mailbox message so lock is needed */
fm10k_mbx_lock(interface);
/* assign MAC address to VF */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
fm10k_mbx_unlock(interface);
fm10k_reset_vf_info(interface, vf_info);
return 0;
}
@@ -455,16 +469,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
/* record default VLAN ID for VF */
vf_info->pf_vid = vid;
/* assigning the VLAN will send a mailbox message so lock is needed */
fm10k_mbx_lock(interface);
/* Clear the VLAN table for the VF */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
/* Update VF assignment and trigger reset */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
fm10k_mbx_unlock(interface);
fm10k_reset_vf_info(interface, vf_info);
return 0;
}
@@ -269,16 +269,19 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
unsigned int pull_len;
if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
if (unlikely(skb_is_nonlinear(skb)))
goto add_tail_frag;
if (likely(size <= FM10K_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -290,8 +293,21 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
return false;
}
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */
va += pull_len;
size -= pull_len;
add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rx_buffer->page_offset, size, truesize);
(unsigned long)va & ~PAGE_MASK, size, truesize);
return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
@@ -517,44 +533,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
return true;
}
/**
* fm10k_pull_tail - fm10k specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
* This function is an fm10k specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
* As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb.
*/
static void fm10k_pull_tail(struct sk_buff *skb)
{
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
/* it is valid to use page_address instead of kmap since we are
* working with pages allocated out of the lomem pool per
* alloc_page(GFP_ATOMIC)
*/
va = skb_frag_address(frag);
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
frag->page_offset += pull_len;
skb->data_len -= pull_len;
skb->tail += pull_len;
}
/**
* fm10k_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
@@ -580,10 +558,6 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
return true;
}
/* place header in linear portion of buffer */
if (skb_is_nonlinear(skb))
fm10k_pull_tail(skb);
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1259,16 +1259,11 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
struct fm10k_mbx_info *mbx)
{
const u32 *hdr = &mbx->mbx_hdr;
s32 err_no;
u16 head;
/* we will need to pull all of the fields for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
/* we only have lower 10 bits of error number so add upper bits */
err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
switch (mbx->state) {
case FM10K_STATE_OPEN:
case FM10K_STATE_DISCONNECT:
@@ -923,18 +923,12 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_multicast_ether_addr(addr))
return -EADDRNOTAVAIL;
/* update table with current entries */
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
if (err)
return err;
hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
}
return 0;
@@ -1339,8 +1333,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
dglort.glort = interface->glort;
if (l2_accel)
dglort.shared_l = fls(l2_accel->size);
dglort.shared_l = fls(l2_accel->size);
hw->mac.ops.configure_dglort_map(hw, &dglort);
/* If table is empty remove it */
@@ -1559,6 +1559,7 @@ void fm10k_down(struct fm10k_intfc *interface)
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
fm10k_clean_all_rx_rings(interface);
}
/**
@@ -1740,30 +1741,18 @@ static int fm10k_probe(struct pci_dev *pdev,
struct fm10k_intfc *interface;
struct fm10k_hw *hw;
int err;
u64 dma_mask;
err = pci_enable_device_mem(pdev);
if (err)
return err;
/* By default fm10k only supports a 48 bit DMA mask */
dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
if ((dma_mask <= DMA_BIT_MASK(32)) ||
dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
dma_mask &= DMA_BIT_MASK(32);
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"No usable DMA configuration, aborting\n");
goto err_dma;
}
}
if (err) {
dev_err(&pdev->dev,
"DMA configuration failed: %d\n", err);
goto err_dma;
}
err = pci_request_selected_regions(pdev,
@@ -1772,7 +1761,7 @@ static int fm10k_probe(struct pci_dev *pdev,
fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
"pci_request_selected_regions failed: %d\n", err);
goto err_pci_reg;
}
@@ -1046,6 +1046,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
}
/* repeat the first ring for all the remaining VF rings */
for (i = queues_per_pool; i < qmap_stride; i++) {
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
}
return 0;
}
@@ -1345,6 +1351,14 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1, false);
/* we need to clear VF_FLAG_ENABLED flags in order to ensure
* that we actually re-enable the LPORT state below. Note that
* this has no impact if the VF is already disabled, as the
* flags are already cleared.
*/
if (!err)
vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
/* when enabling the port we should reset the rate limiters */
hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
@@ -1786,8 +1800,8 @@ static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
return FM10K_ERR_PARAM;
if (ppb < 0)
systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE;
if (ppb > 0)
systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
@@ -81,26 +81,26 @@ struct fm10k_mac_update {
__le16 glort;
u8 flags;
u8 action;
};
} __packed;
struct fm10k_global_table_data {
__le32 used;
__le32 avail;
};
} __packed;
struct fm10k_swapi_error {
__le32 status;
struct fm10k_global_table_data mac;
struct fm10k_global_table_data nexthop;
struct fm10k_global_table_data ffu;
};
} __packed;
struct fm10k_swapi_1588_timestamp {
__le64 egress;
__le64 ingress;
__le16 dglort;
__le16 sglort;
};
} __packed;
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
@@ -70,16 +70,16 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
* if none are present then insert skb in tail of list
*/
skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
if (!skb)
if (!skb) {
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
__skb_queue_tail(list, clone);
}
spin_unlock_irqrestore(&list->lock, flags);
/* if the list already has one then we just free the clone */
if (skb)
kfree_skb(skb);
else
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
dev_kfree_skb(clone);
}
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
@@ -103,9 +103,10 @@ void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
if (!skb)
return;
/* timestamp the sk_buff and return it to the socket */
/* timestamp the sk_buff and free our copy */
fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
skb_complete_tx_timestamp(skb, &shhwtstamps);
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
@@ -369,7 +369,7 @@ struct fm10k_hw;
/* Registers contained in BAR 4 for Switch management */
#define FM10K_SW_SYSTIME_ADJUST 0x0224D
#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000
#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
enum fm10k_int_source {