Commit 1c325aac authored by Alan Brady, committed by Tony Nguyen

idpf: configure resources for TX queues

IDPF supports two queue models: the single queue model, which is the
traditional queueing model, and the split queue model. In the single
queue model, the same descriptor queue is used by SW to post descriptors
to HW and by HW to post completed descriptors back to SW. In the split
queue model, "TX Queues" are used to pass buffers from SW to HW and
"TX Completion Queues" are used to post descriptor completions from HW
to SW. The device supports an asymmetric ratio of TX queues to TX
completion queues. To handle this, a queue group mechanism is used:
some TX queues are grouped together and are serviced by a single TX
completion queue per TX queue group.
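
For illustration only (not text from the original patch), that grouping is captured by struct idpf_txq_group introduced in idpf_txrx.h below: each group holds an array of TX queues and, in splitq mode, the single completion queue that services them. A minimal sketch of walking that relationship:

/* Illustrative only: in splitq mode every TX queue in a group reports its
 * completions through the group's single completion queue.
 */
static void example_dump_txq_groups(const struct idpf_vport *vport)
{
	int i, j;

	for (i = 0; i < vport->num_txq_grp; i++) {
		const struct idpf_txq_group *grp = &vport->txq_grps[i];

		for (j = 0; j < grp->num_txq; j++)
			pr_info("txq %u -> complq %u\n",
				grp->txqs[j]->q_id, grp->complq->q_id);
	}
}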

Add all the resources required for TX queue initialization.
To start with, allocate memory for the TX queue groups, TX queues and
TX completion queues. Then allocate the descriptors for both TX and
TX completion queues, and bookkeeping buffers for the TX queues alone.
Also allocate queue vectors for the vport and initialize the TX queue
related fields of each queue vector.

Initialize the queue parameters such as q_id, q_type and tail register
offset with the info received from the device control plane (CP).
Once all the TX queues are configured, send the config TX queues virtchnl
message to the CP with all the TX queue context information.
Signed-off-by: Alan Brady <alan.brady@intel.com>
Co-developed-by: Alice Michael <alice.michael@intel.com>
Signed-off-by: Alice Michael <alice.michael@intel.com>
Co-developed-by: Joshua Hay <joshua.a.hay@intel.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Co-developed-by: Phani Burra <phani.r.burra@intel.com>
Signed-off-by: Phani Burra <phani.r.burra@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Co-developed-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent ce1b75d0
@@ -15,9 +15,12 @@ struct idpf_vport_max_q;
#include <linux/pci.h>
#include "virtchnl2.h"
#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
#define GETMAXVAL(num_bits) GENMASK((num_bits) - 1, 0)
#define IDPF_NO_FREE_SLOT 0xffff
/* Default Mailbox settings */
@@ -27,6 +30,8 @@ struct idpf_vport_max_q;
#define IDPF_DFLT_MBX_ID -1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
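
For context, IDPF_NUM_CHUNKS_PER_MSG above bounds how many per-queue info chunks fit in a single control-queue message after the fixed message header. A hedged sketch of applying it when sizing the config TX queues message, assuming the virtchnl2_config_tx_queues and virtchnl2_txq_info definitions from virtchnl2.h:

/* Sketch: number of virtchnl2_txq_info chunks that fit in one mailbox
 * buffer once the fixed virtchnl2_config_tx_queues header is subtracted.
 */
static u16 example_txq_chunks_per_msg(void)
{
	return IDPF_NUM_CHUNKS_PER_MSG(sizeof(struct virtchnl2_config_tx_queues),
				       sizeof(struct virtchnl2_txq_info));
}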
#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
@@ -201,6 +206,8 @@ struct idpf_dev_ops {
STATE(IDPF_VC_CREATE_VPORT_ERR) \
STATE(IDPF_VC_DESTROY_VPORT) \
STATE(IDPF_VC_DESTROY_VPORT_ERR) \
STATE(IDPF_VC_CONFIG_TXQ) \
STATE(IDPF_VC_CONFIG_TXQ_ERR) \
STATE(IDPF_VC_ALLOC_VECTORS) \
STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
STATE(IDPF_VC_DEALLOC_VECTORS) \
@@ -229,7 +236,9 @@ extern const char * const idpf_vport_vc_state_str[];
* @txq_desc_count: TX queue descriptor count
* @complq_desc_count: Completion queue descriptor count
* @num_txq_grp: Number of TX queue groups
* @txq_grps: Array of TX queue groups
* @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @num_rxq: Number of allocated RX queues
* @num_bufq: Number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
@@ -249,6 +258,8 @@ extern const char * const idpf_vport_vc_state_str[];
* @idx: Software index in adapter vports struct
* @default_vport: Use this vport if one isn't specified
* @base_rxd: True if the driver should use base descriptors instead of flex
* @num_q_vectors: Number of IRQ vectors allocated
* @q_vectors: Array of queue vectors
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @vc_msg: Virtchnl message buffer
@@ -262,7 +273,10 @@ struct idpf_vport {
u32 txq_desc_count;
u32 complq_desc_count;
u16 num_txq_grp;
struct idpf_txq_group *txq_grps;
u32 txq_model;
struct idpf_queue **txqs;
u16 num_rxq;
u16 num_bufq;
u32 rxq_desc_count;
@@ -281,6 +295,8 @@ struct idpf_vport {
bool default_vport;
bool base_rxd;
u16 num_q_vectors;
struct idpf_q_vector *q_vectors;
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
@@ -372,12 +388,14 @@ struct idpf_vector_lifo {
* struct idpf_vport_config - Vport configuration data
* @user_config: see struct idpf_vport_user_config_data
* @max_q: Maximum possible queues
* @req_qs_chunks: Queue chunk data for requested queues
* @mac_filter_list_lock: Lock to protect mac filters
* @flags: See enum idpf_vport_config_flags
*/
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
void *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
@@ -577,6 +595,26 @@ static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
return le16_to_cpu(adapter->caps.max_vports);
}
/**
* idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
* @adapter: private data struct
*/
static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
{
return adapter->caps.max_sg_bufs_per_tx_pkt;
}
/**
* idpf_get_min_tx_pkt_len - Get min packet length supported by the device
* @adapter: private data struct
*/
static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
{
u8 pkt_len = adapter->caps.min_sso_packet_len;
return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}
/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
@@ -618,6 +656,42 @@ static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}
/**
* idpf_netdev_to_vport - get a vport handle from a netdev
* @netdev: network interface device structure
*/
static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
return np->vport;
}
/**
* idpf_vport_ctrl_lock - Acquire the vport control lock
* @netdev: Network interface device structure
*
* This lock should be used by non-datapath code to protect against vport
* destruction.
*/
static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
mutex_lock(&np->adapter->vport_ctrl_lock);
}
/**
* idpf_vport_ctrl_unlock - Release the vport control lock
* @netdev: Network interface device structure
*/
static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
mutex_unlock(&np->adapter->vport_ctrl_lock);
}
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
@@ -651,6 +725,9 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
bool add, bool async);
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_send_config_tx_queues_msg(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
/* Transmit descriptors */
/* splitq tx buf, singleq tx buf and singleq compl desc */
struct idpf_base_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
}; /* read used with buffer queues */
struct idpf_splitq_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
u8 ts[3];
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
#endif /* _IDPF_LAN_TXRX_H_ */
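
As an aside, the completion descriptor packs its fields into qid_comptype_gen as noted in the comment above. A hedged sketch of pulling them apart (the EX_* mask names are hypothetical, not part of this patch):

#include <linux/bitfield.h>

/* Hypothetical masks matching qid=[10:0], comptype=[13:11], gen=[15] */
#define EX_COMPL_QID_M	GENMASK(10, 0)
#define EX_COMPL_TYPE_M	GENMASK(13, 11)
#define EX_COMPL_GEN_M	BIT(15)

static void example_parse_compl(const struct idpf_splitq_tx_compl_desc *desc)
{
	u16 val = le16_to_cpu(desc->qid_comptype_gen);

	pr_info("qid %lu comptype %lu gen %lu\n",
		FIELD_GET(EX_COMPL_QID_M, val),
		FIELD_GET(EX_COMPL_TYPE_M, val),
		FIELD_GET(EX_COMPL_GEN_M, val));
}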
@@ -3,6 +3,9 @@
#include "idpf.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
const char * const idpf_vport_vc_state_str[] = {
IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
};
@@ -499,6 +502,12 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
return err;
}
/* assign netdev_ops */
if (idpf_is_queue_model_split(vport->txq_model))
netdev->netdev_ops = &idpf_netdev_ops_splitq;
else
netdev->netdev_ops = &idpf_netdev_ops_singleq;
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
@@ -590,6 +599,52 @@ static int idpf_get_free_slot(struct idpf_adapter *adapter)
return IDPF_NO_FREE_SLOT;
}
/**
* idpf_vport_stop - Disable a vport
* @vport: vport to disable
*/
static void idpf_vport_stop(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
if (np->state <= __IDPF_VPORT_DOWN)
return;
netif_carrier_off(vport->netdev);
idpf_vport_intr_rel(vport);
idpf_vport_queues_rel(vport);
np->state = __IDPF_VPORT_DOWN;
}
/**
* idpf_stop - Disables a network interface
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
* and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
*/
static int idpf_stop(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport *vport;
if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
return 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
idpf_vport_stop(vport);
idpf_vport_ctrl_unlock(netdev);
return 0;
}
/**
* idpf_decfg_netdev - Unregister the netdev
* @vport: vport for which netdev to be unregistered
@@ -773,6 +828,67 @@ void idpf_service_task(struct work_struct *work)
msecs_to_jiffies(300));
}
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
* @alloc_res: allocate queue resources
*/
static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
int err;
if (np->state != __IDPF_VPORT_DOWN)
return -EBUSY;
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
if (alloc_res) {
err = idpf_vport_queues_alloc(vport);
if (err)
return err;
}
err = idpf_vport_intr_alloc(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
err = idpf_vport_queue_ids_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto intr_rel;
}
err = idpf_queue_reg_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto intr_rel;
}
err = idpf_send_config_tx_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto intr_rel;
}
return 0;
intr_rel:
idpf_vport_intr_rel(vport);
queues_rel:
idpf_vport_queues_rel(vport);
return err;
}
/**
* idpf_init_task - Delayed initialization task
* @work: work_struct handle to our data
@@ -788,6 +904,7 @@ void idpf_init_task(struct work_struct *work)
struct idpf_vport_config *vport_config;
struct idpf_vport_max_q max_q;
struct idpf_adapter *adapter;
struct idpf_netdev_priv *np;
struct idpf_vport *vport;
u16 num_default_vports;
struct pci_dev *pdev;
@@ -845,6 +962,12 @@ void idpf_init_task(struct work_struct *work)
if (err)
goto handle_err;
/* Once state is put into DOWN, driver is ready for dev_open */
np = netdev_priv(vport->netdev);
np->state = __IDPF_VPORT_DOWN;
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
idpf_vport_open(vport, true);
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
*/
@@ -1071,6 +1194,33 @@ void idpf_vc_event_task(struct work_struct *work)
}
}
/**
* idpf_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
*
* Returns 0 on success, negative value on failure
*/
static int idpf_open(struct net_device *netdev)
{
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
err = idpf_vport_open(vport, true);
idpf_vport_ctrl_unlock(netdev);
return err;
}
/**
* idpf_alloc_dma_mem - Allocate dma memory
* @hw: pointer to hw struct
@@ -1104,3 +1254,13 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->va = NULL;
mem->pa = 0;
}
static const struct net_device_ops idpf_netdev_ops_splitq = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
};
static const struct net_device_ops idpf_netdev_ops_singleq = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
};
@@ -4,10 +4,12 @@
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
#define IDPF_MIN_TXQ_COMPLQ_DESC 256
#define IDPF_MAX_QIDS 256
#define MIN_SUPPORT_TXDID (\
VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
@@ -55,6 +57,51 @@
#define IDPF_PACKET_HDR_PAD \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
#define IDPF_TX_MIN_PKT_LEN 17
/**
* struct idpf_tx_buf
* @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
* with completion tag returned in buffer completion event.
* Because the completion tag is expected to be the same in all
* data descriptors for a given packet, and a single packet can
* span multiple buffers, we need this field to track all
* buffers associated with this completion tag independently of
* the buf_id. The tag consists of a N bit buf_id and M upper
* order "generation bits". See compl_tag_bufid_m and
* compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
* to indicate the tag is not valid.
* @ctx_entry: Singleq only. Used to indicate the corresponding entry
* in the descriptor ring was used for a context descriptor and
* this buffer entry should be skipped.
*/
struct idpf_tx_buf {
union {
int compl_tag;
bool ctx_entry;
};
};
struct idpf_tx_stash {
/* stub */
};
/**
* struct idpf_buf_lifo - LIFO for managing OOO completions
* @top: Used to know how many buffers are left
* @size: Total size of LIFO
* @bufs: Backing array
*/
struct idpf_buf_lifo {
u16 top;
u16 size;
struct idpf_tx_stash **bufs;
};
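
A minimal sketch of how such a stack might be pushed and popped against those fields (helper names are hypothetical, not from this patch):

/* Hypothetical helpers: top counts the used slots in bufs[] */
static int example_buf_lifo_push(struct idpf_buf_lifo *stack,
				 struct idpf_tx_stash *buf)
{
	if (unlikely(stack->top == stack->size))
		return -ENOSPC;

	stack->bufs[stack->top++] = buf;

	return 0;
}

static struct idpf_tx_stash *example_buf_lifo_pop(struct idpf_buf_lifo *stack)
{
	if (unlikely(!stack->top))
		return NULL;

	return stack->bufs[--stack->top];
}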
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
(sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
@@ -164,6 +211,29 @@ struct idpf_rx_ptype_decoded {
u32 payload_layer:3;
};
/**
* enum idpf_queue_flags_t
* @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
* identify new descriptor writebacks on the ring. HW sets
* the gen bit to 1 on the first writeback of any given
* descriptor. After the ring wraps, HW sets the gen bit of
* those descriptors to 0, and continues flipping
* 0->1 or 1->0 on each ring wrap. SW maintains its own
* gen bit to know what value will indicate writebacks on
* the next pass around the ring. E.g. it is initialized
* to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_FLAGS_NBITS,
};
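
A hedged sketch of the software side of that check when cleaning a splitq completion ring, assuming the gen bit sits in bit 15 of qid_comptype_gen as described in idpf_lan_txrx.h above:

/* Illustrative fragment: a descriptor is new only while its gen bit matches
 * the queue's __IDPF_Q_GEN_CHK flag; software flips the flag on every wrap.
 */
static void example_clean_complq(struct idpf_queue *complq)
{
	struct idpf_splitq_tx_compl_desc *descs = complq->desc_ring;
	u16 ntc = complq->next_to_clean;

	while (true) {
		bool gen = !!(le16_to_cpu(descs[ntc].qid_comptype_gen) & BIT(15));

		if (!!test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
			break;	/* HW has not written this descriptor yet */

		/* ...process the completion... */

		if (unlikely(++ntc == complq->desc_count)) {
			ntc = 0;
			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
		}
	}

	complq->next_to_clean = ntc;
}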
/**
* struct idpf_intr_reg
* @dyn_ctl: Dynamic control interrupt register
@@ -182,16 +252,148 @@ struct idpf_intr_reg {
/**
* struct idpf_q_vector
* @vport: Vport back pointer
* @v_idx: Vector index
* @intr_reg: See struct idpf_intr_reg
* @tx: Array of TX queues to service
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index
* @name: Queue vector name
*/
struct idpf_q_vector {
struct idpf_vport *vport;
u16 v_idx;
struct idpf_intr_reg intr_reg;
struct idpf_queue **tx;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
char *name;
};
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_20K 0x0032
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
/**
* struct idpf_queue
* @dev: Device back pointer for DMA mapping
* @vport: Back pointer to associated vport
* @txq_grp: See struct idpf_txq_group
* @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
* buffer queue uses this index to determine which group of refill queues
* to clean.
* For TX queue, it is used as index to map between TX queue group and
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* @tail: Tail offset. Used for both queue models single and split. In splitq
* model relevant only for TX queue.
* @tx_buf: See struct idpf_tx_buf
* @q_type: Queue type (TX, RX, TX completion, RX buffer)
* @q_id: Queue id
* @desc_count: Number of descriptors
* @next_to_use: Next descriptor to use. Relevant in both split & single txq
* and bufq.
* @next_to_clean: Next descriptor to clean. In split queue model, only
* relevant to TX completion queue and RX queue.
* @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
* only relevant to RX queue.
* @flags: See enum idpf_queue_flags_t
* @q_vector: Backreference to associated vector
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @desc_ring: Descriptor ring memory
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @tx_min_pkt_len: Min supported packet length
* @buf_stack: Stack of empty buffers to store buffer info for out of order
* buffer completions. See struct idpf_buf_lifo.
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_gen_s: Completion tag generation bit
* The format of the completion tag will change based on the TXQ
* descriptor ring size so that we can maintain roughly the same level
* of "uniqueness" across all descriptor sizes. For example, if the
* TXQ descriptor ring size is 64 (the minimum size supported), the
* completion tag will be formatted as below:
* 15 6 5 0
* --------------------------------
* | GEN=0-1023 |IDX = 0-63|
* --------------------------------
*
* This gives us 64*1024 = 65536 possible unique values. Similarly, if
* the TXQ descriptor ring size is 8160 (the maximum size supported),
* the completion tag will be formatted as below:
* 15 13 12 0
* --------------------------------
* |GEN | IDX = 0-8159 |
* --------------------------------
*
* This gives us 8*8160 = 65280 possible unique values.
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
* @sched_buf_hash: Hash table to stores buffers
*/
struct idpf_queue {
struct device *dev;
struct idpf_vport *vport;
struct idpf_txq_group *txq_grp;
u16 idx;
void __iomem *tail;
struct idpf_tx_buf *tx_buf;
u16 q_type;
u32 q_id;
u16 desc_count;
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
struct idpf_q_vector *q_vector;
unsigned int size;
dma_addr_t dma;
void *desc_ring;
u16 tx_max_bufs;
u8 tx_min_pkt_len;
struct idpf_buf_lifo buf_stack;
u16 compl_tag_bufid_m;
u16 compl_tag_gen_s;
u16 compl_tag_cur_gen;
u16 compl_tag_gen_max;
DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_internodealigned_in_smp;
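
A hedged sketch of how a completion tag could be composed from those fields (illustrative only):

/* Illustrative: generation in the upper bits, buffer index in the lower
 * bits; compl_tag_cur_gen tracks the current generation and is reset once
 * it reaches compl_tag_gen_max.
 */
static u16 example_make_compl_tag(const struct idpf_queue *txq, u16 buf_id)
{
	return (txq->compl_tag_cur_gen << txq->compl_tag_gen_s) |
	       (buf_id & txq->compl_tag_bufid_m);
}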
/**
* struct idpf_txq_group
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
* completion queue, acculumated for all TX queues
* associated with that completion queue.
*
* Between singleq and splitq, a txq_group is largely the same except for the
* complq. In splitq a single complq is responsible for handling completions
* for some number of txqs associated in this txq_group.
*/
struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
struct idpf_queue *complq;
u32 num_completions_pending;
};
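
Finally, a hedged sketch of how the per-group queues could be flattened into the vport->txqs hot-path array that @txqs and @idx describe above (illustrative; assumes a vport->num_txq total-queue count not shown in this hunk):

/* Illustrative: copy every group's txq pointers into a flat array so the
 * hot path can reach a queue directly via its idx.
 */
static int example_init_fast_path_txqs(struct idpf_vport *vport)
{
	int i, j, k = 0;

	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), GFP_KERNEL);
	if (!vport->txqs)
		return -ENOMEM;

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *grp = &vport->txq_grps[i];

		for (j = 0; j < grp->num_txq; j++, k++) {
			vport->txqs[k] = grp->txqs[j];
			vport->txqs[k]->idx = k;
		}
	}

	return 0;
}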
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
@@ -199,5 +401,9 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
#endif /* !_IDPF_TXRX_H_ */