Commit 4c8024f7 authored by David S. Miller

Merge branch 'ena-driver-new-features'

Arthur Kiyanovski says:

====================
ENA driver new features

V4 changes:
-----------
Add smp_rmb() to "net: ena: avoid unnecessary rearming of interrupt
vector when busy-polling" so that it adheres to the Linux kernel memory
model, and update its commit message accordingly.
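
For reference, a minimal sketch of the pairing described above. Only the
interrupts_masked flag itself appears in the diff below; the handler name,
the napi field and the exact placement are assumptions, not the patch
itself:

	/* IRQ handler: mark the vector as masked before scheduling NAPI */
	static irqreturn_t ena_intr_msix_io(int irq, void *data)
	{
		struct ena_napi *ena_napi = data;

		WRITE_ONCE(ena_napi->interrupts_masked, true);
		smp_wmb(); /* publish interrupts_masked before NAPI runs */
		napi_schedule_irqoff(&ena_napi->napi);

		return IRQ_HANDLED;
	}

	/* NAPI poll: rearm only if the IRQ handler really masked the vector */
	if (napi_complete_done(napi, work_done) &&
	    READ_ONCE(ena_napi->interrupts_masked)) {
		smp_rmb(); /* pairs with the smp_wmb() in the IRQ handler */
		WRITE_ONCE(ena_napi->interrupts_masked, false);
		/* ... unmask/rearm the interrupt vector ... */
	}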

V3 changes:
-----------
1. Add "net: ena: enable support of rss hash key and function
   changes" patch again, with more explanations why it should
   be in net-next in commit message.
2. Add synchronization considerations to "net: ena: avoid unnecessary
   rearming of interrupt vector when busy-polling"

V2 changes:
-----------
1. Update commit messages of 2 patches to be more verbose.
2. Remove the "net: ena: enable support of rss hash key and function
   changes" patch. It will be resubmitted to net.

V1 cover letter:
----------------
This patchset contains performance improvements, support for new devices,
and new functionality:

1. Support for upcoming ENA devices
2. Avoid unnecessary IRQ unmasking in busy poll to reduce interrupt rate
3. Enabling device support for RSS function and key manipulation
4. Support for NIC-based traffic mirroring (SPAN port)
5. Additional PCI device ID
6. Cosmetic changes
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d4eae993 0e3a3f6d
...
@@ -491,6 +491,36 @@ enum ena_admin_llq_stride_ctrl {
 	ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
 };
 
+enum ena_admin_accel_mode_feat {
+	ENA_ADMIN_DISABLE_META_CACHING = 0,
+	ENA_ADMIN_LIMIT_TX_BURST = 1,
+};
+
+struct ena_admin_accel_mode_get {
+	/* bit field of enum ena_admin_accel_mode_feat */
+	u16 supported_flags;
+
+	/* maximum burst size between two doorbells. The size is in bytes */
+	u16 max_tx_burst_size;
+};
+
+struct ena_admin_accel_mode_set {
+	/* bit field of enum ena_admin_accel_mode_feat */
+	u16 enabled_flags;
+
+	u16 reserved;
+};
+
+struct ena_admin_accel_mode_req {
+	union {
+		u32 raw[2];
+
+		struct ena_admin_accel_mode_get get;
+
+		struct ena_admin_accel_mode_set set;
+	} u;
+};
+
 struct ena_admin_feature_llq_desc {
 	u32 max_llq_num;
...
@@ -536,10 +566,13 @@ struct ena_admin_feature_llq_desc {
 	/* the stride control the driver selected to use */
 	u16 descriptors_stride_ctrl_enabled;
 
-	/* Maximum size in bytes taken by llq entries in a single tx burst.
-	 * Set to 0 when there is no such limit.
+	/* reserved */
+	u32 reserved1;
+
+	/* accelerated low latency queues requirement. driver needs to
+	 * support those requirements in order to use accelerated llq
 	 */
-	u32 max_tx_burst_size;
+	struct ena_admin_accel_mode_req accel_mode;
 };
 
 struct ena_admin_queue_ext_feature_fields {
...
@@ -816,7 +849,9 @@ struct ena_admin_host_info {
 	/* 0 : reserved
 	 * 1 : rx_offset
 	 * 2 : interrupt_moderation
-	 * 31:3 : reserved
+	 * 3 : rx_buf_mirroring
+	 * 4 : rss_configurable_function_key
+	 * 31:5 : reserved
 	 */
 	u32 driver_supported_features;
 };
...
@@ -1129,6 +1164,10 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3)
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
 
 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
...
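
The hunks above only define the new host-info capability bits; the code
that reports them to the device is not part of this excerpt. As a rough
sketch of how a driver would advertise them (only the mask names and the
driver_supported_features field come from the hunks above, the surrounding
host-info setup is assumed):

	/* illustrative: OR the new capability bits into the host info the
	 * driver hands to the device
	 */
	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
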
...
@@ -403,6 +403,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 	       0x0, io_sq->llq_info.desc_list_entry_size);
 	io_sq->llq_buf_ctrl.descs_left_in_line =
 		io_sq->llq_info.descs_num_before_header;
+	io_sq->disable_meta_caching =
+		io_sq->llq_info.disable_meta_caching;
 
 	if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 		io_sq->entries_in_tx_burst_left =
...
@@ -626,6 +628,10 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 
+	cmd.u.llq.accel_mode.u.set.enabled_flags =
+		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+		BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
...
@@ -643,6 +649,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	struct ena_admin_accel_mode_get llq_accel_mode_get;
 	u16 supported_feat;
 	int rc;
...
@@ -742,9 +749,17 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			llq_default_cfg->llq_num_decs_before_header,
 			supported_feat, llq_info->descs_num_before_header);
 	}
+	/* Check for accelerated queue supported */
+	llq_accel_mode_get = llq_features->accel_mode.u.get;
 
-	llq_info->max_entries_in_tx_burst =
-		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+	llq_info->disable_meta_caching =
+		!!(llq_accel_mode_get.supported_flags &
+		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
+	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+		llq_info->max_entries_in_tx_burst =
+			llq_accel_mode_get.max_tx_burst_size /
+			llq_default_cfg->llq_ring_entry_size_value;
 
 	rc = ena_com_set_llq(ena_dev);
 	if (rc)
...
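
As a worked example of the burst-size conversion in the last hunk (the
numbers are hypothetical, not taken from the patch): a device reporting
max_tx_burst_size = 512 bytes together with a 128-byte
llq_ring_entry_size_value yields max_entries_in_tx_burst = 512 / 128 = 4,
i.e. at most four LLQ entries may be pushed between two doorbells.
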
...
@@ -127,6 +127,7 @@ struct ena_com_llq_info {
 	u16 descs_num_before_header;
 	u16 descs_per_entry;
 	u16 max_entries_in_tx_burst;
+	bool disable_meta_caching;
 };
 
 struct ena_com_io_cq {
...
@@ -189,6 +190,8 @@ struct ena_com_io_sq {
 	enum queue_direction direction;
 	enum ena_admin_placement_policy_type mem_queue_type;
 
+	bool disable_meta_caching;
+
 	u32 msix_vector;
 	struct ena_com_tx_meta cached_tx_meta;
 	struct ena_com_llq_info llq_info;
...
@@ -230,11 +233,11 @@ struct ena_com_admin_sq {
 };
 
 struct ena_com_stats_admin {
-	u32 aborted_cmd;
-	u32 submitted_cmd;
-	u32 completed_cmd;
-	u32 out_of_space;
-	u32 no_completion;
+	u64 aborted_cmd;
+	u64 submitted_cmd;
+	u64 completed_cmd;
+	u64 out_of_space;
+	u64 no_completion;
 };
 
 struct ena_com_admin_queue {
...
...
@@ -285,11 +285,10 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 	return count;
 }
 
-static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-						 struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+			       struct ena_com_tx_meta *ena_meta)
 {
 	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
-	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
 	meta_desc = get_sq_desc(io_sq);
 	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
...
@@ -309,12 +308,13 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 	/* Extended meta desc */
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
 	meta_desc->len_ctrl |= (io_sq->phase <<
 		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
 
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
 	meta_desc->word2 |= ena_meta->l3_hdr_len &
 		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
 	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
...
@@ -325,13 +325,36 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
 
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+	return ena_com_sq_update_tail(io_sq);
+}
 
-	/* Cached the meta desc */
-	memcpy(&io_sq->cached_tx_meta, ena_meta,
-	       sizeof(struct ena_com_tx_meta));
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+						 struct ena_com_tx_ctx *ena_tx_ctx,
+						 bool *have_meta)
+{
+	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
-	return ena_com_sq_update_tail(io_sq);
+	/* When disable meta caching is set, don't bother to save the meta and
+	 * compare it to the stored version, just create the meta
+	 */
+	if (io_sq->disable_meta_caching) {
+		if (unlikely(!ena_tx_ctx->meta_valid))
+			return -EINVAL;
+
+		*have_meta = true;
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+		*have_meta = true;
+		/* Cache the meta desc */
+		memcpy(&io_sq->cached_tx_meta, ena_meta,
+		       sizeof(struct ena_com_tx_meta));
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	*have_meta = false;
+	return 0;
 }
 
 static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
...
@@ -402,12 +425,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	if (unlikely(rc))
 		return rc;
 
-	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
-			ena_tx_ctx);
-	if (have_meta) {
-		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
-		if (unlikely(rc))
-			return rc;
+	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+	if (unlikely(rc)) {
+		pr_err("failed to create and store tx meta desc\n");
+		return rc;
 	}
 
 	/* If the caller doesn't want to send packets */
...
...
@@ -157,7 +157,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
 	llq_info = &io_sq->llq_info;
 	num_descs = ena_tx_ctx->num_bufs;
 
-	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+	if (llq_info->disable_meta_caching ||
+	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
 		++num_descs;
 
 	if (num_descs > llq_info->descs_num_before_header) {
...
...
@@ -164,13 +164,13 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
 {
 	const struct ena_stats *ena_stats;
-	u32 *ptr;
+	u64 *ptr;
 	int i;
 
 	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
 		ena_stats = &ena_stats_ena_com_strings[i];
 
-		ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
-			      (uintptr_t)ena_stats->stat_offset);
+		ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
+			      (uintptr_t)ena_stats->stat_offset);
 
 		*(*data)++ = *ptr;
...
...
@@ -167,6 +167,7 @@ struct ena_napi {
 	struct ena_ring *rx_ring;
 	struct ena_ring *xdp_ring;
 	bool first_interrupt;
+	bool interrupts_masked;
 	u32 qid;
 	struct dim dim;
 };
...
@@ -297,6 +298,7 @@ struct ena_ring {
 	u8 tx_max_header_size;
 
 	bool first_interrupt;
+	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;
 
 	/* cpu for TPH */
...
@@ -398,6 +400,7 @@ struct ena_adapter {
 	bool wd_state;
 	bool dev_up_before_reset;
+	bool disable_meta_caching;
 	unsigned long last_keep_alive_jiffies;
 
 	struct u64_stats_sync syncp;
...
...
@@ -53,10 +53,15 @@
 #define PCI_DEV_ID_ENA_LLQ_VF 0xec21
 #endif
 
+#ifndef PCI_DEV_ID_ENA_RESRV0
+#define PCI_DEV_ID_ENA_RESRV0 0x0051
+#endif
+
 #define ENA_PCI_ID_TABLE_ENTRY(devid) \
 	{PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)},
 
 static const struct pci_device_id ena_pci_tbl[] = {
+	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_RESRV0)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF)
...