Commit 199671ea authored by David S. Miller

Merge branch 'ENA-features-and-cosmetic-changes'

Arthur Kiyanovski says:

====================
ENA features and cosmetic changes

Diff from V1 of this patchset:
Removed error prints patch

This patchset includes:
1. new rx offset feature
2. reduction of the driver load time
3. multiple cosmetic changes to the code
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b79f91f1 4bb7f4cf
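
The load-time reduction in this patchset comes from replacing the driver's fixed 5 ms admin-queue polling sleep with an exponential backoff that starts around 100 us and is capped at 5 ms. The helper below is the one added in ena_com.c further down; the surrounding wait loop is only a simplified, illustrative sketch (the real callers poll the admin completion queue, the reset state register, and the outstanding-command counter), and ena_wait_done() is a hypothetical name, not part of the patch.

#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000

/* Sleep for delay_us << exp, clamped to [ENA_MIN_ADMIN_POLL_US, ENA_MAX_ADMIN_POLL_US].
 * Taken from the ena_com.c hunk below.
 */
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
        delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
        delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
        usleep_range(delay_us, 2 * delay_us);
}

/* Illustrative wait loop (not from the patch): back off 100us, 200us, 400us, ...
 * up to 5ms per iteration instead of a fixed msleep(5).
 */
static int ena_wait_done(bool (*done)(void *), void *arg, unsigned long timeout_stamp)
{
        u32 exp = 0;

        while (!done(arg)) {
                if (time_is_before_jiffies(timeout_stamp))
                        return -ETIME;
                ena_delay_exponential_backoff_us(exp++, ENA_MIN_ADMIN_POLL_US);
        }
        return 0;
}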
ena_admin_defs.h:

@@ -768,8 +768,8 @@ enum ena_admin_os_type {
        ENA_ADMIN_OS_DPDK = 3,
        ENA_ADMIN_OS_FREEBSD = 4,
        ENA_ADMIN_OS_IPXE = 5,
        ENA_ADMIN_OS_ESXI = 6,
        ENA_ADMIN_OS_GROUPS_NUM = 6,
};

struct ena_admin_host_info {
@@ -813,7 +813,8 @@ struct ena_admin_host_info {
        u16 reserved;

-       /* 1 :0 : reserved
+       /* 0 : reserved
+        * 1 : rx_offset
         * 2 : interrupt_moderation
         * 31:3 : reserved
         */
@@ -1124,6 +1125,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
@@ -1133,4 +1136,4 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* aenq_link_change_desc */
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)

-#endif /*_ENA_ADMIN_H_ */
+#endif /* _ENA_ADMIN_H_ */
ena_com.c:

@@ -62,7 +62,9 @@
#define ENA_REGS_ADMIN_INTR_MASK 1

-#define ENA_POLL_MS 5
+#define ENA_MIN_ADMIN_POLL_US 100
+
+#define ENA_MAX_ADMIN_POLL_US 5000

/*****************************************************************************/
/*****************************************************************************/
@@ -200,17 +202,17 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)
{
+       if (unlikely(!queue->comp_ctx)) {
+               pr_err("Completion context is NULL\n");
+               return NULL;
+       }
+
        if (unlikely(command_id >= queue->q_depth)) {
                pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                       command_id, queue->q_depth);
                return NULL;
        }

-       if (unlikely(!queue->comp_ctx)) {
-               pr_err("Completion context is NULL\n");
-               return NULL;
-       }
-
        if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
                pr_err("Completion context is occupied\n");
                return NULL;
@@ -375,7 +377,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
        io_sq->bounce_buf_ctrl.next_to_use = 0;

        size = io_sq->bounce_buf_ctrl.buffer_size *
               io_sq->bounce_buf_ctrl.buffers_num;

        dev_node = dev_to_node(ena_dev->dmadev);
        set_dev_node(ena_dev->dmadev, ctx->numa_node);
@@ -523,9 +525,6 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
        if (unlikely(comp_status != 0))
                pr_err("admin command failed[%u]\n", comp_status);

-       if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
-               return -EINVAL;
-
        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
                return 0;
@@ -540,7 +539,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
                return -EINVAL;
        }

-       return 0;
+       return -EINVAL;
+}
+
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+       delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
+       delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+       usleep_range(delay_us, 2 * delay_us);
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
@@ -549,6 +555,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
        unsigned long flags = 0;
        unsigned long timeout;
        int ret;
+       u32 exp = 0;

        timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -572,7 +579,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                        goto err;
                }

-               msleep(ENA_POLL_MS);
+               ena_delay_exponential_backoff_us(exp++,
+                                                admin_queue->ena_dev->ena_min_poll_delay_us);
        }

        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -702,8 +710,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                /* The desc list entry size should be whole multiply of 8
                 * This requirement comes from __iowrite64_copy()
                 */
-               pr_err("illegal entry size %d\n",
-                      llq_info->desc_list_entry_size);
+               pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
                return -EINVAL;
        }
@@ -775,7 +782,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                if (admin_queue->auto_polling)
                        admin_queue->polling = true;
        } else {
-               pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
+               pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
                       comp_ctx->cmd_opcode, comp_ctx->status);
        }
        /* Check if shifted to polling mode.
@@ -943,12 +950,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                                u16 exp_state)
{
-       u32 val, i;
+       u32 val, exp = 0;
+       unsigned long timeout_stamp;

-       /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
-       timeout = (timeout * 100) / ENA_POLL_MS;
+       /* Convert timeout from resolution of 100ms to us resolution. */
+       timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);

-       for (i = 0; i < timeout; i++) {
+       while (1) {
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
@@ -960,10 +968,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                    exp_state)
                        return 0;

-               msleep(ENA_POLL_MS);
-       }
+               if (time_is_before_jiffies(timeout_stamp))
+                       return -ETIME;

-       return -ETIME;
+               ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+       }
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
@@ -1284,13 +1293,9 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)
{
-       /* Initial value of intr_delay_resolution might be 0 */
-       u16 prev_intr_delay_resolution =
-               ena_dev->intr_delay_resolution ?
-               ena_dev->intr_delay_resolution :
-               ENA_DEFAULT_INTR_DELAY_RESOLUTION;
+       u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

-       if (!intr_delay_resolution) {
+       if (unlikely(!intr_delay_resolution)) {
                pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
                intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
        }
@@ -1444,11 +1449,13 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;
+       u32 exp = 0;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-               msleep(ENA_POLL_MS);
+               ena_delay_exponential_backoff_us(exp++,
+                                                ena_dev->ena_min_poll_delay_us);
                spin_lock_irqsave(&admin_queue->q_lock, flags);
        }
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1796,6 +1803,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
        if (ret)
                goto error;

+       admin_queue->ena_dev = ena_dev;
        admin_queue->running_state = true;

        return 0;
@@ -2003,7 +2011,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq = &dev->aenq;
-       unsigned long long timestamp;
+       u64 timestamp;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;
@@ -2021,9 +2029,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
                 */
                dma_rmb();

-               timestamp =
-                       (unsigned long long)aenq_common->timestamp_low |
-                       ((unsigned long long)aenq_common->timestamp_high << 32);
+               timestamp = (u64)aenq_common->timestamp_low |
+                       ((u64)aenq_common->timestamp_high << 32);

                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom, timestamp);
@@ -2053,8 +2060,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        /* write the aenq doorbell after all AENQ descriptors were read */
        mb();
-       writel_relaxed((u32)aenq->head,
-                      dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+       writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2276,13 +2282,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
                               enum ena_admin_hash_functions func,
                               const u8 *key, u16 key_len, u32 init_val)
{
-       struct ena_rss *rss = &ena_dev->rss;
+       struct ena_admin_feature_rss_flow_hash_control *hash_key;
        struct ena_admin_get_feat_resp get_resp;
-       struct ena_admin_feature_rss_flow_hash_control *hash_key =
-               rss->hash_key;
        enum ena_admin_hash_functions old_func;
+       struct ena_rss *rss = &ena_dev->rss;
        int rc;

+       hash_key = rss->hash_key;
+
        /* Make sure size is a mult of DWs */
        if (unlikely(key_len & 0x3))
                return -EINVAL;
@@ -2294,7 +2301,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
        if (unlikely(rc))
                return rc;

-       if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+       if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
                pr_err("Flow hash function %d isn't supported\n", func);
                return -EOPNOTSUPP;
        }
...
ena_com.h:

@@ -77,6 +77,8 @@
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1

+#define ENA_HASH_KEY_SIZE 40
+
#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF

#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
@@ -237,6 +239,7 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
        void *q_dmadev;
+       struct ena_com_dev *ena_dev;
        spinlock_t q_lock; /* spinlock for the admin queue */

        struct ena_comp_ctx *comp_ctx;
@@ -349,6 +352,8 @@ struct ena_com_dev {
        struct ena_intr_moder_entry *intr_moder_tbl;

        struct ena_com_llq_info llq_info;
+
+       u32 ena_min_poll_delay_us;
};

struct ena_com_dev_get_features_ctx {
@@ -393,7 +398,7 @@ struct ena_aenq_handlers {
 */
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);

-/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 * @readless_supported: readless mode (enable/disable)
 */
@@ -515,7 +520,7 @@ void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @ena_dev: ENA communication layer struct
 *
- * This method go over the admin completion queue and wake up all the pending
+ * This method goes over the admin completion queue and wakes up all the pending
 * threads that wait on the commands wait event.
 *
 * @note: Should be called after MSI-X interrupt.
@@ -525,7 +530,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
/* ena_com_aenq_intr_handler - AENQ interrupt handler
 * @ena_dev: ENA communication layer struct
 *
- * This method go over the async event notification queue and call the proper
+ * This method goes over the async event notification queue and calls the proper
 * aenq handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
@@ -542,14 +547,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
 * @ena_dev: ENA communication layer struct
 *
- * This method wait until all the outstanding admin commands will be completed.
+ * This method waits until all the outstanding admin commands are completed.
 */
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);

/* ena_com_validate_version - Validate the device parameters
 * @ena_dev: ENA communication layer struct
 *
- * This method validate the device parameters are the same as the saved
+ * This method verifies the device parameters are the same as the saved
 * parameters in ena_dev.
 * This method is useful after device reset, to validate the device mac address
 * and the device offloads are the same as before the reset.
@@ -689,7 +694,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
 *
 * Retrieve the hash function from the device.
 *
- * @note: If the caller called ena_com_fill_hash_function but didn't flash
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
@@ -703,7 +708,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 *
 * Retrieve the hash key.
 *
- * @note: If the caller called ena_com_fill_hash_key but didn't flash
+ * @note: If the caller called ena_com_fill_hash_key but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
@@ -743,7 +748,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
 *
 * Retrieve the hash control from the device.
 *
- * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
@@ -795,7 +800,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
 *
 * Retrieve the RSS indirection table from the device.
 *
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
@@ -821,14 +826,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
/* ena_com_delete_debug_area - Free the debug area resources.
 * @ena_dev: ENA communication layer struct
 *
- * Free the allocate debug area.
+ * Free the allocated debug area.
 */
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);

/* ena_com_delete_host_info - Free the host info resources.
 * @ena_dev: ENA communication layer struct
 *
- * Free the allocate host info.
+ * Free the allocated host info.
 */
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
@@ -869,9 +874,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
 * @cmd_completion: command completion return value.
 * @cmd_comp_size: command completion size.
- * Submit an admin command and then wait until the device will return a
+ * Submit an admin command and then wait until the device returns a
 * completion.
- * The completion will be copyed into cmd_comp.
+ * The completion will be copied into cmd_comp.
 *
 * @return - 0 on success, negative value on failure.
 */
@@ -934,7 +939,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
/* ena_com_config_dev_mode - Configure the placement policy of the device.
 * @ena_dev: ENA communication layer struct
 * @llq_features: LLQ feature descriptor, retrieve via
 *                ena_com_get_dev_attr_feat.
 * @ena_llq_config: The default driver LLQ parameters configurations
 */
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
@@ -960,7 +965,7 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
- * @unmask: unask enable/disable
+ * @unmask: unmask enable/disable
 *
 * Prepare interrupt update register with the supplied parameters.
 */
...
ena_common_defs.h:

@@ -45,4 +45,4 @@ struct ena_common_mem_addr {
        u16 reserved16;
};

-#endif /*_ENA_COMMON_H_ */
+#endif /* _ENA_COMMON_H_ */
ena_eth_com.c:

@@ -519,7 +519,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
        struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
        u16 cdesc_idx = 0;
        u16 nb_hw_desc;
-       u16 i;
+       u16 i = 0;

        WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
@@ -538,13 +538,19 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                return -ENOSPC;
        }

-       for (i = 0; i < nb_hw_desc; i++) {
+       cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
+       ena_rx_ctx->pkt_offset = cdesc->offset;
+
+       do {
+               ena_buf[i].len = cdesc->length;
+               ena_buf[i].req_id = cdesc->req_id;
+
+               if (++i >= nb_hw_desc)
+                       break;
+
                cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
-               ena_buf->len = cdesc->length;
-               ena_buf->req_id = cdesc->req_id;
-               ena_buf++;
-       }
+       } while (1);

        /* Update SQ head ptr */
        io_sq->next_to_comp += nb_hw_desc;
@@ -578,10 +584,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
        desc->length = ena_buf->len;

-       desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
-       desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
-       desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
-       desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+       desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
+                    ENA_ETH_IO_RX_DESC_LAST_MASK |
+                    (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+                    ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

        desc->req_id = req_id;
...
ena_eth_com.h:

@@ -73,6 +73,7 @@ struct ena_com_rx_ctx {
        u32 hash;
        u16 descs;
        int max_bufs;
+       u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
@@ -95,7 +96,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
        writel(intr_reg->intr_control, io_cq->unmask_reg);
}

-static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
        u16 tail, next_to_comp, cnt;
@@ -113,7 +114,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
        int temp;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-               return ena_com_free_desc(io_sq) >= required_buffers;
+               return ena_com_free_q_entries(io_sq) >= required_buffers;

        /* This calculation doesn't need to be 100% accurate. So to reduce
         * the calculation overhead just Subtract 2 lines from the free descs
@@ -122,7 +123,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
         */
        temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

-       return ena_com_free_desc(io_sq) > temp;
+       return ena_com_free_q_entries(io_sq) > temp;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
...
ena_eth_io_defs.h:

@@ -264,7 +264,9 @@ struct ena_eth_io_rx_cdesc_base {
        u16 sub_qid;

-       u16 reserved;
+       u8 offset;
+
+       u8 reserved;
};

/* 8-word format */
@@ -412,4 +414,4 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)

-#endif /*_ENA_ETH_IO_H_ */
+#endif /* _ENA_ETH_IO_H_ */
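
The offset field added above is the core of the rx offset feature: the device writes the payload start offset into the first Rx completion descriptor, ena_com_rx_pkt() (see the ena_eth_com.c hunk earlier) copies it into the Rx context as pkt_offset, and ena_clean_rx_irq()/ena_rx_skb() in ena_netdev.c (further down) apply it as the first buffer's page offset. Below is a condensed, illustrative sketch of that flow; the struct and field names are taken from this patchset, while rx_offset_flow() itself is a hypothetical helper that only strings the steps together and is not part of the patch.

/* Illustrative only: how the rx offset propagates through the driver. */
static void rx_offset_flow(struct ena_eth_io_rx_cdesc_base *cdesc,
                           struct ena_com_rx_ctx *ena_rx_ctx,
                           struct ena_rx_buffer *rx_info)
{
        /* 1. ena_com layer: the device reported the payload offset in the
         *    first completion descriptor of the packet.
         */
        ena_rx_ctx->pkt_offset = cdesc->offset;

        /* 2. netdev layer: the offset applies to the first buffer only;
         *    once the first frag is attached, page_offset is reset to 0.
         */
        rx_info->page_offset = ena_rx_ctx->pkt_offset;
}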
ena_ethtool.c:

@@ -206,7 +206,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;

        return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
                + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -260,7 +260,6 @@ static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
        for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
                ena_stats = &ena_stats_global_strings[i];

                memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
                data += ETH_GSTRING_LEN;
        }
@@ -307,10 +306,8 @@ static int ena_get_coalesce(struct net_device *net_dev,
        struct ena_adapter *adapter = netdev_priv(net_dev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;

-       if (!ena_com_interrupt_moderation_supported(ena_dev)) {
-               /* the devie doesn't support interrupt moderation */
+       if (!ena_com_interrupt_moderation_supported(ena_dev))
                return -EOPNOTSUPP;
-       }

        coalesce->tx_coalesce_usecs =
                ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
@@ -326,7 +323,7 @@ static int ena_get_coalesce(struct net_device *net_dev,
        return 0;
}

-static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
        unsigned int val;
        int i;
@@ -337,7 +334,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
        adapter->tx_ring[i].smoothed_interval = val;
}

-static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
        unsigned int val;
        int i;
@@ -355,24 +352,22 @@ static int ena_set_coalesce(struct net_device *net_dev,
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        int rc;

-       if (!ena_com_interrupt_moderation_supported(ena_dev)) {
-               /* the devie doesn't support interrupt moderation */
+       if (!ena_com_interrupt_moderation_supported(ena_dev))
                return -EOPNOTSUPP;
-       }

        rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
                                                               coalesce->tx_coalesce_usecs);
        if (rc)
                return rc;

-       ena_update_tx_rings_intr_moderation(adapter);
+       ena_update_tx_rings_nonadaptive_intr_moderation(adapter);

        rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
                                                               coalesce->rx_coalesce_usecs);
        if (rc)
                return rc;

-       ena_update_rx_rings_intr_moderation(adapter);
+       ena_update_rx_rings_nonadaptive_intr_moderation(adapter);

        if (coalesce->use_adaptive_rx_coalesce &&
            !ena_com_get_adaptive_moderation_enabled(ena_dev))
...
ena_netdev.c:

@@ -1435,6 +1435,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
                                rx_info->page_offset, len, ENA_PAGE_SIZE);
+               /* The offset is non zero only for the first buffer */
+               rx_info->page_offset = 0;

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
@@ -1590,6 +1592,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
{
        u16 next_to_clean = rx_ring->next_to_clean;
        struct ena_com_rx_ctx ena_rx_ctx;
+       struct ena_rx_buffer *rx_info;
        struct ena_adapter *adapter;
        u32 res_budget, work_done;
        int rx_copybreak_pkt = 0;
@@ -1614,6 +1617,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
                ena_rx_ctx.max_bufs = rx_ring->sgl_size;
                ena_rx_ctx.descs = 0;
+               ena_rx_ctx.pkt_offset = 0;
                rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                                    rx_ring->ena_com_io_sq,
                                    &ena_rx_ctx);
@@ -1623,6 +1627,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                if (unlikely(ena_rx_ctx.descs == 0))
                        break;

+               rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+               rx_info->page_offset = ena_rx_ctx.pkt_offset;
+
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
                          rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
@@ -1684,7 +1691,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
        rx_ring->next_to_clean = next_to_clean;

-       refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+       refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
        refill_threshold =
                min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
                      ENA_RX_REFILL_THRESH_PACKET);
@@ -2235,7 +2242,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
                rc = ena_rss_init_default(adapter);
                if (rc && (rc != -EOPNOTSUPP)) {
                        netif_err(adapter, ifup, adapter->netdev,
                                  "Failed to init RSS rc: %d\n", rc);
                        return rc;
                }
        }
@@ -2308,7 +2315,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
        if (rc) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to create I/O TX queue num %d rc: %d\n",
                          qid, rc);
                return rc;
        }
@@ -2457,7 +2464,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
         * ones due to past queue allocation failures.
         */
        set_io_rings_size(adapter, adapter->requested_tx_ring_size,
                          adapter->requested_rx_ring_size);

        while (1) {
                if (ena_xdp_present(adapter)) {
@@ -2498,7 +2505,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
                if (rc != -ENOMEM) {
                        netif_err(adapter, ifup, adapter->netdev,
                                  "Queue creation failed with error code %d\n",
                                  rc);
                        return rc;
                }
@@ -2521,7 +2528,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
                new_rx_ring_size = cur_rx_ring_size / 2;

                if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
                    new_rx_ring_size < ENA_MIN_RING_SIZE) {
                        netif_err(adapter, ifup, adapter->netdev,
                                  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
                                  ENA_MIN_RING_SIZE);
@@ -3080,8 +3087,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
        return qid;
}

-static void ena_config_host_info(struct ena_com_dev *ena_dev,
-                                struct pci_dev *pdev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
        struct ena_admin_host_info *host_info;
        int rc;
@@ -3111,6 +3117,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
        host_info->num_cpus = num_online_cpus();

        host_info->driver_supported_features =
+               ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
                ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;

        rc = ena_com_set_host_attributes(ena_dev);
@@ -3686,8 +3693,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
        for (i = 0; i < adapter->num_io_queues; i++) {
                rx_ring = &adapter->rx_ring[i];

-               refill_required =
-                       ena_com_free_desc(rx_ring->ena_com_io_sq);
+               refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
                if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
                        rx_ring->empty_rx_queue++;
@@ -3825,11 +3831,11 @@ static void ena_timer_service(struct timer_list *t)
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}

-static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
                                     struct ena_com_dev *ena_dev,
                                     struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
-       int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
+       u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

        if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
                struct ena_admin_queue_ext_feature_fields *max_queue_ext =
@@ -4115,8 +4121,8 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
-       struct ena_com_dev_get_features_ctx get_feat_ctx;
        struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+       struct ena_com_dev_get_features_ctx get_feat_ctx;
        struct ena_llq_configurations llq_config;
        struct ena_com_dev *ena_dev = NULL;
        struct ena_adapter *adapter;
@@ -4160,6 +4166,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_free_region;
        }

+       ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
+
        ena_dev->dmadev = &pdev->dev;

        rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
@@ -4183,7 +4191,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
        calc_queue_ctx.pdev = pdev;

-       /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
+       /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
         * Updated during device initialization with the real granularity
         */
        ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
@@ -4227,12 +4235,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->num_io_queues = max_num_io_queues;
        adapter->max_num_io_queues = max_num_io_queues;
-       adapter->last_monitored_tx_qid = 0;

        adapter->xdp_first_ring = 0;
        adapter->xdp_num_queues = 0;

+       adapter->last_monitored_tx_qid = 0;
        adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
        adapter->wd_state = wd_state;
...
ena_netdev.h:

@@ -50,12 +50,6 @@
#define DRV_MODULE_GEN_SUBMINOR 0

#define DRV_MODULE_NAME "ena"
-#ifndef DRV_MODULE_GENERATION
-#define DRV_MODULE_GENERATION \
-       __stringify(DRV_MODULE_GEN_MAJOR) "." \
-       __stringify(DRV_MODULE_GEN_MINOR) "." \
-       __stringify(DRV_MODULE_GEN_SUBMINOR) "K"
-#endif

#define DEVICE_NAME "Elastic Network Adapter (ENA)"
@@ -104,8 +98,6 @@
#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)

-#define ENA_HASH_KEY_SIZE 40
-
/* The number of tx packet completions that will be handled each NAPI poll
 * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
 */
@@ -137,6 +129,8 @@
#define ENA_IO_IRQ_FIRST_IDX 1
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))

+#define ENA_ADMIN_POLL_DELAY_US 100
+
/* ENA device should send keep alive msg every 1 sec.
 * We wait for 6 sec just to be on the safe side.
 */
...
ena_regs_defs.h:

@@ -154,4 +154,4 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000

-#endif /*_ENA_REGS_H_ */
+#endif /* _ENA_REGS_H_ */