Commit 56184e01 authored by Jesse Brandeburg, committed by Jeff Kirsher

iavf: rename most of i40e strings

This is the big rename patch: it takes most of the i40e_
and I40E_ strings and renames them to iavf_ and IAVF_.

Some of the adminq code, as well as most of the client
interface code used by RDMA, is left unchanged in order
to indicate that the driver is talking to code that is
not internal to iavf.
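
A rename of this scale is normally scripted rather than done by hand. As a
rough illustration only (the commands below are not part of this commit; the
exact script, file set, and the manual fixups afterwards are assumptions), the
bulk of the substitution could be done over the iavf sources with GNU sed:

    # hypothetical bulk rename over the iavf driver sources
    cd drivers/net/ethernet/intel/iavf
    sed -i -e 's/\bi40e_/iavf_/g' -e 's/\bI40E_/IAVF_/g' *.c *.h

with the adminq and client-interface identifiers that are shared with i40e
then restored by hand, so the boundary to non-iavf code stays visible.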
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent ad64ed8b
@@ -36,7 +36,7 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
{ {
iavf_status ret_code; iavf_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring, i40e_mem_atq_ring,
(hw->aq.num_asq_entries * (hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)), sizeof(struct i40e_aq_desc)),
@@ -44,11 +44,11 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
if (ret_code) if (ret_code)
return ret_code; return ret_code;
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries * (hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details))); sizeof(struct i40e_asq_cmd_details)));
if (ret_code) { if (ret_code) {
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code; return ret_code;
} }
@@ -63,7 +63,7 @@ static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
{ {
iavf_status ret_code; iavf_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring, i40e_mem_arq_ring,
(hw->aq.num_arq_entries * (hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)), sizeof(struct i40e_aq_desc)),
@@ -81,7 +81,7 @@ static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
**/ **/
static void i40e_free_adminq_asq(struct iavf_hw *hw) static void i40e_free_adminq_asq(struct iavf_hw *hw)
{ {
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
} }
/** /**
@@ -93,7 +93,7 @@ static void i40e_free_adminq_asq(struct iavf_hw *hw)
**/ **/
static void i40e_free_adminq_arq(struct iavf_hw *hw) static void i40e_free_adminq_arq(struct iavf_hw *hw)
{ {
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
} }
/** /**
@@ -103,7 +103,7 @@ static void i40e_free_adminq_arq(struct iavf_hw *hw)
static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
{ {
struct i40e_aq_desc *desc; struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi; struct iavf_dma_mem *bi;
iavf_status ret_code; iavf_status ret_code;
int i; int i;
@@ -112,17 +112,17 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
*/ */
/* buffer_info structures do not need alignment */ /* buffer_info structures do not need alignment */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries * (hw->aq.num_arq_entries *
sizeof(struct i40e_dma_mem))); sizeof(struct iavf_dma_mem)));
if (ret_code) if (ret_code)
goto alloc_arq_bufs; goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */ /* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) { for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i]; bi = &hw->aq.arq.r.arq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi, ret_code = iavf_allocate_dma_mem(hw, bi,
i40e_mem_arq_buf, i40e_mem_arq_buf,
hw->aq.arq_buf_size, hw->aq.arq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT); IAVF_ADMINQ_DESC_ALIGNMENT);
@@ -158,8 +158,8 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
/* don't try to free the one that failed... */ /* don't try to free the one that failed... */
i--; i--;
for (; i >= 0; i--) for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code; return ret_code;
} }
@@ -170,22 +170,22 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
**/ **/
static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw) static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
{ {
struct i40e_dma_mem *bi; struct iavf_dma_mem *bi;
iavf_status ret_code; iavf_status ret_code;
int i; int i;
/* No mapped memory needed yet, just the buffer info structures */ /* No mapped memory needed yet, just the buffer info structures */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries * (hw->aq.num_asq_entries *
sizeof(struct i40e_dma_mem))); sizeof(struct iavf_dma_mem)));
if (ret_code) if (ret_code)
goto alloc_asq_bufs; goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */ /* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) { for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i]; bi = &hw->aq.asq.r.asq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi, ret_code = iavf_allocate_dma_mem(hw, bi,
i40e_mem_asq_buf, i40e_mem_asq_buf,
hw->aq.asq_buf_size, hw->aq.asq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT); IAVF_ADMINQ_DESC_ALIGNMENT);
@@ -199,8 +199,8 @@ static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
/* don't try to free the one that failed... */ /* don't try to free the one that failed... */
i--; i--;
for (; i >= 0; i--) for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code; return ret_code;
} }
@@ -215,13 +215,13 @@ static void i40e_free_arq_bufs(struct iavf_hw *hw)
/* free descriptors */ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++) for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */ /* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */ /* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
} }
/** /**
@@ -235,16 +235,16 @@ static void i40e_free_asq_bufs(struct iavf_hw *hw)
/* only unmap if the address is non-NULL */ /* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++) for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa) if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */ /* free the buffer info list */
i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */ /* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */ /* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
} }
/** /**
@@ -570,7 +570,7 @@ iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
**/ **/
static u16 i40e_clean_asq(struct iavf_hw *hw) static u16 i40e_clean_asq(struct iavf_hw *hw)
{ {
struct i40e_adminq_ring *asq = &hw->aq.asq; struct iavf_adminq_ring *asq = &hw->aq.asq;
struct i40e_asq_cmd_details *details; struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean; u16 ntc = asq->next_to_clean;
struct i40e_aq_desc desc_cb; struct i40e_aq_desc desc_cb;
@@ -579,7 +579,7 @@ static u16 i40e_clean_asq(struct iavf_hw *hw)
desc = IAVF_ADMINQ_DESC(*asq, ntc); desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) { while (rd32(hw, hw->aq.asq.head) != ntc) {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) { if (details->callback) {
@@ -600,7 +600,7 @@ static u16 i40e_clean_asq(struct iavf_hw *hw)
asq->next_to_clean = ntc; asq->next_to_clean = ntc;
return I40E_DESC_UNUSED(asq); return IAVF_DESC_UNUSED(asq);
} }
/** /**
@@ -634,18 +634,18 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
u16 buff_size, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details) struct i40e_asq_cmd_details *cmd_details)
{ {
iavf_status status = 0; struct iavf_dma_mem *dma_buff = NULL;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details; struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring; struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false; bool cmd_completed = false;
iavf_status status = 0;
u16 retval = 0; u16 retval = 0;
u32 val = 0; u32 val = 0;
mutex_lock(&hw->aq.asq_mutex); mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) { if (hw->aq.asq.count == 0) {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n"); "AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY; status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_error; goto asq_send_command_error;
@@ -655,7 +655,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
val = rd32(hw, hw->aq.asq.head); val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) { if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val); "AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY; status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_error; goto asq_send_command_error;
@@ -685,7 +685,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (buff_size > hw->aq.asq_buf_size) { if (buff_size > hw->aq.asq_buf_size) {
iavf_debug(hw, iavf_debug(hw,
I40E_DEBUG_AQ_MESSAGE, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n", "AQTX: Invalid buffer size: %d.\n",
buff_size); buff_size);
status = I40E_ERR_INVALID_SIZE; status = I40E_ERR_INVALID_SIZE;
@@ -694,7 +694,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (details->postpone && !details->async) { if (details->postpone && !details->async) {
iavf_debug(hw, iavf_debug(hw,
I40E_DEBUG_AQ_MESSAGE, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag"); "AQTX: Async flag not set along with postpone flag");
status = I40E_ERR_PARAM; status = I40E_ERR_PARAM;
goto asq_send_command_error; goto asq_send_command_error;
@@ -709,7 +709,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
*/ */
if (i40e_clean_asq(hw) == 0) { if (i40e_clean_asq(hw) == 0) {
iavf_debug(hw, iavf_debug(hw,
I40E_DEBUG_AQ_MESSAGE, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n"); "AQTX: Error queue is full.\n");
status = I40E_ERR_ADMIN_QUEUE_FULL; status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error; goto asq_send_command_error;
@@ -738,8 +738,8 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
} }
/* bump the tail */ /* bump the tail */
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size); buff, buff_size);
(hw->aq.asq.next_to_use)++; (hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count) if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -772,7 +772,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
retval = le16_to_cpu(desc->retval); retval = le16_to_cpu(desc->retval);
if (retval != 0) { if (retval != 0) {
iavf_debug(hw, iavf_debug(hw,
I40E_DEBUG_AQ_MESSAGE, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n", "AQTX: Command completed with error 0x%X.\n",
retval); retval);
@@ -789,9 +789,9 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
} }
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n"); "AQTX: desc and buffer writeback:\n");
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* save writeback aq if requested */ /* save writeback aq if requested */
if (details->wb_desc) if (details->wb_desc)
@@ -801,11 +801,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if ((!cmd_completed) && if ((!cmd_completed) &&
(!details->async && !details->postpone)) { (!details->async && !details->postpone)) {
if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n"); "AQTX: AQ Critical error.\n");
status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else { } else {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n"); "AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
} }
@@ -848,7 +848,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
u16 ntc = hw->aq.arq.next_to_clean; u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc; struct i40e_aq_desc *desc;
iavf_status ret_code = 0; iavf_status ret_code = 0;
struct i40e_dma_mem *bi; struct iavf_dma_mem *bi;
u16 desc_idx; u16 desc_idx;
u16 datalen; u16 datalen;
u16 flags; u16 flags;
@@ -861,7 +861,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
mutex_lock(&hw->aq.arq_mutex); mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) { if (hw->aq.arq.count == 0) {
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n"); "AQRX: Admin queue not initialized.\n");
ret_code = I40E_ERR_QUEUE_EMPTY; ret_code = I40E_ERR_QUEUE_EMPTY;
goto clean_arq_element_err; goto clean_arq_element_err;
@@ -885,7 +885,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
if (flags & I40E_AQ_FLAG_ERR) { if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw, iavf_debug(hw,
I40E_DEBUG_AQ_MESSAGE, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n", "AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status); hw->aq.arq_last_status);
} }
@@ -897,8 +897,8 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len); e->msg_len);
iavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size); hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc, /* Restore the original datalen and buffer address in the desc,
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_H_ #ifndef _IAVF_ADMINQ_H_
#define _I40E_ADMINQ_H_ #define _IAVF_ADMINQ_H_
#include "i40e_osdep.h" #include "i40e_osdep.h"
#include "i40e_status.h" #include "i40e_status.h"
@@ -13,14 +13,14 @@
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096 #define IAVF_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring { struct iavf_adminq_ring {
struct i40e_virt_mem dma_head; /* space for dma structures */ struct iavf_virt_mem dma_head; /* space for dma structures */
struct i40e_dma_mem desc_buf; /* descriptor ring memory */ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
struct i40e_virt_mem cmd_buf; /* command buffer memory */ struct iavf_virt_mem cmd_buf; /* command buffer memory */
union { union {
struct i40e_dma_mem *asq_bi; struct iavf_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi; struct iavf_dma_mem *arq_bi;
} r; } r;
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
@@ -61,9 +61,9 @@ struct i40e_arq_event_info {
}; };
/* Admin Queue information */ /* Admin Queue information */
struct i40e_adminq_info { struct iavf_adminq_info {
struct i40e_adminq_ring arq; /* receive queue */ struct iavf_adminq_ring arq; /* receive queue */
struct i40e_adminq_ring asq; /* send queue */ struct iavf_adminq_ring asq; /* send queue */
u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
u16 num_arq_entries; /* receive queue depth */ u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */ u16 num_asq_entries; /* send queue depth */
@@ -132,4 +132,4 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
#endif /* _I40E_ADMINQ_H_ */ #endif /* _IAVF_ADMINQ_H_ */
@@ -5,7 +5,9 @@
#define _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_
/* This header file defines the i40e Admin Queue commands and is shared between /* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software. * i40e Firmware and Software. Do not change the names in this file to IAVF
* because this file should be diff-able against the i40e version, even
* though many parts have been removed in this VF version.
* *
* This file needs to comply with the Linux Kernel coding style. * This file needs to comply with the Linux Kernel coding style.
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ALLOC_H_ #ifndef _IAVF_ALLOC_H_
#define _I40E_ALLOC_H_ #define _IAVF_ALLOC_H_
struct iavf_hw; struct iavf_hw;
/* Memory allocation types */ /* Memory allocation types */
enum i40e_memory_type { enum iavf_memory_type {
i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
i40e_mem_asq_buf = 1, iavf_mem_asq_buf = 1,
i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
i40e_mem_pd = 5, /* Page Descriptor */ iavf_mem_pd = 5, /* Page Descriptor */
i40e_mem_bp = 6, /* Backing Page - 4KB */ iavf_mem_bp = 6, /* Backing Page - 4KB */
i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
i40e_mem_reserved iavf_mem_reserved
}; };
/* prototype for functions used for dynamic memory allocation */ /* prototype for functions used for dynamic memory allocation */
iavf_status i40e_allocate_dma_mem(struct iavf_hw *hw, struct i40e_dma_mem *mem, iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
enum i40e_memory_type type, enum iavf_memory_type type,
u64 size, u32 alignment); u64 size, u32 alignment);
iavf_status i40e_free_dma_mem(struct iavf_hw *hw, struct i40e_dma_mem *mem); iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem);
iavf_status i40e_allocate_virt_mem(struct iavf_hw *hw, iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
struct i40e_virt_mem *mem, u32 size); struct iavf_virt_mem *mem, u32 size);
iavf_status i40e_free_virt_mem(struct iavf_hw *hw, struct i40e_virt_mem *mem); iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */ #endif /* _IAVF_ALLOC_H_ */
@@ -7,28 +7,28 @@
#include <linux/avf/virtchnl.h> #include <linux/avf/virtchnl.h>
/** /**
* i40e_set_mac_type - Sets MAC type * iavf_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* *
* This function sets the mac type of the adapter based on the * This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure. * vendor ID and device ID stored in the hw structure.
**/ **/
iavf_status i40e_set_mac_type(struct iavf_hw *hw) iavf_status iavf_set_mac_type(struct iavf_hw *hw)
{ {
iavf_status status = 0; iavf_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) { switch (hw->device_id) {
case IAVF_DEV_ID_X722_VF: case IAVF_DEV_ID_X722_VF:
hw->mac.type = I40E_MAC_X722_VF; hw->mac.type = IAVF_MAC_X722_VF;
break; break;
case IAVF_DEV_ID_VF: case IAVF_DEV_ID_VF:
case IAVF_DEV_ID_VF_HV: case IAVF_DEV_ID_VF_HV:
case IAVF_DEV_ID_ADAPTIVE_VF: case IAVF_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF; hw->mac.type = IAVF_MAC_VF;
break; break;
default: default:
hw->mac.type = I40E_MAC_GENERIC; hw->mac.type = IAVF_MAC_GENERIC;
break; break;
} }
} else { } else {
@@ -344,7 +344,7 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
} }
/** /**
* i40e_aq_get_set_rss_lut * iavf_aq_get_set_rss_lut
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
* @vsi_id: vsi fw index * @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false * @pf_lut: for PF table set true, for VSI table set false
@@ -354,7 +354,7 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
* *
* Internal function to get or set RSS look up table * Internal function to get or set RSS look up table
**/ **/
static iavf_status i40e_aq_get_set_rss_lut(struct iavf_hw *hw, static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
u16 vsi_id, bool pf_lut, u16 vsi_id, bool pf_lut,
u8 *lut, u16 lut_size, u8 *lut, u16 lut_size,
bool set) bool set)
@@ -410,7 +410,7 @@ static iavf_status i40e_aq_get_set_rss_lut(struct iavf_hw *hw,
iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size) bool pf_lut, u8 *lut, u16 lut_size)
{ {
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false); false);
} }
@@ -427,11 +427,11 @@ iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size) bool pf_lut, u8 *lut, u16 lut_size)
{ {
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
} }
/** /**
* i40e_aq_get_set_rss_key * iavf_aq_get_set_rss_key
* @hw: pointer to the hw struct * @hw: pointer to the hw struct
* @vsi_id: vsi fw index * @vsi_id: vsi fw index
* @key: pointer to key info struct * @key: pointer to key info struct
@@ -440,7 +440,7 @@ iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
* get the RSS key per VSI * get the RSS key per VSI
**/ **/
static static
iavf_status i40e_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key, struct i40e_aqc_get_set_rss_key_data *key,
bool set) bool set)
{ {
@@ -482,7 +482,7 @@ iavf_status i40e_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key) struct i40e_aqc_get_set_rss_key_data *key)
{ {
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
} }
/** /**
@@ -496,7 +496,7 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key) struct i40e_aqc_get_set_rss_key_data *key)
{ {
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
} }
/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the /* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
@@ -518,350 +518,350 @@ iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
* Use the rest of the fields to look at the tunnels, inner protocols, etc * Use the rest of the fields to look at the tunnels, inner protocols, etc
* ELSE * ELSE
* Use the enum i40e_rx_l2_ptype to decode the packet type * Use the enum iavf_rx_l2_ptype to decode the packet type
* ENDIF * ENDIF
*/ */
/* macro to make the table lines short */ /* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ #define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
{ PTYPE, \ { PTYPE, \
1, \ 1, \
I40E_RX_PTYPE_OUTER_##OUTER_IP, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
I40E_RX_PTYPE_##OUTER_FRAG, \ IAVF_RX_PTYPE_##OUTER_FRAG, \
I40E_RX_PTYPE_TUNNEL_##T, \ IAVF_RX_PTYPE_TUNNEL_##T, \
I40E_RX_PTYPE_TUNNEL_END_##TE, \ IAVF_RX_PTYPE_TUNNEL_END_##TE, \
I40E_RX_PTYPE_##TEF, \ IAVF_RX_PTYPE_##TEF, \
I40E_RX_PTYPE_INNER_PROT_##I, \ IAVF_RX_PTYPE_INNER_PROT_##I, \
I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ #define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros makes the table fit but are terse */ /* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC #define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
/* Lookup table mapping the HW PTYPE to the bit field for decoding */ /* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded iavf_ptype_lookup[] = { struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
/* L2 Packet types */ /* L2 Packet types */
I40E_PTT_UNUSED_ENTRY(0), IAVF_PTT_UNUSED_ENTRY(0),
I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT_UNUSED_ENTRY(4), IAVF_PTT_UNUSED_ENTRY(4),
I40E_PTT_UNUSED_ENTRY(5), IAVF_PTT_UNUSED_ENTRY(5),
I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT_UNUSED_ENTRY(8), IAVF_PTT_UNUSED_ENTRY(8),
I40E_PTT_UNUSED_ENTRY(9), IAVF_PTT_UNUSED_ENTRY(9),
I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
/* Non Tunneled IPv4 */ /* Non Tunneled IPv4 */
I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(25), IAVF_PTT_UNUSED_ENTRY(25),
I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv4 --> IPv4 */ /* IPv4 --> IPv4 */
I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(32), IAVF_PTT_UNUSED_ENTRY(32),
I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> IPv6 */ /* IPv4 --> IPv6 */
I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(39), IAVF_PTT_UNUSED_ENTRY(39),
I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT */ /* IPv4 --> GRE/NAT */
I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> IPv4 */ /* IPv4 --> GRE/NAT --> IPv4 */
I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(47), IAVF_PTT_UNUSED_ENTRY(47),
I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> IPv6 */ /* IPv4 --> GRE/NAT --> IPv6 */
I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(54), IAVF_PTT_UNUSED_ENTRY(54),
I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC */ /* IPv4 --> GRE/NAT --> MAC */
I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> MAC --> IPv4 */ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(62), IAVF_PTT_UNUSED_ENTRY(62),
I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT -> MAC --> IPv6 */ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(69), IAVF_PTT_UNUSED_ENTRY(69),
I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC/VLAN */ /* IPv4 --> GRE/NAT --> MAC/VLAN */
I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(77), IAVF_PTT_UNUSED_ENTRY(77),
I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(84), IAVF_PTT_UNUSED_ENTRY(84),
I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* Non Tunneled IPv6 */ /* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
I40E_PTT_UNUSED_ENTRY(91), IAVF_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv6 --> IPv4 */ /* IPv6 --> IPv4 */
I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(98), IAVF_PTT_UNUSED_ENTRY(98),
I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> IPv6 */ /* IPv6 --> IPv6 */
I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(105), IAVF_PTT_UNUSED_ENTRY(105),
I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT */ /* IPv6 --> GRE/NAT */
I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> IPv4 */ /* IPv6 --> GRE/NAT -> IPv4 */
I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(113), IAVF_PTT_UNUSED_ENTRY(113),
I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> IPv6 */ /* IPv6 --> GRE/NAT -> IPv6 */
I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(120), IAVF_PTT_UNUSED_ENTRY(120),
I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC */ /* IPv6 --> GRE/NAT -> MAC */
I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC -> IPv4 */ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(128), IAVF_PTT_UNUSED_ENTRY(128),
I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC -> IPv6 */ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(135), IAVF_PTT_UNUSED_ENTRY(135),
I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN */ /* IPv6 --> GRE/NAT -> MAC/VLAN */
I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(143), IAVF_PTT_UNUSED_ENTRY(143),
I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(150), IAVF_PTT_UNUSED_ENTRY(150),
I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */ /* unused entries */
I40E_PTT_UNUSED_ENTRY(154), IAVF_PTT_UNUSED_ENTRY(154),
I40E_PTT_UNUSED_ENTRY(155), IAVF_PTT_UNUSED_ENTRY(155),
I40E_PTT_UNUSED_ENTRY(156), IAVF_PTT_UNUSED_ENTRY(156),
I40E_PTT_UNUSED_ENTRY(157), IAVF_PTT_UNUSED_ENTRY(157),
I40E_PTT_UNUSED_ENTRY(158), IAVF_PTT_UNUSED_ENTRY(158),
I40E_PTT_UNUSED_ENTRY(159), IAVF_PTT_UNUSED_ENTRY(159),
I40E_PTT_UNUSED_ENTRY(160), IAVF_PTT_UNUSED_ENTRY(160),
I40E_PTT_UNUSED_ENTRY(161), IAVF_PTT_UNUSED_ENTRY(161),
I40E_PTT_UNUSED_ENTRY(162), IAVF_PTT_UNUSED_ENTRY(162),
I40E_PTT_UNUSED_ENTRY(163), IAVF_PTT_UNUSED_ENTRY(163),
I40E_PTT_UNUSED_ENTRY(164), IAVF_PTT_UNUSED_ENTRY(164),
I40E_PTT_UNUSED_ENTRY(165), IAVF_PTT_UNUSED_ENTRY(165),
I40E_PTT_UNUSED_ENTRY(166), IAVF_PTT_UNUSED_ENTRY(166),
I40E_PTT_UNUSED_ENTRY(167), IAVF_PTT_UNUSED_ENTRY(167),
I40E_PTT_UNUSED_ENTRY(168), IAVF_PTT_UNUSED_ENTRY(168),
I40E_PTT_UNUSED_ENTRY(169), IAVF_PTT_UNUSED_ENTRY(169),
I40E_PTT_UNUSED_ENTRY(170), IAVF_PTT_UNUSED_ENTRY(170),
I40E_PTT_UNUSED_ENTRY(171), IAVF_PTT_UNUSED_ENTRY(171),
I40E_PTT_UNUSED_ENTRY(172), IAVF_PTT_UNUSED_ENTRY(172),
I40E_PTT_UNUSED_ENTRY(173), IAVF_PTT_UNUSED_ENTRY(173),
I40E_PTT_UNUSED_ENTRY(174), IAVF_PTT_UNUSED_ENTRY(174),
I40E_PTT_UNUSED_ENTRY(175), IAVF_PTT_UNUSED_ENTRY(175),
I40E_PTT_UNUSED_ENTRY(176), IAVF_PTT_UNUSED_ENTRY(176),
I40E_PTT_UNUSED_ENTRY(177), IAVF_PTT_UNUSED_ENTRY(177),
I40E_PTT_UNUSED_ENTRY(178), IAVF_PTT_UNUSED_ENTRY(178),
I40E_PTT_UNUSED_ENTRY(179), IAVF_PTT_UNUSED_ENTRY(179),
I40E_PTT_UNUSED_ENTRY(180), IAVF_PTT_UNUSED_ENTRY(180),
I40E_PTT_UNUSED_ENTRY(181), IAVF_PTT_UNUSED_ENTRY(181),
I40E_PTT_UNUSED_ENTRY(182), IAVF_PTT_UNUSED_ENTRY(182),
I40E_PTT_UNUSED_ENTRY(183), IAVF_PTT_UNUSED_ENTRY(183),
I40E_PTT_UNUSED_ENTRY(184), IAVF_PTT_UNUSED_ENTRY(184),
I40E_PTT_UNUSED_ENTRY(185), IAVF_PTT_UNUSED_ENTRY(185),
I40E_PTT_UNUSED_ENTRY(186), IAVF_PTT_UNUSED_ENTRY(186),
I40E_PTT_UNUSED_ENTRY(187), IAVF_PTT_UNUSED_ENTRY(187),
I40E_PTT_UNUSED_ENTRY(188), IAVF_PTT_UNUSED_ENTRY(188),
I40E_PTT_UNUSED_ENTRY(189), IAVF_PTT_UNUSED_ENTRY(189),
I40E_PTT_UNUSED_ENTRY(190), IAVF_PTT_UNUSED_ENTRY(190),
I40E_PTT_UNUSED_ENTRY(191), IAVF_PTT_UNUSED_ENTRY(191),
I40E_PTT_UNUSED_ENTRY(192), IAVF_PTT_UNUSED_ENTRY(192),
I40E_PTT_UNUSED_ENTRY(193), IAVF_PTT_UNUSED_ENTRY(193),
I40E_PTT_UNUSED_ENTRY(194), IAVF_PTT_UNUSED_ENTRY(194),
I40E_PTT_UNUSED_ENTRY(195), IAVF_PTT_UNUSED_ENTRY(195),
I40E_PTT_UNUSED_ENTRY(196), IAVF_PTT_UNUSED_ENTRY(196),
I40E_PTT_UNUSED_ENTRY(197), IAVF_PTT_UNUSED_ENTRY(197),
I40E_PTT_UNUSED_ENTRY(198), IAVF_PTT_UNUSED_ENTRY(198),
I40E_PTT_UNUSED_ENTRY(199), IAVF_PTT_UNUSED_ENTRY(199),
I40E_PTT_UNUSED_ENTRY(200), IAVF_PTT_UNUSED_ENTRY(200),
I40E_PTT_UNUSED_ENTRY(201), IAVF_PTT_UNUSED_ENTRY(201),
I40E_PTT_UNUSED_ENTRY(202), IAVF_PTT_UNUSED_ENTRY(202),
I40E_PTT_UNUSED_ENTRY(203), IAVF_PTT_UNUSED_ENTRY(203),
I40E_PTT_UNUSED_ENTRY(204), IAVF_PTT_UNUSED_ENTRY(204),
I40E_PTT_UNUSED_ENTRY(205), IAVF_PTT_UNUSED_ENTRY(205),
I40E_PTT_UNUSED_ENTRY(206), IAVF_PTT_UNUSED_ENTRY(206),
I40E_PTT_UNUSED_ENTRY(207), IAVF_PTT_UNUSED_ENTRY(207),
I40E_PTT_UNUSED_ENTRY(208), IAVF_PTT_UNUSED_ENTRY(208),
I40E_PTT_UNUSED_ENTRY(209), IAVF_PTT_UNUSED_ENTRY(209),
I40E_PTT_UNUSED_ENTRY(210), IAVF_PTT_UNUSED_ENTRY(210),
I40E_PTT_UNUSED_ENTRY(211), IAVF_PTT_UNUSED_ENTRY(211),
I40E_PTT_UNUSED_ENTRY(212), IAVF_PTT_UNUSED_ENTRY(212),
I40E_PTT_UNUSED_ENTRY(213), IAVF_PTT_UNUSED_ENTRY(213),
I40E_PTT_UNUSED_ENTRY(214), IAVF_PTT_UNUSED_ENTRY(214),
I40E_PTT_UNUSED_ENTRY(215), IAVF_PTT_UNUSED_ENTRY(215),
I40E_PTT_UNUSED_ENTRY(216), IAVF_PTT_UNUSED_ENTRY(216),
I40E_PTT_UNUSED_ENTRY(217), IAVF_PTT_UNUSED_ENTRY(217),
I40E_PTT_UNUSED_ENTRY(218), IAVF_PTT_UNUSED_ENTRY(218),
I40E_PTT_UNUSED_ENTRY(219), IAVF_PTT_UNUSED_ENTRY(219),
I40E_PTT_UNUSED_ENTRY(220), IAVF_PTT_UNUSED_ENTRY(220),
I40E_PTT_UNUSED_ENTRY(221), IAVF_PTT_UNUSED_ENTRY(221),
I40E_PTT_UNUSED_ENTRY(222), IAVF_PTT_UNUSED_ENTRY(222),
I40E_PTT_UNUSED_ENTRY(223), IAVF_PTT_UNUSED_ENTRY(223),
I40E_PTT_UNUSED_ENTRY(224), IAVF_PTT_UNUSED_ENTRY(224),
I40E_PTT_UNUSED_ENTRY(225), IAVF_PTT_UNUSED_ENTRY(225),
I40E_PTT_UNUSED_ENTRY(226), IAVF_PTT_UNUSED_ENTRY(226),
I40E_PTT_UNUSED_ENTRY(227), IAVF_PTT_UNUSED_ENTRY(227),
I40E_PTT_UNUSED_ENTRY(228), IAVF_PTT_UNUSED_ENTRY(228),
I40E_PTT_UNUSED_ENTRY(229), IAVF_PTT_UNUSED_ENTRY(229),
I40E_PTT_UNUSED_ENTRY(230), IAVF_PTT_UNUSED_ENTRY(230),
I40E_PTT_UNUSED_ENTRY(231), IAVF_PTT_UNUSED_ENTRY(231),
I40E_PTT_UNUSED_ENTRY(232), IAVF_PTT_UNUSED_ENTRY(232),
I40E_PTT_UNUSED_ENTRY(233), IAVF_PTT_UNUSED_ENTRY(233),
I40E_PTT_UNUSED_ENTRY(234), IAVF_PTT_UNUSED_ENTRY(234),
I40E_PTT_UNUSED_ENTRY(235), IAVF_PTT_UNUSED_ENTRY(235),
I40E_PTT_UNUSED_ENTRY(236), IAVF_PTT_UNUSED_ENTRY(236),
I40E_PTT_UNUSED_ENTRY(237), IAVF_PTT_UNUSED_ENTRY(237),
I40E_PTT_UNUSED_ENTRY(238), IAVF_PTT_UNUSED_ENTRY(238),
I40E_PTT_UNUSED_ENTRY(239), IAVF_PTT_UNUSED_ENTRY(239),
I40E_PTT_UNUSED_ENTRY(240), IAVF_PTT_UNUSED_ENTRY(240),
I40E_PTT_UNUSED_ENTRY(241), IAVF_PTT_UNUSED_ENTRY(241),
I40E_PTT_UNUSED_ENTRY(242), IAVF_PTT_UNUSED_ENTRY(242),
I40E_PTT_UNUSED_ENTRY(243), IAVF_PTT_UNUSED_ENTRY(243),
I40E_PTT_UNUSED_ENTRY(244), IAVF_PTT_UNUSED_ENTRY(244),
I40E_PTT_UNUSED_ENTRY(245), IAVF_PTT_UNUSED_ENTRY(245),
I40E_PTT_UNUSED_ENTRY(246), IAVF_PTT_UNUSED_ENTRY(246),
I40E_PTT_UNUSED_ENTRY(247), IAVF_PTT_UNUSED_ENTRY(247),
I40E_PTT_UNUSED_ENTRY(248), IAVF_PTT_UNUSED_ENTRY(248),
I40E_PTT_UNUSED_ENTRY(249), IAVF_PTT_UNUSED_ENTRY(249),
I40E_PTT_UNUSED_ENTRY(250), IAVF_PTT_UNUSED_ENTRY(250),
I40E_PTT_UNUSED_ENTRY(251), IAVF_PTT_UNUSED_ENTRY(251),
I40E_PTT_UNUSED_ENTRY(252), IAVF_PTT_UNUSED_ENTRY(252),
I40E_PTT_UNUSED_ENTRY(253), IAVF_PTT_UNUSED_ENTRY(253),
I40E_PTT_UNUSED_ENTRY(254), IAVF_PTT_UNUSED_ENTRY(254),
I40E_PTT_UNUSED_ENTRY(255) IAVF_PTT_UNUSED_ENTRY(255)
}; };
/** /**
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_OSDEP_H_ #ifndef _IAVF_OSDEP_H_
#define _I40E_OSDEP_H_ #define _IAVF_OSDEP_H_
#include <linux/types.h> #include <linux/types.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
...@@ -27,26 +27,26 @@ ...@@ -27,26 +27,26 @@
#define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT) #define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT)
/* memory allocation tracking */ /* memory allocation tracking */
struct i40e_dma_mem { struct iavf_dma_mem {
void *va; void *va;
dma_addr_t pa; dma_addr_t pa;
u32 size; u32 size;
}; };
#define i40e_allocate_dma_mem(h, m, unused, s, a) \ #define iavf_allocate_dma_mem(h, m, unused, s, a) \
iavf_allocate_dma_mem_d(h, m, s, a) iavf_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m) #define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
struct i40e_virt_mem { struct iavf_virt_mem {
void *va; void *va;
u32 size; u32 size;
}; };
#define i40e_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s) #define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m) #define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
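For illustration only (not part of this commit), a minimal sketch of how the wrappers above are consumed; the helper name is hypothetical, the memory-type argument (i40e_mem_atq_ring here) is simply dropped by the wrapper macro, and 4096 is an assumed alignment:

static iavf_status iavf_sketch_alloc_buf(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem, u32 size)
{
	iavf_status ret;

	/* the third (memory type) argument is discarded by the macro above */
	ret = iavf_allocate_dma_mem(hw, mem, i40e_mem_atq_ring, size, 4096);
	if (ret)
		return ret;

	/* hardware is given mem->pa; the driver writes through mem->va */
	memset(mem->va, 0, mem->size);

	iavf_free_dma_mem(hw, mem);
	return 0;
}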
#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__) #define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__attribute__ ((format(gnu_printf, 3, 4))); __attribute__ ((format(gnu_printf, 3, 4)));
typedef enum i40e_status_code iavf_status; typedef enum iavf_status_code iavf_status;
#endif /* _I40E_OSDEP_H_ */ #endif /* _IAVF_OSDEP_H_ */
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_ #ifndef _IAVF_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_ #define _IAVF_PROTOTYPE_H_
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_alloc.h" #include "i40e_alloc.h"
...@@ -48,16 +48,15 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, ...@@ -48,16 +48,15 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct i40e_aqc_get_set_rss_key_data *key); struct i40e_aqc_get_set_rss_key_data *key);
iavf_status i40e_set_mac_type(struct iavf_hw *hw); iavf_status iavf_set_mac_type(struct iavf_hw *hw);
extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[]; extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{ {
return iavf_ptype_lookup[ptype]; return iavf_ptype_lookup[ptype];
} }
/* i40e_common for VF drivers*/
void iavf_vf_parse_hw_config(struct iavf_hw *hw, void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg); struct virtchnl_vf_resource *msg);
iavf_status iavf_vf_reset(struct iavf_hw *hw); iavf_status iavf_vf_reset(struct iavf_hw *hw);
...@@ -65,4 +64,4 @@ iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, ...@@ -65,4 +64,4 @@ iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
enum virtchnl_ops v_opcode, enum virtchnl_ops v_opcode,
iavf_status v_retval, u8 *msg, u16 msglen, iavf_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details); struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */ #endif /* _IAVF_PROTOTYPE_H_ */
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_STATUS_H_ #ifndef _IAVF_STATUS_H_
#define _I40E_STATUS_H_ #define _IAVF_STATUS_H_
/* Error Codes */ /* Error Codes */
enum i40e_status_code { enum iavf_status_code {
I40E_SUCCESS = 0, I40E_SUCCESS = 0,
I40E_ERR_NVM = -1, I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2, I40E_ERR_NVM_CHECKSUM = -2,
...@@ -75,4 +75,4 @@ enum i40e_status_code { ...@@ -75,4 +75,4 @@ enum i40e_status_code {
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
}; };
#endif /* _I40E_STATUS_H_ */ #endif /* _IAVF_STATUS_H_ */
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_ #ifndef _IAVF_TYPE_H_
#define _I40E_TYPE_H_ #define _IAVF_TYPE_H_
#include "i40e_status.h" #include "i40e_status.h"
#include "i40e_osdep.h" #include "i40e_osdep.h"
...@@ -10,14 +10,14 @@ ...@@ -10,14 +10,14 @@
#include "i40e_adminq.h" #include "i40e_adminq.h"
#include "i40e_devids.h" #include "i40e_devids.h"
#define I40E_RXQ_CTX_DBUFF_SHIFT 7 #define IAVF_RXQ_CTX_DBUFF_SHIFT 7
/* I40E_MASK is a macro used on 32 bit registers */ /* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) #define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
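As a quick illustration (both names below are invented), a three-bit field at bit offset 19 of a 32-bit register would be described with this macro as:

#define IAVF_SKETCH_FIELD_SHIFT	19
#define IAVF_SKETCH_FIELD_MASK	IAVF_MASK(0x7, IAVF_SKETCH_FIELD_SHIFT)	/* bits 21:19 */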
#define I40E_MAX_VSI_QP 16 #define IAVF_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3 #define IAVF_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5 #define IAVF_MAX_CHAINED_RX_BUFFERS 5
/* forward declaration */ /* forward declaration */
struct iavf_hw; struct iavf_hw;
...@@ -25,40 +25,40 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *); ...@@ -25,40 +25,40 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *);
/* Data type manipulation macros. */ /* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \ #define IAVF_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1) (R)->next_to_clean - (R)->next_to_use - 1)
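A worked example may help: with count = 512, next_to_use = 10 and next_to_clean = 4, the clean index has not wrapped past the use index, so the macro yields 512 + 4 - 10 - 1 = 505 free descriptors. A hedged sketch of a caller (the function name is invented; it assumes struct iavf_ring exposes the count/next_to_use/next_to_clean members the macro reads):

static bool iavf_sketch_ring_has_room(struct iavf_ring *ring, unsigned int needed)
{
	/* e.g. count = 512, next_to_use = 10, next_to_clean = 4 -> 505 free */
	return IAVF_DESC_UNUSED(ring) >= needed;
}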
/* bitfields for Tx queue mapping in QTX_CTL */ /* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0 #define IAVF_QTX_CTL_VF_QUEUE 0x0
#define I40E_QTX_CTL_VM_QUEUE 0x1 #define IAVF_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2 #define IAVF_QTX_CTL_PF_QUEUE 0x2
/* debug masks - set these bits in hw->debug_mask to control output */ /* debug masks - set these bits in hw->debug_mask to control output */
enum iavf_debug_mask { enum iavf_debug_mask {
I40E_DEBUG_INIT = 0x00000001, IAVF_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002, IAVF_DEBUG_RELEASE = 0x00000002,
I40E_DEBUG_LINK = 0x00000010, IAVF_DEBUG_LINK = 0x00000010,
I40E_DEBUG_PHY = 0x00000020, IAVF_DEBUG_PHY = 0x00000020,
I40E_DEBUG_HMC = 0x00000040, IAVF_DEBUG_HMC = 0x00000040,
I40E_DEBUG_NVM = 0x00000080, IAVF_DEBUG_NVM = 0x00000080,
I40E_DEBUG_LAN = 0x00000100, IAVF_DEBUG_LAN = 0x00000100,
I40E_DEBUG_FLOW = 0x00000200, IAVF_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400, IAVF_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800, IAVF_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000, IAVF_DEBUG_FD = 0x00001000,
I40E_DEBUG_PACKAGE = 0x00002000, IAVF_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000, IAVF_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
I40E_DEBUG_AQ_COMMAND = 0x06000000, IAVF_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000, IAVF_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000, IAVF_DEBUG_USER = 0xF0000000,
I40E_DEBUG_ALL = 0xFFFFFFFF IAVF_DEBUG_ALL = 0xFFFFFFFF
}; };
/* These are structs for managing the hardware information and the operations. /* These are structs for managing the hardware information and the operations.
...@@ -69,35 +69,35 @@ enum iavf_debug_mask { ...@@ -69,35 +69,35 @@ enum iavf_debug_mask {
* the Firmware and AdminQ are intended to insulate the driver from most of the * the Firmware and AdminQ are intended to insulate the driver from most of the
* future changes, but these structures will also do part of the job. * future changes, but these structures will also do part of the job.
*/ */
enum i40e_mac_type { enum iavf_mac_type {
I40E_MAC_UNKNOWN = 0, IAVF_MAC_UNKNOWN = 0,
I40E_MAC_XL710, IAVF_MAC_XL710,
I40E_MAC_VF, IAVF_MAC_VF,
I40E_MAC_X722, IAVF_MAC_X722,
I40E_MAC_X722_VF, IAVF_MAC_X722_VF,
I40E_MAC_GENERIC, IAVF_MAC_GENERIC,
}; };
enum i40e_vsi_type { enum iavf_vsi_type {
I40E_VSI_MAIN = 0, IAVF_VSI_MAIN = 0,
I40E_VSI_VMDQ1 = 1, IAVF_VSI_VMDQ1 = 1,
I40E_VSI_VMDQ2 = 2, IAVF_VSI_VMDQ2 = 2,
I40E_VSI_CTRL = 3, IAVF_VSI_CTRL = 3,
I40E_VSI_FCOE = 4, IAVF_VSI_FCOE = 4,
I40E_VSI_MIRROR = 5, IAVF_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6, IAVF_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7, IAVF_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN IAVF_VSI_TYPE_UNKNOWN
}; };
enum i40e_queue_type { enum iavf_queue_type {
I40E_QUEUE_TYPE_RX = 0, IAVF_QUEUE_TYPE_RX = 0,
I40E_QUEUE_TYPE_TX, IAVF_QUEUE_TYPE_TX,
I40E_QUEUE_TYPE_PE_CEQ, IAVF_QUEUE_TYPE_PE_CEQ,
I40E_QUEUE_TYPE_UNKNOWN IAVF_QUEUE_TYPE_UNKNOWN
}; };
#define I40E_HW_CAP_MAX_GPIO 30 #define IAVF_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */ /* Capabilities of a PF or a VF or the whole device */
struct iavf_hw_capabilities { struct iavf_hw_capabilities {
bool dcb; bool dcb;
...@@ -109,8 +109,8 @@ struct iavf_hw_capabilities { ...@@ -109,8 +109,8 @@ struct iavf_hw_capabilities {
u32 num_msix_vectors_vf; u32 num_msix_vectors_vf;
}; };
struct i40e_mac_info { struct iavf_mac_info {
enum i40e_mac_type type; enum iavf_mac_type type;
u8 addr[ETH_ALEN]; u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN];
u8 san_addr[ETH_ALEN]; u8 san_addr[ETH_ALEN];
...@@ -118,45 +118,45 @@ struct i40e_mac_info { ...@@ -118,45 +118,45 @@ struct i40e_mac_info {
}; };
/* PCI bus types */ /* PCI bus types */
enum i40e_bus_type { enum iavf_bus_type {
i40e_bus_type_unknown = 0, iavf_bus_type_unknown = 0,
i40e_bus_type_pci, iavf_bus_type_pci,
i40e_bus_type_pcix, iavf_bus_type_pcix,
i40e_bus_type_pci_express, iavf_bus_type_pci_express,
i40e_bus_type_reserved iavf_bus_type_reserved
}; };
/* PCI bus speeds */ /* PCI bus speeds */
enum i40e_bus_speed { enum iavf_bus_speed {
i40e_bus_speed_unknown = 0, iavf_bus_speed_unknown = 0,
i40e_bus_speed_33 = 33, iavf_bus_speed_33 = 33,
i40e_bus_speed_66 = 66, iavf_bus_speed_66 = 66,
i40e_bus_speed_100 = 100, iavf_bus_speed_100 = 100,
i40e_bus_speed_120 = 120, iavf_bus_speed_120 = 120,
i40e_bus_speed_133 = 133, iavf_bus_speed_133 = 133,
i40e_bus_speed_2500 = 2500, iavf_bus_speed_2500 = 2500,
i40e_bus_speed_5000 = 5000, iavf_bus_speed_5000 = 5000,
i40e_bus_speed_8000 = 8000, iavf_bus_speed_8000 = 8000,
i40e_bus_speed_reserved iavf_bus_speed_reserved
}; };
/* PCI bus widths */ /* PCI bus widths */
enum i40e_bus_width { enum iavf_bus_width {
i40e_bus_width_unknown = 0, iavf_bus_width_unknown = 0,
i40e_bus_width_pcie_x1 = 1, iavf_bus_width_pcie_x1 = 1,
i40e_bus_width_pcie_x2 = 2, iavf_bus_width_pcie_x2 = 2,
i40e_bus_width_pcie_x4 = 4, iavf_bus_width_pcie_x4 = 4,
i40e_bus_width_pcie_x8 = 8, iavf_bus_width_pcie_x8 = 8,
i40e_bus_width_32 = 32, iavf_bus_width_32 = 32,
i40e_bus_width_64 = 64, iavf_bus_width_64 = 64,
i40e_bus_width_reserved iavf_bus_width_reserved
}; };
/* Bus parameters */ /* Bus parameters */
struct i40e_bus_info { struct iavf_bus_info {
enum i40e_bus_speed speed; enum iavf_bus_speed speed;
enum i40e_bus_width width; enum iavf_bus_width width;
enum i40e_bus_type type; enum iavf_bus_type type;
u16 func; u16 func;
u16 device; u16 device;
...@@ -164,16 +164,15 @@ struct i40e_bus_info { ...@@ -164,16 +164,15 @@ struct i40e_bus_info {
u16 bus_id; u16 bus_id;
}; };
#define I40E_MAX_TRAFFIC_CLASS 8 #define IAVF_MAX_USER_PRIORITY 8
#define I40E_MAX_USER_PRIORITY 8
/* Port hardware description */ /* Port hardware description */
struct iavf_hw { struct iavf_hw {
u8 __iomem *hw_addr; u8 __iomem *hw_addr;
void *back; void *back;
/* subsystem structs */ /* subsystem structs */
struct i40e_mac_info mac; struct iavf_mac_info mac;
struct i40e_bus_info bus; struct iavf_bus_info bus;
/* pci info */ /* pci info */
u16 device_id; u16 device_id;
...@@ -186,14 +185,14 @@ struct iavf_hw { ...@@ -186,14 +185,14 @@ struct iavf_hw {
struct iavf_hw_capabilities dev_caps; struct iavf_hw_capabilities dev_caps;
/* Admin Queue info */ /* Admin Queue info */
struct i40e_adminq_info aq; struct iavf_adminq_info aq;
/* debug mask */ /* debug mask */
u32 debug_mask; u32 debug_mask;
char err_str[16]; char err_str[16];
}; };
struct i40e_driver_version { struct iavf_driver_version {
u8 major_version; u8 major_version;
u8 minor_version; u8 minor_version;
u8 build_version; u8 build_version;
...@@ -202,7 +201,7 @@ struct i40e_driver_version { ...@@ -202,7 +201,7 @@ struct i40e_driver_version {
}; };
/* RX Descriptors */ /* RX Descriptors */
union i40e_16byte_rx_desc { union iavf_16byte_rx_desc {
struct { struct {
__le64 pkt_addr; /* Packet buffer address */ __le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */ __le64 hdr_addr; /* Header buffer address */
...@@ -229,7 +228,7 @@ union i40e_16byte_rx_desc { ...@@ -229,7 +228,7 @@ union i40e_16byte_rx_desc {
} wb; /* writeback */ } wb; /* writeback */
}; };
union i40e_32byte_rx_desc { union iavf_32byte_rx_desc {
struct { struct {
__le64 pkt_addr; /* Packet buffer address */ __le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */ __le64 hdr_addr; /* Header buffer address */
...@@ -278,7 +277,7 @@ union i40e_32byte_rx_desc { ...@@ -278,7 +277,7 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */ } wb; /* writeback */
}; };
enum i40e_rx_desc_status_bits { enum iavf_rx_desc_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
IAVF_RX_DESC_STATUS_DD_SHIFT = 0, IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
IAVF_RX_DESC_STATUS_EOF_SHIFT = 1, IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
...@@ -302,29 +301,29 @@ enum i40e_rx_desc_status_bits { ...@@ -302,29 +301,29 @@ enum i40e_rx_desc_status_bits {
IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */ IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
}; };
#define I40E_RXD_QW1_STATUS_SHIFT 0 #define IAVF_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \ #define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT) << IAVF_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT #define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ #define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT #define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ #define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \
BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values { enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_NO_DATA = 0, IAVF_RX_DESC_FLTSTAT_NO_DATA = 0,
IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
IAVF_RX_DESC_FLTSTAT_RSV = 2, IAVF_RX_DESC_FLTSTAT_RSV = 2,
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3, IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
}; };
#define I40E_RXD_QW1_ERROR_SHIFT 19 #define IAVF_RXD_QW1_ERROR_SHIFT 19
#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) #define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
enum i40e_rx_desc_error_bits { enum iavf_rx_desc_error_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
IAVF_RX_DESC_ERROR_RXE_SHIFT = 0, IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1, IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
...@@ -337,7 +336,7 @@ enum i40e_rx_desc_error_bits { ...@@ -337,7 +336,7 @@ enum i40e_rx_desc_error_bits {
IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7 IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
}; };
enum i40e_rx_desc_error_l3l4e_fcoe_masks { enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0, IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1, IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
IAVF_RX_DESC_ERROR_L3L4E_FC = 2, IAVF_RX_DESC_ERROR_L3L4E_FC = 2,
...@@ -345,40 +344,40 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks { ...@@ -345,40 +344,40 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
}; };
#define I40E_RXD_QW1_PTYPE_SHIFT 30 #define IAVF_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) #define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
/* Packet type non-ip values */ /* Packet type non-ip values */
enum i40e_rx_l2_ptype { enum iavf_rx_l2_ptype {
I40E_RX_PTYPE_L2_RESERVED = 0, IAVF_RX_PTYPE_L2_RESERVED = 0,
I40E_RX_PTYPE_L2_MAC_PAY2 = 1, IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
I40E_RX_PTYPE_L2_FIP_PAY2 = 3, IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
I40E_RX_PTYPE_L2_OUI_PAY2 = 4, IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
I40E_RX_PTYPE_L2_ECP_PAY2 = 7, IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
I40E_RX_PTYPE_L2_EVB_PAY2 = 8, IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
I40E_RX_PTYPE_L2_QCN_PAY2 = 9, IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
I40E_RX_PTYPE_L2_ARP = 11, IAVF_RX_PTYPE_L2_ARP = 11,
I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
}; };
struct i40e_rx_ptype_decoded { struct iavf_rx_ptype_decoded {
u32 ptype:8; u32 ptype:8;
u32 known:1; u32 known:1;
u32 outer_ip:1; u32 outer_ip:1;
...@@ -391,64 +390,64 @@ struct i40e_rx_ptype_decoded { ...@@ -391,64 +390,64 @@ struct i40e_rx_ptype_decoded {
u32 payload_layer:3; u32 payload_layer:3;
}; };
enum i40e_rx_ptype_outer_ip { enum iavf_rx_ptype_outer_ip {
I40E_RX_PTYPE_OUTER_L2 = 0, IAVF_RX_PTYPE_OUTER_L2 = 0,
I40E_RX_PTYPE_OUTER_IP = 1 IAVF_RX_PTYPE_OUTER_IP = 1
}; };
enum i40e_rx_ptype_outer_ip_ver { enum iavf_rx_ptype_outer_ip_ver {
I40E_RX_PTYPE_OUTER_NONE = 0, IAVF_RX_PTYPE_OUTER_NONE = 0,
I40E_RX_PTYPE_OUTER_IPV4 = 0, IAVF_RX_PTYPE_OUTER_IPV4 = 0,
I40E_RX_PTYPE_OUTER_IPV6 = 1 IAVF_RX_PTYPE_OUTER_IPV6 = 1
}; };
enum i40e_rx_ptype_outer_fragmented { enum iavf_rx_ptype_outer_fragmented {
I40E_RX_PTYPE_NOT_FRAG = 0, IAVF_RX_PTYPE_NOT_FRAG = 0,
I40E_RX_PTYPE_FRAG = 1 IAVF_RX_PTYPE_FRAG = 1
}; };
enum i40e_rx_ptype_tunnel_type { enum iavf_rx_ptype_tunnel_type {
I40E_RX_PTYPE_TUNNEL_NONE = 0, IAVF_RX_PTYPE_TUNNEL_NONE = 0,
I40E_RX_PTYPE_TUNNEL_IP_IP = 1, IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
}; };
enum i40e_rx_ptype_tunnel_end_prot { enum iavf_rx_ptype_tunnel_end_prot {
I40E_RX_PTYPE_TUNNEL_END_NONE = 0, IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
}; };
enum i40e_rx_ptype_inner_prot { enum iavf_rx_ptype_inner_prot {
I40E_RX_PTYPE_INNER_PROT_NONE = 0, IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
I40E_RX_PTYPE_INNER_PROT_UDP = 1, IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
I40E_RX_PTYPE_INNER_PROT_TCP = 2, IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
I40E_RX_PTYPE_INNER_PROT_SCTP = 3, IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
I40E_RX_PTYPE_INNER_PROT_ICMP = 4, IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
}; };
enum i40e_rx_ptype_payload_layer { enum iavf_rx_ptype_payload_layer {
I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
}; };
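For illustration (not part of this commit), a sketch of how the decoded ptype fields above are typically consulted; the function name is invented, decode_rx_desc_ptype comes from iavf_prototype.h, and the inner_prot member is assumed to be among the elided fields of struct iavf_rx_ptype_decoded:

static bool iavf_sketch_ptype_is_tcp(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	/* unknown ptypes carry no usable protocol information */
	if (!decoded.known)
		return false;

	return decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
	       decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_TCP;
}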
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ #define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 #define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ #define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
I40E_RXD_QW1_LENGTH_HBUF_SHIFT) IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) #define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)
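Illustratively, the status/error/ptype/length shift-mask pairs above decode qword 1 of a writeback descriptor roughly as follows (a hedged sketch; the function name is invented):

static void iavf_sketch_decode_rx_qw1(u64 qword1)
{
	u32 status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT;
	u32 error = (qword1 & IAVF_RXD_QW1_ERROR_MASK) >> IAVF_RXD_QW1_ERROR_SHIFT;
	u32 ptype = (qword1 & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
	u32 size = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
		   IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;

	/* the descriptor is only valid once the DD (descriptor done) bit is set */
	if (status & BIT(IAVF_RX_DESC_STATUS_DD_SHIFT))
		pr_debug("ptype %u, %u byte packet, error bits %#x\n",
			 ptype, size, error);
}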
enum i40e_rx_desc_ext_status_bits { enum iavf_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
...@@ -459,7 +458,7 @@ enum i40e_rx_desc_ext_status_bits { ...@@ -459,7 +458,7 @@ enum i40e_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
}; };
enum i40e_rx_desc_pe_status_bits { enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
...@@ -472,47 +471,47 @@ enum i40e_rx_desc_pe_status_bits { ...@@ -472,47 +471,47 @@ enum i40e_rx_desc_pe_status_bits {
IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
}; };
#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 #define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ #define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 #define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ #define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
enum i40e_rx_prog_status_desc_status_bits { enum iavf_rx_prog_status_desc_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
}; };
enum i40e_rx_prog_status_desc_prog_id_masks { enum iavf_rx_prog_status_desc_prog_id_masks {
I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
}; };
enum i40e_rx_prog_status_desc_error_bits { enum iavf_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
}; };
/* TX Descriptor */ /* TX Descriptor */
struct i40e_tx_desc { struct iavf_tx_desc {
__le64 buffer_addr; /* Address of descriptor's data buf */ __le64 buffer_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz; __le64 cmd_type_offset_bsz;
}; };
#define I40E_TXD_QW1_DTYPE_SHIFT 0 #define IAVF_TXD_QW1_DTYPE_SHIFT 0
#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) #define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
enum i40e_tx_desc_dtype_value { enum iavf_tx_desc_dtype_value {
IAVF_TX_DESC_DTYPE_DATA = 0x0, IAVF_TX_DESC_DTYPE_DATA = 0x0,
IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
IAVF_TX_DESC_DTYPE_CONTEXT = 0x1, IAVF_TX_DESC_DTYPE_CONTEXT = 0x1,
...@@ -525,10 +524,10 @@ enum i40e_tx_desc_dtype_value { ...@@ -525,10 +524,10 @@ enum i40e_tx_desc_dtype_value {
IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF
}; };
#define I40E_TXD_QW1_CMD_SHIFT 4 #define IAVF_TXD_QW1_CMD_SHIFT 4
#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) #define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
enum i40e_tx_desc_cmd_bits { enum iavf_tx_desc_cmd_bits {
IAVF_TX_DESC_CMD_EOP = 0x0001, IAVF_TX_DESC_CMD_EOP = 0x0001,
IAVF_TX_DESC_CMD_RS = 0x0002, IAVF_TX_DESC_CMD_RS = 0x0002,
IAVF_TX_DESC_CMD_ICRC = 0x0004, IAVF_TX_DESC_CMD_ICRC = 0x0004,
...@@ -549,154 +548,130 @@ enum i40e_tx_desc_cmd_bits { ...@@ -549,154 +548,130 @@ enum i40e_tx_desc_cmd_bits {
IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
}; };
#define I40E_TXD_QW1_OFFSET_SHIFT 16 #define IAVF_TXD_QW1_OFFSET_SHIFT 16
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ #define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT) IAVF_TXD_QW1_OFFSET_SHIFT)
enum i40e_tx_desc_length_fields { enum iavf_tx_desc_length_fields {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
}; };
#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 #define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ #define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
I40E_TXD_QW1_TX_BUF_SZ_SHIFT) IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
#define I40E_TXD_QW1_L2TAG1_SHIFT 48 #define IAVF_TXD_QW1_L2TAG1_SHIFT 48
#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) #define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
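For illustration only, the DTYPE/CMD/OFFSET/BUF_SZ/L2TAG1 fields above compose the second quadword of a data descriptor roughly as below (a sketch with an invented name, not the driver's actual helper):

static __le64 iavf_sketch_build_tx_qw1(u32 td_cmd, u32 td_offset,
				       unsigned int size, u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}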
/* Context descriptors */ /* Context descriptors */
struct i40e_tx_context_desc { struct iavf_tx_context_desc {
__le32 tunneling_params; __le32 tunneling_params;
__le16 l2tag2; __le16 l2tag2;
__le16 rsvd; __le16 rsvd;
__le64 type_cmd_tso_mss; __le64 type_cmd_tso_mss;
}; };
#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 #define IAVF_TXD_CTX_QW1_CMD_SHIFT 4
#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) #define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
enum i40e_tx_ctx_desc_cmd_bits { enum iavf_tx_ctx_desc_cmd_bits {
I40E_TX_CTX_DESC_TSO = 0x01, IAVF_TX_CTX_DESC_TSO = 0x01,
I40E_TX_CTX_DESC_TSYN = 0x02, IAVF_TX_CTX_DESC_TSYN = 0x02,
I40E_TX_CTX_DESC_IL2TAG2 = 0x04, IAVF_TX_CTX_DESC_IL2TAG2 = 0x04,
I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
I40E_TX_CTX_DESC_SWPE = 0x40 IAVF_TX_CTX_DESC_SWPE = 0x40
}; };
#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
I40E_TXD_CTX_QW1_MSS_SHIFT)
#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
enum i40e_tx_ctx_desc_eipt_offload {
I40E_TX_CTX_EXT_IP_NONE = 0x0,
I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
I40E_TX_CTX_EXT_IP_IPV4 = 0x3
};
#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
I40E_TXD_CTX_QW0_NATLEN_SHIFT)
#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
/* Packet Classifier Types for filters */ /* Packet Classifier Types for filters */
enum i40e_filter_pctype { enum iavf_filter_pctype {
/* Note: Values 0-28 are reserved for future use. /* Note: Values 0-28 are reserved for future use.
* Values 29, 30, and 32 are not supported on XL710 and X710. * Values 29, 30, and 32 are not supported on XL710 and X710.
*/ */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
/* Note: Values 37-38 are reserved for future use. /* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710. * Value 39, 40, 42 are not supported on XL710 and X710.
*/ */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
/* Note: Value 47 is reserved for future use */ /* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48, IAVF_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49, IAVF_FILTER_PCTYPE_FCOE_RX = 49,
I40E_FILTER_PCTYPE_FCOE_OTHER = 50, IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
/* Note: Values 51-62 are reserved for future use */ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
}; };
#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
struct i40e_vsi_context { #define IAVF_TXD_CTX_QW1_MSS_SHIFT 50
u16 seid; #define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
u16 uplink_seid; IAVF_TXD_CTX_QW1_MSS_SHIFT)
u16 vsi_number;
u16 vsis_allocated; #define IAVF_TXD_CTX_QW1_VSI_SHIFT 50
u16 vsis_unallocated; #define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)
u16 flags;
u8 pf_num;
u8 vf_num;
u8 connection_type;
struct i40e_aqc_vsi_properties_data info;
};
struct i40e_veb_context { #define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
u16 seid; #define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
u16 uplink_seid; IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)
u16 veb_number;
u16 vebs_allocated; enum iavf_tx_ctx_desc_eipt_offload {
u16 vebs_unallocated; IAVF_TX_CTX_EXT_IP_NONE = 0x0,
u16 flags; IAVF_TX_CTX_EXT_IP_IPV6 = 0x1,
struct i40e_aqc_get_veb_parameters_completion info; IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
IAVF_TX_CTX_EXT_IP_IPV4 = 0x3
}; };
#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9
#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)
#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \
BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK
#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12
#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
IAVF_TXD_CTX_QW0_NATLEN_SHIFT)
#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19
#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
IAVF_TXD_CTX_QW0_DECTTL_SHIFT)
#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
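Similarly, a hedged sketch (invented name) of how the context-descriptor DTYPE/CMD/TSO_LEN/MSS fields above combine into type_cmd_tso_mss for a TSO request:

static __le64 iavf_sketch_build_ctx_qw1(u32 tso_len, u32 mss)
{
	u64 qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;

	qw1 |= (u64)IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
	qw1 |= (u64)tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT;
	qw1 |= (u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT;

	return cpu_to_le64(qw1);
}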
/* Statistics collected by each port, VSI, VEB, and S-channel */ /* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats { struct iavf_eth_stats {
u64 rx_bytes; /* gorc */ u64 rx_bytes; /* gorc */
u64 rx_unicast; /* uprc */ u64 rx_unicast; /* uprc */
u64 rx_multicast; /* mprc */ u64 rx_multicast; /* mprc */
...@@ -710,4 +685,4 @@ struct i40e_eth_stats { ...@@ -710,4 +685,4 @@ struct i40e_eth_stats {
u64 tx_discards; /* tdpc */ u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */ u64 tx_errors; /* tepc */
}; };
#endif /* _I40E_TYPE_H_ */ #endif /* _IAVF_TYPE_H_ */
...@@ -43,19 +43,19 @@ ...@@ -43,19 +43,19 @@
/* VSI state flags shared with common code */ /* VSI state flags shared with common code */
enum iavf_vsi_state_t { enum iavf_vsi_state_t {
__I40E_VSI_DOWN, __IAVF_VSI_DOWN,
/* This must be last as it determines the size of the BITMAP */ /* This must be last as it determines the size of the BITMAP */
__I40E_VSI_STATE_SIZE__, __IAVF_VSI_STATE_SIZE__,
}; };
/* dummy struct to make common code less painful */ /* dummy struct to make common code less painful */
struct i40e_vsi { struct iavf_vsi {
struct iavf_adapter *back; struct iavf_adapter *back;
struct net_device *netdev; struct net_device *netdev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid; u16 seid;
u16 id; u16 id;
DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector; int base_vector;
u16 work_limit; u16 work_limit;
u16 qs_handle; u16 qs_handle;
...@@ -77,10 +77,10 @@ struct i40e_vsi { ...@@ -77,10 +77,10 @@ struct i40e_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define IAVF_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \ #define IAVF_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i])) (&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
#define IAVF_MAX_REQ_QUEUES 4 #define IAVF_MAX_REQ_QUEUES 4
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
...@@ -90,12 +90,12 @@ struct i40e_vsi { ...@@ -90,12 +90,12 @@ struct i40e_vsi {
/* MAX_MSIX_Q_VECTORS of these are allocated, /* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector. * but we only use one per queue-specific vector.
*/ */
struct i40e_q_vector { struct iavf_q_vector {
struct iavf_adapter *adapter; struct iavf_adapter *adapter;
struct i40e_vsi *vsi; struct iavf_vsi *vsi;
struct napi_struct napi; struct napi_struct napi;
struct i40e_ring_container rx; struct iavf_ring_container rx;
struct i40e_ring_container tx; struct iavf_ring_container tx;
u32 ring_mask; u32 ring_mask;
u8 itr_countdown; /* when 0 should adjust adaptive ITR */ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
u8 num_ringpairs; /* total number of ring pairs in vector */ u8 num_ringpairs; /* total number of ring pairs in vector */
...@@ -119,13 +119,6 @@ struct i40e_q_vector { ...@@ -119,13 +119,6 @@ struct i40e_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1) (R)->next_to_clean - (R)->next_to_use - 1)
#define IAVF_RX_DESC_ADV(R, i) \
(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
#define IAVF_TX_DESC_ADV(R, i) \
(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
#define IAVF_TX_CTXTDESC_ADV(R, i) \
(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
#define OTHER_VECTOR 1 #define OTHER_VECTOR 1
#define NONQ_VECS (OTHER_VECTOR) #define NONQ_VECS (OTHER_VECTOR)
...@@ -209,7 +202,7 @@ enum iavf_critical_section_t { ...@@ -209,7 +202,7 @@ enum iavf_critical_section_t {
#define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\ #define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\
IAVF_CLOUD_FIELD_IVLAN |\ IAVF_CLOUD_FIELD_IVLAN |\
IAVF_CLOUD_FIELD_TEN_ID) IAVF_CLOUD_FIELD_TEN_ID)
#define IAVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP #define IAVF_CF_FLAGS_IIP IAVF_CLOUD_FIELD_IIP
/* bookkeeping of cloud filters */ /* bookkeeping of cloud filters */
struct iavf_cloud_filter { struct iavf_cloud_filter {
...@@ -229,7 +222,7 @@ struct iavf_adapter { ...@@ -229,7 +222,7 @@ struct iavf_adapter {
struct delayed_work client_task; struct delayed_work client_task;
struct delayed_work init_task; struct delayed_work init_task;
wait_queue_head_t down_waitqueue; wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors; struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list; struct list_head vlan_filter_list;
struct list_head mac_filter_list; struct list_head mac_filter_list;
/* Lock to protect accesses to MAC and VLAN lists */ /* Lock to protect accesses to MAC and VLAN lists */
...@@ -239,12 +232,12 @@ struct iavf_adapter { ...@@ -239,12 +232,12 @@ struct iavf_adapter {
int num_req_queues; int num_req_queues;
/* TX */ /* TX */
struct i40e_ring *tx_rings; struct iavf_ring *tx_rings;
u32 tx_timeout_count; u32 tx_timeout_count;
u32 tx_desc_count; u32 tx_desc_count;
/* RX */ /* RX */
struct i40e_ring *rx_rings; struct iavf_ring *rx_rings;
u64 hw_csum_rx_error; u64 hw_csum_rx_error;
u32 rx_desc_count; u32 rx_desc_count;
int num_msix_vectors; int num_msix_vectors;
...@@ -271,9 +264,7 @@ struct iavf_adapter { ...@@ -271,9 +264,7 @@ struct iavf_adapter {
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
/* duplicates for common code */ /* duplicates for common code */
#define I40E_FLAG_DCB_ENABLED 0 #define IAVF_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED IAVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_LEGACY_RX IAVF_FLAG_LEGACY_RX
/* flags for admin queue service task */ /* flags for admin queue service task */
u32 aq_required; u32 aq_required;
#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0) #define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
...@@ -338,8 +329,8 @@ struct iavf_adapter { ...@@ -338,8 +329,8 @@ struct iavf_adapter {
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1)) ((_a)->pf_version.minor == 1))
u16 msg_enable; u16 msg_enable;
struct i40e_eth_stats current_stats; struct iavf_eth_stats current_stats;
struct i40e_vsi vsi; struct iavf_vsi vsi;
u32 aq_wait_count; u32 aq_wait_count;
/* RSS stuff */ /* RSS stuff */
u64 hena; u64 hena;
...@@ -359,7 +350,7 @@ struct iavf_adapter { ...@@ -359,7 +350,7 @@ struct iavf_adapter {
/* Ethtool Private Flags */ /* Ethtool Private Flags */
/* lan device */ /* lan device, used by client interface */
struct i40e_device { struct i40e_device {
struct list_head list; struct list_head list;
struct iavf_adapter *vf; struct iavf_adapter *vf;
...@@ -382,8 +373,8 @@ void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); ...@@ -382,8 +373,8 @@ void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter); void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter); void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
void i40e_napi_add_all(struct iavf_adapter *adapter); void iavf_napi_add_all(struct iavf_adapter *adapter);
void i40e_napi_del_all(struct iavf_adapter *adapter); void iavf_napi_del_all(struct iavf_adapter *adapter);
int iavf_send_api_ver(struct iavf_adapter *adapter); int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter); int iavf_verify_api_ver(struct iavf_adapter *adapter);
...@@ -416,10 +407,10 @@ int iavf_config_rss(struct iavf_adapter *adapter); ...@@ -416,10 +407,10 @@ int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter);
void iavf_client_subtask(struct iavf_adapter *adapter); void iavf_client_subtask(struct iavf_adapter *adapter);
void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len); void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len);
void iavf_notify_client_l2_params(struct i40e_vsi *vsi); void iavf_notify_client_l2_params(struct iavf_vsi *vsi);
void iavf_notify_client_open(struct i40e_vsi *vsi); void iavf_notify_client_open(struct iavf_vsi *vsi);
void iavf_notify_client_close(struct i40e_vsi *vsi, bool reset); void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset);
void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter);
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
static static
const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
static struct i40e_client *vf_registered_client; static struct i40e_client *vf_registered_client;
static LIST_HEAD(iavf_devices); static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(iavf_device_mutex); static DEFINE_MUTEX(iavf_device_mutex);
static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
...@@ -33,7 +33,7 @@ static struct i40e_ops iavf_lan_ops = { ...@@ -33,7 +33,7 @@ static struct i40e_ops iavf_lan_ops = {
* @params: client param struct * @params: client param struct
**/ **/
static static
void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
{ {
int i; int i;
...@@ -41,7 +41,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) ...@@ -41,7 +41,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
params->mtu = vsi->netdev->mtu; params->mtu = vsi->netdev->mtu;
params->link_up = vsi->back->link_up; params->link_up = vsi->back->link_up;
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) {
params->qos.prio_qos[i].tc = 0; params->qos.prio_qos[i].tc = 0;
params->qos.prio_qos[i].qs_handle = vsi->qs_handle; params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
} }
...@@ -55,7 +55,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) ...@@ -55,7 +55,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
* *
* If there is a client to this VSI, call the client * If there is a client to this VSI, call the client
**/ **/
void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
{ {
struct i40e_client_instance *cinst; struct i40e_client_instance *cinst;
...@@ -79,7 +79,7 @@ void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) ...@@ -79,7 +79,7 @@ void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
* *
* If there is a client to this VSI, call the client * If there is a client to this VSI, call the client
**/ **/
void iavf_notify_client_l2_params(struct i40e_vsi *vsi) void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
{ {
struct i40e_client_instance *cinst; struct i40e_client_instance *cinst;
struct i40e_params params; struct i40e_params params;
...@@ -107,7 +107,7 @@ void iavf_notify_client_l2_params(struct i40e_vsi *vsi) ...@@ -107,7 +107,7 @@ void iavf_notify_client_l2_params(struct i40e_vsi *vsi)
* *
* If there is a client to this netdev, call the client with open * If there is a client to this netdev, call the client with open
**/ **/
void iavf_notify_client_open(struct i40e_vsi *vsi) void iavf_notify_client_open(struct iavf_vsi *vsi)
{ {
struct iavf_adapter *adapter = vsi->back; struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst; struct i40e_client_instance *cinst = adapter->cinst;
...@@ -159,7 +159,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev) ...@@ -159,7 +159,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev)
* *
* If there is a client to this netdev, call the client with close * If there is a client to this netdev, call the client with close
**/ **/
void iavf_notify_client_close(struct i40e_vsi *vsi, bool reset) void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
{ {
struct iavf_adapter *adapter = vsi->back; struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst; struct i40e_client_instance *cinst = adapter->cinst;
...@@ -185,7 +185,7 @@ static struct i40e_client_instance * ...@@ -185,7 +185,7 @@ static struct i40e_client_instance *
iavf_client_add_instance(struct iavf_adapter *adapter) iavf_client_add_instance(struct iavf_adapter *adapter)
{ {
struct i40e_client_instance *cinst = NULL; struct i40e_client_instance *cinst = NULL;
struct i40e_vsi *vsi = &adapter->vsi; struct iavf_vsi *vsi = &adapter->vsi;
struct netdev_hw_addr *mac = NULL; struct netdev_hw_addr *mac = NULL;
struct i40e_params params; struct i40e_params params;
...@@ -295,7 +295,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter) ...@@ -295,7 +295,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)
int ret = 0; int ret = 0;
mutex_lock(&iavf_device_mutex); mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) { list_for_each_entry(ldev, &i40e_devices, list) {
if (ldev->vf == adapter) { if (ldev->vf == adapter) {
ret = -EEXIST; ret = -EEXIST;
goto out; goto out;
...@@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter) ...@@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)
} }
ldev->vf = adapter; ldev->vf = adapter;
INIT_LIST_HEAD(&ldev->list); INIT_LIST_HEAD(&ldev->list);
list_add(&ldev->list, &iavf_devices); list_add(&ldev->list, &i40e_devices);
dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.bus_id, adapter->hw.bus.device,
adapter->hw.bus.func); adapter->hw.bus.func);
...@@ -335,7 +335,7 @@ int iavf_lan_del_device(struct iavf_adapter *adapter) ...@@ -335,7 +335,7 @@ int iavf_lan_del_device(struct iavf_adapter *adapter)
int ret = -ENODEV; int ret = -ENODEV;
mutex_lock(&iavf_device_mutex); mutex_lock(&iavf_device_mutex);
list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) { list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->vf == adapter) { if (ldev->vf == adapter) {
dev_info(&adapter->pdev->dev, dev_info(&adapter->pdev->dev,
"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
...@@ -364,7 +364,7 @@ static void iavf_client_release(struct i40e_client *client) ...@@ -364,7 +364,7 @@ static void iavf_client_release(struct i40e_client *client)
struct iavf_adapter *adapter; struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex); mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) { list_for_each_entry(ldev, &i40e_devices, list) {
adapter = ldev->vf; adapter = ldev->vf;
cinst = adapter->cinst; cinst = adapter->cinst;
if (!cinst) if (!cinst)
...@@ -398,7 +398,7 @@ static void iavf_client_prepare(struct i40e_client *client) ...@@ -398,7 +398,7 @@ static void iavf_client_prepare(struct i40e_client *client)
struct iavf_adapter *adapter; struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex); mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) { list_for_each_entry(ldev, &i40e_devices, list) {
adapter = ldev->vf; adapter = ldev->vf;
/* Signal the watchdog to service the client */ /* Signal the watchdog to service the client */
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
......
...@@ -9,57 +9,51 @@ ...@@ -9,57 +9,51 @@
/* ethtool statistics helpers */ /* ethtool statistics helpers */
/** /**
* struct i40e_stats - definition for an ethtool statistic * struct iavf_stats - definition for an ethtool statistic
* @stat_string: statistic name to display in ethtool -S output * @stat_string: statistic name to display in ethtool -S output
* @sizeof_stat: the sizeof() of the stat; must be no greater than sizeof(u64) * @sizeof_stat: the sizeof() of the stat; must be no greater than sizeof(u64)
* @stat_offset: offsetof() the stat from a base pointer * @stat_offset: offsetof() the stat from a base pointer
* *
* This structure defines a statistic to be added to the ethtool stats buffer. * This structure defines a statistic to be added to the ethtool stats buffer.
* It defines a statistic as an offset from a common base pointer. Stats should * It defines a statistic as an offset from a common base pointer. Stats should
* be defined in constant arrays using the I40E_STAT macro, with every element * be defined in constant arrays using the IAVF_STAT macro, with every element
* of the array using the same _type for calculating the sizeof_stat and * of the array using the same _type for calculating the sizeof_stat and
* stat_offset. * stat_offset.
* *
* The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
* sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
* the i40e_add_ethtool_stat() helper function. * the iavf_add_ethtool_stat() helper function.
* *
* The @stat_string is interpreted as a format string, allowing formatted * The @stat_string is interpreted as a format string, allowing formatted
* values to be inserted while looping over multiple structures for a given * values to be inserted while looping over multiple structures for a given
* statistics array. Thus, every statistic string in an array should have the * statistics array. Thus, every statistic string in an array should have the
* same type and number of format specifiers, to be formatted by variadic * same type and number of format specifiers, to be formatted by variadic
* arguments to the i40e_add_stat_string() helper function. * arguments to the iavf_add_stat_string() helper function.
**/ **/
struct i40e_stats { struct iavf_stats {
char stat_string[ETH_GSTRING_LEN]; char stat_string[ETH_GSTRING_LEN];
int sizeof_stat; int sizeof_stat;
int stat_offset; int stat_offset;
}; };
/* Helper macro to define an i40e_stat structure with proper size and type. /* Helper macro to define an iavf_stat structure with proper size and type.
* Use this when defining constant statistics arrays. Note that @_type expects * Use this when defining constant statistics arrays. Note that @_type expects
* only a type name and is used multiple times. * only a type name and is used multiple times.
*/ */
#define I40E_STAT(_type, _name, _stat) { \ #define IAVF_STAT(_type, _name, _stat) { \
.stat_string = _name, \ .stat_string = _name, \
.sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \ .stat_offset = offsetof(_type, _stat) \
} }
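As a hedged illustration of the pattern described above, an adapter-level stats array might be declared like this (the array name is invented; the members exist in struct iavf_eth_stats in iavf_type.h):

static const struct iavf_stats iavf_sketch_eth_stats[] = {
	IAVF_STAT(struct iavf_eth_stats, "rx_bytes", rx_bytes),
	IAVF_STAT(struct iavf_eth_stats, "rx_unicast", rx_unicast),
	IAVF_STAT(struct iavf_eth_stats, "tx_errors", tx_errors),
};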
/* Helper macro for defining some statistics directly copied from the netdev
* stats structure.
*/
#define I40E_NETDEV_STAT(_net_stat) \
I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
/* Helper macro for defining some statistics related to queues */ /* Helper macro for defining some statistics related to queues */
#define I40E_QUEUE_STAT(_name, _stat) \ #define IAVF_QUEUE_STAT(_name, _stat) \
I40E_STAT(struct i40e_ring, _name, _stat) IAVF_STAT(struct iavf_ring, _name, _stat)
/* Stats associated with a Tx or Rx ring */ /* Stats associated with a Tx or Rx ring */
static const struct i40e_stats i40e_gstrings_queue_stats[] = { static const struct iavf_stats iavf_gstrings_queue_stats[] = {
I40E_QUEUE_STAT("%s-%u.packets", stats.packets), IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
}; };
/** /**
...@@ -69,12 +63,12 @@ static const struct i40e_stats i40e_gstrings_queue_stats[] = { ...@@ -69,12 +63,12 @@ static const struct i40e_stats i40e_gstrings_queue_stats[] = {
* @stat: the stat definition * @stat: the stat definition
* *
* Copies the stat data defined by the pointer and stat structure pair into * Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. Used to implement i40e_add_ethtool_stats and * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
* iavf_add_queue_stats. If the pointer is null, data will be zero'd. * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
*/ */
static void static void
iavf_add_one_ethtool_stat(u64 *data, void *pointer, iavf_add_one_ethtool_stat(u64 *data, void *pointer,
const struct i40e_stats *stat) const struct iavf_stats *stat)
{ {
char *p; char *p;
...@@ -122,7 +116,7 @@ iavf_add_one_ethtool_stat(u64 *data, void *pointer, ...@@ -122,7 +116,7 @@ iavf_add_one_ethtool_stat(u64 *data, void *pointer,
**/ **/
static void static void
__iavf_add_ethtool_stats(u64 **data, void *pointer, __iavf_add_ethtool_stats(u64 **data, void *pointer,
const struct i40e_stats stats[], const struct iavf_stats stats[],
const unsigned int size) const unsigned int size)
{ {
unsigned int i; unsigned int i;
...@@ -132,7 +126,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer, ...@@ -132,7 +126,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
} }
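The per-stat copy performed for every array entry is the one the kernel-doc near the top of this file describes: zero the u64 slot when the source pointer is NULL, otherwise widen the u8/u16/u32/u64 member found at the recorded offset. A rough user-space sketch of that behavior (not the body of iavf_add_one_ethtool_stat from this patch):

	#include <stdint.h>
	#include <string.h>

	static void demo_add_one_stat(uint64_t *data, const void *pointer,
				      int stat_offset, int sizeof_stat)
	{
		const char *p;

		if (!pointer) {
			/* source structure not allocated: report 0 */
			*data = 0;
			return;
		}

		p = (const char *)pointer + stat_offset;
		switch (sizeof_stat) {
		case sizeof(uint64_t):
			memcpy(data, p, sizeof(uint64_t));
			break;
		case sizeof(uint32_t): {
			uint32_t v;

			memcpy(&v, p, sizeof(v));
			*data = v;
			break;
		}
		case sizeof(uint16_t): {
			uint16_t v;

			memcpy(&v, p, sizeof(v));
			*data = v;
			break;
		}
		case sizeof(uint8_t):
			*data = *(const uint8_t *)p;
			break;
		default:
			/* unexpected size; the real helper WARNs here */
			*data = 0;
		}
	}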
/** /**
* i40e_add_ethtool_stats - copy stats into ethtool supplied buffer * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
* @data: ethtool stats buffer * @data: ethtool stats buffer
* @pointer: location where stats are stored * @pointer: location where stats are stored
* @stats: static const array of stat definitions * @stats: static const array of stat definitions
...@@ -144,7 +138,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer, ...@@ -144,7 +138,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* The parameter @stats is evaluated twice, so parameters with side effects * The parameter @stats is evaluated twice, so parameters with side effects
* should be avoided. * should be avoided.
**/ **/
#define i40e_add_ethtool_stats(data, pointer, stats) \ #define iavf_add_ethtool_stats(data, pointer, stats) \
__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
/** /**
...@@ -153,8 +147,8 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer, ...@@ -153,8 +147,8 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy * @ring: the ring to copy
* *
* Queue statistics must be copied while protected by * Queue statistics must be copied while protected by
* u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
* Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data * ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied * pointer. Otherwise safely copy the stats from the ring into the supplied
* buffer and update the data pointer when finished. * buffer and update the data pointer when finished.
...@@ -162,10 +156,10 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer, ...@@ -162,10 +156,10 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* This function expects to be called while under rcu_read_lock(). * This function expects to be called while under rcu_read_lock().
**/ **/
static void static void
iavf_add_queue_stats(u64 **data, struct i40e_ring *ring) iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
{ {
const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
const struct i40e_stats *stats = i40e_gstrings_queue_stats; const struct iavf_stats *stats = iavf_gstrings_queue_stats;
unsigned int start; unsigned int start;
unsigned int i; unsigned int i;
...@@ -185,7 +179,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring) ...@@ -185,7 +179,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring)
} }
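The protection requirement in the kernel-doc above reduces to the usual snapshot-and-retry loop around the ring counters, something of this shape (a sketch that assumes the ring keeps a struct u64_stats_sync syncp, as elsewhere in the i40e family; the actual loop body is elided from this hunk):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	*(*data)++ = packets;
	*(*data)++ = bytes;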
/** /**
* __i40e_add_stat_strings - copy stat strings into ethtool buffer * __iavf_add_stat_strings - copy stat strings into ethtool buffer
* @p: ethtool supplied buffer * @p: ethtool supplied buffer
* @stats: stat definitions array * @stats: stat definitions array
* @size: size of the stats array * @size: size of the stats array
...@@ -193,7 +187,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring) ...@@ -193,7 +187,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring)
* Format and copy the strings described by stats into the buffer pointed at * Format and copy the strings described by stats into the buffer pointed at
* by p. * by p.
**/ **/
static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
const unsigned int size, ...) const unsigned int size, ...)
{ {
unsigned int i; unsigned int i;
...@@ -209,7 +203,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], ...@@ -209,7 +203,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
} }
/** /**
* i40e_add_stat_strings - copy stat strings into ethtool buffer * iavf_add_stat_strings - copy stat strings into ethtool buffer
* @p: ethtool supplied buffer * @p: ethtool supplied buffer
* @stats: stat definitions array * @stats: stat definitions array
* *
...@@ -220,30 +214,30 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], ...@@ -220,30 +214,30 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
* should be avoided. Additionally, stats must be an array such that * should be avoided. Additionally, stats must be an array such that
* ARRAY_SIZE can be called on it. * ARRAY_SIZE can be called on it.
**/ **/
#define i40e_add_stat_strings(p, stats, ...) \ #define iavf_add_stat_strings(p, stats, ...) \
__i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
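Each entry in iavf_gstrings_queue_stats carries exactly one %s and one %u, so the variadic arguments passed to iavf_add_stat_strings() (a queue prefix and an index) expand every template into its final ETH_GSTRING_LEN-sized name. Roughly, using plain snprintf instead of the driver's va_list plumbing:

	char name[ETH_GSTRING_LEN];

	snprintf(name, sizeof(name), "%s-%u.packets", "tx", 0U);	/* "tx-0.packets" */
	snprintf(name, sizeof(name), "%s-%u.bytes", "rx", 3U);	/* "rx-3.bytes" */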
#define IAVF_STAT(_name, _stat) \ #define VF_STAT(_name, _stat) \
I40E_STAT(struct iavf_adapter, _name, _stat) IAVF_STAT(struct iavf_adapter, _name, _stat)
static const struct i40e_stats iavf_gstrings_stats[] = { static const struct iavf_stats iavf_gstrings_stats[] = {
IAVF_STAT("rx_bytes", current_stats.rx_bytes), VF_STAT("rx_bytes", current_stats.rx_bytes),
IAVF_STAT("rx_unicast", current_stats.rx_unicast), VF_STAT("rx_unicast", current_stats.rx_unicast),
IAVF_STAT("rx_multicast", current_stats.rx_multicast), VF_STAT("rx_multicast", current_stats.rx_multicast),
IAVF_STAT("rx_broadcast", current_stats.rx_broadcast), VF_STAT("rx_broadcast", current_stats.rx_broadcast),
IAVF_STAT("rx_discards", current_stats.rx_discards), VF_STAT("rx_discards", current_stats.rx_discards),
IAVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
IAVF_STAT("tx_bytes", current_stats.tx_bytes), VF_STAT("tx_bytes", current_stats.tx_bytes),
IAVF_STAT("tx_unicast", current_stats.tx_unicast), VF_STAT("tx_unicast", current_stats.tx_unicast),
IAVF_STAT("tx_multicast", current_stats.tx_multicast), VF_STAT("tx_multicast", current_stats.tx_multicast),
IAVF_STAT("tx_broadcast", current_stats.tx_broadcast), VF_STAT("tx_broadcast", current_stats.tx_broadcast),
IAVF_STAT("tx_discards", current_stats.tx_discards), VF_STAT("tx_discards", current_stats.tx_discards),
IAVF_STAT("tx_errors", current_stats.tx_errors), VF_STAT("tx_errors", current_stats.tx_errors),
}; };
#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) #define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats)
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
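The ETH_SS_STATS count the driver reports to ethtool has to line up with what the strings and data callbacks below emit: the adapter-level array plus one Tx and one Rx block of queue stats per queue. A hypothetical helper, not taken from this patch, just to make the arithmetic explicit:

	static int demo_stats_count(unsigned int num_queues)
	{
		/* adapter stats + (tx + rx) queue stats for each queue */
		return IAVF_STATS_LEN + 2 * IAVF_QUEUE_STATS_LEN * num_queues;
	}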
/* For now we have one and only one private flag and it is only defined /* For now we have one and only one private flag and it is only defined
* when we have support for the SKIP_CPU_SYNC DMA attribute. Instead * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
...@@ -349,11 +343,11 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, ...@@ -349,11 +343,11 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
unsigned int i; unsigned int i;
i40e_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock(); rcu_read_lock();
for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
struct i40e_ring *ring; struct iavf_ring *ring;
/* Avoid accessing un-allocated queues */ /* Avoid accessing un-allocated queues */
ring = (i < adapter->num_active_queues ? ring = (i < adapter->num_active_queues ?
...@@ -397,15 +391,15 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) ...@@ -397,15 +391,15 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
{ {
unsigned int i; unsigned int i;
i40e_add_stat_strings(&data, iavf_gstrings_stats); iavf_add_stat_strings(&data, iavf_gstrings_stats);
/* Queues are always allocated in pairs, so we just use num_tx_queues /* Queues are always allocated in pairs, so we just use num_tx_queues
* for both Tx and Rx queues. * for both Tx and Rx queues.
*/ */
for (i = 0; i < netdev->num_tx_queues; i++) { for (i = 0; i < netdev->num_tx_queues; i++) {
i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i); "tx", i);
i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"rx", i); "rx", i);
} }
} }
...@@ -437,7 +431,7 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -437,7 +431,7 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
* @netdev: network interface device structure * @netdev: network interface device structure
* *
* The get string set count and the string set should be matched for each * The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
* array. * array.
* *
* Returns a u32 bitmap of flags. * Returns a u32 bitmap of flags.
...@@ -548,7 +542,7 @@ static void iavf_set_msglevel(struct net_device *netdev, u32 data) ...@@ -548,7 +542,7 @@ static void iavf_set_msglevel(struct net_device *netdev, u32 data)
{ {
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
if (I40E_DEBUG_USER & data) if (IAVF_DEBUG_USER & data)
adapter->hw.debug_mask = data; adapter->hw.debug_mask = data;
adapter->msg_enable = data; adapter->msg_enable = data;
} }
...@@ -648,8 +642,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, ...@@ -648,8 +642,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue) struct ethtool_coalesce *ec, int queue)
{ {
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi; struct iavf_vsi *vsi = &adapter->vsi;
struct i40e_ring *rx_ring, *tx_ring; struct iavf_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit; ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit; ec->rx_max_coalesced_frames = vsi->work_limit;
...@@ -671,8 +665,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, ...@@ -671,8 +665,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
ec->use_adaptive_tx_coalesce = 1; ec->use_adaptive_tx_coalesce = 1;
ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
return 0; return 0;
} }
...@@ -718,20 +712,20 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, ...@@ -718,20 +712,20 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
struct ethtool_coalesce *ec, int queue) struct ethtool_coalesce *ec, int queue)
{ {
struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
struct i40e_q_vector *q_vector; struct iavf_q_vector *q_vector;
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
rx_ring->itr_setting |= I40E_ITR_DYNAMIC; rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
if (!ec->use_adaptive_rx_coalesce) if (!ec->use_adaptive_rx_coalesce)
rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
tx_ring->itr_setting |= I40E_ITR_DYNAMIC; tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
if (!ec->use_adaptive_tx_coalesce) if (!ec->use_adaptive_tx_coalesce)
tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
q_vector = rx_ring->q_vector; q_vector = rx_ring->q_vector;
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
...@@ -757,7 +751,7 @@ static int __iavf_set_coalesce(struct net_device *netdev, ...@@ -757,7 +751,7 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue) struct ethtool_coalesce *ec, int queue)
{ {
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi; struct iavf_vsi *vsi = &adapter->vsi;
int i; int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
...@@ -766,15 +760,15 @@ static int __iavf_set_coalesce(struct net_device *netdev, ...@@ -766,15 +760,15 @@ static int __iavf_set_coalesce(struct net_device *netdev,
if (ec->rx_coalesce_usecs == 0) { if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce) if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
} else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
(ec->rx_coalesce_usecs > I40E_MAX_ITR)) { (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL; return -EINVAL;
} else if (ec->tx_coalesce_usecs == 0) { } else if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce) if (ec->use_adaptive_tx_coalesce)
netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
} else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
(ec->tx_coalesce_usecs > I40E_MAX_ITR)) { (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL; return -EINVAL;
} }
......
...@@ -67,7 +67,7 @@ static struct workqueue_struct *iavf_wq; ...@@ -67,7 +67,7 @@ static struct workqueue_struct *iavf_wq;
* @alignment: what to align the allocation to * @alignment: what to align the allocation to
**/ **/
iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
struct i40e_dma_mem *mem, struct iavf_dma_mem *mem,
u64 size, u32 alignment) u64 size, u32 alignment)
{ {
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
...@@ -89,7 +89,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, ...@@ -89,7 +89,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @mem: ptr to mem struct to free * @mem: ptr to mem struct to free
**/ **/
iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct i40e_dma_mem *mem) iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{ {
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
...@@ -107,7 +107,7 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct i40e_dma_mem *mem) ...@@ -107,7 +107,7 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct i40e_dma_mem *mem)
* @size: size of memory requested * @size: size of memory requested
**/ **/
iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
struct i40e_virt_mem *mem, u32 size) struct iavf_virt_mem *mem, u32 size)
{ {
if (!mem) if (!mem)
return I40E_ERR_PARAM; return I40E_ERR_PARAM;
...@@ -126,8 +126,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, ...@@ -126,8 +126,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @mem: ptr to mem struct to free * @mem: ptr to mem struct to free
**/ **/
iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
struct i40e_virt_mem *mem)
{ {
if (!mem) if (!mem)
return I40E_ERR_PARAM; return I40E_ERR_PARAM;
...@@ -300,7 +299,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) ...@@ -300,7 +299,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
**/ **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data) static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{ {
struct i40e_q_vector *q_vector = data; struct iavf_q_vector *q_vector = data;
if (!q_vector->tx.ring && !q_vector->rx.ring) if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED; return IRQ_HANDLED;
...@@ -319,8 +318,8 @@ static irqreturn_t iavf_msix_clean_rings(int irq, void *data) ...@@ -319,8 +318,8 @@ static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
static void static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{ {
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
struct iavf_hw *hw = &adapter->hw; struct iavf_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector; rx_ring->q_vector = q_vector;
...@@ -331,7 +330,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) ...@@ -331,7 +330,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.next_update = jiffies + 1; q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx); q_vector->ring_mask |= BIT(r_idx);
wr32(hw, IAVF_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
q_vector->rx.current_itr); q_vector->rx.current_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr;
} }
...@@ -345,8 +344,8 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) ...@@ -345,8 +344,8 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
static void static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{ {
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
struct iavf_hw *hw = &adapter->hw; struct iavf_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector; tx_ring->q_vector = q_vector;
...@@ -357,7 +356,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) ...@@ -357,7 +356,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.next_update = jiffies + 1; q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++; q_vector->num_ringpairs++;
wr32(hw, IAVF_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
q_vector->tx.target_itr); q_vector->tx.target_itr);
q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->tx.current_itr = q_vector->tx.target_itr;
} }
...@@ -409,7 +408,7 @@ static void iavf_netpoll(struct net_device *netdev) ...@@ -409,7 +408,7 @@ static void iavf_netpoll(struct net_device *netdev)
int i; int i;
/* if interface is down do nothing */ /* if interface is down do nothing */
if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) if (test_bit(__IAVF_VSI_DOWN, adapter->vsi.state))
return; return;
for (i = 0; i < q_vectors; i++) for (i = 0; i < q_vectors; i++)
...@@ -428,8 +427,8 @@ static void iavf_netpoll(struct net_device *netdev) ...@@ -428,8 +427,8 @@ static void iavf_netpoll(struct net_device *netdev)
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask) const cpumask_t *mask)
{ {
struct i40e_q_vector *q_vector = struct iavf_q_vector *q_vector =
container_of(notify, struct i40e_q_vector, affinity_notify); container_of(notify, struct iavf_q_vector, affinity_notify);
cpumask_copy(&q_vector->affinity_mask, mask); cpumask_copy(&q_vector->affinity_mask, mask);
} }
...@@ -465,7 +464,7 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) ...@@ -465,7 +464,7 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
q_vectors = adapter->num_msix_vectors - NONQ_VECS; q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) { for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
...@@ -609,7 +608,7 @@ static void iavf_configure_tx(struct iavf_adapter *adapter) ...@@ -609,7 +608,7 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/ **/
static void iavf_configure_rx(struct iavf_adapter *adapter) static void iavf_configure_rx(struct iavf_adapter *adapter)
{ {
unsigned int rx_buf_len = I40E_RXBUFFER_2048; unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw; struct iavf_hw *hw = &adapter->hw;
int i; int i;
...@@ -622,15 +621,15 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) ...@@ -622,15 +621,15 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
* an order 1 page, so we might as well increase the size * an order 1 page, so we might as well increase the size
* of our Rx buffer to make better use of the available space * of our Rx buffer to make better use of the available space
*/ */
rx_buf_len = I40E_RXBUFFER_3072; rx_buf_len = IAVF_RXBUFFER_3072;
/* We use a 1536 buffer size for configurations with /* We use a 1536 buffer size for configurations with
* standard Ethernet mtu. On x86 this gives us enough room * standard Ethernet mtu. On x86 this gives us enough room
* for shared info and 192 bytes of padding. * for shared info and 192 bytes of padding.
*/ */
if (!I40E_2K_TOO_SMALL_WITH_PADDING && if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
(netdev->mtu <= ETH_DATA_LEN)) (netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
} }
#endif #endif
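The 192 bytes of padding mentioned in the comment follow from the half-page math: a 2048-byte half page minus the 1536-byte buffer leaves 512 bytes, and with struct skb_shared_info taking 320 bytes on x86_64 kernels of this vintage (an assumption, not something shown in this hunk), 512 - 320 = 192 bytes remain as the padding referred to above.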
...@@ -779,7 +778,7 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, ...@@ -779,7 +778,7 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
} }
/** /**
* i40e_add_filter - Add a mac filter to the filter list * iavf_add_filter - Add a mac filter to the filter list
* @adapter: board private structure * @adapter: board private structure
* @macaddr: the MAC address * @macaddr: the MAC address
* *
...@@ -937,7 +936,7 @@ static void iavf_set_rx_mode(struct net_device *netdev) ...@@ -937,7 +936,7 @@ static void iavf_set_rx_mode(struct net_device *netdev)
static void iavf_napi_enable_all(struct iavf_adapter *adapter) static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{ {
int q_idx; int q_idx;
struct i40e_q_vector *q_vector; struct iavf_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NONQ_VECS; int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) { for (q_idx = 0; q_idx < q_vectors; q_idx++) {
...@@ -956,7 +955,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter) ...@@ -956,7 +955,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
static void iavf_napi_disable_all(struct iavf_adapter *adapter) static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{ {
int q_idx; int q_idx;
struct i40e_q_vector *q_vector; struct iavf_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NONQ_VECS; int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) { for (q_idx = 0; q_idx < q_vectors; q_idx++) {
...@@ -981,9 +980,9 @@ static void iavf_configure(struct iavf_adapter *adapter) ...@@ -981,9 +980,9 @@ static void iavf_configure(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i]; struct iavf_ring *ring = &adapter->rx_rings[i];
iavf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
} }
} }
...@@ -996,7 +995,7 @@ static void iavf_configure(struct iavf_adapter *adapter) ...@@ -996,7 +995,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
static void iavf_up_complete(struct iavf_adapter *adapter) static void iavf_up_complete(struct iavf_adapter *adapter)
{ {
adapter->state = __IAVF_RUNNING; adapter->state = __IAVF_RUNNING;
clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_napi_enable_all(adapter); iavf_napi_enable_all(adapter);
...@@ -1007,7 +1006,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) ...@@ -1007,7 +1006,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
} }
/** /**
* i40e_down - Shutdown the connection processing * iavf_down - Shutdown the connection processing
* @adapter: board private structure * @adapter: board private structure
* *
* Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
...@@ -1159,17 +1158,17 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) ...@@ -1159,17 +1158,17 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
adapter->tx_rings = kcalloc(num_active_queues, adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL); sizeof(struct iavf_ring), GFP_KERNEL);
if (!adapter->tx_rings) if (!adapter->tx_rings)
goto err_out; goto err_out;
adapter->rx_rings = kcalloc(num_active_queues, adapter->rx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL); sizeof(struct iavf_ring), GFP_KERNEL);
if (!adapter->rx_rings) if (!adapter->rx_rings)
goto err_out; goto err_out;
for (i = 0; i < num_active_queues; i++) { for (i = 0; i < num_active_queues; i++) {
struct i40e_ring *tx_ring; struct iavf_ring *tx_ring;
struct i40e_ring *rx_ring; struct iavf_ring *rx_ring;
tx_ring = &adapter->tx_rings[i]; tx_ring = &adapter->tx_rings[i];
...@@ -1177,16 +1176,16 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) ...@@ -1177,16 +1176,16 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
tx_ring->netdev = adapter->netdev; tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev; tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count; tx_ring->count = adapter->tx_desc_count;
tx_ring->itr_setting = I40E_ITR_TX_DEF; tx_ring->itr_setting = IAVF_ITR_TX_DEF;
if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
rx_ring = &adapter->rx_rings[i]; rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i; rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev; rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev; rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count; rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = I40E_ITR_RX_DEF; rx_ring->itr_setting = IAVF_ITR_RX_DEF;
} }
adapter->num_active_queues = num_active_queues; adapter->num_active_queues = num_active_queues;
...@@ -1244,7 +1243,7 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) ...@@ -1244,7 +1243,7 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
} }
/** /**
* i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
* @adapter: board private structure * @adapter: board private structure
* *
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
...@@ -1356,9 +1355,9 @@ static int iavf_init_rss(struct iavf_adapter *adapter) ...@@ -1356,9 +1355,9 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags & if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
else else
adapter->hena = I40E_DEFAULT_RSS_HENA; adapter->hena = IAVF_DEFAULT_RSS_HENA;
wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
...@@ -1381,7 +1380,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter) ...@@ -1381,7 +1380,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{ {
int q_idx = 0, num_q_vectors; int q_idx = 0, num_q_vectors;
struct i40e_q_vector *q_vector; struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
...@@ -1423,7 +1422,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter) ...@@ -1423,7 +1422,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
napi_vectors = adapter->num_active_queues; napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
if (q_idx < napi_vectors) if (q_idx < napi_vectors)
netif_napi_del(&q_vector->napi); netif_napi_del(&q_vector->napi);
...@@ -1543,7 +1542,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) ...@@ -1543,7 +1542,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
if (err) if (err)
goto err; goto err;
set_bit(__I40E_VSI_DOWN, adapter->vsi.state); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_map_rings_to_vectors(adapter); iavf_map_rings_to_vectors(adapter);
...@@ -1783,7 +1782,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) ...@@ -1783,7 +1782,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
* tasks have finished, since we're not holding the rtnl_lock here. * tasks have finished, since we're not holding the rtnl_lock here.
*/ */
if (adapter->state == __IAVF_RUNNING) { if (adapter->state == __IAVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev); netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev); netif_tx_disable(adapter->netdev);
adapter->link_up = false; adapter->link_up = false;
...@@ -3056,7 +3055,7 @@ static int iavf_close(struct net_device *netdev) ...@@ -3056,7 +3055,7 @@ static int iavf_close(struct net_device *netdev)
&adapter->crit_section)) &adapter->crit_section))
usleep_range(500, 1000); usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter)) if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
...@@ -3108,7 +3107,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3108,7 +3107,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
} }
/** /**
* i40e_set_features - set the netdev feature flags * iavf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted * @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting * @features: the feature set that the stack is suggesting
* Note: expects to be called while under rtnl_lock() * Note: expects to be called while under rtnl_lock()
...@@ -3268,7 +3267,7 @@ int iavf_process_config(struct iavf_adapter *adapter) ...@@ -3268,7 +3267,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
struct virtchnl_vf_resource *vfres = adapter->vf_res; struct virtchnl_vf_resource *vfres = adapter->vf_res;
int i, num_req_queues = adapter->num_req_queues; int i, num_req_queues = adapter->num_req_queues;
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi; struct iavf_vsi *vsi = &adapter->vsi;
netdev_features_t hw_enc_features; netdev_features_t hw_enc_features;
netdev_features_t hw_features; netdev_features_t hw_features;
...@@ -3381,7 +3380,7 @@ int iavf_process_config(struct iavf_adapter *adapter) ...@@ -3381,7 +3380,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
adapter->vsi.back = adapter; adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1; adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev; vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle; vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
...@@ -3422,7 +3421,7 @@ static void iavf_init_task(struct work_struct *work) ...@@ -3422,7 +3421,7 @@ static void iavf_init_task(struct work_struct *work)
/* driver loaded, probe complete */ /* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING; adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
err = i40e_set_mac_type(hw); err = iavf_set_mac_type(hw);
if (err) { if (err) {
dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
err); err);
...@@ -3486,7 +3485,7 @@ static void iavf_init_task(struct work_struct *work) ...@@ -3486,7 +3485,7 @@ static void iavf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */ /* aq msg sent, awaiting reply */
if (!adapter->vf_res) { if (!adapter->vf_res) {
bufsz = sizeof(struct virtchnl_vf_resource) + bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * (IAVF_MAX_VF_VSI *
sizeof(struct virtchnl_vsi_resource)); sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res) if (!adapter->vf_res)
...@@ -3528,7 +3527,7 @@ static void iavf_init_task(struct work_struct *work) ...@@ -3528,7 +3527,7 @@ static void iavf_init_task(struct work_struct *work)
/* MTU range: 68 - 9710 */ /* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU; netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) { if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
...@@ -3582,7 +3581,7 @@ static void iavf_init_task(struct work_struct *work) ...@@ -3582,7 +3581,7 @@ static void iavf_init_task(struct work_struct *work)
dev_info(&pdev->dev, "GRO is enabled\n"); dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __IAVF_DOWN; adapter->state = __IAVF_DOWN;
set_bit(__I40E_VSI_DOWN, adapter->vsi.state); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_misc_irq_enable(adapter); iavf_misc_irq_enable(adapter);
wake_up(&adapter->down_waitqueue); wake_up(&adapter->down_waitqueue);
...@@ -3968,9 +3967,9 @@ static struct pci_driver iavf_driver = { ...@@ -3968,9 +3967,9 @@ static struct pci_driver iavf_driver = {
}; };
/** /**
* i40e_init_module - Driver Registration Routine * iavf_init_module - Driver Registration Routine
* *
* i40e_init_module is the first routine called when the driver is * iavf_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem. * loaded. All it does is register with the PCI subsystem.
**/ **/
static int __init iavf_init_module(void) static int __init iavf_init_module(void)
...@@ -3995,9 +3994,9 @@ static int __init iavf_init_module(void) ...@@ -3995,9 +3994,9 @@ static int __init iavf_init_module(void)
module_init(iavf_init_module); module_init(iavf_init_module);
/** /**
* i40e_exit_module - Driver Exit Cleanup Routine * iavf_exit_module - Driver Exit Cleanup Routine
* *
* i40e_exit_module is called just before the driver is removed * iavf_exit_module is called just before the driver is removed
* from memory. * from memory.
**/ **/
static void __exit iavf_exit_module(void) static void __exit iavf_exit_module(void)
......
...@@ -59,9 +59,9 @@ ...@@ -59,9 +59,9 @@
DECLARE_EVENT_CLASS( DECLARE_EVENT_CLASS(
iavf_tx_template, iavf_tx_template,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
struct i40e_tx_desc *desc, struct iavf_tx_desc *desc,
struct i40e_tx_buffer *buf), struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf), TP_ARGS(ring, desc, buf),
...@@ -94,25 +94,25 @@ DECLARE_EVENT_CLASS( ...@@ -94,25 +94,25 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT( DEFINE_EVENT(
iavf_tx_template, iavf_clean_tx_irq, iavf_tx_template, iavf_clean_tx_irq,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
struct i40e_tx_desc *desc, struct iavf_tx_desc *desc,
struct i40e_tx_buffer *buf), struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf)); TP_ARGS(ring, desc, buf));
DEFINE_EVENT( DEFINE_EVENT(
iavf_tx_template, iavf_clean_tx_irq_unmap, iavf_tx_template, iavf_clean_tx_irq_unmap,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
struct i40e_tx_desc *desc, struct iavf_tx_desc *desc,
struct i40e_tx_buffer *buf), struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf)); TP_ARGS(ring, desc, buf));
DECLARE_EVENT_CLASS( DECLARE_EVENT_CLASS(
iavf_rx_template, iavf_rx_template,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
union i40e_32byte_rx_desc *desc, union iavf_32byte_rx_desc *desc,
struct sk_buff *skb), struct sk_buff *skb),
TP_ARGS(ring, desc, skb), TP_ARGS(ring, desc, skb),
...@@ -139,16 +139,16 @@ DECLARE_EVENT_CLASS( ...@@ -139,16 +139,16 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT( DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq, iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
union i40e_32byte_rx_desc *desc, union iavf_32byte_rx_desc *desc,
struct sk_buff *skb), struct sk_buff *skb),
TP_ARGS(ring, desc, skb)); TP_ARGS(ring, desc, skb));
DEFINE_EVENT( DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx, iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring, TP_PROTO(struct iavf_ring *ring,
union i40e_32byte_rx_desc *desc, union iavf_32byte_rx_desc *desc,
struct sk_buff *skb), struct sk_buff *skb),
TP_ARGS(ring, desc, skb)); TP_ARGS(ring, desc, skb));
...@@ -157,7 +157,7 @@ DECLARE_EVENT_CLASS( ...@@ -157,7 +157,7 @@ DECLARE_EVENT_CLASS(
iavf_xmit_template, iavf_xmit_template,
TP_PROTO(struct sk_buff *skb, TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring), struct iavf_ring *ring),
TP_ARGS(skb, ring), TP_ARGS(skb, ring),
...@@ -182,14 +182,14 @@ DECLARE_EVENT_CLASS( ...@@ -182,14 +182,14 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT( DEFINE_EVENT(
iavf_xmit_template, iavf_xmit_frame_ring, iavf_xmit_template, iavf_xmit_frame_ring,
TP_PROTO(struct sk_buff *skb, TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring), struct iavf_ring *ring),
TP_ARGS(skb, ring)); TP_ARGS(skb, ring));
DEFINE_EVENT( DEFINE_EVENT(
iavf_xmit_template, iavf_xmit_frame_ring_drop, iavf_xmit_template, iavf_xmit_frame_ring_drop,
TP_PROTO(struct sk_buff *skb, TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring), struct iavf_ring *ring),
TP_ARGS(skb, ring)); TP_ARGS(skb, ring));
......
...@@ -12,24 +12,24 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, ...@@ -12,24 +12,24 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag) u32 td_tag)
{ {
return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
} }
#define I40E_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS) #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
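For a frame's final buffer, a caller would typically OR the EOP and RS command bits (the IAVF_TXD_CMD shorthand above) into the descriptor's command/type/size qword via build_ctob(). A hedged usage sketch with made-up field values (the Tx mapping path itself is not part of this hunk):

	__le64 qw1;

	/* last fragment: End Of Packet + Report Status, no offload offsets,
	 * 256 data bytes in this buffer, no L2 tag
	 */
	qw1 = build_ctob(IAVF_TXD_CMD, 0, 256, 0);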
/** /**
* i40e_unmap_and_free_tx_resource - Release a Tx buffer * iavf_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer * @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free * @tx_buffer: the buffer to free
**/ **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
struct i40e_tx_buffer *tx_buffer) struct iavf_tx_buffer *tx_buffer)
{ {
if (tx_buffer->skb) { if (tx_buffer->skb) {
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf); kfree(tx_buffer->raw_buf);
else else
dev_kfree_skb_any(tx_buffer->skb); dev_kfree_skb_any(tx_buffer->skb);
...@@ -55,7 +55,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, ...@@ -55,7 +55,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
* iavf_clean_tx_ring - Free any empty Tx buffers * iavf_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned * @tx_ring: ring to be cleaned
**/ **/
void iavf_clean_tx_ring(struct i40e_ring *tx_ring) void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{ {
unsigned long bi_size; unsigned long bi_size;
u16 i; u16 i;
...@@ -66,9 +66,9 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring) ...@@ -66,9 +66,9 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
/* Free all the Tx ring sk_buffs */ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) for (i = 0; i < tx_ring->count; i++)
i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size); memset(tx_ring->tx_bi, 0, bi_size);
/* Zero out the descriptor ring */ /* Zero out the descriptor ring */
...@@ -90,7 +90,7 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring) ...@@ -90,7 +90,7 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
* *
* Free all transmit software resources * Free all transmit software resources
**/ **/
void iavf_free_tx_resources(struct i40e_ring *tx_ring) void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{ {
iavf_clean_tx_ring(tx_ring); iavf_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi); kfree(tx_ring->tx_bi);
...@@ -111,7 +111,7 @@ void iavf_free_tx_resources(struct i40e_ring *tx_ring) ...@@ -111,7 +111,7 @@ void iavf_free_tx_resources(struct i40e_ring *tx_ring)
* Since there is no access to the ring head register * Since there is no access to the ring head register
* in XL710, we need to use our local copies * in XL710, we need to use our local copies
**/ **/
u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw) u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{ {
u32 head, tail; u32 head, tail;
...@@ -132,9 +132,9 @@ u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw) ...@@ -132,9 +132,9 @@ u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
* VSI has netdev and netdev has TX queues. This function is to check each of * VSI has netdev and netdev has TX queues. This function is to check each of
* those TX queues if they are hung, trigger recovery by issuing SW interrupt. * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
**/ **/
void iavf_detect_recover_hung(struct i40e_vsi *vsi) void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{ {
struct i40e_ring *tx_ring = NULL; struct iavf_ring *tx_ring = NULL;
struct net_device *netdev; struct net_device *netdev;
unsigned int i; unsigned int i;
int packets; int packets;
...@@ -142,7 +142,7 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi) ...@@ -142,7 +142,7 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)
if (!vsi) if (!vsi)
return; return;
if (test_bit(__I40E_VSI_DOWN, vsi->state)) if (test_bit(__IAVF_VSI_DOWN, vsi->state))
return; return;
netdev = vsi->netdev; netdev = vsi->netdev;
...@@ -181,19 +181,19 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi) ...@@ -181,19 +181,19 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)
#define WB_STRIDE 4 #define WB_STRIDE 4
/** /**
* i40e_clean_tx_irq - Reclaim resources after transmit completes * iavf_clean_tx_irq - Reclaim resources after transmit completes
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @tx_ring: Tx ring to clean * @tx_ring: Tx ring to clean
* @napi_budget: Used to determine if we are in netpoll * @napi_budget: Used to determine if we are in netpoll
* *
* Returns true if there's any budget left (e.g. the clean is finished) * Returns true if there's any budget left (e.g. the clean is finished)
**/ **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct i40e_ring *tx_ring, int napi_budget) struct iavf_ring *tx_ring, int napi_budget)
{ {
u16 i = tx_ring->next_to_clean; u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf; struct iavf_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc; struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit; unsigned int budget = vsi->work_limit;
...@@ -202,7 +202,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -202,7 +202,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
i -= tx_ring->count; i -= tx_ring->count;
do { do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */ /* if next_to_watch is not set then there is no work pending */
if (!eop_desc) if (!eop_desc)
...@@ -286,7 +286,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -286,7 +286,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets; tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
/* check to see if there are < 4 descriptors /* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force * waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI. * them to be written back in case we stay in NAPI.
...@@ -296,8 +296,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -296,8 +296,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget && if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) && ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_VSI_DOWN, vsi->state) && !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true; tx_ring->arm_wb = true;
} }
...@@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this /* Make sure that anybody stopping the queue after this
* sees the new next_to_clean. * sees the new next_to_clean.
*/ */
smp_mb(); smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev, if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) && tx_ring->queue_index) &&
!test_bit(__I40E_VSI_DOWN, vsi->state)) { !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev, netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index); tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue; ++tx_ring->tx_stats.restart_queue;
...@@ -330,13 +330,13 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -330,13 +330,13 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* @q_vector: the vector on which to enable writeback * @q_vector: the vector on which to enable writeback
* *
**/ **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
struct i40e_q_vector *q_vector) struct iavf_q_vector *q_vector)
{ {
u16 flags = q_vector->tx.ring[0].flags; u16 flags = q_vector->tx.ring[0].flags;
u32 val; u32 val;
if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
return; return;
if (q_vector->arm_wb_state) if (q_vector->arm_wb_state)
...@@ -356,7 +356,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, ...@@ -356,7 +356,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
* @q_vector: the vector on which to force writeback * @q_vector: the vector on which to force writeback
* *
**/ **/
void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{ {
u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
...@@ -369,31 +369,31 @@ void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) ...@@ -369,31 +369,31 @@ void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
val); val);
} }
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
struct i40e_ring_container *rc) struct iavf_ring_container *rc)
{ {
return &q_vector->rx == rc; return &q_vector->rx == rc;
} }
static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
{ {
unsigned int divisor; unsigned int divisor;
switch (q_vector->adapter->link_speed) { switch (q_vector->adapter->link_speed) {
case I40E_LINK_SPEED_40GB: case I40E_LINK_SPEED_40GB:
divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break; break;
case I40E_LINK_SPEED_25GB: case I40E_LINK_SPEED_25GB:
case I40E_LINK_SPEED_20GB: case I40E_LINK_SPEED_20GB:
divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break; break;
default: default:
case I40E_LINK_SPEED_10GB: case I40E_LINK_SPEED_10GB:
divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break; break;
case I40E_LINK_SPEED_1GB: case I40E_LINK_SPEED_1GB:
case I40E_LINK_SPEED_100MB: case I40E_LINK_SPEED_100MB:
divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break; break;
} }
...@@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) ...@@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
} }
/** /**
* i40e_update_itr - update the dynamic ITR value based on statistics * iavf_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information * @q_vector: structure containing interrupt and ring information
* @rc: structure containing ring performance data * @rc: structure containing ring performance data
* *
...@@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) ...@@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
* on testing data as well as attempting to minimize response time * on testing data as well as attempting to minimize response time
* while increasing bulk throughput. * while increasing bulk throughput.
**/ **/
static void i40e_update_itr(struct i40e_q_vector *q_vector, static void iavf_update_itr(struct iavf_q_vector *q_vector,
struct i40e_ring_container *rc) struct iavf_ring_container *rc)
{ {
unsigned int avg_wire_size, packets, bytes, itr; unsigned int avg_wire_size, packets, bytes, itr;
unsigned long next_update = jiffies; unsigned long next_update = jiffies;
...@@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
/* For Rx we want to push the delay up and default to low latency. /* For Rx we want to push the delay up and default to low latency.
* for Tx we want to pull the delay down and default to high latency. * for Tx we want to pull the delay down and default to high latency.
*/ */
itr = i40e_container_is_rx(q_vector, rc) ? itr = iavf_container_is_rx(q_vector, rc) ?
I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
/* If we didn't update within up to 1 - 2 jiffies we can assume /* If we didn't update within up to 1 - 2 jiffies we can assume
* that either packets are coming in so slow there hasn't been * that either packets are coming in so slow there hasn't been
...@@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
packets = rc->total_packets; packets = rc->total_packets;
bytes = rc->total_bytes; bytes = rc->total_bytes;
if (i40e_container_is_rx(q_vector, rc)) { if (iavf_container_is_rx(q_vector, rc)) {
/* If Rx there are 1 to 4 packets and bytes are less than /* If Rx there are 1 to 4 packets and bytes are less than
* 9000 assume insufficient data to use bulk rate limiting * 9000 assume insufficient data to use bulk rate limiting
* approach unless Tx is already in bulk rate limiting. We * approach unless Tx is already in bulk rate limiting. We
* are likely latency driven. * are likely latency driven.
*/ */
if (packets && packets < 4 && bytes < 9000 && if (packets && packets < 4 && bytes < 9000 &&
(q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
itr = I40E_ITR_ADAPTIVE_LATENCY; itr = IAVF_ITR_ADAPTIVE_LATENCY;
goto adjust_by_size; goto adjust_by_size;
} }
} else if (packets < 4) { } else if (packets < 4) {
...@@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* reset the ITR_ADAPTIVE_LATENCY bit for latency mode so * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
* that the Rx can relax. * that the Rx can relax.
*/ */
if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
(q_vector->rx.target_itr & I40E_ITR_MASK) == (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
I40E_ITR_ADAPTIVE_MAX_USECS) IAVF_ITR_ADAPTIVE_MAX_USECS)
goto clear_counts; goto clear_counts;
} else if (packets > 32) { } else if (packets > 32) {
/* If we have processed over 32 packets in a single interrupt /* If we have processed over 32 packets in a single interrupt
* for Tx assume we need to switch over to "bulk" mode. * for Tx assume we need to switch over to "bulk" mode.
*/ */
rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
} }
/* We have no packets to actually measure against. This means /* We have no packets to actually measure against. This means
...@@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* fixed amount. * fixed amount.
*/ */
if (packets < 56) { if (packets < 56) {
itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= I40E_ITR_ADAPTIVE_LATENCY; itr &= IAVF_ITR_ADAPTIVE_LATENCY;
itr += I40E_ITR_ADAPTIVE_MAX_USECS; itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
} }
goto clear_counts; goto clear_counts;
} }
if (packets <= 256) { if (packets <= 256) {
itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
itr &= I40E_ITR_MASK; itr &= IAVF_ITR_MASK;
/* Between 56 and 112 is our "goldilocks" zone where we are /* Between 56 and 112 is our "goldilocks" zone where we are
* working out "just right". Just report that our current * working out "just right". Just report that our current
...@@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* in half per interrupt. * in half per interrupt.
*/ */
itr /= 2; itr /= 2;
itr &= I40E_ITR_MASK; itr &= IAVF_ITR_MASK;
if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
itr = I40E_ITR_ADAPTIVE_MIN_USECS; itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
goto clear_counts; goto clear_counts;
} }
...@@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* though for smaller packet sizes there isn't much we can do as * though for smaller packet sizes there isn't much we can do as
* NAPI polling will likely be kicking in sooner rather than later. * NAPI polling will likely be kicking in sooner rather than later.
*/ */
itr = I40E_ITR_ADAPTIVE_BULK; itr = IAVF_ITR_ADAPTIVE_BULK;
adjust_by_size: adjust_by_size:
/* If packet counts are 256 or greater we can assume we have a gross /* If packet counts are 256 or greater we can assume we have a gross
...@@ -577,7 +577,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -577,7 +577,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
/* If we are in low latency mode halve our delay which doubles the /* If we are in low latency mode halve our delay which doubles the
 * rate to somewhere between 100K and 16K ints/sec * rate to somewhere between 100K and 16K ints/sec
*/ */
if (itr & I40E_ITR_ADAPTIVE_LATENCY) if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
avg_wire_size /= 2; avg_wire_size /= 2;
/* Resultant value is 256 times larger than it needs to be. This /* Resultant value is 256 times larger than it needs to be. This
...@@ -587,12 +587,12 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -587,12 +587,12 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* Use addition as we have already recorded the new latency flag * Use addition as we have already recorded the new latency flag
* for the ITR value. * for the ITR value.
*/ */
itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
I40E_ITR_ADAPTIVE_MIN_INC; IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= I40E_ITR_ADAPTIVE_LATENCY; itr &= IAVF_ITR_ADAPTIVE_LATENCY;
itr += I40E_ITR_ADAPTIVE_MAX_USECS; itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
} }
clear_counts: clear_counts:
...@@ -612,7 +612,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, ...@@ -612,7 +612,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* *
* Return 0 on success, negative on error * Return 0 on success, negative on error
**/ **/
int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring) int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{ {
struct device *dev = tx_ring->dev; struct device *dev = tx_ring->dev;
int bi_size; int bi_size;
...@@ -622,13 +622,13 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring) ...@@ -622,13 +622,13 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* warn if we are about to overwrite the pointer */ /* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_bi); WARN_ON(tx_ring->tx_bi);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi) if (!tx_ring->tx_bi)
goto err; goto err;
/* round up to nearest 4K */ /* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL); &tx_ring->dma, GFP_KERNEL);
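A small sketch of the sizing math used here, and mirrored for the Rx ring further below: the descriptor area is count times the descriptor size, rounded up to a 4 KiB boundary before dma_alloc_coherent(). The descriptor sizes are assumptions standing in for the driver's struct layouts.

/* Illustrative only: shows the ring-size rounding with assumed descriptor
 * sizes standing in for struct iavf_tx_desc and union iavf_32byte_rx_desc.
 */
#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t tx_desc_size = 16;    /* assumed sizeof(struct iavf_tx_desc)       */
	size_t rx_desc_size = 32;    /* assumed sizeof(union iavf_32byte_rx_desc) */
	unsigned int count = 512;    /* illustrative ring length                  */

	size_t tx_bytes = ALIGN(count * tx_desc_size, 4096);
	size_t rx_bytes = ALIGN(count * rx_desc_size, 4096);

	printf("tx ring: %zu bytes, rx ring: %zu bytes\n", tx_bytes, rx_bytes);
	return 0;
}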
...@@ -653,7 +653,7 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring) ...@@ -653,7 +653,7 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)
* iavf_clean_rx_ring - Free Rx buffers * iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned * @rx_ring: ring to be cleaned
**/ **/
void iavf_clean_rx_ring(struct i40e_ring *rx_ring) void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{ {
unsigned long bi_size; unsigned long bi_size;
u16 i; u16 i;
...@@ -669,7 +669,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -669,7 +669,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Free all the Rx ring sk_buffs */ /* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) { for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
if (!rx_bi->page) if (!rx_bi->page)
continue; continue;
...@@ -685,9 +685,9 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -685,9 +685,9 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
/* free resources associated with mapping */ /* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
i40e_rx_pg_size(rx_ring), iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
I40E_RX_DMA_ATTR); IAVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
...@@ -695,7 +695,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -695,7 +695,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
rx_bi->page_offset = 0; rx_bi->page_offset = 0;
} }
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_bi, 0, bi_size); memset(rx_ring->rx_bi, 0, bi_size);
/* Zero out the descriptor ring */ /* Zero out the descriptor ring */
...@@ -712,7 +712,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -712,7 +712,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
* *
* Free all receive software resources * Free all receive software resources
**/ **/
void iavf_free_rx_resources(struct i40e_ring *rx_ring) void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{ {
iavf_clean_rx_ring(rx_ring); iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_bi); kfree(rx_ring->rx_bi);
...@@ -731,14 +731,14 @@ void iavf_free_rx_resources(struct i40e_ring *rx_ring) ...@@ -731,14 +731,14 @@ void iavf_free_rx_resources(struct i40e_ring *rx_ring)
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring) int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{ {
struct device *dev = rx_ring->dev; struct device *dev = rx_ring->dev;
int bi_size; int bi_size;
/* warn if we are about to overwrite the pointer */ /* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_bi); WARN_ON(rx_ring->rx_bi);
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi) if (!rx_ring->rx_bi)
goto err; goto err;
...@@ -746,7 +746,7 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring) ...@@ -746,7 +746,7 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp); u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */ /* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL); &rx_ring->dma, GFP_KERNEL);
...@@ -769,11 +769,11 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring) ...@@ -769,11 +769,11 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)
} }
/** /**
* i40e_release_rx_desc - Store the new tail and head values * iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump * @rx_ring: ring to bump
* @val: new head index * @val: new head index
**/ **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{ {
rx_ring->next_to_use = val; rx_ring->next_to_use = val;
...@@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) ...@@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
} }
/** /**
* i40e_rx_offset - Return expected offset into page to access data * iavf_rx_offset - Return expected offset into page to access data
* @rx_ring: Ring we are requesting offset of * @rx_ring: Ring we are requesting offset of
* *
* Returns the offset value for ring into the data buffer. * Returns the offset value for ring into the data buffer.
*/ */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{ {
return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
} }
/** /**
* i40e_alloc_mapped_page - recycle or make a new page * iavf_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use * @rx_ring: ring to use
* @bi: rx_buffer struct to modify * @bi: rx_buffer struct to modify
* *
* Returns true if the page was successfully allocated or * Returns true if the page was successfully allocated or
* reused. * reused.
**/ **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *bi) struct iavf_rx_buffer *bi)
{ {
struct page *page = bi->page; struct page *page = bi->page;
dma_addr_t dma; dma_addr_t dma;
...@@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
} }
/* alloc new page for storage */ /* alloc new page for storage */
page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
if (unlikely(!page)) { if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++; rx_ring->rx_stats.alloc_page_failed++;
return false; return false;
...@@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
/* map page for use */ /* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0, dma = dma_map_page_attrs(rx_ring->dev, page, 0,
i40e_rx_pg_size(rx_ring), iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
I40E_RX_DMA_ATTR); IAVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since /* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use * there isn't much point in holding memory we can't use
*/ */
if (dma_mapping_error(rx_ring->dev, dma)) { if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, i40e_rx_pg_order(rx_ring)); __free_pages(page, iavf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++; rx_ring->rx_stats.alloc_page_failed++;
return false; return false;
} }
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = i40e_rx_offset(rx_ring); bi->page_offset = iavf_rx_offset(rx_ring);
/* initialize pagecnt_bias to 1 representing we fully own page */ /* initialize pagecnt_bias to 1 representing we fully own page */
bi->pagecnt_bias = 1; bi->pagecnt_bias = 1;
...@@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_receive_skb - Send a completed packet up the stack * iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play * @rx_ring: rx ring in play
* @skb: packet to send up * @skb: packet to send up
* @vlan_tag: vlan tag for packet * @vlan_tag: vlan tag for packet
**/ **/
static void i40e_receive_skb(struct i40e_ring *rx_ring, static void iavf_receive_skb(struct iavf_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag) struct sk_buff *skb, u16 vlan_tag)
{ {
struct i40e_q_vector *q_vector = rx_ring->q_vector; struct iavf_q_vector *q_vector = rx_ring->q_vector;
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK)) (vlan_tag & VLAN_VID_MASK))
...@@ -877,11 +877,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, ...@@ -877,11 +877,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* *
* Returns false if all allocations were successful, true if any fail * Returns false if all allocations were successful, true if any fail
**/ **/
bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{ {
u16 ntu = rx_ring->next_to_use; u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc; union iavf_rx_desc *rx_desc;
struct i40e_rx_buffer *bi; struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */ /* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count) if (!rx_ring->netdev || !cleaned_count)
...@@ -891,7 +891,7 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -891,7 +891,7 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_bi[ntu]; bi = &rx_ring->rx_bi[ntu];
do { do {
if (!i40e_alloc_mapped_page(rx_ring, bi)) if (!iavf_alloc_mapped_page(rx_ring, bi))
goto no_buffers; goto no_buffers;
/* sync the buffer for use by the device */ /* sync the buffer for use by the device */
...@@ -921,13 +921,13 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -921,13 +921,13 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
} while (cleaned_count); } while (cleaned_count);
if (rx_ring->next_to_use != ntu) if (rx_ring->next_to_use != ntu)
i40e_release_rx_desc(rx_ring, ntu); iavf_release_rx_desc(rx_ring, ntu);
return false; return false;
no_buffers: no_buffers:
if (rx_ring->next_to_use != ntu) if (rx_ring->next_to_use != ntu)
i40e_release_rx_desc(rx_ring, ntu); iavf_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after /* make sure to come back via polling to try again after
* allocation failure * allocation failure
...@@ -936,27 +936,27 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -936,27 +936,27 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
} }
/** /**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor * @rx_desc: the receive descriptor
**/ **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi, static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb, struct sk_buff *skb,
union i40e_rx_desc *rx_desc) union iavf_rx_desc *rx_desc)
{ {
struct i40e_rx_ptype_decoded decoded; struct iavf_rx_ptype_decoded decoded;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool ipv4, ipv6; bool ipv4, ipv6;
u8 ptype; u8 ptype;
u64 qword; u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT; IAVF_RXD_QW1_ERROR_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; IAVF_RXD_QW1_STATUS_SHIFT;
decoded = decode_rx_desc_ptype(ptype); decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
...@@ -975,10 +975,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -975,10 +975,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (!(decoded.known && decoded.outer_ip)) if (!(decoded.known && decoded.outer_ip))
return; return;
ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
if (ipv4 && if (ipv4 &&
(rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
...@@ -1004,9 +1004,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1004,9 +1004,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
/* Only report checksum unnecessary for TCP, UDP, or SCTP */ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) { switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP: case IAVF_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP: case IAVF_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP: case IAVF_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
/* fall through */ /* fall through */
default: default:
...@@ -1020,37 +1020,37 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1020,37 +1020,37 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
} }
/** /**
* i40e_ptype_to_htype - get a hash type * iavf_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor * @ptype: the ptype value from the descriptor
* *
* Returns a hash type to be used by skb_set_hash * Returns a hash type to be used by skb_set_hash
**/ **/
static inline int i40e_ptype_to_htype(u8 ptype) static inline int iavf_ptype_to_htype(u8 ptype)
{ {
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
if (!decoded.known) if (!decoded.known)
return PKT_HASH_TYPE_NONE; return PKT_HASH_TYPE_NONE;
if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
return PKT_HASH_TYPE_L4; return PKT_HASH_TYPE_L4;
else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
return PKT_HASH_TYPE_L3; return PKT_HASH_TYPE_L3;
else else
return PKT_HASH_TYPE_L2; return PKT_HASH_TYPE_L2;
} }
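Both the checksum path above and the hash-type mapping in iavf_ptype_to_htype() key off the decoded packet type. A hedged sketch of those decisions follows; the decoded struct and enum values are simplified stand-ins, not the driver's ptype tables.

/* Sketch of the ptype-driven decisions: hash type from payload layer,
 * checksum reported as unnecessary only for recognised L4 protocols.
 */
#include <stdio.h>
#include <stdbool.h>

enum payload_layer { PAY2, PAY3, PAY4 };          /* L2 / L3 / L4 payload */
enum inner_prot    { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP };

struct decoded_ptype {
	bool known;
	bool outer_ip;
	enum payload_layer payload_layer;
	enum inner_prot inner_prot;
};

static const char *hash_type(struct decoded_ptype d)
{
	if (!d.known)
		return "NONE";
	if (d.outer_ip && d.payload_layer == PAY4)
		return "L4";
	if (d.outer_ip && d.payload_layer == PAY3)
		return "L3";
	return "L2";
}

static bool csum_unnecessary(struct decoded_ptype d)
{
	return d.inner_prot == PROT_TCP || d.inner_prot == PROT_UDP ||
	       d.inner_prot == PROT_SCTP;
}

int main(void)
{
	struct decoded_ptype tcp = { true, true, PAY4, PROT_TCP };

	printf("hash=%s csum_ok=%d\n", hash_type(tcp), csum_unnecessary(tcp));
	return 0;
}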
/** /**
* i40e_rx_hash - set the hash value in the skb * iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring * @ring: descriptor ring
* @rx_desc: specific descriptor * @rx_desc: specific descriptor
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @rx_ptype: Rx packet type * @rx_ptype: Rx packet type
**/ **/
static inline void i40e_rx_hash(struct i40e_ring *ring, static inline void iavf_rx_hash(struct iavf_ring *ring,
union i40e_rx_desc *rx_desc, union iavf_rx_desc *rx_desc,
struct sk_buff *skb, struct sk_buff *skb,
u8 rx_ptype) u8 rx_ptype)
{ {
...@@ -1064,7 +1064,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, ...@@ -1064,7 +1064,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
} }
} }
...@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, ...@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb. * other fields within the skb.
**/ **/
static inline static inline
void iavf_process_skb_fields(struct i40e_ring *rx_ring, void iavf_process_skb_fields(struct iavf_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb, union iavf_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype) u8 rx_ptype)
{ {
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index); skb_record_rx_queue(skb, rx_ring->queue_index);
...@@ -1095,7 +1095,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring, ...@@ -1095,7 +1095,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_cleanup_headers - Correct empty headers * iavf_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on * @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed * @skb: pointer to current skb being fixed
* *
...@@ -1107,7 +1107,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring, ...@@ -1107,7 +1107,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,
* *
* Returns true if an error was encountered and skb was freed. * Returns true if an error was encountered and skb was freed.
**/ **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{ {
/* if eth_skb_pad returns an error the skb was freed */ /* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb)) if (eth_skb_pad(skb))
...@@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) ...@@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
} }
/** /**
* i40e_reuse_rx_page - page flip buffer and store it back on the ring * iavf_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on * @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused * @old_buff: donor buffer to have page reused
* *
* Synchronizes page for reuse by the adapter * Synchronizes page for reuse by the adapter
**/ **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *old_buff) struct iavf_rx_buffer *old_buff)
{ {
struct i40e_rx_buffer *new_buff; struct iavf_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc; u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_bi[nta]; new_buff = &rx_ring->rx_bi[nta];
...@@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, ...@@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_page_is_reusable - check if any reuse is possible * iavf_page_is_reusable - check if any reuse is possible
* @page: page struct to check * @page: page struct to check
* *
* A page is not reusable if it was allocated under low memory * A page is not reusable if it was allocated under low memory
* conditions, or it's not in the same NUMA node as this CPU. * conditions, or it's not in the same NUMA node as this CPU.
*/ */
static inline bool i40e_page_is_reusable(struct page *page) static inline bool iavf_page_is_reusable(struct page *page)
{ {
return (page_to_nid(page) == numa_mem_id()) && return (page_to_nid(page) == numa_mem_id()) &&
!page_is_pfmemalloc(page); !page_is_pfmemalloc(page);
} }
/** /**
* i40e_can_reuse_rx_page - Determine if this page can be reused by * iavf_can_reuse_rx_page - Determine if this page can be reused by
* the adapter for another receive * the adapter for another receive
* *
* @rx_buffer: buffer containing the page * @rx_buffer: buffer containing the page
...@@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page) ...@@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page)
* *
* In either case, if the page is reusable its refcount is increased. * In either case, if the page is reusable its refcount is increased.
**/ **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{ {
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page; struct page *page = rx_buffer->page;
/* Is any reuse possible? */ /* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page))) if (unlikely(!iavf_page_is_reusable(page)))
return false; return false;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
...@@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) ...@@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
if (unlikely((page_count(page) - pagecnt_bias) > 1)) if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false; return false;
#else #else
#define I40E_LAST_OFFSET \ #define IAVF_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
if (rx_buffer->page_offset > I40E_LAST_OFFSET) if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
return false; return false;
#endif #endif
...@@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) ...@@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
} }
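To make the reuse test above concrete, here is a small sketch of the half-page flip and pagecnt_bias bookkeeping on 4 KiB pages. The 2048-byte buffer size and the refcount handling are assumptions mirroring the comments, not values taken from the driver headers.

/* Sketch of single-producer page recycling when PAGE_SIZE < 8192: each
 * receive consumes half the page, the offset is XOR-flipped, and the page
 * can be reused while the driver still holds the only other reference
 * (tracked here as a simple bias counter).
 */
#include <stdio.h>
#include <stdbool.h>

#define BUF_SIZE  2048u               /* stand-in for IAVF_RXBUFFER_2048 */

struct rx_buffer {
	unsigned int page_offset;
	unsigned int pagecnt_bias;    /* references the driver still owns */
	unsigned int page_refcount;   /* total references on the page     */
};

static bool can_reuse(const struct rx_buffer *b)
{
	/* Reusable only if the stack has dropped its reference, i.e. the
	 * page refcount minus our bias is back down to one. */
	return (b->page_refcount - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rx_buffer b = { .page_offset = 0, .pagecnt_bias = 1,
			       .page_refcount = 2 };

	/* hand the first half to the stack, flip to the second half */
	b.page_offset ^= BUF_SIZE;
	printf("next offset %u, reusable=%d\n", b.page_offset, can_reuse(&b));
	return 0;
}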
/** /**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into * @skb: sk_buff to place the data into
...@@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) ...@@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
* *
* The function will then update the page offset. * The function will then update the page offset.
**/ **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring, static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer, struct iavf_rx_buffer *rx_buffer,
struct sk_buff *skb, struct sk_buff *skb,
unsigned int size) unsigned int size)
{ {
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else #else
unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif #endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
...@@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring, ...@@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb * @size: size of buffer to add to skb
* *
* This function will pull an Rx buffer from the ring and synchronize it * This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU. * for use by the CPU.
*/ */
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
const unsigned int size) const unsigned int size)
{ {
struct i40e_rx_buffer *rx_buffer; struct iavf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page); prefetchw(rx_buffer->page);
...@@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_construct_skb - Allocate skb and populate it * iavf_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from * @rx_buffer: rx buffer to pull data from
* @size: size of buffer to add to skb * @size: size of buffer to add to skb
...@@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
* data from the current receive descriptor, taking care to set up the * data from the current receive descriptor, taking care to set up the
* skb correctly. * skb correctly.
*/ */
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer, struct iavf_rx_buffer *rx_buffer,
unsigned int size) unsigned int size)
{ {
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else #else
unsigned int truesize = SKB_DATA_ALIGN(size); unsigned int truesize = SKB_DATA_ALIGN(size);
#endif #endif
...@@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, ...@@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* allocate a skb to store the frags */ /* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
I40E_RX_HDR_SIZE, IAVF_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN); GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb)) if (unlikely(!skb))
return NULL; return NULL;
/* Determine available headroom for copy */ /* Determine available headroom for copy */
headlen = size; headlen = size;
if (headlen > I40E_RX_HDR_SIZE) if (headlen > IAVF_RX_HDR_SIZE)
headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */ /* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
...@@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, ...@@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_build_skb - Build skb around an existing buffer * iavf_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from * @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb * @size: size of buffer to add to skb
...@@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, ...@@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* This function builds an skb around an existing Rx buffer, taking care * This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead. * to set up the skb correctly and avoid any memcpy overhead.
*/ */
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer, struct iavf_rx_buffer *rx_buffer,
unsigned int size) unsigned int size)
{ {
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else #else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(I40E_SKB_PAD + size); SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
#endif #endif
struct sk_buff *skb; struct sk_buff *skb;
...@@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, ...@@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
prefetch(va + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* build an skb around the page buffer */ /* build an skb around the page buffer */
skb = build_skb(va - I40E_SKB_PAD, truesize); skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb)) if (unlikely(!skb))
return NULL; return NULL;
/* update pointers within the skb to store the data */ /* update pointers within the skb to store the data */
skb_reserve(skb, I40E_SKB_PAD); skb_reserve(skb, IAVF_SKB_PAD);
__skb_put(skb, size); __skb_put(skb, size);
/* buffer is used by skb, update page_offset */ /* buffer is used by skb, update page_offset */
...@@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, ...@@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
} }
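A hedged sketch of the buffer layout iavf_build_skb() relies on: headroom (IAVF_SKB_PAD), the received data, then room for struct skb_shared_info, which together form the truesize charged to the socket. The pad and shared-info sizes below are illustrative, not the kernel's values.

/* Illustrative layout math for the build_skb() path on PAGE_SIZE >= 8192. */
#include <stdio.h>

#define SKB_PAD            64u    /* stand-in for IAVF_SKB_PAD headroom   */
#define SHARED_INFO_SIZE  320u    /* stand-in for sizeof(skb_shared_info) */
#define SKB_DATA_ALIGN(x) (((x) + 63u) & ~63u)   /* cacheline align       */

int main(void)
{
	unsigned int size = 1514;   /* bytes written by the NIC */

	/* truesize covers aligned shared info plus aligned pad + data */
	unsigned int truesize = SKB_DATA_ALIGN(SHARED_INFO_SIZE) +
				SKB_DATA_ALIGN(SKB_PAD + size);

	printf("headroom=%u data=%u truesize=%u\n", SKB_PAD, size, truesize);
	return 0;
}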
/** /**
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from * @rx_buffer: rx buffer to pull data from
* *
* This function will clean up the contents of the rx_buffer. It will * This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources. * either recycle the buffer or unmap it and free the associated resources.
*/ */
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer) struct iavf_rx_buffer *rx_buffer)
{ {
if (i40e_can_reuse_rx_page(rx_buffer)) { if (iavf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */ /* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer); iavf_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++; rx_ring->rx_stats.page_reuse_count++;
} else { } else {
/* we are not reusing the buffer so unmap it */ /* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
i40e_rx_pg_size(rx_ring), iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page, __page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias); rx_buffer->pagecnt_bias);
} }
...@@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_is_non_eop - process handling of non-EOP buffers * iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed * @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer * @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress * @skb: Current socket buffer containing buffer in progress
...@@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
* sk_buff in the next buffer to be chained and return true indicating * sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer. * that this is in fact a non-EOP buffer.
**/ **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring, static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
union i40e_rx_desc *rx_desc, union iavf_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb)
{ {
u32 ntc = rx_ring->next_to_clean + 1; u32 ntc = rx_ring->next_to_clean + 1;
...@@ -1440,8 +1440,8 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, ...@@ -1440,8 +1440,8 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc)); prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */ /* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT) #define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
return false; return false;
rx_ring->rx_stats.non_eop_descs++; rx_ring->rx_stats.non_eop_descs++;
...@@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, ...@@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
} }
/** /**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process * @budget: Total limit on number of packets to process
* *
...@@ -1461,23 +1461,23 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, ...@@ -1461,23 +1461,23 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
* *
* Returns amount of work completed * Returns amount of work completed
**/ **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_packets = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb; struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false; bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) { while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer; struct iavf_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc; union iavf_rx_desc *rx_desc;
unsigned int size; unsigned int size;
u16 vlan_tag; u16 vlan_tag;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) { if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
failure = failure || failure = failure ||
iavf_alloc_rx_buffers(rx_ring, cleaned_count); iavf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0; cleaned_count = 0;
...@@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/ */
dma_rmb(); dma_rmb();
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT; IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size) if (!size)
break; break;
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = i40e_get_rx_buffer(rx_ring, size); rx_buffer = iavf_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */ /* retrieve a buffer from the ring */
if (skb) if (skb)
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring)) else if (ring_uses_build_skb(rx_ring))
skb = i40e_build_skb(rx_ring, rx_buffer, size); skb = iavf_build_skb(rx_ring, rx_buffer, size);
else else
skb = i40e_construct_skb(rx_ring, rx_buffer, size); skb = iavf_construct_skb(rx_ring, rx_buffer, size);
/* exit if we failed to retrieve a buffer */ /* exit if we failed to retrieve a buffer */
if (!skb) { if (!skb) {
...@@ -1521,10 +1521,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1521,10 +1521,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break; break;
} }
i40e_put_rx_buffer(rx_ring, rx_buffer); iavf_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++; cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb)) if (iavf_is_non_eop(rx_ring, rx_desc, skb))
continue; continue;
/* ERR_MASK will only have valid bits if EOP set, and /* ERR_MASK will only have valid bits if EOP set, and
...@@ -1532,13 +1532,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1532,13 +1532,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
* IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
* the error field * the error field
*/ */
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
skb = NULL; skb = NULL;
continue; continue;
} }
if (i40e_cleanup_headers(rx_ring, skb)) { if (iavf_cleanup_headers(rx_ring, skb)) {
skb = NULL; skb = NULL;
continue; continue;
} }
...@@ -1547,8 +1547,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1547,8 +1547,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len; total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT; IAVF_RXD_QW1_PTYPE_SHIFT;
/* populate checksum, VLAN, and protocol */ /* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
...@@ -1558,7 +1558,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1558,7 +1558,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag); iavf_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL; skb = NULL;
/* update budget accounting */ /* update budget accounting */
...@@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_packets; return failure ? budget : (int)total_rx_packets;
} }
static inline u32 i40e_buildreg_itr(const int type, u16 itr) static inline u32 iavf_buildreg_itr(const int type, u16 itr)
{ {
u32 val; u32 val;
...@@ -1597,7 +1597,7 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) ...@@ -1597,7 +1597,7 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* only need to shift by the interval shift - 1 instead of the * only need to shift by the interval shift - 1 instead of the
* full value. * full value.
*/ */
itr &= I40E_ITR_MASK; itr &= IAVF_ITR_MASK;
val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
(type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
...@@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) ...@@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
#define ITR_COUNTDOWN_START 3 #define ITR_COUNTDOWN_START 3
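For reference, a sketch of the register encoding iavf_buildreg_itr() performs: the ITR interval has 2 usec granularity, so the usec value is shifted by one less than the interval field's shift, then combined with the enable bit and the ITR index. All field positions below are placeholders, not the IAVF_VFINT_DYN_CTLN1_* definitions.

/* Sketch of packing an interrupt-throttle register value.  Bit positions
 * are made up for illustration; only the "shift - 1" trick mirrors the
 * driver logic above.
 */
#include <stdio.h>
#include <stdint.h>

#define INTENA_BIT       (1u << 0)   /* placeholder enable bit      */
#define ITR_INDX_SHIFT   3           /* placeholder ITR index field */
#define INTERVAL_SHIFT   5           /* placeholder interval field  */
#define ITR_MASK         0x1FFEu     /* placeholder even-usec mask  */

static uint32_t buildreg_itr(int type, uint16_t itr_usecs)
{
	itr_usecs &= ITR_MASK;          /* drop the latency flag bit  */
	return INTENA_BIT |
	       ((uint32_t)type << ITR_INDX_SHIFT) |
	       /* interval is in 2 usec units, so shift by one less   */
	       ((uint32_t)itr_usecs << (INTERVAL_SHIFT - 1));
}

int main(void)
{
	printf("reg=0x%08x\n", buildreg_itr(0 /* RX ITR */, 50));
	return 0;
}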
/** /**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @q_vector: q_vector for which itr is being updated and interrupt enabled * @q_vector: q_vector for which itr is being updated and interrupt enabled
* *
**/ **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
struct i40e_q_vector *q_vector) struct iavf_q_vector *q_vector)
{ {
struct iavf_hw *hw = &vsi->back->hw; struct iavf_hw *hw = &vsi->back->hw;
u32 intval; u32 intval;
/* These will do nothing if dynamic updates are not enabled */ /* These will do nothing if dynamic updates are not enabled */
i40e_update_itr(q_vector, &q_vector->tx); iavf_update_itr(q_vector, &q_vector->tx);
i40e_update_itr(q_vector, &q_vector->rx); iavf_update_itr(q_vector, &q_vector->rx);
/* This block of logic allows us to get away with only updating /* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a * one ITR value with each interrupt. The idea is to perform a
...@@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/ */
if (q_vector->rx.target_itr < q_vector->rx.current_itr) { if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */ /* Rx ITR needs to be reduced, this is highest priority */
intval = i40e_buildreg_itr(I40E_RX_ITR, intval = iavf_buildreg_itr(IAVF_RX_ITR,
q_vector->rx.target_itr); q_vector->rx.target_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->itr_countdown = ITR_COUNTDOWN_START;
...@@ -1654,24 +1654,24 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1654,24 +1654,24 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority /* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority * Tx ITR needs to be increased more than Rx, fourth priority
*/ */
intval = i40e_buildreg_itr(I40E_TX_ITR, intval = iavf_buildreg_itr(IAVF_TX_ITR,
q_vector->tx.target_itr); q_vector->tx.target_itr);
q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */ /* Rx ITR needs to be increased, third priority */
intval = i40e_buildreg_itr(I40E_RX_ITR, intval = iavf_buildreg_itr(IAVF_RX_ITR,
q_vector->rx.target_itr); q_vector->rx.target_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else { } else {
/* No ITR update, lowest priority */ /* No ITR update, lowest priority */
intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
if (q_vector->itr_countdown) if (q_vector->itr_countdown)
q_vector->itr_countdown--; q_vector->itr_countdown--;
} }
if (!test_bit(__I40E_VSI_DOWN, vsi->state)) if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
wr32(hw, INTREG(q_vector->reg_idx), intval); wr32(hw, INTREG(q_vector->reg_idx), intval);
} }
...@@ -1686,16 +1686,16 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1686,16 +1686,16 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
**/ **/
int iavf_napi_poll(struct napi_struct *napi, int budget) int iavf_napi_poll(struct napi_struct *napi, int budget)
{ {
struct i40e_q_vector *q_vector = struct iavf_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi); container_of(napi, struct iavf_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi; struct iavf_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring; struct iavf_ring *ring;
bool clean_complete = true; bool clean_complete = true;
bool arm_wb = false; bool arm_wb = false;
int budget_per_ring; int budget_per_ring;
int work_done = 0; int work_done = 0;
if (test_bit(__I40E_VSI_DOWN, vsi->state)) { if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
napi_complete(napi); napi_complete(napi);
return 0; return 0;
} }
...@@ -1703,8 +1703,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) ...@@ -1703,8 +1703,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger /* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors. * budget and be more aggressive about cleaning up the Tx descriptors.
*/ */
i40e_for_each_ring(ring, q_vector->tx) { iavf_for_each_ring(ring, q_vector->tx) {
if (!i40e_clean_tx_irq(vsi, ring, budget)) { if (!iavf_clean_tx_irq(vsi, ring, budget)) {
clean_complete = false; clean_complete = false;
continue; continue;
} }
...@@ -1721,8 +1721,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) ...@@ -1721,8 +1721,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
*/ */
budget_per_ring = max(budget/q_vector->num_ringpairs, 1); budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) { iavf_for_each_ring(ring, q_vector->rx) {
int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned; work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */ /* if we clean as many as budgeted, we must not be done */
...@@ -1754,18 +1754,18 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) ...@@ -1754,18 +1754,18 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
tx_only: tx_only:
if (arm_wb) { if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++; q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector); iavf_enable_wb_on_itr(vsi, q_vector);
} }
return budget; return budget;
} }
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false; q_vector->arm_wb_state = false;
/* Work is done so exit the polling mode and re-enable the interrupt */ /* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done); napi_complete_done(napi, work_done);
i40e_update_enable_itr(vsi, q_vector); iavf_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1); return min(work_done, budget - 1);
} }
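A brief note on the Rx budget split used in the poll loop above: each ring in the vector gets an equal slice of the NAPI budget, clamped to at least one packet so a vector with many rings still makes progress. A trivial sketch:

/* Sketch of the per-ring budget split, max(budget / num_ringpairs, 1). */
#include <stdio.h>

static int budget_per_ring(int budget, int num_ringpairs)
{
	int per_ring = budget / num_ringpairs;

	return per_ring > 1 ? per_ring : 1;
}

int main(void)
{
	printf("%d\n", budget_per_ring(64, 4));    /* -> 16 */
	printf("%d\n", budget_per_ring(64, 128));  /* -> 1  */
	return 0;
}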
...@@ -1783,7 +1783,7 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) ...@@ -1783,7 +1783,7 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
 * otherwise returns 0 to indicate the flags have been set properly. * otherwise returns 0 to indicate the flags have been set properly.
**/ **/
static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, struct iavf_ring *tx_ring,
u32 *flags) u32 *flags)
{ {
__be16 protocol = skb->protocol; __be16 protocol = skb->protocol;
...@@ -1804,8 +1804,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -1804,8 +1804,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
/* if we have a HW VLAN tag being added, default to the HW one */ /* if we have a HW VLAN tag being added, default to the HW one */
if (skb_vlan_tag_present(skb)) { if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN; tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */ /* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) { } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr; struct vlan_hdr *vhdr, _vhdr;
...@@ -1815,8 +1815,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -1815,8 +1815,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
return -EINVAL; return -EINVAL;
protocol = vhdr->h_vlan_encapsulated_proto; protocol = vhdr->h_vlan_encapsulated_proto;
tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_SW_VLAN; tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
} }
out: out:
...@@ -1825,14 +1825,14 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, ...@@ -1825,14 +1825,14 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
} }
/** /**
* i40e_tso - set up the tso context descriptor * iavf_tso - set up the tso context descriptor
* @first: pointer to first Tx buffer for xmit * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header * @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1 * @cd_type_cmd_tso_mss: Quad Word 1
* *
* Returns 0 if no TSO can happen, 1 if tso is going, or error * Returns 0 if no TSO can happen, 1 if tso is going, or error
**/ **/
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
u64 *cd_type_cmd_tso_mss) u64 *cd_type_cmd_tso_mss)
{ {
struct sk_buff *skb = first->skb; struct sk_buff *skb = first->skb;
...@@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, ...@@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
first->bytecount += (first->gso_segs - 1) * *hdr_len; first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */ /* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO; cd_cmd = IAVF_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len; cd_tso_len = skb->len - *hdr_len;
cd_mss = gso_size; cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
return 1; return 1;
} }
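To make the quad-word packing above easier to follow, here is a worked set of numbers; the frame sizes are purely illustrative, and the shift constants are the IAVF_TXD_CTX_QW1_* values used in the three OR terms of iavf_tso():

/* Worked example (illustrative values only): a TSO frame with a 66-byte
 * header, skb->len of 64066 and an MSS of 1448 ends up with
 *
 *   cd_cmd     = IAVF_TX_CTX_DESC_TSO
 *   cd_tso_len = 64066 - 66 = 64000   (payload carried by this context)
 *   cd_mss     = 1448                 (size of each segment on the wire)
 *
 * each shifted into its own field of *cd_type_cmd_tso_mss exactly as the
 * three OR terms above do.
 */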
/** /**
* i40e_tx_enable_csum - Enable Tx checksum offloads * iavf_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer * @skb: send buffer
* @tx_flags: pointer to Tx flags currently set * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set * @td_cmd: Tx descriptor command bits to set
...@@ -1941,9 +1941,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, ...@@ -1941,9 +1941,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
* @tx_ring: Tx descriptor ring * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits * @cd_tunneling: ptr to context desc bits
**/ **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset, u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring, struct iavf_ring *tx_ring,
u32 *cd_tunneling) u32 *cd_tunneling)
{ {
union { union {
...@@ -1973,14 +1973,14 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -1973,14 +1973,14 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
if (skb->encapsulation) { if (skb->encapsulation) {
u32 tunnel = 0; u32 tunnel = 0;
/* define outer network header type */ /* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
I40E_TX_CTX_EXT_IP_IPV4 : IAVF_TX_CTX_EXT_IP_IPV4 :
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol; l4_proto = ip.v4->protocol;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) { } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
tunnel |= I40E_TX_CTX_EXT_IP_IPV6; tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6); exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr; l4_proto = ip.v6->nexthdr;
...@@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
/* define outer transport */ /* define outer transport */
switch (l4_proto) { switch (l4_proto) {
case IPPROTO_UDP: case IPPROTO_UDP:
tunnel |= I40E_TXD_CTX_UDP_TUNNELING; tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
break; break;
case IPPROTO_GRE: case IPPROTO_GRE:
tunnel |= I40E_TXD_CTX_GRE_TUNNELING; tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
break; break;
case IPPROTO_IPIP: case IPPROTO_IPIP:
case IPPROTO_IPV6: case IPPROTO_IPV6:
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
l4.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_network_header(skb);
break; break;
default: default:
if (*tx_flags & I40E_TX_FLAGS_TSO) if (*tx_flags & IAVF_TX_FLAGS_TSO)
return -1; return -1;
skb_checksum_help(skb); skb_checksum_help(skb);
...@@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
/* compute outer L3 header size */ /* compute outer L3 header size */
tunnel |= ((l4.hdr - ip.hdr) / 4) << tunnel |= ((l4.hdr - ip.hdr) / 4) <<
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
/* switch IP header pointer from outer to inner header */ /* switch IP header pointer from outer to inner header */
ip.hdr = skb_inner_network_header(skb); ip.hdr = skb_inner_network_header(skb);
/* compute tunnel header size */ /* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) << tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT; IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */ /* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) && if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
/* record tunnel offload values */ /* record tunnel offload values */
*cd_tunneling |= tunnel; *cd_tunneling |= tunnel;
...@@ -2037,23 +2037,23 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -2037,23 +2037,23 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_proto = 0; l4_proto = 0;
/* reset type as we transition from outer to inner headers */ /* reset type as we transition from outer to inner headers */
*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
if (ip.v4->version == 4) if (ip.v4->version == 4)
*tx_flags |= I40E_TX_FLAGS_IPV4; *tx_flags |= IAVF_TX_FLAGS_IPV4;
if (ip.v6->version == 6) if (ip.v6->version == 6)
*tx_flags |= I40E_TX_FLAGS_IPV6; *tx_flags |= IAVF_TX_FLAGS_IPV6;
} }
/* Enable IP checksum offloads */ /* Enable IP checksum offloads */
if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol; l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we /* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO. * need the hardware to recompute it is in the case of TSO.
*/ */
cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
IAVF_TX_DESC_CMD_IIPT_IPV4; IAVF_TX_DESC_CMD_IIPT_IPV4;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) { } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6); exthdr = ip.hdr + sizeof(*ip.v6);
...@@ -2086,7 +2086,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -2086,7 +2086,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break; break;
default: default:
if (*tx_flags & I40E_TX_FLAGS_TSO) if (*tx_flags & IAVF_TX_FLAGS_TSO)
return -1; return -1;
skb_checksum_help(skb); skb_checksum_help(skb);
return 0; return 0;
...@@ -2099,17 +2099,17 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ...@@ -2099,17 +2099,17 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
} }
/** /**
* i40e_create_tx_ctx Build the Tx context descriptor * iavf_create_tx_ctx Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on * @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1 * @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31 * @cd_tunneling: Quad Word 0 - bits 0-31
* @cd_l2tag2: Quad Word 0 - bits 32-63 * @cd_l2tag2: Quad Word 0 - bits 32-63
**/ **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
const u64 cd_type_cmd_tso_mss, const u64 cd_type_cmd_tso_mss,
const u32 cd_tunneling, const u32 cd_l2tag2) const u32 cd_tunneling, const u32 cd_l2tag2)
{ {
struct i40e_tx_context_desc *context_desc; struct iavf_tx_context_desc *context_desc;
int i = tx_ring->next_to_use; int i = tx_ring->next_to_use;
if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) && if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
...@@ -2149,13 +2149,13 @@ bool __iavf_chk_linearize(struct sk_buff *skb) ...@@ -2149,13 +2149,13 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
/* no need to check if number of frags is less than 7 */ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags; nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
return false; return false;
/* We need to walk through the list and validate that each group /* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. * of 6 fragments totals at least gso_size.
*/ */
nr_frags -= I40E_MAX_BUFFER_TXD - 2; nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We /* Initialize size to the negative value of gso_size minus 1. We
...@@ -2187,17 +2187,17 @@ bool __iavf_chk_linearize(struct sk_buff *skb) ...@@ -2187,17 +2187,17 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* figure out what the remainder would be in the last * figure out what the remainder would be in the last
* descriptor associated with the fragment. * descriptor associated with the fragment.
*/ */
if (stale_size > I40E_MAX_DATA_PER_TXD) { if (stale_size > IAVF_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) & int align_pad = -(stale->page_offset) &
(I40E_MAX_READ_REQ_SIZE - 1); (IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad; sum -= align_pad;
stale_size -= align_pad; stale_size -= align_pad;
do { do {
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > I40E_MAX_DATA_PER_TXD); } while (stale_size > IAVF_MAX_DATA_PER_TXD);
} }
/* if sum is negative we failed to make sufficient progress */ /* if sum is negative we failed to make sufficient progress */
...@@ -2220,14 +2220,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb) ...@@ -2220,14 +2220,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* *
* Returns -EBUSY if a stop is needed, else 0 * Returns -EBUSY if a stop is needed, else 0
**/ **/
int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{ {
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */ /* Memory barrier before checking head and tail */
smp_mb(); smp_mb();
/* Check again in a case another CPU has just made room available. */ /* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size)) if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
return -EBUSY; return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */ /* A reprieve! - use start_queue because it doesn't call schedule */
...@@ -2246,23 +2246,23 @@ int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) ...@@ -2246,23 +2246,23 @@ int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* @td_cmd: the command field in the descriptor * @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc * @td_offset: offset for checksum or crc
**/ **/
static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags, struct iavf_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset) const u8 hdr_len, u32 td_cmd, u32 td_offset)
{ {
unsigned int data_len = skb->data_len; unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb); unsigned int size = skb_headlen(skb);
struct skb_frag_struct *frag; struct skb_frag_struct *frag;
struct i40e_tx_buffer *tx_bi; struct iavf_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc; struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT; IAVF_TX_FLAGS_VLAN_SHIFT;
} }
first->tx_flags = tx_flags; first->tx_flags = tx_flags;
...@@ -2273,7 +2273,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2273,7 +2273,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first; tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
if (dma_mapping_error(tx_ring->dev, dma)) if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; goto dma_error;
...@@ -2283,10 +2283,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2283,10 +2283,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_addr_set(tx_bi, dma, dma); dma_unmap_addr_set(tx_bi, dma, dma);
/* align size to end of page */ /* align size to end of page */
max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, build_ctob(td_cmd, td_offset,
max_data, td_tag); max_data, td_tag);
...@@ -2302,7 +2302,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2302,7 +2302,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma += max_data; dma += max_data;
size -= max_data; size -= max_data;
max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->buffer_addr = cpu_to_le64(dma);
} }
...@@ -2337,10 +2337,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2337,10 +2337,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with RS and EOP bits */ /* write last descriptor with RS and EOP bits */
td_cmd |= I40E_TXD_CMD; td_cmd |= IAVF_TXD_CMD;
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag); build_ctob(td_cmd, td_offset, size, td_tag);
...@@ -2373,7 +2373,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2373,7 +2373,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* clear dma mappings for failed tx_bi map */ /* clear dma mappings for failed tx_bi map */
for (;;) { for (;;) {
tx_bi = &tx_ring->tx_bi[i]; tx_bi = &tx_ring->tx_bi[i];
i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first) if (tx_bi == first)
break; break;
if (i == 0) if (i == 0)
...@@ -2385,18 +2385,18 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2385,18 +2385,18 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
} }
/** /**
* i40e_xmit_frame_ring - Sends buffer on Tx ring * iavf_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer * @skb: send buffer
* @tx_ring: ring to send buffer on * @tx_ring: ring to send buffer on
* *
* Returns NETDEV_TX_OK if sent, else an error code * Returns NETDEV_TX_OK if sent, else an error code
**/ **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
struct i40e_ring *tx_ring) struct iavf_ring *tx_ring)
{ {
u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
u32 cd_tunneling = 0, cd_l2tag2 = 0; u32 cd_tunneling = 0, cd_l2tag2 = 0;
struct i40e_tx_buffer *first; struct iavf_tx_buffer *first;
u32 td_offset = 0; u32 td_offset = 0;
u32 tx_flags = 0; u32 tx_flags = 0;
__be16 protocol; __be16 protocol;
...@@ -2409,23 +2409,23 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2409,23 +2409,23 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
iavf_trace(xmit_frame_ring, skb, tx_ring); iavf_trace(xmit_frame_ring, skb, tx_ring);
count = i40e_xmit_descriptor_count(skb); count = iavf_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) { if (iavf_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) { if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
count = i40e_txd_use_count(skb->len); count = iavf_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++; tx_ring->tx_stats.tx_linearize++;
} }
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is, * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor, * + 1 desc for context descriptor,
* otherwise try next time * otherwise try next time
*/ */
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++; tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
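As a quick sanity check of the descriptor budget described in the comment above, here is a minimal sketch with made-up sizes (a 256-byte linear head plus three 4096-byte fragments); example_descs_needed() is a hypothetical helper, not part of the driver:

static unsigned int example_descs_needed(void)
{
	unsigned int count = 0;

	count += ((256 * 85) >> 20) + 1;		/* head: 1 descriptor   */
	count += 3 * (((4096 * 85) >> 20) + 1);		/* frags: 3 descriptors */

	/* iavf_maybe_stop_tx() is then asked for count + 4 + 1 = 9 free
	 * slots: 4 keep clear of the cache line where the ring head sits
	 * and 1 covers the optional context descriptor.
	 */
	return count + 4 + 1;
}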
...@@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* setup IPv4/IPv6 offloads */ /* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP)) if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4; tx_flags |= IAVF_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6)) else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6; tx_flags |= IAVF_TX_FLAGS_IPV6;
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
else if (tso) else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO; tx_flags |= IAVF_TX_FLAGS_TSO;
/* Always offload the checksum, since it's in the data descriptor */ /* Always offload the checksum, since it's in the data descriptor */
tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling); tx_ring, &cd_tunneling);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
...@@ -2467,7 +2467,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2467,7 +2467,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* always enable CRC insertion offload */ /* always enable CRC insertion offload */
td_cmd |= IAVF_TX_DESC_CMD_ICRC; td_cmd |= IAVF_TX_DESC_CMD_ICRC;
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2); cd_tunneling, cd_l2tag2);
iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
...@@ -2492,17 +2492,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2492,17 +2492,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{ {
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works /* hardware can't handle really short frames, hardware padding works
* beyond this point * beyond this point
*/ */
if (unlikely(skb->len < I40E_MIN_TX_LEN)) { if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
return NETDEV_TX_OK; return NETDEV_TX_OK;
skb->len = I40E_MIN_TX_LEN; skb->len = IAVF_MIN_TX_LEN;
skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
} }
return i40e_xmit_frame_ring(skb, tx_ring); return iavf_xmit_frame_ring(skb, tx_ring);
} }
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TXRX_H_ #ifndef _IAVF_TXRX_H_
#define _I40E_TXRX_H_ #define _IAVF_TXRX_H_
/* Interrupt Throttling and Rate Limiting Goodies */ /* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK 256 #define IAVF_DEFAULT_IRQ_WORK 256
/* The datasheet for the X710 and XL710 indicates that the maximum value for	/* The datasheet for the X710 and XL710 indicates that the maximum value for
* the ITR is 8160usec which is then called out as 0xFF0 with a 2usec * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
...@@ -13,80 +13,80 @@ ...@@ -13,80 +13,80 @@
* the register value which is divided by 2 lets use the actual values and * the register value which is divided by 2 lets use the actual values and
* avoid an excessive amount of translation. * avoid an excessive amount of translation.
*/ */
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ #define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 10 /* all values below must be even */ #define IAVF_ITR_100K 10 /* all values below must be even */
#define I40E_ITR_50K 20 #define IAVF_ITR_50K 20
#define I40E_ITR_20K 50 #define IAVF_ITR_20K 50
#define I40E_ITR_18K 60 #define IAVF_ITR_18K 60
#define I40E_ITR_8K 122 #define IAVF_ITR_8K 122
#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ #define IAVF_MAX_ITR 8160 /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) #define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) #define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))
#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) #define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) #define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
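A short sketch of how the ITR macros above compose; example_itr_decode() is illustrative only and simply decodes the default Rx setting defined a few lines up:

static void example_itr_decode(void)
{
	u16 setting = IAVF_ITR_RX_DEF;			/* 50 | 0x8000 = 0x8032        */
	bool dynamic = ITR_IS_DYNAMIC(setting);		/* true: adaptive ITR enabled  */
	u16 usecs = ITR_TO_REG(setting);		/* 0x32 = 50 usec, ~20K ints/s */
	u16 reg = ITR_REG_ALIGN(usecs);			/* rounds up to a 2 usec step  */
}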
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if /* 0x40 is the enable bit for interrupt rate limiting, and must be set if
* the value of the rate limit is non-zero * the value of the rate limit is non-zero
*/ */
#define INTRL_ENA BIT(6) #define INTRL_ENA BIT(6)
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define IAVF_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) #define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K 125 /* 8000 ints/sec */ #define IAVF_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */ #define IAVF_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define IAVF_INTRL_83K 12 /* 83333 ints/sec */
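The rate-limit macros follow the same pattern in 4 usec units; a worked round trip, using the values from the defines above:

/* Worked example: IAVF_INTRL_8K is 125 usec (8000 ints/sec), so
 *
 *   INTRL_USEC_TO_REG(125) = (125 >> 2) | INTRL_ENA = 31 | 0x40
 *   INTRL_REG_TO_USEC(31 | 0x40) = 31 << 2 = 124 usec
 *
 * i.e. the register keeps 4 usec granularity plus the enable bit, and the
 * low two bits of the requested value are dropped on the way in.
 */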
#define I40E_QUEUE_END_OF_LIST 0x7FF #define IAVF_QUEUE_END_OF_LIST 0x7FF
/* this enum matches hardware bits and is meant to be used by DYN_CTLN /* this enum matches hardware bits and is meant to be used by DYN_CTLN
* registers and QINT registers or more generally anywhere in the manual * registers and QINT registers or more generally anywhere in the manual
* mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
* register but instead is a special value meaning "don't update" ITR0/1/2. * register but instead is a special value meaning "don't update" ITR0/1/2.
*/ */
enum i40e_dyn_idx_t { enum iavf_dyn_idx_t {
I40E_IDX_ITR0 = 0, IAVF_IDX_ITR0 = 0,
I40E_IDX_ITR1 = 1, IAVF_IDX_ITR1 = 1,
I40E_IDX_ITR2 = 2, IAVF_IDX_ITR2 = 2,
I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ IAVF_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
}; };
/* these are indexes into ITRN registers */ /* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0 #define IAVF_RX_ITR IAVF_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1 #define IAVF_TX_ITR IAVF_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2 #define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */ /* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \ #define IAVF_DEFAULT_RSS_HENA ( \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ #define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* Supported Rx Buffer Sizes (a multiple of 128) */ /* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256 #define IAVF_RXBUFFER_256 256
#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ #define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048 #define IAVF_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ #define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ #define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
...@@ -95,11 +95,11 @@ enum i40e_dyn_idx_t { ...@@ -95,11 +95,11 @@ enum i40e_dyn_idx_t {
* i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
* i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/ */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 #define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc #define iavf_rx_desc iavf_32byte_rx_desc
#define I40E_RX_DMA_ATTR \ #define IAVF_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
/* Attempt to maximize the headroom available for incoming frames. We /* Attempt to maximize the headroom available for incoming frames. We
...@@ -113,10 +113,10 @@ enum i40e_dyn_idx_t { ...@@ -113,10 +113,10 @@ enum i40e_dyn_idx_t {
* receive path. * receive path.
*/ */
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \ #define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) ((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
static inline int i40e_compute_pad(int rx_buf_len) static inline int iavf_compute_pad(int rx_buf_len)
{ {
int page_size, pad_size; int page_size, pad_size;
...@@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len) ...@@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len)
return pad_size; return pad_size;
} }
static inline int i40e_skb_pad(void) static inline int iavf_skb_pad(void)
{ {
int rx_buf_len; int rx_buf_len;
...@@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void) ...@@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void)
* tailroom due to NET_IP_ALIGN possibly shifting us out of * tailroom due to NET_IP_ALIGN possibly shifting us out of
* cache-line alignment. * cache-line alignment.
*/ */
if (I40E_2K_TOO_SMALL_WITH_PADDING) if (IAVF_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
else else
rx_buf_len = I40E_RXBUFFER_1536; rx_buf_len = IAVF_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */ /* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN; rx_buf_len -= NET_IP_ALIGN;
return i40e_compute_pad(rx_buf_len); return iavf_compute_pad(rx_buf_len);
} }
#define I40E_SKB_PAD i40e_skb_pad() #define IAVF_SKB_PAD iavf_skb_pad()
#else #else
#define I40E_2K_TOO_SMALL_WITH_PADDING false #define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif #endif
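For a sense of what the 2K check above weighs, take some typical, but not guaranteed, x86_64 values: NET_SKB_PAD is 64 bytes, and SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048) is roughly 2048 - 320 = 1728 bytes once the cache-aligned skb_shared_info is subtracted. Then NET_SKB_PAD + IAVF_RXBUFFER_1536 = 1600 still fits, IAVF_2K_TOO_SMALL_WITH_PADDING evaluates false, and iavf_skb_pad() derives its headroom from the 1536-byte buffer; on a configuration where the sum no longer fits, the 3072-byte buffer is used instead.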
/** /**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields * iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format) * @rx_desc: pointer to receive descriptor (in le64 format)
* @stat_err_bits: value to mask * @stat_err_bits: value to mask
* *
...@@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void) ...@@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void)
* The status_error_len doesn't need to be shifted because it begins * The status_error_len doesn't need to be shifted because it begins
* at offset zero. * at offset zero.
*/ */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
const u64 stat_err_bits) const u64 stat_err_bits)
{ {
return !!(rx_desc->wb.qword1.status_error_len & return !!(rx_desc->wb.qword1.status_error_len &
...@@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, ...@@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
} }
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ #define IAVF_RX_INCREMENT(r, i) \
#define I40E_RX_INCREMENT(r, i) \
do { \ do { \
(i)++; \ (i)++; \
if ((i) == (r)->count) \ if ((i) == (r)->count) \
...@@ -181,7 +180,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, ...@@ -181,7 +180,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
r->next_to_clean = i; \ r->next_to_clean = i; \
} while (0) } while (0)
#define I40E_RX_NEXT_DESC(r, i, n) \ #define IAVF_RX_NEXT_DESC(r, i, n) \
do { \ do { \
(i)++; \ (i)++; \
if ((i) == (r)->count) \ if ((i) == (r)->count) \
...@@ -189,26 +188,26 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, ...@@ -189,26 +188,26 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
(n) = IAVF_RX_DESC((r), (i)); \ (n) = IAVF_RX_DESC((r), (i)); \
} while (0) } while (0)
#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ #define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n) \
do { \ do { \
I40E_RX_NEXT_DESC((r), (i), (n)); \ IAVF_RX_NEXT_DESC((r), (i), (n)); \
prefetch((n)); \ prefetch((n)); \
} while (0) } while (0)
#define I40E_MAX_BUFFER_TXD 8 #define IAVF_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17 #define IAVF_MIN_TX_LEN 17
/* The size limit for a transmit buffer in a descriptor is (16K - 1). /* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to * In order to align with the read requests we will align the value to
* the nearest 4K which represents our maximum read request size. * the nearest 4K which represents our maximum read request size.
*/ */
#define I40E_MAX_READ_REQ_SIZE 4096 #define IAVF_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) #define IAVF_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \ #define IAVF_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
/** /**
* i40e_txd_use_count - estimate the number of descriptors needed for Tx * iavf_txd_use_count - estimate the number of descriptors needed for Tx
* @size: transmit request size in bytes * @size: transmit request size in bytes
* *
* Due to hardware alignment restrictions (4K alignment), we need to * Due to hardware alignment restrictions (4K alignment), we need to
...@@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, ...@@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
* operations into: * operations into:
* return ((size * 85) >> 20) + 1; * return ((size * 85) >> 20) + 1;
*/ */
static inline unsigned int i40e_txd_use_count(unsigned int size) static inline unsigned int iavf_txd_use_count(unsigned int size)
{ {
return ((size * 85) >> 20) + 1; return ((size * 85) >> 20) + 1;
} }
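To see the multiply-and-shift at work, two illustrative sizes run through iavf_txd_use_count(), checked against the 12288-byte limit (16K - 1 rounded down to a 4K multiple) set by IAVF_MAX_DATA_PER_TXD_ALIGNED; example_txd_use_count() is a throwaway sketch:

static void example_txd_use_count(void)
{
	/* 9000-byte jumbo frame fits one 12K-aligned buffer */
	unsigned int a = iavf_txd_use_count(9000);	/* ((9000 * 85) >> 20) + 1 = 1 */

	/* 32 KB of data needs roughly ceil(32768 / 12288) buffers */
	unsigned int b = iavf_txd_use_count(32768);	/* ((32768 * 85) >> 20) + 1 = 3 */
}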
/* Tx Descriptors needed, worst case */ /* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4 #define IAVF_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1) #define IAVF_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2) #define IAVF_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3) #define IAVF_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4) #define IAVF_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5) #define IAVF_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC BIT(6) #define IAVF_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO BIT(7) #define IAVF_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_FD_SB BIT(9) #define IAVF_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16 #define IAVF_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer { struct iavf_tx_buffer {
struct i40e_tx_desc *next_to_watch; struct iavf_tx_desc *next_to_watch;
union { union {
struct sk_buff *skb; struct sk_buff *skb;
void *raw_buf; void *raw_buf;
...@@ -272,7 +271,7 @@ struct i40e_tx_buffer { ...@@ -272,7 +271,7 @@ struct i40e_tx_buffer {
u32 tx_flags; u32 tx_flags;
}; };
struct i40e_rx_buffer { struct iavf_rx_buffer {
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
...@@ -283,12 +282,12 @@ struct i40e_rx_buffer { ...@@ -283,12 +282,12 @@ struct i40e_rx_buffer {
__u16 pagecnt_bias; __u16 pagecnt_bias;
}; };
struct i40e_queue_stats { struct iavf_queue_stats {
u64 packets; u64 packets;
u64 bytes; u64 bytes;
}; };
struct i40e_tx_queue_stats { struct iavf_tx_queue_stats {
u64 restart_queue; u64 restart_queue;
u64 tx_busy; u64 tx_busy;
u64 tx_done_old; u64 tx_done_old;
...@@ -298,7 +297,7 @@ struct i40e_tx_queue_stats { ...@@ -298,7 +297,7 @@ struct i40e_tx_queue_stats {
u64 tx_lost_interrupt; u64 tx_lost_interrupt;
}; };
struct i40e_rx_queue_stats { struct iavf_rx_queue_stats {
u64 non_eop_descs; u64 non_eop_descs;
u64 alloc_page_failed; u64 alloc_page_failed;
u64 alloc_buff_failed; u64 alloc_buff_failed;
...@@ -306,34 +305,34 @@ struct i40e_rx_queue_stats { ...@@ -306,34 +305,34 @@ struct i40e_rx_queue_stats {
u64 realloc_count; u64 realloc_count;
}; };
enum i40e_ring_state_t { enum iavf_ring_state_t {
__I40E_TX_FDIR_INIT_DONE, __IAVF_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE, __IAVF_TX_XPS_INIT_DONE,
__I40E_RING_STATE_NBITS /* must be last */ __IAVF_RING_STATE_NBITS /* must be last */
}; };
/* some useful defines for virtchannel interface, which /* some useful defines for virtchannel interface, which
* is the only remaining user of header split * is the only remaining user of header split
*/ */
#define I40E_RX_DTYPE_NO_SPLIT 0 #define IAVF_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1 #define IAVF_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 #define IAVF_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1 #define IAVF_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2 #define IAVF_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4 #define IAVF_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8 #define IAVF_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */ /* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring { struct iavf_ring {
struct i40e_ring *next; /* pointer to next ring in q_vector */ struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */ void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */ struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */ struct net_device *netdev; /* netdev ring maps to */
union { union {
struct i40e_tx_buffer *tx_bi; struct iavf_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi; struct iavf_rx_buffer *rx_bi;
}; };
DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */ u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */ u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail; u8 __iomem *tail;
...@@ -361,22 +360,22 @@ struct i40e_ring { ...@@ -361,22 +360,22 @@ struct i40e_ring {
u8 packet_stride; u8 packet_stride;
u16 flags; u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */ /* stats structs */
struct i40e_queue_stats stats; struct iavf_queue_stats stats;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
union { union {
struct i40e_tx_queue_stats tx_stats; struct iavf_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats; struct iavf_rx_queue_stats rx_stats;
}; };
unsigned int size; /* length of descriptor ring in bytes */ unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */ dma_addr_t dma; /* physical address of ring */
struct i40e_vsi *vsi; /* Backreference to associated VSI */ struct iavf_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */ struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc; u16 next_to_alloc;
...@@ -390,30 +389,30 @@ struct i40e_ring { ...@@ -390,30 +389,30 @@ struct i40e_ring {
*/ */
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring) static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{ {
return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
} }
static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{ {
ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
} }
static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{ {
ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
} }
#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 #define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 #define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e #define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 #define IAVF_ITR_ADAPTIVE_LATENCY 0x8000
#define I40E_ITR_ADAPTIVE_BULK 0x0000 #define IAVF_ITR_ADAPTIVE_BULK 0x0000
#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) #define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))
struct i40e_ring_container { struct iavf_ring_container {
struct i40e_ring *ring; /* pointer to linked list of ring(s) */ struct iavf_ring *ring; /* pointer to linked list of ring(s) */
unsigned long next_update; /* jiffies value of next update */ unsigned long next_update; /* jiffies value of next update */
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */ unsigned int total_packets; /* total packets processed this int */
...@@ -423,10 +422,10 @@ struct i40e_ring_container { ...@@ -423,10 +422,10 @@ struct i40e_ring_container {
}; };
/* iterator for handling rings in ring container */ /* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \ #define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{ {
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2)) if (ring->rx_buf_len > (PAGE_SIZE / 2))
...@@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) ...@@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
return 0; return 0;
} }
#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) #define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
bool iavf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct i40e_ring *tx_ring); void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
void iavf_clean_rx_ring(struct i40e_ring *rx_ring); void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring); int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring); int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct i40e_ring *tx_ring); void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct i40e_ring *rx_ring); void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget); int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw); u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct i40e_vsi *vsi); void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb); bool __iavf_chk_linearize(struct sk_buff *skb);
/** /**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer * @skb: send buffer
* @tx_ring: ring to send buffer on * @tx_ring: ring to send buffer on
* *
...@@ -461,14 +460,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb); ...@@ -461,14 +460,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
 * there are not enough descriptors available in this ring since we need at least	 * there are not enough descriptors available in this ring since we need at least
* one descriptor. * one descriptor.
**/ **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{ {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb); int count = 0, size = skb_headlen(skb);
for (;;) { for (;;) {
count += i40e_txd_use_count(size); count += iavf_txd_use_count(size);
if (!nr_frags--) if (!nr_frags--)
break; break;
...@@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) ...@@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
} }
/** /**
* i40e_maybe_stop_tx - 1st level check for Tx stop conditions * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked * @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available * @size: the size buffer we want to assure is available
* *
* Returns 0 if stop is not needed * Returns 0 if stop is not needed
**/ **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{ {
if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
return 0; return 0;
return __iavf_maybe_stop_tx(tx_ring, size); return __iavf_maybe_stop_tx(tx_ring, size);
} }
/** /**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet * iavf_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer * @skb: send buffer
* @count: number of buffers used * @count: number of buffers used
* *
...@@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) ...@@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* a packet on the wire and so we need to figure out the cases where we * a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb. * need to linearize the skb.
**/ **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{ {
/* Both TSO and single send will work if count is less than 8 */ /* Both TSO and single send will work if count is less than 8 */
if (likely(count < I40E_MAX_BUFFER_TXD)) if (likely(count < IAVF_MAX_BUFFER_TXD))
return false; return false;
if (skb_is_gso(skb)) if (skb_is_gso(skb))
return __iavf_chk_linearize(skb); return __iavf_chk_linearize(skb);
/* we can support up to 8 data buffers for a single send */ /* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD; return count != IAVF_MAX_BUFFER_TXD;
} }
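A minimal sketch of how the two inline helpers above combine on the transmit path; example_needs_linearize() is hypothetical and mirrors the check made before the driver falls back to __skb_linearize():

static bool example_needs_linearize(struct sk_buff *skb)
{
	int count = iavf_xmit_descriptor_count(skb);

	/* more than 8 buffers is only acceptable when TSO can spread them
	 * across segments; otherwise the skb has to be linearized first.
	 */
	return iavf_chk_linearize(skb, count);
}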
/** /**
* @ring: Tx ring to find the netdev equivalent of * @ring: Tx ring to find the netdev equivalent of
**/ **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{ {
return netdev_get_tx_queue(ring->netdev, ring->queue_index); return netdev_get_tx_queue(ring->netdev, ring->queue_index);
} }
#endif /* _I40E_TXRX_H_ */ #endif /* _IAVF_TXRX_H_ */
...@@ -195,7 +195,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) ...@@ -195,7 +195,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
u16 len; u16 len;
len = sizeof(struct virtchnl_vf_resource) + len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len; event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) { if (!event.msg_buf) {
...@@ -242,7 +242,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) ...@@ -242,7 +242,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi; struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues; int pairs = adapter->num_active_queues;
int i, len, max_frame = I40E_MAX_RXBUFFER; int i, len, max_frame = IAVF_MAX_RXBUFFER;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
...@@ -260,7 +260,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) ...@@ -260,7 +260,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
/* Limit maximum frame size when jumbo frames is not enabled */ /* Limit maximum frame size when jumbo frames is not enabled */
if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
(adapter->netdev->mtu <= ETH_DATA_LEN)) (adapter->netdev->mtu <= ETH_DATA_LEN))
max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs; vqci->num_queue_pairs = pairs;
...@@ -280,7 +280,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) ...@@ -280,7 +280,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = vqpi->rxq.databuffer_size =
ALIGN(adapter->rx_rings[i].rx_buf_len, ALIGN(adapter->rx_rings[i].rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
vqpi++; vqpi++;
} }
...@@ -352,7 +352,7 @@ void iavf_map_queues(struct iavf_adapter *adapter) ...@@ -352,7 +352,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)
struct virtchnl_irq_map_info *vimi; struct virtchnl_irq_map_info *vimi;
struct virtchnl_vector_map *vecmap; struct virtchnl_vector_map *vecmap;
int v_idx, q_vectors, len; int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector; struct iavf_q_vector *q_vector;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
...@@ -381,8 +381,8 @@ void iavf_map_queues(struct iavf_adapter *adapter) ...@@ -381,8 +381,8 @@ void iavf_map_queues(struct iavf_adapter *adapter)
vecmap->vector_id = v_idx + NONQ_VECS; vecmap->vector_id = v_idx + NONQ_VECS;
vecmap->txq_map = q_vector->ring_mask; vecmap->txq_map = q_vector->ring_mask;
vecmap->rxq_map = q_vector->ring_mask; vecmap->rxq_map = q_vector->ring_mask;
vecmap->rxitr_idx = I40E_RX_ITR; vecmap->rxitr_idx = IAVF_RX_ITR;
vecmap->txitr_idx = I40E_TX_ITR; vecmap->txitr_idx = IAVF_TX_ITR;
} }
/* Misc vector last - this is only for AdminQ messages */ /* Misc vector last - this is only for AdminQ messages */
vecmap = &vimi->vecmap[v_idx]; vecmap = &vimi->vecmap[v_idx];
...@@ -1325,8 +1325,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ...@@ -1325,8 +1325,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
} }
switch (v_opcode) { switch (v_opcode) {
case VIRTCHNL_OP_GET_STATS: { case VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats = struct iavf_eth_stats *stats =
(struct i40e_eth_stats *)msg; (struct iavf_eth_stats *)msg;
netdev->stats.rx_packets = stats->rx_unicast + netdev->stats.rx_packets = stats->rx_unicast +
stats->rx_multicast + stats->rx_multicast +
stats->rx_broadcast; stats->rx_broadcast;
...@@ -1343,7 +1343,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ...@@ -1343,7 +1343,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
break; break;
case VIRTCHNL_OP_GET_VF_RESOURCES: { case VIRTCHNL_OP_GET_VF_RESOURCES: {
u16 len = sizeof(struct virtchnl_vf_resource) + u16 len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI * IAVF_MAX_VF_VSI *
sizeof(struct virtchnl_vsi_resource); sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len)); memcpy(adapter->vf_res, msg, min(msglen, len));
iavf_validate_num_queues(adapter); iavf_validate_num_queues(adapter);