Commit 3ba3faeb authored by Paul M Stillwell Jr, committed by Jeff Kirsher

i40e/i40evf: Big endian fixes for handling HMC

Fix HMC context handling for big endian architectures: write each context element through a helper sized to the host struct member, masking in host order and converting with cpu_to_le*() before merging into HMC memory, instead of relying on raw 64-bit read-modify-writes that assume a little endian host.

Change-ID: Id8c46fc341815d47bfe0af8b819f0ab9a1e9e515
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 7a208e83
@@ -746,6 +746,194 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ 0 }
};
/**
* i40e_write_byte - replace HMC context byte
* @hmc_bits: pointer to the HMC memory
* @ce_info: a description of the struct to be read from
* @src: the struct to be read from
**/
static void i40e_write_byte(u8 *hmc_bits,
struct i40e_context_ele *ce_info,
u8 *src)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
u16 shift_width;
/* copy from the next struct field */
from = src + ce_info->offset;
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
mask = ((u8)1 << ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
/* shift to correct alignment */
mask <<= shift_width;
src_byte <<= shift_width;
/* get the current bits from the target bit string */
dest = hmc_bits + (ce_info->lsb / 8);
memcpy(&dest_byte, dest, sizeof(dest_byte));
dest_byte &= ~mask; /* get the bits not changing */
dest_byte |= src_byte; /* add in the new bits */
/* put it all back */
memcpy(dest, &dest_byte, sizeof(dest_byte));
}
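For reference, here is a standalone worked example of the byte path (the field geometry is made up for illustration and does not come from the real i40e element tables). It traces the same mask/shift/merge steps as i40e_write_byte() for a 3-bit field whose lsb is 13, i.e. bit 5 of byte 1 of the context:

/* hypothetical example, not driver code: 3-bit field, lsb = 13 (byte 1, bit 5) */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t hmc_bits[2] = { 0xff, 0xff };         /* pretend context memory */
        uint8_t src = 0x5;                            /* 3-bit value to store */
        uint16_t lsb = 13, width = 3;

        uint16_t shift_width = lsb % 8;               /* 5 */
        uint8_t mask = ((uint8_t)1 << width) - 1;     /* 0x07 */
        uint8_t src_byte = src & mask;                /* 0x05 */

        mask <<= shift_width;                         /* 0xe0 */
        src_byte <<= shift_width;                     /* 0xa0 */

        uint8_t dest_byte;
        memcpy(&dest_byte, &hmc_bits[lsb / 8], sizeof(dest_byte));
        dest_byte &= ~mask;                           /* keep the unchanged bits: 0x1f */
        dest_byte |= src_byte;                        /* merge the new bits */
        memcpy(&hmc_bits[lsb / 8], &dest_byte, sizeof(dest_byte));

        printf("byte 1 is now 0x%02x\n", hmc_bits[1]);    /* 0xbf */
        return 0;
}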
/**
* i40e_write_word - replace HMC context word
* @hmc_bits: pointer to the HMC memory
* @ce_info: a description of the struct to be read from
* @src: the struct to be read from
**/
static void i40e_write_word(u8 *hmc_bits,
struct i40e_context_ele *ce_info,
u8 *src)
{
u16 src_word, mask;
u8 *from, *dest;
u16 shift_width;
__le16 dest_word;
/* copy from the next struct field */
from = src + ce_info->offset;
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
mask = ((u16)1 << ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
src_word &= mask;
/* shift to correct alignment */
mask <<= shift_width;
src_word <<= shift_width;
/* get the current bits from the target bit string */
dest = hmc_bits + (ce_info->lsb / 8);
memcpy(&dest_word, dest, sizeof(dest_word));
dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
dest_word |= cpu_to_le16(src_word); /* add in the new bits */
/* put it all back */
memcpy(dest, &dest_word, sizeof(dest_word));
}
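A minimal standalone sketch of why the mask is applied before the byte swap and why the merge happens on little-endian values. It assumes a glibc-style <endian.h>, with htole16() standing in for the kernel's cpu_to_le16(), and the field geometry is invented: a 10-bit field at lsb 3 spans two context bytes, and the resulting byte layout comes out the same on little and big endian hosts.

#include <endian.h>        /* htole16(); assumes a glibc-style libc */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t hmc_bits[2] = { 0 };                    /* pretend context memory */
        uint16_t val = 0x2a5;                           /* 10-bit value to store */
        uint16_t lsb = 3, width = 10;

        uint16_t shift_width = lsb % 8;                 /* 3 */
        uint16_t mask = ((uint16_t)1 << width) - 1;     /* 0x03ff */
        uint16_t src_word = val & mask;                 /* mask in host order */

        mask <<= shift_width;                           /* 0x1ff8 */
        src_word <<= shift_width;                       /* 0x1528 */

        uint16_t dest_word;
        memcpy(&dest_word, &hmc_bits[lsb / 8], sizeof(dest_word));
        dest_word &= ~htole16(mask);                    /* clear the field, LE view */
        dest_word |= htole16(src_word);                 /* merge the new bits, LE view */
        memcpy(&hmc_bits[lsb / 8], &dest_word, sizeof(dest_word));

        /* prints "28 15" on both LE and BE hosts: 0x1528 stored LSB first */
        printf("%02x %02x\n", hmc_bits[0], hmc_bits[1]);
        return 0;
}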
/**
* i40e_write_dword - replace HMC context dword
* @hmc_bits: pointer to the HMC memory
* @ce_info: a description of the struct to be read from
* @src: the struct to be read from
**/
static void i40e_write_dword(u8 *hmc_bits,
struct i40e_context_ele *ce_info,
u8 *src)
{
u32 src_dword, mask;
u8 *from, *dest;
u16 shift_width;
__le32 dest_dword;
/* copy from the next struct field */
from = src + ce_info->offset;
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
/* if the field width is exactly 32 on an x86 machine, then the shift
* operation will not work because the SHL instructions count is masked
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1;
else
mask = -1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
src_dword &= mask;
/* shift to correct alignment */
mask <<= shift_width;
src_dword <<= shift_width;
/* get the current bits from the target bit string */
dest = hmc_bits + (ce_info->lsb / 8);
memcpy(&dest_dword, dest, sizeof(dest_dword));
dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
/* put it all back */
memcpy(dest, &dest_dword, sizeof(dest_dword));
}
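The width check above matters because shifting by the full width of the type is undefined behaviour in C, and on x86 the SHL count is masked to 5 bits, so ((u32)1 << 32) - 1 would evaluate to 0 rather than the intended all-ones mask. A small sketch of the safe pattern (the helper name is illustrative only, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Build a mask for field widths of 1..32 bits; width == 32 must be
 * special-cased because a 32-bit shift by 32 is not defined.
 */
static uint32_t field_mask32(unsigned int width)
{
        return (width < 32) ? (((uint32_t)1 << width) - 1) : 0xffffffffU;
}

int main(void)
{
        /* prints "ffffffff 7fffffff 0000007f" */
        printf("%08x %08x %08x\n", field_mask32(32), field_mask32(31),
               field_mask32(7));
        return 0;
}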
/**
* i40e_write_qword - replace HMC context qword
* @hmc_bits: pointer to the HMC memory
* @ce_info: a description of the struct to be read from
* @src: the struct to be read from
**/
static void i40e_write_qword(u8 *hmc_bits,
struct i40e_context_ele *ce_info,
u8 *src)
{
u64 src_qword, mask;
u8 *from, *dest;
u16 shift_width;
__le64 dest_qword;
/* copy from the next struct field */
from = src + ce_info->offset;
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
/* if the field width is exactly 64 on an x86 machine, then the shift
* operation will not work because the SHL instructions count is masked
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1;
else
mask = -1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
src_qword &= mask;
/* shift to correct alignment */
mask <<= shift_width;
src_qword <<= shift_width;
/* get the current bits from the target bit string */
dest = hmc_bits + (ce_info->lsb / 8);
memcpy(&dest_qword, dest, sizeof(dest_qword));
dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
/* put it all back */
memcpy(dest, &dest_qword, sizeof(dest_qword));
}
/**
* i40e_clear_hmc_context - zero out the HMC context bits
* @hw: the hardware struct
@@ -772,71 +960,28 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
struct i40e_context_ele *ce_info,
u8 *dest)
{
u16 shift_width;
u64 bitfield;
u8 hi_byte;
u8 hi_mask;
u64 t_bits;
u64 mask;
u8 *p;
int f;
for (f = 0; ce_info[f].width != 0; f++) {
/* clear out the field */
bitfield = 0;
/* copy from the next struct field */
p = dest + ce_info[f].offset;
/* we have to deal with each element of the HMC using the
* correct size so that we are correct regardless of the
* endianness of the machine
*/
switch (ce_info[f].size_of) {
case 1:
bitfield = *p;
i40e_write_byte(context_bytes, &ce_info[f], dest);
break;
case 2:
bitfield = cpu_to_le16(*(u16 *)p);
i40e_write_word(context_bytes, &ce_info[f], dest);
break;
case 4:
bitfield = cpu_to_le32(*(u32 *)p);
i40e_write_dword(context_bytes, &ce_info[f], dest);
break;
case 8:
bitfield = cpu_to_le64(*(u64 *)p);
i40e_write_qword(context_bytes, &ce_info[f], dest);
break;
}
/* prepare the bits and mask */
shift_width = ce_info[f].lsb % 8;
mask = ((u64)1 << ce_info[f].width) - 1;
/* save upper bytes for special case */
hi_mask = (u8)((mask >> 56) & 0xff);
hi_byte = (u8)((bitfield >> 56) & 0xff);
/* shift to correct alignment */
mask <<= shift_width;
bitfield <<= shift_width;
/* get the current bits from the target bit string */
p = context_bytes + (ce_info[f].lsb / 8);
memcpy(&t_bits, p, sizeof(u64));
t_bits &= ~mask; /* get the bits not changing */
t_bits |= bitfield; /* add in the new bits */
/* put it all back */
memcpy(p, &t_bits, sizeof(u64));
/* deal with the special case if needed
* example: 62 bit field that starts in bit 5 of first byte
* will overlap 3 bits into byte 9
*/
if ((shift_width + ce_info[f].width) > 64) {
u8 byte;
hi_mask >>= (8 - shift_width);
hi_byte >>= (8 - shift_width);
byte = p[8] & ~hi_mask; /* get the bits not changing */
byte |= hi_byte; /* add in the new bits */
p[8] = byte; /* put it back */
}
}
return 0;
......
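To make the per-size dispatch concrete, here is a hypothetical, self-contained illustration. The struct, field names, and bit positions are invented for the example; it only mirrors the offset/size_of/width/lsb members that the code above reads from struct i40e_context_ele, where size_of selects the writer matching the host struct member and width/lsb describe the packed field in HMC memory.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct example_ctx {           /* invented context struct, not the real rxq/txq */
        uint16_t qlen;         /* a 13-bit hardware field held in a u16 member */
        uint64_t base;         /* a 57-bit hardware field held in a u64 member */
};

struct example_ele {           /* mirrors the members used by the write helpers */
        uint16_t offset;       /* offsetof() into the host struct */
        uint16_t size_of;      /* sizeof() the host struct member */
        uint16_t width;        /* width of the packed field, in bits */
        uint16_t lsb;          /* starting bit inside the HMC context */
};

static const struct example_ele example_ce_info[] = {
        { offsetof(struct example_ctx, qlen), sizeof(uint16_t), 13, 32 },
        { offsetof(struct example_ctx, base), sizeof(uint64_t), 57, 45 },
        { 0 }                  /* width == 0 terminates the loop, as in the driver */
};

int main(void)
{
        int f;

        for (f = 0; example_ce_info[f].width != 0; f++)
                printf("field %d: %d-byte member -> %s writer\n", f,
                       example_ce_info[f].size_of,
                       example_ce_info[f].size_of == 8 ? "qword" :
                       example_ce_info[f].size_of == 4 ? "dword" :
                       example_ce_info[f].size_of == 2 ? "word" : "byte");
        return 0;
}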
@@ -32,16 +32,22 @@ struct i40e_hw;
/* HMC element context information */
/* Rx queue context data */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_rxq {
u16 head;
u8 cpuid;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u8 dbuff;
u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u8 hbuff;
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u16 rxmax;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
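A minimal demonstration of the "bigger than needed" comments above, using made-up numbers: an 8-bit wide field whose lsb lands on bit 5 of a byte spills five bits into the next byte, so the left shift inside the write helper would lose its top bits if the host variable were only 8 bits wide.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  narrow = 0xd3;          /* the value held in a u8 member */
        uint16_t wide   = 0xd3;          /* the same value held in a u16 member */
        unsigned int shift_width = 5;    /* lsb % 8 for this hypothetical field */

        narrow <<= shift_width;          /* 0x60: the top five bits are gone */
        wide   <<= shift_width;          /* 0x1a60: all eight bits survive */

        printf("u8: 0x%02x  u16: 0x%04x\n", narrow, wide);
        return 0;
}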
/* Tx queue context data */
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u16 cpuid;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
......
@@ -32,16 +32,22 @@ struct i40e_hw;
/* HMC element context information */
/* Rx queue context data */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_rxq {
u16 head;
u8 cpuid;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u8 dbuff;
u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u8 hbuff;
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u16 rxmax;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u16 cpuid;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
......