Commit a3f72307 authored by Denis Bolotin, committed by David S. Miller

qed*: Utilize FW 8.37.7.0

This patch adds a new qed firmware with fixes and support for new features.

Fixes:
- Fix a rare case of device crash with iWARP, iSCSI or FCoE offload.
- Fix GRE tunneled traffic when iWARP offload is enabled.
- Fix RoCE failure in ib_send_bw when using inline data.
- Fix latency optimization flow for inline WQEs.
- BigBear 100G fix

RDMA:
- Reduce task context size.
- Support for application page sizes above 2GB.
- Performance improvements.

ETH:
- Tenant DCB support.
- Replace RSS indirection table update interface.

Misc:
- Debug Tools changes.
Signed-off-by: Denis Bolotin <denis.bolotin@cavium.com>
Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6ef848ef
...@@ -623,6 +623,7 @@ struct qed_hwfn { ...@@ -623,6 +623,7 @@ struct qed_hwfn {
void *unzip_buf; void *unzip_buf;
struct dbg_tools_data dbg_info; struct dbg_tools_data dbg_info;
void *dbg_user_info;
/* PWM region specific data */ /* PWM region specific data */
u16 wid_count; u16 wid_count;
......
...@@ -3454,6 +3454,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, ...@@ -3454,6 +3454,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
SEM_FAST_REG_STORM_REG_FILE) + SEM_FAST_REG_STORM_REG_FILE) +
IOR_SET_OFFSET(set_id); IOR_SET_OFFSET(set_id);
if (strlen(buf) > 0)
buf[strlen(buf) - 1] = '0' + set_id; buf[strlen(buf) - 1] = '0' + set_id;
offset += qed_grc_dump_mem(p_hwfn, offset += qed_grc_dump_mem(p_hwfn,
p_ptt, p_ptt,
...@@ -5563,35 +5564,6 @@ struct block_info { ...@@ -5563,35 +5564,6 @@ struct block_info {
enum block_id id; enum block_id id;
}; };
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT 24
char *format_str;
};
/* Meta data structure, generated by a perl script during MFW build. therefore,
* the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
* script.
*/
struct mcp_trace_meta {
u32 modules_num;
char **modules;
u32 formats_num;
struct mcp_trace_format *formats;
};
/* REG fifo element */ /* REG fifo element */
struct reg_fifo_element { struct reg_fifo_element {
u64 data; u64 data;
...@@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data { ...@@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data {
enum igu_fifo_addr_types type; enum igu_fifo_addr_types type;
}; };
struct mcp_trace_meta {
u32 modules_num;
char **modules;
u32 formats_num;
struct mcp_trace_format *formats;
bool is_allocated;
};
/* Debug Tools user data */
struct dbg_tools_user_data {
struct mcp_trace_meta mcp_trace_meta;
const u32 *mcp_trace_user_meta_buf;
};
/******************************** Constants **********************************/ /******************************** Constants **********************************/
#define MAX_MSG_LEN 1024 #define MAX_MSG_LEN 1024
...@@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { ...@@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
/******************************** Variables **********************************/ /******************************** Variables **********************************/
/* MCP Trace meta data array - used in case the dump doesn't contain the
* meta data (e.g. due to no NVRAM access).
*/
static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
/* Parsed MCP Trace meta data info, based on MCP trace meta array */
static struct mcp_trace_meta s_mcp_trace_meta;
static bool s_mcp_trace_meta_valid;
/* Temporary buffer, used for print size calculations */ /* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN]; static char s_temp_buf[MAX_MSG_LEN];
...@@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf, ...@@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf,
return dump_offset; return dump_offset;
} }
static struct dbg_tools_user_data *
qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
{
return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
}
/* Parses the idle check rules and returns the number of characters printed. /* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0. * In case of parsing error, returns 0.
*/ */
...@@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, ...@@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
return DBG_STATUS_OK; return DBG_STATUS_OK;
} }
/* Frees the specified MCP Trace meta data */
static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
struct mcp_trace_meta *meta)
{
u32 i;
s_mcp_trace_meta_valid = false;
/* Release modules */
if (meta->modules) {
for (i = 0; i < meta->modules_num; i++)
kfree(meta->modules[i]);
kfree(meta->modules);
}
/* Release formats */
if (meta->formats) {
for (i = 0; i < meta->formats_num; i++)
kfree(meta->formats[i].format_str);
kfree(meta->formats);
}
}
/* Allocates and fills MCP Trace meta data based on the specified meta data /* Allocates and fills MCP Trace meta data based on the specified meta data
* dump buffer. * dump buffer.
* Returns debug status code. * Returns debug status code.
*/ */
static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, static enum dbg_status
const u32 *meta_buf, qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
struct mcp_trace_meta *meta) const u32 *meta_buf)
{ {
u8 *meta_buf_bytes = (u8 *)meta_buf; struct dbg_tools_user_data *dev_user_data;
u32 offset = 0, signature, i; u32 offset = 0, signature, i;
struct mcp_trace_meta *meta;
u8 *meta_buf_bytes;
dev_user_data = qed_dbg_get_user_data(p_hwfn);
meta = &dev_user_data->mcp_trace_meta;
meta_buf_bytes = (u8 *)meta_buf;
/* Free the previous meta before loading a new one. */ /* Free the previous meta before loading a new one. */
if (s_mcp_trace_meta_valid) if (meta->is_allocated)
qed_mcp_trace_free_meta(p_hwfn, meta); qed_mcp_trace_free_meta_data(p_hwfn);
memset(meta, 0, sizeof(*meta)); memset(meta, 0, sizeof(*meta));
...@@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, ...@@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
format_len, format_ptr->format_str); format_len, format_ptr->format_str);
} }
s_mcp_trace_meta_valid = true; meta->is_allocated = true;
return DBG_STATUS_OK; return DBG_STATUS_OK;
} }
...@@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, ...@@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
* buffer. * buffer.
* data_size - size in bytes of data to parse. * data_size - size in bytes of data to parse.
* parsed_buf - destination buffer for parsed data. * parsed_buf - destination buffer for parsed data.
* parsed_bytes - size of parsed data in bytes. * parsed_results_bytes - size of parsed data in bytes.
*/ */
static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
u8 *trace_buf,
u32 trace_buf_size, u32 trace_buf_size,
u32 data_offset, u32 data_offset,
u32 data_size, u32 data_size,
char *parsed_buf, char *parsed_buf,
u32 *parsed_bytes) u32 *parsed_results_bytes)
{ {
struct dbg_tools_user_data *dev_user_data;
struct mcp_trace_meta *meta;
u32 param_mask, param_shift; u32 param_mask, param_shift;
enum dbg_status status; enum dbg_status status;
*parsed_bytes = 0; dev_user_data = qed_dbg_get_user_data(p_hwfn);
meta = &dev_user_data->mcp_trace_meta;
*parsed_results_bytes = 0;
if (!s_mcp_trace_meta_valid) if (!meta->is_allocated)
return DBG_STATUS_MCP_TRACE_BAD_DATA; return DBG_STATUS_MCP_TRACE_BAD_DATA;
status = DBG_STATUS_OK; status = DBG_STATUS_OK;
...@@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, ...@@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
format_idx = header & MFW_TRACE_EVENTID_MASK; format_idx = header & MFW_TRACE_EVENTID_MASK;
/* Skip message if its index doesn't exist in the meta data */ /* Skip message if its index doesn't exist in the meta data */
if (format_idx >= s_mcp_trace_meta.formats_num) { if (format_idx >= meta->formats_num) {
u8 format_size = u8 format_size =
(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
MFW_TRACE_PRM_SIZE_SHIFT); MFW_TRACE_PRM_SIZE_SHIFT);
...@@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, ...@@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
continue; continue;
} }
format_ptr = &s_mcp_trace_meta.formats[format_idx]; format_ptr = &meta->formats[format_idx];
for (i = 0, for (i = 0,
param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
...@@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, ...@@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
return DBG_STATUS_MCP_TRACE_BAD_DATA; return DBG_STATUS_MCP_TRACE_BAD_DATA;
/* Print current message to results buffer */ /* Print current message to results buffer */
*parsed_bytes += *parsed_results_bytes +=
sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), sprintf(qed_get_buf_ptr(parsed_buf,
*parsed_results_bytes),
"%s %-8s: ", "%s %-8s: ",
s_mcp_trace_level_str[format_level], s_mcp_trace_level_str[format_level],
s_mcp_trace_meta.modules[format_module]); meta->modules[format_module]);
*parsed_bytes += *parsed_results_bytes +=
sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
format_ptr->format_str, format_ptr->format_str,
params[0], params[1], params[2]); params[0], params[1], params[2]);
} }
/* Add string NULL terminator */ /* Add string NULL terminator */
(*parsed_bytes)++; (*parsed_results_bytes)++;
return status; return status;
} }
...@@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, ...@@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
/* Parses an MCP Trace dump buffer. /* Parses an MCP Trace dump buffer.
* If result_buf is not NULL, the MCP Trace results are printed to it. * If result_buf is not NULL, the MCP Trace results are printed to it.
* In any case, the required results buffer size is assigned to * In any case, the required results buffer size is assigned to
* parsed_bytes. * parsed_results_bytes.
* The parsing status is returned. * The parsing status is returned.
*/ */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf, u32 *dump_buf,
char *parsed_buf, char *results_buf,
u32 *parsed_bytes) u32 *parsed_results_bytes,
bool free_meta_data)
{ {
const char *section_name, *param_name, *param_str_val; const char *section_name, *param_name, *param_str_val;
u32 data_size, trace_data_dwords, trace_meta_dwords; u32 data_size, trace_data_dwords, trace_meta_dwords;
u32 offset, results_offset, parsed_buf_bytes; u32 offset, results_offset, results_buf_bytes;
u32 param_num_val, num_section_params; u32 param_num_val, num_section_params;
struct mcp_trace *trace; struct mcp_trace *trace;
enum dbg_status status; enum dbg_status status;
const u32 *meta_buf; const u32 *meta_buf;
u8 *trace_buf; u8 *trace_buf;
*parsed_bytes = 0; *parsed_results_bytes = 0;
/* Read global_params section */ /* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf, dump_buf += qed_read_section_hdr(dump_buf,
...@@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, ...@@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Print global params */ /* Print global params */
dump_buf += qed_print_section_params(dump_buf, dump_buf += qed_print_section_params(dump_buf,
num_section_params, num_section_params,
parsed_buf, &results_offset); results_buf, &results_offset);
/* Read trace_data section */ /* Read trace_data section */
dump_buf += qed_read_section_hdr(dump_buf, dump_buf += qed_read_section_hdr(dump_buf,
...@@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, ...@@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Prepare trace info */ /* Prepare trace info */
trace = (struct mcp_trace *)dump_buf; trace = (struct mcp_trace *)dump_buf;
if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
return DBG_STATUS_MCP_TRACE_BAD_DATA;
trace_buf = (u8 *)dump_buf + sizeof(*trace); trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest; offset = trace->trace_oldest;
data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
...@@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, ...@@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Choose meta data buffer */ /* Choose meta data buffer */
if (!trace_meta_dwords) { if (!trace_meta_dwords) {
/* Dump doesn't include meta data */ /* Dump doesn't include meta data */
if (!s_mcp_trace_meta_arr.ptr) struct dbg_tools_user_data *dev_user_data =
qed_dbg_get_user_data(p_hwfn);
if (!dev_user_data->mcp_trace_user_meta_buf)
return DBG_STATUS_MCP_TRACE_NO_META; return DBG_STATUS_MCP_TRACE_NO_META;
meta_buf = s_mcp_trace_meta_arr.ptr;
meta_buf = dev_user_data->mcp_trace_user_meta_buf;
} else { } else {
/* Dump includes meta data */ /* Dump includes meta data */
meta_buf = dump_buf; meta_buf = dump_buf;
} }
/* Allocate meta data memory */ /* Allocate meta data memory */
status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta); status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
if (status != DBG_STATUS_OK) if (status != DBG_STATUS_OK)
return status; return status;
status = qed_parse_mcp_trace_buf(trace_buf, status = qed_parse_mcp_trace_buf(p_hwfn,
trace_buf,
trace->size, trace->size,
offset, offset,
data_size, data_size,
parsed_buf ? results_buf ?
parsed_buf + results_offset : results_buf + results_offset :
NULL, NULL,
&parsed_buf_bytes); &results_buf_bytes);
if (status != DBG_STATUS_OK) if (status != DBG_STATUS_OK)
return status; return status;
*parsed_bytes = results_offset + parsed_buf_bytes; if (free_meta_data)
qed_mcp_trace_free_meta_data(p_hwfn);
*parsed_results_bytes = results_offset + results_buf_bytes;
return DBG_STATUS_OK; return DBG_STATUS_OK;
} }
...@@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) ...@@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK; return DBG_STATUS_OK;
} }
enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
{
p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
GFP_KERNEL);
if (!p_hwfn->dbg_user_info)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
return DBG_STATUS_OK;
}
const char *qed_dbg_get_status_str(enum dbg_status status) const char *qed_dbg_get_status_str(enum dbg_status status)
{ {
return (status < return (status <
...@@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, ...@@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
num_errors, num_warnings); num_errors, num_warnings);
} }
void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
const u32 *meta_buf)
{ {
s_mcp_trace_meta_arr.ptr = data; struct dbg_tools_user_data *dev_user_data =
s_mcp_trace_meta_arr.size_in_dwords = size; qed_dbg_get_user_data(p_hwfn);
dev_user_data->mcp_trace_user_meta_buf = meta_buf;
} }
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
...@@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, ...@@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *results_buf_size) u32 *results_buf_size)
{ {
return qed_parse_mcp_trace_dump(p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf, NULL, results_buf_size); dump_buf, NULL, results_buf_size, true);
} }
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
...@@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, ...@@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
return qed_parse_mcp_trace_dump(p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf, dump_buf,
results_buf, &parsed_buf_size); results_buf, &parsed_buf_size, true);
} }
enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
char *results_buf)
{
u32 parsed_buf_size;
return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
&parsed_buf_size, false);
}
enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
u8 *dump_buf,
u32 num_dumped_bytes, u32 num_dumped_bytes,
char *results_buf) char *results_buf)
{ {
u32 parsed_bytes; u32 parsed_results_bytes;
return qed_parse_mcp_trace_buf(dump_buf, return qed_parse_mcp_trace_buf(p_hwfn,
dump_buf,
num_dumped_bytes, num_dumped_bytes,
0, 0,
num_dumped_bytes, num_dumped_bytes,
results_buf, &parsed_bytes); results_buf, &parsed_results_bytes);
}
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
struct dbg_tools_user_data *dev_user_data;
struct mcp_trace_meta *meta;
u32 i;
dev_user_data = qed_dbg_get_user_data(p_hwfn);
meta = &dev_user_data->mcp_trace_meta;
if (!meta->is_allocated)
return;
/* Release modules */
if (meta->modules) {
for (i = 0; i < meta->modules_num; i++)
kfree(meta->modules[i]);
kfree(meta->modules);
}
/* Release formats */
if (meta->formats) {
for (i = 0; i < meta->formats_num; i++)
kfree(meta->formats[i].format_str);
kfree(meta->formats);
}
meta->is_allocated = false;
} }
enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
......
...@@ -144,6 +144,12 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn) ...@@ -144,6 +144,12 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->wfq_data = NULL; qm_info->wfq_data = NULL;
} }
static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->dbg_user_info);
p_hwfn->dbg_user_info = NULL;
}
void qed_resc_free(struct qed_dev *cdev) void qed_resc_free(struct qed_dev *cdev)
{ {
int i; int i;
...@@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_l2_free(p_hwfn); qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn); qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
} }
} }
...@@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
rc = qed_dcbx_info_alloc(p_hwfn); rc = qed_dcbx_info_alloc(p_hwfn);
if (rc) if (rc)
goto alloc_err; goto alloc_err;
rc = qed_dbg_alloc_user_data(p_hwfn);
if (rc)
goto alloc_err;
} }
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
......
...@@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data { ...@@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data {
u8 mf_si_mcast_accept_all; u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error; struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag; u8 gsi_offload_flag;
u8 reserved[6]; u8 wipe_inner_vlan_pri_en;
u8 reserved[5];
}; };
/* Ramrod data for rx queue stop ramrod */ /* Ramrod data for rx queue stop ramrod */
...@@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data { ...@@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data {
__le16 pbl_size; __le16 pbl_size;
__le16 qm_pq_id; __le16 qm_pq_id;
u8 gsi_offload_flag; u8 gsi_offload_flag;
u8 resrved[3]; u8 vport_id;
u8 resrved[2];
}; };
/* Ramrod data for tx queue stop ramrod */ /* Ramrod data for tx queue stop ramrod */
...@@ -914,6 +916,16 @@ struct eth_rx_rate_limit { ...@@ -914,6 +916,16 @@ struct eth_rx_rate_limit {
__le16 reserved1; __le16 reserved1;
}; };
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
u8 valid;
u8 vport_id;
u8 ind_table_index;
u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
};
struct eth_ustorm_per_pf_stat { struct eth_ustorm_per_pf_stat {
struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_ucast_bytes;
struct regpair rcv_lb_mcast_bytes; struct regpair rcv_lb_mcast_bytes;
...@@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data { ...@@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data {
u8 rl_id_first; u8 rl_id_first;
u8 rl_id_last; u8 rl_id_last;
u8 rl_dc_qcn_flg; u8 rl_dc_qcn_flg;
u8 dcqcn_reset_alpha_on_idle;
u8 rl_bc_stage_th;
u8 rl_timer_stage_th;
u8 reserved1;
__le32 rl_bc_rate; __le32 rl_bc_rate;
__le16 rl_max_rate; __le16 rl_max_rate;
__le16 rl_r_ai; __le16 rl_r_ai;
...@@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data { ...@@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data {
__le32 dcqcn_k_us; __le32 dcqcn_k_us;
__le32 dcqcn_timeuot_us; __le32 dcqcn_timeuot_us;
__le32 qcn_timeuot_us; __le32 qcn_timeuot_us;
__le32 reserved[2]; __le32 reserved2;
}; };
/* Slowpath Element (SPQE) */ /* Slowpath Element (SPQE) */
...@@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, ...@@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results); struct dbg_attn_block_result *results);
/******************************* Data Types **********************************/
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT 24
char *format_str;
};
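A minimal sketch of decoding the packed data word above; the function name example_decode_format and its local variables are illustrative, and only the masks and shifts defined in this structure are relied upon.

/* Illustrative sketch only: unpack an mcp_trace_format entry. */
static void example_decode_format(const struct mcp_trace_format *fmt)
{
	u32 module_idx, level, p1_size, fmt_len;

	module_idx = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
		     MCP_TRACE_FORMAT_MODULE_SHIFT;
	level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
		MCP_TRACE_FORMAT_LEVEL_SHIFT;
	p1_size = (fmt->data & MCP_TRACE_FORMAT_P1_SIZE_MASK) >>
		  MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
	fmt_len = (fmt->data & MCP_TRACE_FORMAT_LEN_MASK) >>
		  MCP_TRACE_FORMAT_LEN_SHIFT;

	/* module_idx indexes mcp_trace_meta.modules[], level selects the
	 * trace level, p1_size (and the P2/P3 fields) give per-parameter
	 * sizes, and fmt_len is the stored format string length.
	 */
	(void)module_idx; (void)level; (void)p1_size; (void)fmt_len;
}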
/******************************** Constants **********************************/ /******************************** Constants **********************************/
#define MAX_NAME_LEN 16 #define MAX_NAME_LEN 16
...@@ -3336,6 +3371,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, ...@@ -3336,6 +3371,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
*/ */
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
/**
* @brief qed_dbg_alloc_user_data - Allocates user debug data.
*
* @param p_hwfn - HW device data
*/
enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn);
/** /**
* @brief qed_dbg_get_status_str - Returns a string for the specified status. * @brief qed_dbg_get_status_str - Returns a string for the specified status.
* *
...@@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, ...@@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *num_warnings); u32 *num_warnings);
/** /**
* @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
* meta data.
* *
* Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
* no NVRAM access). * no NVRAM access).
...@@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, ...@@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
* @param data - pointer to MCP Trace meta data * @param data - pointer to MCP Trace meta data
* @param size - size of MCP Trace meta data in dwords * @param size - size of MCP Trace meta data in dwords
*/ */
void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
const u32 *meta_buf);
/** /**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
...@@ -3424,19 +3466,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, ...@@ -3424,19 +3466,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords, u32 num_dumped_dwords,
char *results_buf); char *results_buf);
/**
* @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
* keeps the MCP trace meta data allocated, to support continuous MCP Trace
* parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
* be called to free the meta data.
*
* @param p_hwfn - HW device data
* @param dump_buf - mcp trace dump buffer, starting from the header.
* @param results_buf - buffer for printing the mcp trace results.
*
* @return error if the parsing fails, ok otherwise.
*/
enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
char *results_buf);
/** /**
* @brief print_mcp_trace_line - Prints MCP Trace results for a single line * @brief print_mcp_trace_line - Prints MCP Trace results for a single line
* *
* @param p_hwfn - HW device data
* @param dump_buf - mcp trace dump buffer, starting from the header. * @param dump_buf - mcp trace dump buffer, starting from the header.
* @param num_dumped_bytes - number of bytes that were dumped. * @param num_dumped_bytes - number of bytes that were dumped.
* @param results_buf - buffer for printing the mcp trace results. * @param results_buf - buffer for printing the mcp trace results.
* *
* @return error if the parsing fails, ok otherwise. * @return error if the parsing fails, ok otherwise.
*/ */
enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
u8 *dump_buf,
u32 num_dumped_bytes, u32 num_dumped_bytes,
char *results_buf); char *results_buf);
/**
* @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
* @param p_hwfn - HW device data
*/
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
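A minimal usage sketch of the continuous-parsing flow declared above, assuming the caller has already produced dump_buf and sized results_buf (e.g. via qed_get_mcp_trace_results_buf_size()); example_mcp_trace_cont and nvram_meta are illustrative names only.

/* Illustrative sketch only; error handling trimmed. */
static enum dbg_status example_mcp_trace_cont(struct qed_hwfn *p_hwfn,
					      const u32 *nvram_meta,
					      u32 *dump_buf,
					      char *results_buf)
{
	enum dbg_status rc;

	/* Optional: provide meta data if the dump itself contains none. */
	qed_dbg_mcp_trace_set_meta_data(p_hwfn, nvram_meta);

	/* May be called repeatedly; the parsed meta data stays allocated. */
	rc = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Release the meta data once continuous parsing is done. */
	qed_mcp_trace_free_meta_data(p_hwfn);

	return DBG_STATUS_OK;
}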
/** /**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes). * for reg_fifo results (in bytes).
...@@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, ...@@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
(IRO[29].base + ((pf_id) * IRO[29].m1)) (IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
* Use eth_tstorm_rss_update_data for update.
*/
#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
(IRO[30].base + ((pf_id) * IRO[30].m1))
#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
/* Xstorm queue zone */ /* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[30].base + ((queue_id) * IRO[30].m1)) (IRO[31].base + ((queue_id) * IRO[31].m1))
#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */ /* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
(IRO[31].base + ((rss_id) * IRO[31].m1)) (IRO[32].base + ((rss_id) * IRO[32].m1))
#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) #define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm cqe producer */ /* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
(IRO[32].base + ((rss_id) * IRO[32].m1)) (IRO[33].base + ((rss_id) * IRO[33].m1))
#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) #define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */ /* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
(IRO[33].base + ((pf_id) * IRO[33].m1)) (IRO[34].base + ((pf_id) * IRO[34].m1))
#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) #define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */ /* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
(IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1))
#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
* BDqueue-id. * BDqueue-id.
*/ */
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */ /* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[37].base + ((pf_id) * IRO[37].m1)) (IRO[38].base + ((pf_id) * IRO[38].m1))
#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Mstorm iSCSI RX stats */ /* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[38].base + ((pf_id) * IRO[38].m1)) (IRO[39].base + ((pf_id) * IRO[39].m1))
#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Ustorm iSCSI RX stats */ /* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[39].base + ((pf_id) * IRO[39].m1)) (IRO[40].base + ((pf_id) * IRO[40].m1))
#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) #define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
/* Xstorm iSCSI TX stats */ /* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[40].base + ((pf_id) * IRO[40].m1)) (IRO[41].base + ((pf_id) * IRO[41].m1))
#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Ystorm iSCSI TX stats */ /* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[41].base + ((pf_id) * IRO[41].m1)) (IRO[42].base + ((pf_id) * IRO[42].m1))
#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Pstorm iSCSI TX stats */ /* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[42].base + ((pf_id) * IRO[42].m1)) (IRO[43].base + ((pf_id) * IRO[43].m1))
#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
/* Tstorm FCoE RX stats */ /* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
(IRO[43].base + ((pf_id) * IRO[43].m1)) (IRO[44].base + ((pf_id) * IRO[44].m1))
#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) #define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
/* Pstorm FCoE TX stats */ /* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1)) (IRO[45].base + ((pf_id) * IRO[45].m1))
#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) #define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */ /* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
(IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Tstorm RDMA queue statistics */ /* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
/* Xstorm error level for assert */ /* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[47].base + ((pf_id) * IRO[47].m1)) (IRO[48].base + ((pf_id) * IRO[48].m1))
#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) #define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Ystorm error level for assert */ /* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[48].base + ((pf_id) * IRO[48].m1)) (IRO[49].base + ((pf_id) * IRO[49].m1))
#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) #define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Pstorm error level for assert */ /* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[49].base + ((pf_id) * IRO[49].m1)) (IRO[50].base + ((pf_id) * IRO[50].m1))
#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) #define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Tstorm error level for assert */ /* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[50].base + ((pf_id) * IRO[50].m1)) (IRO[51].base + ((pf_id) * IRO[51].m1))
#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) #define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Mstorm error level for assert */ /* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[51].base + ((pf_id) * IRO[51].m1)) (IRO[52].base + ((pf_id) * IRO[52].m1))
#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) #define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Ustorm error level for assert */ /* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[52].base + ((pf_id) * IRO[52].m1)) (IRO[53].base + ((pf_id) * IRO[53].m1))
#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) #define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */ /* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
(IRO[53].base + ((pf_id) * IRO[53].m1)) (IRO[54].base + ((pf_id) * IRO[54].m1))
#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) #define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */ /* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
(IRO[54].base + ((roce_pf_id) * IRO[54].m1)) (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */ /* DCQCN Received Statistics */
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
(IRO[55].base + ((roce_pf_id) * IRO[55].m1)) (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
/* RoCE Error Statistics */ /* RoCE Error Statistics */
#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
(IRO[56].base + ((roce_pf_id) * IRO[56].m1)) (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) #define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */ /* DCQCN Sent Statistics */
#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
(IRO[57].base + ((roce_pf_id) * IRO[57].m1)) (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) #define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
/* RoCE CQEs Statistics */ /* RoCE CQEs Statistics */
#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
(IRO[58].base + ((roce_pf_id) * IRO[58].m1)) (IRO[59].base + ((roce_pf_id) * IRO[59].m1))
#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) #define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
static const struct iro iro_arr[59] = { static const struct iro iro_arr[60] = {
{0x0, 0x0, 0x0, 0x0, 0x8}, {0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x4cb8, 0x88, 0x0, 0x0, 0x88},
{0x6530, 0x20, 0x0, 0x0, 0x20}, {0x6530, 0x20, 0x0, 0x0, 0x20},
...@@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = { ...@@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = {
{0x84, 0x8, 0x0, 0x0, 0x2}, {0x84, 0x8, 0x0, 0x0, 0x2},
{0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x4c48, 0x0, 0x0, 0x0, 0x78},
{0x3e38, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78},
{0x2b78, 0x0, 0x0, 0x0, 0x78}, {0x3ef8, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x4998, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78},
{0x7f50, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8}, {0xa28, 0x8, 0x0, 0x0, 0x8},
{0x6210, 0x10, 0x0, 0x0, 0x10}, {0x6210, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30}, {0xb820, 0x30, 0x0, 0x0, 0x30},
{0x96c0, 0x30, 0x0, 0x0, 0x30}, {0xa990, 0x30, 0x0, 0x0, 0x30},
{0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x4b68, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x1f8, 0x4, 0x0, 0x0, 0x4},
{0x53a8, 0x80, 0x4, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4},
...@@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = { ...@@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = {
{0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x4ba8, 0x80, 0x0, 0x0, 0x20},
{0x8158, 0x40, 0x0, 0x0, 0x30}, {0x8158, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60}, {0xe770, 0x60, 0x0, 0x0, 0x60},
{0x2d10, 0x80, 0x0, 0x0, 0x38}, {0x4090, 0x80, 0x0, 0x0, 0x38},
{0xf2b8, 0x78, 0x0, 0x0, 0x78}, {0xfea8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xaf20, 0x0, 0x0, 0x0, 0xf0},
{0xb010, 0x8, 0x0, 0x0, 0x8}, {0xb010, 0x8, 0x0, 0x0, 0x8},
{0xc00, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8},
...@@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = { ...@@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = {
{0x12908, 0x18, 0x0, 0x0, 0x10}, {0x12908, 0x18, 0x0, 0x0, 0x10},
{0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0x11aa8, 0x40, 0x0, 0x0, 0x18},
{0xa588, 0x50, 0x0, 0x0, 0x20}, {0xa588, 0x50, 0x0, 0x0, 0x20},
{0x8700, 0x40, 0x0, 0x0, 0x28}, {0x8f00, 0x40, 0x0, 0x0, 0x28},
{0x10300, 0x18, 0x0, 0x0, 0x10}, {0x10e30, 0x18, 0x0, 0x0, 0x10},
{0xde48, 0x48, 0x0, 0x0, 0x38}, {0xde48, 0x48, 0x0, 0x0, 0x38},
{0x10768, 0x20, 0x0, 0x0, 0x20}, {0x11298, 0x20, 0x0, 0x0, 0x20},
{0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x40c8, 0x80, 0x0, 0x0, 0x10},
{0x5048, 0x10, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10},
{0xc748, 0x8, 0x0, 0x0, 0x1}, {0xc748, 0x8, 0x0, 0x0, 0x1},
{0xa128, 0x8, 0x0, 0x0, 0x1}, {0xa928, 0x8, 0x0, 0x0, 0x1},
{0x10f00, 0x8, 0x0, 0x0, 0x1}, {0x11a30, 0x8, 0x0, 0x0, 0x1},
{0xf030, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1},
{0x13028, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1},
{0x12c58, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1},
{0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xc9b8, 0x30, 0x0, 0x0, 0x10},
{0xed90, 0x28, 0x0, 0x0, 0x28}, {0xed90, 0x28, 0x0, 0x0, 0x28},
{0xa520, 0x18, 0x0, 0x0, 0x18}, {0xad20, 0x18, 0x0, 0x0, 0x18},
{0xa6a0, 0x8, 0x0, 0x0, 0x8}, {0xaea0, 0x8, 0x0, 0x0, 0x8},
{0x13108, 0x8, 0x0, 0x0, 0x8}, {0x13c38, 0x8, 0x0, 0x0, 0x8},
{0x13c50, 0x18, 0x0, 0x0, 0x18}, {0x13c50, 0x18, 0x0, 0x0, 0x18},
}; };
...@@ -5661,6 +5737,14 @@ enum eth_filter_type { ...@@ -5661,6 +5737,14 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE MAX_ETH_FILTER_TYPE
}; };
/* inner to inner vlan priority translation configurations */
struct eth_in_to_in_pri_map_cfg {
u8 inner_vlan_pri_remap_en;
u8 reserved[7];
u8 non_rdma_in_to_in_pri_map[8];
u8 rdma_in_to_in_pri_map[8];
};
/* Eth IPv4 Fragment Type */ /* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type { enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG, ETH_IPV4_NOT_FRAG,
...@@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data { ...@@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data {
struct regpair reserved1[5]; struct regpair reserved1[5];
}; };
/* Inner to Inner VLAN priority map update mode */
enum update_in_to_in_pri_map_mode_enum {
ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL,
MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM
};
/* Ramrod data for vport update ramrod */ /* Ramrod data for vport update ramrod */
struct vport_filter_update_ramrod_data { struct vport_filter_update_ramrod_data {
struct eth_filter_cmd_header filter_cmd_hdr; struct eth_filter_cmd_header filter_cmd_hdr;
...@@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data { ...@@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data {
u8 zero_placement_offset; u8 zero_placement_offset;
u8 ctl_frame_mac_check_en; u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en; u8 ctl_frame_ethtype_check_en;
u8 reserved[1]; u8 wipe_inner_vlan_pri_en;
struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
}; };
/* Ramrod data for vport stop ramrod */ /* Ramrod data for vport stop ramrod */
...@@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn { ...@@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn {
u8 update_ctl_frame_checks_en_flg; u8 update_ctl_frame_checks_en_flg;
u8 ctl_frame_mac_check_en; u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en; u8 ctl_frame_ethtype_check_en;
u8 reserved[15]; u8 update_in_to_in_pri_map_mode;
u8 in_to_in_pri_map[8];
u8 reserved[6];
}; };
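A minimal sketch of how the new inner-to-inner VLAN priority fields might be filled for a vport update; example_set_in_to_in_pri_map is an illustrative name and the surrounding ramrod submission path is omitted.

/* Illustrative sketch only: request an identity non-RDMA inner-to-inner
 * VLAN priority mapping on the next vport update.
 */
static void example_set_in_to_in_pri_map(struct vport_update_ramrod_data_cmn *cmn)
{
	u8 pri;

	cmn->update_in_to_in_pri_map_mode =
		ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL;
	for (pri = 0; pri < 8; pri++)
		cmn->in_to_in_pri_map[pri] = pri;
}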
struct vport_update_ramrod_mcast { struct vport_update_ramrod_mcast {
...@@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx { ...@@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx {
struct regpair temp[4]; struct regpair temp[4];
}; };
/* The roce task context of Ustorm */
struct ustorm_rdma_task_st_ctx {
struct regpair temp[2];
};
struct e4_ustorm_rdma_task_ag_ctx { struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved; u8 reserved;
u8 state; u8 state;
...@@ -7007,8 +7097,6 @@ struct e4_rdma_task_context { ...@@ -7007,8 +7097,6 @@ struct e4_rdma_task_context {
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context; struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context; struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
}; };
...@@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx { ...@@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx {
#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; u8 byte2;
u8 byte3; u8 nvmf_only;
__le16 conn_dpi; __le16 conn_dpi;
__le16 word1; __le16 word1;
__le32 cq_cons; __le32 cq_cons;
...@@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data { ...@@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data {
struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async; struct regpair qp_handle_for_async;
u8 stats_counter_id; u8 stats_counter_id;
u8 reserved3[7]; u8 reserved3[6];
u8 flags2;
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1
__le16 regular_latency_phy_queue; __le16 regular_latency_phy_queue;
__le16 dpi; __le16 dpi;
}; };
...@@ -7954,6 +8047,7 @@ enum roce_event_opcode { ...@@ -7954,6 +8047,7 @@ enum roce_event_opcode {
ROCE_EVENT_DESTROY_QP, ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP, ROCE_EVENT_DESTROY_UD_QP,
ROCE_EVENT_FUNC_UPDATE,
MAX_ROCE_EVENT_OPCODE MAX_ROCE_EVENT_OPCODE
}; };
...@@ -7962,7 +8056,13 @@ struct roce_init_func_params { ...@@ -7962,7 +8056,13 @@ struct roce_init_func_params {
u8 ll2_queue_id; u8 ll2_queue_id;
u8 cnp_vlan_priority; u8 cnp_vlan_priority;
u8 cnp_dscp; u8 cnp_dscp;
u8 reserved; u8 flags;
#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F
#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2
__le32 cnp_send_timeout; __le32 cnp_send_timeout;
__le16 rl_offset; __le16 rl_offset;
u8 rl_count_log; u8 rl_count_log;
...@@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id { ...@@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id {
ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP, ROCE_RAMROD_DESTROY_UD_QP,
ROCE_RAMROD_FUNC_UPDATE,
MAX_ROCE_RAMROD_CMD_ID MAX_ROCE_RAMROD_CMD_ID
}; };
/* RoCE func init ramrod data */
struct roce_update_func_params {
u8 cnp_vlan_priority;
u8 cnp_dscp;
__le16 flags;
#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF
#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2
__le32 cnp_send_timeout;
};
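A minimal sketch of building the DCQCN enable flags for the new function-update data; example_fill_roce_update is an illustrative name, only the masks and shifts defined above are used, and no units are implied for cnp_send_timeout.

/* Illustrative sketch only: fill DCQCN enables in roce_update_func_params. */
static void example_fill_roce_update(struct roce_update_func_params *p,
				     bool np_en, bool rp_en,
				     u32 cnp_send_timeout)
{
	u16 flags = 0;

	if (np_en)
		flags |= ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK <<
			 ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT;
	if (rp_en)
		flags |= ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK <<
			 ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT;

	p->flags = cpu_to_le16(flags);
	p->cnp_send_timeout = cpu_to_le32(cnp_send_timeout);
}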
struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0; u8 reserved0;
u8 state; u8 state;
......
...@@ -110,7 +110,7 @@ ...@@ -110,7 +110,7 @@
#define FW_MAJOR_VERSION 8 #define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 37 #define FW_MINOR_VERSION 37
#define FW_REVISION_VERSION 2 #define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0 #define FW_ENGINEERING_VERSION 0
/***********************/ /***********************/
...@@ -931,12 +931,12 @@ struct db_rdma_dpm_params { ...@@ -931,12 +931,12 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 #define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 #define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 #define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 #define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
}; };
......
...@@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx { ...@@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx {
__le32 exp_cont_len; __le32 exp_cont_len;
__le32 total_data_acked; __le32 total_data_acked;
__le32 exp_data_acked; __le32 exp_data_acked;
u8 next_tid_valid; u8 byte2;
u8 byte3; u8 byte3;
__le16 word1; __le16 word1;
__le16 next_tid; __le16 next_tid;
......