Commit 2deeb495 authored by David S. Miller

Merge branch 'cxgb4-collect-hardware-logs-via-ethtool'

Rahul Lakkireddy says:

====================
cxgb4: collect hardware logs via ethtool

Collect more hardware logs via the ethtool --get-dump facility.

Patch 1 collects on-chip memory layout information.

Patch 2 collects on-chip MC memory dumps.

Patch 3 collects HMA memory dump.

Patch 4 evaluates and skips TX and RX payload regions in memory dumps.

Patch 5 collects egress and ingress SGE queue contexts.

Patch 6 collects PCIe configuration logs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 62fd8b18 6078ab19
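
The dumps added by this series are retrieved through the standard ethtool dump interface. A rough usage sketch (assuming the interface is named eth0, and assuming the driver's dump flags CXGB4_ETH_DUMP_MEM (bit 0) and CXGB4_ETH_DUMP_HW (bit 1) select the memory and hardware entity groups, respectively):

    # choose which entity groups the driver should collect
    ethtool -W eth0 3
    # snapshot the dump into a file for offline decoding
    ethtool -w eth0 data cudbg_dump.bin

The cudbg signature and per-entity headers embedded in the dump let offline tools locate each entity (EDC/MC/HMA memory, meminfo, SGE contexts, PCIe config, and so on).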
@@ -18,17 +18,15 @@
 #ifndef __CUDBG_ENTITY_H__
 #define __CUDBG_ENTITY_H__

-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5

 #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001

-struct card_mem {
-	u16 size_edc0;
-	u16 size_edc1;
-	u16 mem_flag;
-};
-
 struct cudbg_mbox_log {
 	struct mbox_cmd entry;
 	u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
 	u8 data[0];
 };

+static const char * const cudbg_region[] = {
+	"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+	"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+	"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+	"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+	"RQUDP region:", "PBL region:", "TXPBL region:",
+	"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+	"On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+	bool exist; /* Does region exists in current memory? */
+	u32 start;  /* Start wrt 0 */
+	u32 end;    /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+	u32 base;
+	u32 limit;
+	u32 idx;
+};
+
+struct cudbg_meminfo {
+	struct cudbg_mem_desc avail[4];
+	struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+	u32 avail_c;
+	u32 mem_c;
+	u32 up_ram_lo;
+	u32 up_ram_hi;
+	u32 up_extmem2_lo;
+	u32 up_extmem2_hi;
+	u32 rx_pages_data[3];
+	u32 tx_pages_data[4];
+	u32 p_structs;
+	u32 reserved[12];
+	u32 port_used[4];
+	u32 port_alloc[4];
+	u32 loopback_used[NCHAN];
+	u32 loopback_alloc[NCHAN];
+};
+
 struct cudbg_cim_pif_la {
 	int size;
 	u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
 	u32 reserved[16];
 };

+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
 #define CUDBG_MAX_FL_QIDS 1024

 struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
 	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
 };

+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+	{0x0, 0x34},
+	{0x3c, 0x40},
+	{0x50, 0x64},
+	{0x70, 0x80},
+	{0x94, 0xa0},
+	{0xb0, 0xb8},
+	{0xd0, 0xd4},
+	{0x100, 0x128},
+	{0x140, 0x148},
+	{0x150, 0x164},
+	{0x170, 0x178},
+	{0x180, 0x194},
+	{0x1a0, 0x1b8},
+	{0x1c0, 0x208},
+};
+
 static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
 	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
 	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
...
@@ -47,6 +47,8 @@ enum cudbg_dbg_entity_type {
 	CUDBG_CIM_OBQ_NCSI = 17,
 	CUDBG_EDC0 = 18,
 	CUDBG_EDC1 = 19,
+	CUDBG_MC0 = 20,
+	CUDBG_MC1 = 21,
 	CUDBG_RSS = 22,
 	CUDBG_RSS_VF_CONF = 25,
 	CUDBG_PATH_MTU = 27,
@@ -56,6 +58,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_SGE_INDIRECT = 37,
 	CUDBG_ULPRX_LA = 41,
 	CUDBG_TP_LA = 43,
+	CUDBG_MEMINFO = 44,
 	CUDBG_CIM_PIF_LA = 45,
 	CUDBG_CLK = 46,
 	CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +66,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PCIE_INDIRECT = 50,
 	CUDBG_PM_INDIRECT = 51,
 	CUDBG_TID_INFO = 54,
+	CUDBG_PCIE_CONFIG = 55,
 	CUDBG_DUMP_CONTEXT = 56,
 	CUDBG_MPS_TCAM = 57,
 	CUDBG_VPD_DATA = 58,
@@ -74,6 +78,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PBT_TABLE = 65,
 	CUDBG_MBOX_LOG = 66,
 	CUDBG_HMA_INDIRECT = 67,
+	CUDBG_HMA = 68,
 	CUDBG_MAX_ENTITY = 70,
 };
...
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+			  struct cudbg_buffer *dbg_buff,
+			  struct cudbg_error *cudbg_err);
 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);

 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
 u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
 int cudbg_dump_context_size(struct adapter *padap);

-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+		       struct cudbg_meminfo *meminfo_buff);
 void cudbg_fill_le_tcam_info(struct adapter *padap,
			     struct cudbg_tcam *tcam_region);
 #endif /* __CUDBG_LIB_H__ */
@@ -77,7 +77,8 @@ enum {
 	MEM_EDC1,
 	MEM_MC,
 	MEM_MC0 = MEM_MC,
-	MEM_MC1
+	MEM_MC1,
+	MEM_HMA,
 };

 enum {
@@ -1653,7 +1654,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_update_port_info(struct port_info *pi);
 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
...
@@ -18,11 +18,13 @@
 #include "t4_regs.h"
 #include "cxgb4.h"
 #include "cxgb4_cudbg.h"
+#include "cudbg_entity.h"

 static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
+	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
+	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
 };

 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +55,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
	{ CUDBG_TP_LA, cudbg_collect_tp_la },
+	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
	{ CUDBG_CLK, cudbg_collect_clk_info },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +63,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_TID_INFO, cudbg_collect_tid },
+	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,6 +162,22 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
+	case CUDBG_MC0:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM0_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			len = EXT_MEM0_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
+	case CUDBG_MC1:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM1_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
	case CUDBG_RSS:
		len = RSS_NENTRIES * sizeof(u16);
		break;
@@ -201,6 +221,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
+	case CUDBG_MEMINFO:
+		len = sizeof(struct cudbg_meminfo);
+		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +242,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
+	case CUDBG_PCIE_CONFIG:
+		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
@@ -264,6 +290,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
			len = sizeof(struct ireg_buf) * n;
		}
		break;
+	case CUDBG_HMA:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & HMA_MUX_F) {
+			/* In T6, there's no MC1. So, HMA shares MC1
+			 * address space.
+			 */
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
	default:
		break;
	}
...
@@ -20,6 +20,7 @@
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
 #include "cudbg_lib.h"

 typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
...
@@ -45,6 +45,10 @@
 #include "cxgb4_debugfs.h"
 #include "clip_tbl.h"
 #include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"

 /* generic seq_file support for showing a table of size rows x width. */

 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -2794,18 +2798,6 @@ static const struct file_operations blocked_fl_fops = {
	.llseek  = generic_file_llseek,
 };

-struct mem_desc {
-	unsigned int base;
-	unsigned int limit;
-	unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
-	return ((const struct mem_desc *)a)->base -
-	       ((const struct mem_desc *)b)->base;
-}
-
 static void mem_region_show(struct seq_file *seq, const char *name,
			    unsigned int from, unsigned int to)
 {
@@ -2819,250 +2811,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
 static int meminfo_show(struct seq_file *seq, void *v)
 {
	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
-					       "MC0:", "MC1:"};
-	static const char * const region[] = {
-		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
-		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
-		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
-		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
-		"RQUDP region:", "PBL region:", "TXPBL region:",
-		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
-		"On-chip queues:"
-	};
-	int i, n;
-	u32 lo, hi, used, alloc;
-	struct mem_desc avail[4];
-	struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
-	struct mem_desc *md = mem;
+					       "MC0:", "MC1:", "HMA:"};
	struct adapter *adap = seq->private;
+	struct cudbg_meminfo meminfo;
+	int i, rc;

-	for (i = 0; i < ARRAY_SIZE(mem); i++) {
-		mem[i].limit = 0;
-		mem[i].idx = i;
-	}
-
-	/* Find and sort the populated memory ranges */
-	i = 0;
-	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-	if (lo & EDRAM0_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
-		avail[i].base = EDRAM0_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
-		avail[i].idx = 0;
-		i++;
-	}
-	if (lo & EDRAM1_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
-		avail[i].base = EDRAM1_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
-		avail[i].idx = 1;
-		i++;
-	}
-	if (is_t5(adap->params.chip)) {
-		if (lo & EXT_MEM0_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
-			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
-			avail[i].idx = 3;
-			i++;
-		}
-		if (lo & EXT_MEM1_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
-			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
-			avail[i].idx = 4;
-			i++;
-		}
-	} else {
-		if (lo & EXT_MEM_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
-			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
-			avail[i].idx = 2;
-			i++;
-		}
-	}
-	if (!i) /* no memory available */
-		return 0;
-	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
-	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
-	/* the next few have explicit upper bounds */
-	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
-		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
-	md++;
-
-	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
-		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
-	md++;
-
-	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
-			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
-			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-		} else {
-			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-			md->base = t4_read_reg(adap,
-					       LE_DB_HASH_TBL_BASE_ADDR_A);
-		}
-		md->limit = 0;
-	} else {
-		md->base = 0;
-		md->idx = ARRAY_SIZE(region); /* hide it */
-	}
-	md++;
-
-#define ulp_region(reg) do { \
-	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
-	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
-	ulp_region(RX_ISCSI);
-	ulp_region(RX_TDDP);
-	ulp_region(TX_TPT);
-	ulp_region(RX_STAG);
-	ulp_region(RX_RQ);
-	ulp_region(RX_RQUDP);
-	ulp_region(RX_PBL);
-	ulp_region(TX_PBL);
-#undef ulp_region
-	md->base = 0;
-	md->idx = ARRAY_SIZE(region);
-	if (!is_t4(adap->params.chip)) {
-		u32 size = 0;
-		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
-		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
-		if (is_t5(adap->params.chip)) {
-			if (sge_ctrl & VFIFO_ENABLE_F)
-				size = DBVFIFO_SIZE_G(fifo_size);
-		} else {
-			size = T6_DBVFIFO_SIZE_G(fifo_size);
-		}
-
-		if (size) {
-			md->base = BASEADDR_G(t4_read_reg(adap,
-					SGE_DBVFIFO_BADDR_A));
-			md->limit = md->base + (size << 2) - 1;
-		}
-	}
-
-	md++;
-
-	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
-	md->limit = 0;
-	md++;
-	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
-	md->limit = 0;
-	md++;
-
-	md->base = adap->vres.ocq.start;
-	if (adap->vres.ocq.size)
-		md->limit = md->base + adap->vres.ocq.size - 1;
-	else
-		md->idx = ARRAY_SIZE(region); /* hide it */
-	md++;
-
-	/* add any address-space holes, there can be up to 3 */
-	for (n = 0; n < i - 1; n++)
-		if (avail[n].limit < avail[n + 1].base)
-			(md++)->base = avail[n].limit;
-	if (avail[n].limit)
-		(md++)->base = avail[n].limit;
-
-	n = md - mem;
-	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+	memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+	rc = cudbg_fill_meminfo(adap, &meminfo);
+	if (rc)
+		return -ENXIO;

-	for (lo = 0; lo < i; lo++)
-		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
-				avail[lo].limit - 1);
+	for (i = 0; i < meminfo.avail_c; i++)
+		mem_region_show(seq, memory[meminfo.avail[i].idx],
+				meminfo.avail[i].base,
+				meminfo.avail[i].limit - 1);

	seq_putc(seq, '\n');
-	for (i = 0; i < n; i++) {
-		if (mem[i].idx >= ARRAY_SIZE(region))
+	for (i = 0; i < meminfo.mem_c; i++) {
+		if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* skip holes */
-		if (!mem[i].limit)
-			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
-		mem_region_show(seq, region[mem[i].idx], mem[i].base,
-				mem[i].limit);
+		if (!meminfo.mem[i].limit)
+			meminfo.mem[i].limit =
+				i < meminfo.mem_c - 1 ?
+				meminfo.mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+				meminfo.mem[i].base, meminfo.mem[i].limit);
	}

	seq_putc(seq, '\n');
-	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP RAM:", lo, hi);
-
-	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP Extmem2:", lo, hi);
+	mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+	mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+			meminfo.up_extmem2_hi);

-	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
-		   PMRXMAXPAGE_G(lo),
-		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
-		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+		   meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+		   meminfo.rx_pages_data[2]);

-	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
-	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
-		   PMTXMAXPAGE_G(lo),
-		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
-		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+		   meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+		   meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);

-	seq_printf(seq, "%u p-structs\n\n",
-		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+	seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);

-	for (i = 0; i < 4; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+	for (i = 0; i < 4; i++)
		/* For T6 these are MAC buffer groups */
		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
+			   i, meminfo.port_used[i], meminfo.port_alloc[i]);

-	for (i = 0; i < adap->params.arch.nchan; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap,
-					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+	for (i = 0; i < adap->params.arch.nchan; i++)
		/* For T6 these are MAC buffer groups */
		seq_printf(seq,
			   "Loopback %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
+			   i, meminfo.loopback_used[i],
+			   meminfo.loopback_alloc[i]);

	return 0;
 }
...
@@ -1673,7 +1673,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
 {
	struct adapter *adap = netdev2adap(dev);

-	return t4_sge_ctxt_flush(adap, adap->mbox);
+	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
...
@@ -524,11 +524,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
+	 * MEM_HMA  = 4
	 */
	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
-	if (mtype != MEM_MC1)
+	if (mtype == MEM_HMA) {
+		memoffset = 2 * (edc_size * 1024 * 1024);
+	} else if (mtype != MEM_MC1) {
		memoffset = (mtype * (edc_size * 1024 * 1024));
-	else {
+	} else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY0_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
@@ -6527,18 +6530,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
+*	@ctx_type: Egress or Ingress
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 {
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
-	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+						 FW_LDST_ADDRSPC_SGE_EGRC :
+						 FW_LDST_ADDRSPC_SGE_INGC);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F | FW_CMD_READ_F |
					ldst_addrspace);
...
@@ -70,7 +70,9 @@ enum {

 /* SGE context types */
 enum ctxt_type {
-	CTXT_FLM = 2,
+	CTXT_EGRESS,
+	CTXT_INGRESS,
+	CTXT_FLM,
	CTXT_CNM,
 };
...
@@ -961,6 +961,10 @@
 #define MA_EXT_MEMORY1_BAR_A 0x7808

+#define HMA_MUX_S    5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F    HMA_MUX_V(1U)
+
 #define EXT_MEM1_BASE_S    16
 #define EXT_MEM1_BASE_M    0xfffU
 #define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
...