Commit b12349ae authored by Dean Luick's avatar Dean Luick Committed by Doug Ledford

IB/hfi1: Create a routine to set a receive side mapping rule

Move the rule setting code into its own routine for improved
searchability and reuse.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4a818bed
...@@ -13429,6 +13429,21 @@ struct rsm_map_table { ...@@ -13429,6 +13429,21 @@ struct rsm_map_table {
unsigned int used; unsigned int used;
}; };
/*
 * Parameters for one receive side mapping (RSM) rule; consumed by
 * add_rsm_rule(), which shifts each field into the RCV_RSM_CFG,
 * RCV_RSM_SELECT, and RCV_RSM_MATCH CSRs.
 */
struct rsm_rule_data {
u8 offset; /* starting offset into the RSM map table */
u8 pkt_type; /* packet type this rule applies to */
u32 field1_off; /* offset of match field 1 in the packet */
u32 field2_off; /* offset of match field 2 in the packet */
u32 index1_off; /* offset of index source 1 in the packet */
u32 index1_width; /* width, in bits, of index source 1 */
u32 index2_off; /* offset of index source 2 in the packet */
u32 index2_width; /* width, in bits, of index source 2 */
u32 mask1; /* mask applied to match field 1 */
u32 value1; /* value field 1 must equal after masking */
u32 mask2; /* mask applied to match field 2 */
u32 value2; /* value field 2 must equal after masking */
};
/* /*
* Return an initialized RMT map table for users to fill in. OK if it * Return an initialized RMT map table for users to fill in. OK if it
* returns NULL, indicating no table. * returns NULL, indicating no table.
...@@ -13466,6 +13481,30 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd, ...@@ -13466,6 +13481,30 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd,
} }
} }
/*
 * Add a receive side mapping rule.
 *
 * Programs the CFG, SELECT, and MATCH CSRs for the RSM rule at
 * @rule_index from the values in @rrd.  The rule is enabled as part
 * of the CFG write.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	u64 cfg, sel, match;

	/* rule configuration: RMT offset, per-rule enable bit, packet type */
	cfg = (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT;
	cfg |= 1ull << rule_index; /* enable bit */
	cfg |= (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT;

	/* where in the packet the match fields and index sources live */
	sel = (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT;
	sel |= (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT;
	sel |= (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT;
	sel |= (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT;
	sel |= (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT;
	sel |= (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT;

	/* mask/value pairs the extracted fields must satisfy */
	match = (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT;
	match |= (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT;
	match |= (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT;
	match |= (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT;

	/* each rule occupies one 8-byte slot in each CSR array */
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), cfg);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), sel);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), match);
}
/* return the number of RSM map table entries that will be used for QOS */ /* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np) unsigned int *np)
...@@ -13526,6 +13565,7 @@ static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, ...@@ -13526,6 +13565,7 @@ static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
*/ */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{ {
struct rsm_rule_data rrd;
unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
unsigned int rmt_entries; unsigned int rmt_entries;
u64 reg; u64 reg;
...@@ -13565,24 +13605,23 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) ...@@ -13565,24 +13605,23 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
} }
ctxt += krcvqs[i]; ctxt += krcvqs[i];
} }
/* add rule0 */
write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */, rrd.offset = rmt->used;
(u64)rmt->used << RCV_RSM_CFG_OFFSET_SHIFT | rrd.pkt_type = 2;
RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK << rrd.field1_off = LRH_BTH_MATCH_OFFSET;
RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT | rrd.field2_off = LRH_SC_MATCH_OFFSET;
2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT); rrd.index1_off = LRH_SC_SELECT_OFFSET;
write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */, rrd.index1_width = n;
LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | rrd.index2_off = QPN_SELECT_OFFSET;
LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | rrd.index2_width = m + n;
LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | rrd.mask1 = LRH_BTH_MASK;
((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | rrd.value1 = LRH_BTH_VALUE;
QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | rrd.mask2 = LRH_SC_MASK;
((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); rrd.value2 = LRH_SC_VALUE;
write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT | /* add rule 0 */
LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT | add_rsm_rule(dd, 0, &rrd);
LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
/* mark RSM map entries as used */ /* mark RSM map entries as used */
rmt->used += rmt_entries; rmt->used += rmt_entries;
/* map everything else to the mcast/err/vl15 context */ /* map everything else to the mcast/err/vl15 context */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment