Commit 84e39eeb authored by Linus Torvalds

Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull second round of rdma updates from Doug Ledford:
 "This can be split out into just two categories:

   - fixes to the RDMA R/W API with regard to SG list length limits
     (about 5 patches)

   - fixes/features for the Intel hfi1 driver (everything else)

  The hfi1 driver is still being brought to full feature support by
  Intel, and they have a lot of people working on it, so that amounts to
  almost the entirety of this pull request"

* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (84 commits)
  IB/hfi1: Add cache evict LRU list
  IB/hfi1: Fix memory leak during unexpected shutdown
  IB/hfi1: Remove unneeded mm argument in remove function
  IB/hfi1: Consistently call ops->remove outside spinlock
  IB/hfi1: Use evict mmu rb operation
  IB/hfi1: Add evict operation to the mmu rb handler
  IB/hfi1: Fix TID caching actions
  IB/hfi1: Make the cache handler own its rb tree root
  IB/hfi1: Make use of mm consistent
  IB/hfi1: Fix user SDMA racy user request claim
  IB/hfi1: Fix error condition that needs to clean up
  IB/hfi1: Release node on insert failure
  IB/hfi1: Validate SDMA user iovector count
  IB/hfi1: Validate SDMA user request index
  IB/hfi1: Use the same capability state for all shared contexts
  IB/hfi1: Prevent null pointer dereference
  IB/hfi1: Rename TID mmu_rb_* functions
  IB/hfi1: Remove unneeded empty check in hfi1_mmu_rb_unregister()
  IB/hfi1: Restructure hfi1_file_open
  IB/hfi1: Make iovec loop index easy to understand
  ...
parents 0cda6113 7c41765d
......@@ -58,19 +58,13 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
return false;
}
static inline u32 rdma_rw_max_sge(struct ib_device *dev,
enum dma_data_direction dir)
{
return dir == DMA_TO_DEVICE ?
dev->attrs.max_sge : dev->attrs.max_sge_rd;
}
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
/* arbitrary limit to avoid allocating gigantic resources */
return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
u32 sg_cnt, u32 offset)
......@@ -114,6 +108,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct rdma_rw_reg_ctx *prev = NULL;
u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
int i, j, ret = 0, count = 0;
......@@ -125,7 +120,6 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
}
for (i = 0; i < ctx->nr_ops; i++) {
struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
u32 nents = min(sg_cnt, pages_per_mr);
......@@ -162,9 +156,13 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
sg_cnt -= nents;
for (j = 0; j < nents; j++)
sg = sg_next(sg);
prev = reg;
offset = 0;
}
if (prev)
prev->wr.wr.next = NULL;
ctx->type = RDMA_RW_MR;
return count;
......@@ -181,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct ib_device *dev = qp->pd->device;
u32 max_sge = rdma_rw_max_sge(dev, dir);
u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
qp->max_read_sge;
struct ib_sge *sge;
u32 total_len = 0, i, j;
......@@ -205,11 +204,10 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = remote_addr + total_len;
rdma_wr->rkey = rkey;
rdma_wr->wr.num_sge = nr_sge;
rdma_wr->wr.sg_list = sge;
for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
rdma_wr->wr.num_sge++;
sge->addr = ib_sg_dma_address(dev, sg) + offset;
sge->length = ib_sg_dma_len(dev, sg) - offset;
sge->lkey = qp->pd->local_dma_lkey;
......@@ -220,8 +218,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
offset = 0;
}
if (i + 1 < ctx->nr_ops)
rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
&ctx->map.wrs[i + 1].wr : NULL;
}
ctx->type = RDMA_RW_MULTI_WR;
......
......@@ -825,6 +825,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
}
}
/*
* Note: all hw drivers guarantee that max_send_sge is lower than
* the device RDMA WRITE SGE limit but not all hw drivers ensure that
* max_send_sge <= max_sge_rd.
*/
qp->max_write_sge = qp_init_attr->cap.max_send_sge;
qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
device->attrs.max_sge_rd);
return qp;
}
EXPORT_SYMBOL(ib_create_qp);
......
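The two rw.c hunks above plus this ib_create_qp() change are the heart of the SG-limit fixes: the removed rdma_rw_max_sge() consulted device-wide attributes, which can over-report what a given QP may actually post, so the limits are now derived once at QP creation and cached in qp->max_write_sge/qp->max_read_sge. A small, compilable sketch of that derivation, using reduced stand-in structures rather than the real ib_device/ib_qp definitions:

#include <stdint.h>
#include <stdio.h>

/* reduced stand-ins for the kernel structures, for illustration only */
struct dev_attrs { uint32_t max_sge; uint32_t max_sge_rd; };
struct qp_cap    { uint32_t max_send_sge; };

/* mirrors the logic added to ib_create_qp(): RDMA WRITEs may use the
 * full send SGE count, RDMA READs are additionally clamped by the
 * device's max_sge_rd, since not every driver keeps them consistent */
static void derive_sge_limits(const struct dev_attrs *attrs,
			      const struct qp_cap *cap,
			      uint32_t *max_write_sge, uint32_t *max_read_sge)
{
	*max_write_sge = cap->max_send_sge;
	*max_read_sge = cap->max_send_sge < attrs->max_sge_rd ?
			cap->max_send_sge : attrs->max_sge_rd;
}

int main(void)
{
	struct dev_attrs attrs = { .max_sge = 32, .max_sge_rd = 8 };
	struct qp_cap cap = { .max_send_sge = 16 };
	uint32_t w, r;

	derive_sge_limits(&attrs, &cap, &w, &r);
	printf("write %u read %u\n", (unsigned)w, (unsigned)r); /* write 16 read 8 */
	return 0;
}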
config INFINIBAND_HFI1
tristate "Intel OPA Gen1 support"
depends on X86_64 && INFINIBAND_RDMAVT
depends on X86_64 && INFINIBAND_RDMAVT && I2C
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
---help---
This is a low-level driver for the Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
......
......@@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
verbs_txreq.o
hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
......
......@@ -73,7 +73,6 @@ struct cpu_mask_set {
struct hfi1_affinity {
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
struct cpu_mask_set proc;
struct cpumask real_cpu_mask;
/* spin lock to protect affinity struct */
spinlock_t lock;
......@@ -82,11 +81,9 @@ struct hfi1_affinity {
struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
int init_real_cpu_mask(struct hfi1_devdata *);
void init_real_cpu_mask(void);
/* Initialize driver affinity data */
void hfi1_dev_affinity_init(struct hfi1_devdata *);
/* Free driver affinity data */
void hfi1_dev_affinity_free(struct hfi1_devdata *);
int hfi1_dev_affinity_init(struct hfi1_devdata *);
/*
* Set IRQ affinity to a CPU. The function will determine the
* CPU and set the affinity to it.
......@@ -101,8 +98,35 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
* Determine a CPU affinity for a user process, if the process does not
* have an affinity set yet.
*/
int hfi1_get_proc_affinity(struct hfi1_devdata *, int);
int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
void hfi1_put_proc_affinity(struct hfi1_devdata *, int);
void hfi1_put_proc_affinity(int);
int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
size_t count);
struct hfi1_affinity_node {
int node;
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
struct cpumask general_intr_mask;
struct list_head list;
};
struct hfi1_affinity_node_list {
struct list_head list;
struct cpumask real_cpu_mask;
struct cpu_mask_set proc;
int num_core_siblings;
int num_online_nodes;
int num_online_cpus;
/* protect affinity node list */
spinlock_t lock;
};
int node_affinity_init(void);
void node_affinity_destroy(void);
extern struct hfi1_affinity_node_list node_affinity;
#endif /* _HFI1_AFFINITY_H */
......@@ -640,6 +640,7 @@ extern uint platform_config_load;
/* SBus commands */
#define RESET_SBUS_RECEIVER 0x20
#define WRITE_SBUS_RECEIVER 0x21
#define READ_SBUS_RECEIVER 0x22
void sbus_request(struct hfi1_devdata *dd,
u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
int sbus_request_slow(struct hfi1_devdata *dd,
......@@ -1336,10 +1337,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct hfi1_message_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr);
int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
struct hfi1_ctxt_info *kinfo);
u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
u32 mask);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
......
......@@ -471,6 +471,10 @@
#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
#define ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT 2
#define ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK 0x7ull
#define ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT 32
#define ASIC_STS_SBUS_RESULT_DATA_OUT_MASK 0xFFFFFFFFull
#define ASIC_STS_THERM (ASIC + 0x000000000058)
#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
......
......@@ -392,9 +392,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
u16 rlid;
u8 svc_type, sl, sc5;
sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
if (rhf_dc_info(packet->rhf))
sc5 |= 0x10;
sc5 = hdr2sc(rhdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
......@@ -450,14 +448,20 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->rcv_flags = 0;
}
static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
struct hfi1_other_headers *ohdr,
u64 rhf, u32 bth1, struct ib_grh *grh)
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
u32 rqpn = 0;
u16 rlid;
u8 sc5, svc_type;
struct hfi1_ib_header *hdr = pkt->hdr;
struct hfi1_other_headers *ohdr = pkt->ohdr;
struct ib_grh *grh = NULL;
u32 rqpn = 0, bth1;
u16 rlid, dlid = be16_to_cpu(hdr->lrh[1]);
u8 sc, svc_type;
bool is_mcast = false;
if (pkt->rcv_flags & HFI1_HAS_GRH)
grh = &hdr->u.l.grh;
switch (qp->ibqp.qp_type) {
case IB_QPT_SMI:
......@@ -466,6 +470,8 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
rlid = be16_to_cpu(hdr->lrh[3]);
rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
svc_type = IB_CC_SVCTYPE_UD;
is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
(dlid != be16_to_cpu(IB_LID_PERMISSIVE));
break;
case IB_QPT_UC:
rlid = qp->remote_ah_attr.dlid;
......@@ -481,24 +487,23 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
return;
}
sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
if (rhf_dc_info(rhf))
sc5 |= 0x10;
sc = hdr2sc((struct hfi1_message_header *)hdr, pkt->rhf);
if (bth1 & HFI1_FECN_SMASK) {
bth1 = be32_to_cpu(ohdr->bth[1]);
if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
u16 dlid = be16_to_cpu(hdr->lrh[1]);
return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
}
if (bth1 & HFI1_BECN_SMASK) {
if (!is_mcast && (bth1 & HFI1_BECN_SMASK)) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = bth1 & RVT_QPN_MASK;
u8 sl = ibp->sc_to_sl[sc5];
u8 sl = ibp->sc_to_sl[sc];
process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
}
struct ps_mdata {
......@@ -596,7 +601,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
struct rvt_qp *qp;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr;
struct ib_grh *grh = NULL;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
......@@ -616,14 +620,13 @@ static void __prescan_rxq(struct hfi1_packet *packet)
hfi1_get_msgheader(dd, rhf_addr);
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
if (lnh == HFI1_LRH_BTH) {
if (lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
} else if (lnh == HFI1_LRH_GRH) {
else if (lnh == HFI1_LRH_GRH)
ohdr = &hdr->u.l.oth;
grh = &hdr->u.l.grh;
} else {
else
goto next; /* just in case */
}
bth1 = be32_to_cpu(ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
......@@ -639,7 +642,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
goto next;
}
process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
process_ecn(qp, packet, true);
rcu_read_unlock();
/* turn off BECN, FECN */
......@@ -1362,6 +1365,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
dd_dev_err(packet->rcd->dd,
"Bypass packets are not supported in normal operation. Dropping\n");
incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
return RHF_RCV_CONTINUE;
}
......
......@@ -168,6 +168,7 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
struct hfi1_filedata *fd;
struct hfi1_devdata *dd = container_of(inode->i_cdev,
struct hfi1_devdata,
user_cdev);
......@@ -176,10 +177,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
kobject_get(&dd->kobj);
/* The real work is performed later in assign_ctxt() */
fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
if (fp->private_data) /* no cpu affinity by default */
((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
return fp->private_data ? 0 : -ENOMEM;
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (fd) {
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
}
fp->private_data = fd;
return fd ? 0 : -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
......@@ -392,41 +400,38 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int ret = 0, done = 0, reqs = 0;
int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
if (!cq || !pq) {
ret = -EIO;
goto done;
}
if (!cq || !pq)
return -EIO;
if (!iter_is_iovec(from) || !dim) {
ret = -EINVAL;
goto done;
}
if (!iter_is_iovec(from) || !dim)
return -EINVAL;
hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
ret = -ENOSPC;
goto done;
}
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
return -ENOSPC;
while (dim) {
int ret;
unsigned long count = 0;
ret = hfi1_user_sdma_process_request(
kiocb->ki_filp, (struct iovec *)(from->iov + done),
dim, &count);
if (ret)
goto done;
if (ret) {
reqs = ret;
break;
}
dim -= count;
done += count;
reqs++;
}
done:
return ret ? ret : reqs;
return reqs;
}
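The rewrite above drops the done: label in favor of direct returns but keeps the original return contract: if any hfi1_user_sdma_process_request() call fails, its error code replaces the count of requests already queued. A standalone model of that control flow; process_one() is a hypothetical stand-in, failing on the third request to exercise the error path:

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in for hfi1_user_sdma_process_request() */
static int process_one(unsigned long idx, unsigned long *count)
{
	if (idx >= 2)
		return -EFAULT;
	*count = 1;
	return 0;
}

/* mirrors the reworked loop: on failure the negative error overwrites
 * the request count, exactly as "reqs = ret; break;" does above */
static int submit_all(unsigned long dim)
{
	unsigned long done = 0;
	int reqs = 0;

	while (dim) {
		unsigned long count = 0;
		int ret = process_one(done, &count);

		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}
	return reqs;
}

int main(void)
{
	printf("%d\n", submit_all(5)); /* -14: the error wins over count 2 */
	return 0;
}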
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
......@@ -718,7 +723,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_user_sdma_free_queues(fdata);
/* release the cpu */
hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
hfi1_put_proc_affinity(fdata->rec_cpu_num);
/*
* Clear any left over, unhandled events so the next process that
......@@ -730,7 +735,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (--uctxt->cnt) {
uctxt->active_slaves &= ~(1 << fdata->subctxt);
uctxt->subpid[fdata->subctxt] = 0;
mutex_unlock(&hfi1_mutex);
goto done;
}
......@@ -756,7 +760,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
sc_disable(uctxt->sc);
uctxt->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
......@@ -818,9 +821,10 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
ret = find_shared_ctxt(fp, uinfo);
if (ret < 0)
goto done_unlock;
if (ret)
fd->rec_cpu_num = hfi1_get_proc_affinity(
fd->uctxt->dd, fd->uctxt->numa_id);
if (ret) {
fd->rec_cpu_num =
hfi1_get_proc_affinity(fd->uctxt->numa_id);
}
}
/*
......@@ -895,7 +899,6 @@ static int find_shared_ctxt(struct file *fp,
}
fd->uctxt = uctxt;
fd->subctxt = uctxt->cnt++;
uctxt->subpid[fd->subctxt] = current->pid;
uctxt->active_slaves |= 1 << fd->subctxt;
ret = 1;
goto done;
......@@ -932,7 +935,11 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
if (ctxt == dd->num_rcv_contexts)
return -EBUSY;
fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
/*
* If we don't have a NUMA node requested, preference is towards
* device NUMA node.
*/
fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
if (fd->rec_cpu_num != -1)
numa = cpu_to_node(fd->rec_cpu_num);
else
......@@ -976,8 +983,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
return ret;
}
uctxt->userversion = uinfo->userversion;
uctxt->pid = current->pid;
uctxt->flags = HFI1_CAP_UGET(MASK);
uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
......@@ -1080,18 +1086,18 @@ static int user_init(struct file *fp)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
* Ignore the bit in the flags for now until proper
* support for multiple packet per rcv array entry is
* added.
*/
if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
/*
* The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
......@@ -1099,7 +1105,7 @@ static int user_init(struct file *fp)
* uses of the chip or ctxt. Therefore, add the rcvctrl op
* for both cases.
*/
if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
else
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
......@@ -1122,9 +1128,14 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
int ret = 0;
memset(&cinfo, 0, sizeof(cinfo));
ret = hfi1_get_base_kinfo(uctxt, &cinfo);
if (ret < 0)
goto done;
cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
/* adjust flag if this fd is not able to cache */
if (!fd->handler)
cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
cinfo.num_active = hfi1_count_active_units();
cinfo.unit = uctxt->dd->unit;
cinfo.ctxt = uctxt->ctxt;
......@@ -1146,7 +1157,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
ret = -EFAULT;
done:
return ret;
}
......
......@@ -206,6 +206,9 @@ static const struct firmware *platform_config;
/* the number of fabric SerDes on the SBus */
#define NUM_FABRIC_SERDES 4
/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
#define SBUS_READ_COMPLETE 0x4
/* SBus fabric SerDes addresses, one set per HFI */
static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
{ 0x01, 0x02, 0x03, 0x04 },
......@@ -240,6 +243,7 @@ static const u8 all_pcie_serdes_broadcast = 0xe0;
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
struct firmware_details *fdet);
static void dump_fw_version(struct hfi1_devdata *dd);
/*
* Read a single 64-bit value from 8051 data memory.
......@@ -1078,6 +1082,44 @@ void sbus_request(struct hfi1_devdata *dd,
ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
}
/*
* Read a value from the SBus.
*
* Requires the caller to be in fast mode
*/
static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
u32 data_in)
{
u64 reg;
int retries;
int success = 0;
u32 result = 0;
u32 result_code = 0;
sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
for (retries = 0; retries < 100; retries++) {
usleep_range(1000, 1200); /* arbitrary */
reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
& ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
if (result_code != SBUS_READ_COMPLETE)
continue;
success = 1;
result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
& ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
break;
}
if (!success) {
dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
result_code);
}
return result;
}
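sbus_read() above is a poll-until-done register read: issue the request, sleep-poll until the RESULT_CODE field reports completion, and only then trust DATA_OUT. The decode step can be modeled standalone with the field definitions this series adds to chip_registers.h; the status value below is fabricated for the demo:

#include <stdint.h>
#include <stdio.h>

#define RESULT_CODE_SHIFT	2
#define RESULT_CODE_MASK	0x7ull
#define DATA_OUT_SHIFT		32
#define DATA_OUT_MASK		0xFFFFFFFFull
#define SBUS_READ_COMPLETE	0x4

int main(void)
{
	/* fake ASIC_STS_SBUS_RESULT: DONE|VALID set, code 4, data 0xdeadbeef */
	uint64_t reg = ((uint64_t)0xdeadbeef << DATA_OUT_SHIFT) |
		       ((uint64_t)SBUS_READ_COMPLETE << RESULT_CODE_SHIFT) | 0x3;
	unsigned int code = (reg >> RESULT_CODE_SHIFT) & RESULT_CODE_MASK;

	if (code == SBUS_READ_COMPLETE)
		printf("data 0x%08x\n",
		       (unsigned int)((reg >> DATA_OUT_SHIFT) & DATA_OUT_MASK));
	else
		printf("read failed, result code 0x%x\n", code);
	return 0;
}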
/*
* Turn off the SBus and fabric serdes spicos.
*
......@@ -1636,6 +1678,7 @@ int load_firmware(struct hfi1_devdata *dd)
return ret;
}
dump_fw_version(dd);
return 0;
}
......@@ -2054,3 +2097,85 @@ void read_guid(struct hfi1_devdata *dd)
dd_dev_info(dd, "GUID %llx",
(unsigned long long)dd->base_guid);
}
/* read and display firmware version info */
static void dump_fw_version(struct hfi1_devdata *dd)
{
u32 pcie_vers[NUM_PCIE_SERDES];
u32 fabric_vers[NUM_FABRIC_SERDES];
u32 sbus_vers;
int i;
int all_same;
int ret;
u8 rcv_addr;
ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
if (ret) {
dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
return;
}
/* set fast mode */
set_sbus_fast_mode(dd);
/* read version for SBus Master */
sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
/* wait for interrupt to be processed */
usleep_range(10000, 11000);
sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
/* read version for PCIe SerDes */
all_same = 1;
pcie_vers[0] = 0;
for (i = 0; i < NUM_PCIE_SERDES; i++) {
rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
/* wait for interrupt to be processed */
usleep_range(10000, 11000);
pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
if (i > 0 && pcie_vers[0] != pcie_vers[i])
all_same = 0;
}
if (all_same) {
dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
pcie_vers[0]);
} else {
dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
for (i = 0; i < NUM_PCIE_SERDES; i++) {
dd_dev_info(dd,
"PCIe SerDes lane %d firmware version 0x%x\n",
i, pcie_vers[i]);
}
}
/* read version for fabric SerDes */
all_same = 1;
fabric_vers[0] = 0;
for (i = 0; i < NUM_FABRIC_SERDES; i++) {
rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
/* wait for interrupt to be processed */
usleep_range(10000, 11000);
fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
if (i > 0 && fabric_vers[0] != fabric_vers[i])
all_same = 0;
}
if (all_same) {
dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
fabric_vers[0]);
} else {
dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
for (i = 0; i < NUM_FABRIC_SERDES; i++) {
dd_dev_info(dd,
"Fabric SerDes lane %d firmware version 0x%x\n",
i, fabric_vers[i]);
}
}
clear_sbus_fast_mode(dd);
release_chip_resource(dd, CR_SBUS);
}
......@@ -62,6 +62,8 @@
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
......@@ -253,7 +255,7 @@ struct hfi1_ctxtdata {
/* chip offset of PIO buffers for this ctxt */
u32 piobufs;
/* per-context configuration flags */
u32 flags;
unsigned long flags;
/* per-context event flags for fileops/intr communication */
unsigned long event_flags;
/* WAIT_RCV that timed out, no interrupt */
......@@ -268,9 +270,6 @@ struct hfi1_ctxtdata {
u32 urgent;
/* saved total number of polled urgent packets for poll edge trigger */
u32 urgent_poll;
/* pid of process using this ctxt */
pid_t pid;
pid_t subpid[HFI1_MAX_SHARED_CTXTS];
/* same size as task_struct .comm[], command that opened context */
char comm[TASK_COMM_LEN];
/* so file ops can get at unit */
......@@ -366,11 +365,6 @@ struct hfi1_packet {
u8 etype;
};
static inline bool has_sc4_bit(struct hfi1_packet *p)
{
return !!rhf_dc_info(p->rhf);
}
/*
* Private data for snoop/capture support.
*/
......@@ -805,10 +799,19 @@ struct hfi1_temp {
u8 triggers; /* temperature triggers */
};
struct hfi1_i2c_bus {
struct hfi1_devdata *controlling_dd; /* current controlling device */
struct i2c_adapter adapter; /* bus details */
struct i2c_algo_bit_data algo; /* bus algorithm details */
int num; /* bus number, 0 or 1 */
};
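hfi1_i2c_bus wraps the kernel's generic bit-banging I2C core (hence the new I2C/I2C_ALGOBIT Kconfig dependencies and the i2c.h/i2c-algo-bit.h includes above): the driver only supplies GPIO-level line callbacks and i2c-algo-bit runs the protocol. A skeletal registration in that style; this is an illustrative fragment, not the actual hfi1 implementation, and the hfi1_setsda()/hfi1_setscl()/hfi1_getsda()/hfi1_getscl() names are hypothetical:

/* illustrative stubs; real callbacks would toggle/sample the QSFP
 * i2c lines through chip CSRs */
static void hfi1_setsda(void *data, int state) { /* drive SDA */ }
static void hfi1_setscl(void *data, int state) { /* drive SCL */ }
static int hfi1_getsda(void *data) { return 1; /* sample SDA */ }
static int hfi1_getscl(void *data) { return 1; /* sample SCL */ }

static int register_i2c_bus(struct hfi1_i2c_bus *bus)
{
	bus->algo.setsda = hfi1_setsda;
	bus->algo.setscl = hfi1_setscl;
	bus->algo.getsda = hfi1_getsda;
	bus->algo.getscl = hfi1_getscl;
	bus->algo.udelay = 5;		/* ~100 kHz bus */
	bus->algo.timeout = usecs_to_jiffies(100000);
	bus->algo.data = bus;

	bus->adapter.algo_data = &bus->algo;
	snprintf(bus->adapter.name, sizeof(bus->adapter.name),
		 "hfi1 i2c bus %d", bus->num);
	return i2c_bit_add_bus(&bus->adapter);
}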
/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
struct hfi1_devdata *dds[2]; /* back pointers */
struct mutex asic_resource_mutex;
struct hfi1_i2c_bus *i2c_bus0;
struct hfi1_i2c_bus *i2c_bus1;
};
/* device data struct now contains only "general per-device" info.
......@@ -1128,7 +1131,8 @@ struct hfi1_devdata {
NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
/* Software counter that aggregates all cce_err_status errors */
u64 sw_cce_err_status_aggregate;
/* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
/* receive interrupt functions */
rhf_rcv_function_ptr *rhf_rcv_function_map;
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
......@@ -1184,6 +1188,7 @@ struct hfi1_devdata {
struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
......@@ -1194,7 +1199,7 @@ struct hfi1_filedata {
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
struct rb_root tid_rb_root;
struct mmu_rb_handler *handler;
struct tid_rb_node **entry_to_rb;
spinlock_t tid_lock; /* protect tid_[limit,used] counters */
u32 tid_limit;
......@@ -1203,6 +1208,7 @@ struct hfi1_filedata {
u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock;
struct mm_struct *mm;
};
extern struct list_head hfi1_dev_list;
......@@ -1236,6 +1242,8 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
void set_all_slowpath(struct hfi1_devdata *dd);
extern const struct pci_device_id hfi1_pci_tbl[];
/* receive packet handler dispositions */
#define RCV_PKT_OK 0x0 /* keep going */
#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
......@@ -1261,7 +1269,7 @@ void receive_interrupt_work(struct work_struct *work);
static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
{
return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
((!!(rhf & RHF_DC_INFO_SMASK)) << 4);
((!!(rhf_dc_info(rhf))) << 4);
}
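hdr2sc() assembles the 5-bit service class: bits 0-3 from the LRH SC field (top nibble of lrh[0]) and bit 4 from the RHF DC info flag, which is exactly what the open-coded "sc5 |= 0x10" sequences deleted elsewhere in this series used to do by hand. A self-contained model; lrh0 is the host-order first LRH word and dc_info the already-extracted boolean:

#include <stdint.h>
#include <stdio.h>

static int hdr2sc_model(uint16_t lrh0, int dc_info)
{
	/* bits 0-3: LRH SC; bit 4: DC info */
	return ((lrh0 >> 12) & 0xf) | ((!!dc_info) << 4);
}

int main(void)
{
	printf("%d\n", hdr2sc_model(0xb000, 1)); /* SC 0xb | 0x10 = 27 */
	return 0;
}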
static inline u16 generate_jkey(kuid_t uid)
......@@ -1571,6 +1579,22 @@ static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
return &dd->pport[pidx].ibport_data;
}
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp);
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct hfi1_other_headers *ohdr = pkt->ohdr;
u32 bth1;
bth1 = be32_to_cpu(ohdr->bth[1]);
if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
return bth1 & HFI1_FECN_SMASK;
}
return false;
}
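This wrapper is the point of the ECN rework: the common case (neither FECN nor BECN set) costs one inline masked compare, hfi1_process_ecn_slowpath() runs only for marked packets, and the return value tells the caller whether it still owes a response for the FECN. A minimal standalone model of the same fast-path/slow-path split; the mask bit positions here are illustrative, the real ones are HFI1_FECN_SMASK/HFI1_BECN_SMASK from the driver headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FECN_SMASK (1u << 31)	/* illustrative positions */
#define BECN_SMASK (1u << 30)

static void slowpath(uint32_t bth1)	/* stands in for the slowpath call */
{
	printf("slowpath: fecn=%d becn=%d\n",
	       !!(bth1 & FECN_SMASK), !!(bth1 & BECN_SMASK));
}

static bool process_ecn_model(uint32_t bth1)
{
	if (bth1 & (BECN_SMASK | FECN_SMASK)) {	/* rare: marked packet */
		slowpath(bth1);
		return bth1 & FECN_SMASK;
	}
	return false;				/* common: nothing to do */
}

int main(void)
{
	printf("%d\n", process_ecn_model(FECN_SMASK));	/* 1: FECN owed */
	printf("%d\n", process_ecn_model(0));		/* 0: fast path */
	return 0;
}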
/*
* Return the indexed PKEY from the port PKEY table.
*/
......@@ -1588,14 +1612,23 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
}
/*
* Readers of cc_state must call get_cc_state() under rcu_read_lock().
* Writers of cc_state must call get_cc_state() under cc_state_lock.
* Called by readers of cc_state only, must call under rcu_read_lock().
*/
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
return rcu_dereference(ppd->cc_state);
}
/*
* Called by writers of cc_state only, must call under cc_state_lock.
*/
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
return rcu_dereference_protected(ppd->cc_state,
lockdep_is_held(&ppd->cc_state_lock));
}
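Splitting get_cc_state() into an RCU-reader variant and a lock-held writer variant lets lockdep validate each call site: rcu_dereference_protected() documents, and checks, that cc_state_lock is held rather than requiring a pointless rcu_read_lock in the writer. Illustrative call patterns for both sides follow; this is a kernel-style fragment, not a compilable unit, and it assumes cc_state embeds a struct rcu_head named rcu (as the existing reclaim path implies):

/* reader: inside an RCU read-side critical section */
rcu_read_lock();
cc_state = get_cc_state(ppd);
if (cc_state) {
	/* read fields of *cc_state here */
}
rcu_read_unlock();

/* writer: publish a replacement under ppd->cc_state_lock */
spin_lock(&ppd->cc_state_lock);
old_cc_state = get_cc_state_protected(ppd);
rcu_assign_pointer(ppd->cc_state, new_cc_state);
spin_unlock(&ppd->cc_state_lock);

/* free the old state only after all readers are done */
if (old_cc_state)
	kfree_rcu(old_cc_state, rcu);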
/*
* values for dd->flags (_device_ related flags)
*/
......@@ -1671,9 +1704,12 @@ void shutdown_led_override(struct hfi1_pportdata *ppd);
*/
#define DEFAULT_RCVHDR_ENTSIZE 32
bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
u32 nlocked, u32 npages);
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
size_t npages, bool writable, struct page **pages);
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
......@@ -1949,4 +1985,55 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
#define show_packettype(etype) \
__print_symbolic(etype, \
packettype_name(EXPECTED), \
packettype_name(EAGER), \
packettype_name(IB), \
packettype_name(ERROR), \
packettype_name(BYPASS))
#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
#define show_ib_opcode(opcode) \
__print_symbolic(opcode, \
ib_opcode_name(RC_SEND_FIRST), \
ib_opcode_name(RC_SEND_MIDDLE), \
ib_opcode_name(RC_SEND_LAST), \
ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
ib_opcode_name(RC_SEND_ONLY), \
ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(RC_RDMA_WRITE_FIRST), \
ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
ib_opcode_name(RC_RDMA_WRITE_LAST), \
ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
ib_opcode_name(RC_RDMA_WRITE_ONLY), \
ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(RC_RDMA_READ_REQUEST), \
ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
ib_opcode_name(RC_ACKNOWLEDGE), \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
ib_opcode_name(UC_SEND_ONLY), \
ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(UC_RDMA_WRITE_FIRST), \
ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
ib_opcode_name(UC_RDMA_WRITE_LAST), \
ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
ib_opcode_name(UC_RDMA_WRITE_ONLY), \
ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(UD_SEND_ONLY), \
ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(CNP))
#endif /* _HFI1_KERNEL_H */
......@@ -64,6 +64,7 @@
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
......@@ -474,8 +475,9 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
int i, size;
int i;
uint default_pkey_idx;
struct cc_state *cc_state;
ppd->dd = dd;
ppd->hw_pidx = hw_pidx;
......@@ -526,9 +528,9 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cc_state_lock);
spin_lock_init(&ppd->cc_log_lock);
size = sizeof(struct cc_state);
RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
if (!rcu_dereference(ppd->cc_state))
cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
RCU_INIT_POINTER(ppd->cc_state, cc_state);
if (!cc_state)
goto bail;
return;
......@@ -972,39 +974,49 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
/*
* Release our hold on the shared asic data. If we are the last one,
* free the structure. Must be holding hfi1_devs_lock.
* return the structure to be finalized outside the lock. Must be
* holding hfi1_devs_lock.
*/
static void release_asic_data(struct hfi1_devdata *dd)
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
struct hfi1_asic_data *ad;
int other;
if (!dd->asic_data)
return;
return NULL;
dd->asic_data->dds[dd->hfi1_id] = NULL;
other = dd->hfi1_id ? 0 : 1;
if (!dd->asic_data->dds[other]) {
/* we are the last holder, free it */
kfree(dd->asic_data);
}
ad = dd->asic_data;
dd->asic_data = NULL;
/* return NULL if the other dd still has a link */
return ad->dds[other] ? NULL : ad;
}
static void finalize_asic_data(struct hfi1_devdata *dd,
struct hfi1_asic_data *ad)
{
clean_up_i2c(dd, ad);
kfree(ad);
}
static void __hfi1_free_devdata(struct kobject *kobj)
{
struct hfi1_devdata *dd =
container_of(kobj, struct hfi1_devdata, kobj);
struct hfi1_asic_data *ad;
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
idr_remove(&hfi1_unit_table, dd->unit);
list_del(&dd->list);
release_asic_data(dd);
ad = release_asic_data(dd);
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
if (ad)
finalize_asic_data(dd, ad);
free_platform_config(dd);
rcu_barrier(); /* wait for rcu callbacks to complete */
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
hfi1_dev_affinity_free(dd);
free_percpu(dd->send_schedule);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
......@@ -1162,7 +1174,7 @@ static int init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
static const struct pci_device_id hfi1_pci_tbl[] = {
const struct pci_device_id hfi1_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
{ 0, }
......@@ -1198,6 +1210,10 @@ static int __init hfi1_mod_init(void)
if (ret)
goto bail;
ret = node_affinity_init();
if (ret)
goto bail;
/* validate max MTU before any devices start */
if (!valid_opa_max_mtu(hfi1_max_mtu)) {
pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
......@@ -1278,6 +1294,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
node_affinity_destroy();
hfi1_wss_exit();
hfi1_dbg_exit();
hfi1_cpulist_count = 0;
......@@ -1311,7 +1328,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
spin_lock(&ppd->cc_state_lock);
cc_state = get_cc_state(ppd);
cc_state = get_cc_state_protected(ppd);
RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
......@@ -1760,8 +1777,8 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
hfi1_cdbg(PROC,
"ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
rcd->egrbufs.size);
rcd->ctxt, rcd->egrbufs.alloced,
rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
/*
* Set the contexts rcv array head update threshold to the closest
......
......@@ -588,7 +588,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_phys_conf = (ppd->port_type & 0xf);
#if PI_LED_ENABLE_SUP
pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
pi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
......@@ -602,11 +601,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
pi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
#else
pi->port_states.offline_reason = ppd->neighbor_normal << 4;
pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
pi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */
pi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | state;
......@@ -1752,17 +1746,11 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (start_of_sm_config && (lstate == IB_PORT_INIT))
ppd->is_sm_config_started = 1;
#if PI_LED_ENABLE_SUP
psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
psi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
psi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
#else
psi->port_states.offline_reason = ppd->neighbor_normal << 4;
psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
psi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */
psi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
......@@ -2430,14 +2418,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
rsp->port_rcv_remote_physical_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
if (tmp2 < tmp) {
/* overflow/wrapped */
rsp->local_link_integrity_errors = cpu_to_be64(~0);
} else {
rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
}
rsp->local_link_integrity_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
CNTR_INVALID_VL);
......@@ -2499,6 +2482,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_discards =
cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
idx_from_vl(vl)));
vlinfo++;
vfi++;
}
......@@ -2529,9 +2515,8 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL);
/* local link integrity must be right-shifted by the lli resolution */
tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
error_counter_summary += (tmp >> res_lli);
error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
CNTR_INVALID_VL) >> res_lli);
/* link error recovery must be right-shifted by the ler resolution */
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
......@@ -2800,14 +2785,9 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
rsp->port_rcv_constraint_errors =
cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
if (tmp2 < tmp) {
/* overflow/wrapped */
rsp->local_link_integrity_errors = cpu_to_be64(~0);
} else {
rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
}
rsp->local_link_integrity_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
CNTR_INVALID_VL));
rsp->excessive_buffer_overruns =
cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
}
......@@ -2883,14 +2863,17 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
rsp->port_rcv_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
vlinfo = &rsp->vls[0];
vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo));
/* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
rsp->vls[vfi].port_vl_xmit_discards =
cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
idx_from_vl(vl)));
vlinfo += 1;
vfi++;
}
......@@ -3162,10 +3145,8 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
}
if (counter_select & CS_LINK_ERROR_RECOVERY) {
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
......@@ -3223,7 +3204,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
/* if (counter_select & CS_PORT_MARK_FECN)
* write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
*/
/* port_vl_xmit_discards ??? */
if (counter_select & C_SW_XMIT_DSCD_VL)
write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
idx_from_vl(vl), 0);
}
if (resp_len)
......@@ -3392,7 +3375,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
*/
spin_lock(&ppd->cc_state_lock);
old_cc_state = get_cc_state(ppd);
old_cc_state = get_cc_state_protected(ppd);
if (!old_cc_state) {
/* never active, or shutting down */
spin_unlock(&ppd->cc_state_lock);
......@@ -3960,7 +3943,6 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
/* LocalLinkIntegrityErrors */
write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
/* ExcessiveBufferOverruns */
write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
......
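The LocalLinkIntegrityErrors hunks above stop summing the TX replay counter into the reported value, feeding it from C_DC_RX_REPLAY alone, so the overflow-checked addition goes away with it. The deleted code is still a good illustration of the saturating-add idiom for summed counters; standalone:

#include <stdint.h>
#include <stdio.h>

/* saturating 64-bit add, as the removed code used before reporting a
 * summed counter: wrap-around reads back as all-ones, never as a
 * misleadingly small value */
static uint64_t sat_add_u64(uint64_t a, uint64_t b)
{
	uint64_t sum = a + b;

	return sum < a ? UINT64_MAX : sum;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)sat_add_u64(5, 7));
	printf("0x%llx\n", (unsigned long long)sat_add_u64(UINT64_MAX - 1, 7));
	return 0;
}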
......@@ -48,15 +48,8 @@
#define _HFI1_MAD_H
#include <rdma/ib_pma.h>
#define USE_PI_LED_ENABLE 1 /*
* use led enabled bit in struct
* opa_port_states, if available
*/
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>
#ifndef PI_LED_ENABLE_SUP
#define PI_LED_ENABLE_SUP 0
#endif
#include "opa_compat.h"
/*
......
......@@ -54,23 +54,34 @@ struct mmu_rb_node {
unsigned long len;
unsigned long __last;
struct rb_node node;
struct list_head list;
};
/*
* NOTE: filter, insert, invalidate, and evict must not sleep. Only remove is
* allowed to sleep.
*/
struct mmu_rb_ops {
bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
int (*insert)(struct rb_root *, struct mmu_rb_node *);
void (*remove)(struct rb_root *, struct mmu_rb_node *,
struct mm_struct *);
int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
unsigned long len);
int (*insert)(void *ops_arg, struct mmu_rb_node *mnode);
void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
int (*invalidate)(void *ops_arg, struct mmu_rb_node *node);
int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
void *evict_arg, bool *stop);
};
int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops);
void hfi1_mmu_rb_unregister(struct rb_root *);
int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
unsigned long);
struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *, unsigned long,
unsigned long);
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
struct mmu_rb_ops *ops,
struct workqueue_struct *wq,
struct mmu_rb_handler **handler);
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
struct mmu_rb_node *mnode);
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
struct mmu_rb_node *mnode);
struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
unsigned long addr, unsigned long len);
#endif /* _HFI1_MMU_RB_H */
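The reworked interface decouples callers from the rb tree itself: users register a handler with an opaque ops_arg plus a callback table, and the NOTE above pins down which callbacks may sleep. A skeletal registration in the same style; the tid_* names and bodies are hypothetical placeholders, the evict convention (nonzero return marks the node evictable, *stop ends the scan) is an assumption from the surrounding code, and wq/fd are existing objects in the caller:

/* hypothetical callback stubs wired to the table below */
static bool tid_filter(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len)
{
	return node->addr == addr && node->len == len;
}

static int tid_insert(void *ops_arg, struct mmu_rb_node *mnode) { return 0; }
static void tid_remove(void *ops_arg, struct mmu_rb_node *mnode) { }
static int tid_invalidate(void *ops_arg, struct mmu_rb_node *node) { return 0; }
static int tid_evict(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop)
{
	*stop = false;
	return 1;	/* assumed: nonzero marks this node evictable */
}

static struct mmu_rb_ops tid_ops = {
	.filter = tid_filter,
	.insert = tid_insert,
	.remove = tid_remove,
	.invalidate = tid_invalidate,
	.evict = tid_evict,
};

/* registration, e.g. from a file-descriptor setup path */
ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_ops, wq, &fd->handler);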
......@@ -679,6 +679,10 @@ static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
static uint pcie_ctle = 1; /* discrete on, integrated off */
module_param(pcie_ctle, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
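The parameter packs two independent switches into one integer; which bit applies is chosen later by the detected HFI type, as the do_pcie_gen3_transition() hunks below show. The decode reduces to a one-liner, modeled standalone here:

#include <stdbool.h>
#include <stdio.h>

/* mirrors the decode in do_pcie_gen3_transition(): bit 0 selects
 * static CTLE for discrete parts, bit 1 for integrated parts */
static unsigned int static_ctle_mode(unsigned int pcie_ctle, bool integrated)
{
	return integrated ? (pcie_ctle >> 1) & 0x1 : pcie_ctle & 0x1;
}

int main(void)
{
	/* default pcie_ctle = 1: discrete on, integrated off */
	printf("discrete %u integrated %u\n",
	       static_ctle_mode(1, false), static_ctle_mode(1, true));
	return 0;
}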
/* equalization columns */
#define PREC 0
#define ATTN 1
......@@ -716,6 +720,36 @@ static const u8 integrated_preliminary_eq[11][3] = {
{ 0x00, 0x1e, 0x0a }, /* p10 */
};
static const u8 discrete_ctle_tunings[11][4] = {
/* DC LF HF BW */
{ 0x48, 0x0b, 0x04, 0x04 }, /* p0 */
{ 0x60, 0x05, 0x0f, 0x0a }, /* p1 */
{ 0x50, 0x09, 0x06, 0x06 }, /* p2 */
{ 0x68, 0x05, 0x0f, 0x0a }, /* p3 */
{ 0x80, 0x05, 0x0f, 0x0a }, /* p4 */
{ 0x70, 0x05, 0x0f, 0x0a }, /* p5 */
{ 0x68, 0x05, 0x0f, 0x0a }, /* p6 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
{ 0x48, 0x09, 0x06, 0x06 }, /* p8 */
{ 0x60, 0x05, 0x0f, 0x0a }, /* p9 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p10 */
};
static const u8 integrated_ctle_tunings[11][4] = {
/* DC LF HF BW */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p0 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p1 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p2 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p3 */
{ 0x58, 0x0a, 0x05, 0x05 }, /* p4 */
{ 0x48, 0x0a, 0x05, 0x05 }, /* p5 */
{ 0x40, 0x0a, 0x05, 0x05 }, /* p6 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
{ 0x38, 0x0f, 0x00, 0x00 }, /* p8 */
{ 0x38, 0x09, 0x06, 0x06 }, /* p9 */
{ 0x38, 0x0e, 0x01, 0x01 }, /* p10 */
};
/* helper to format the value to write to hardware */
#define eq_value(pre, curr, post) \
((((u32)(pre)) << \
......@@ -951,11 +985,14 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
u32 status, err;
int ret;
int do_retry, retry_count = 0;
int intnum = 0;
uint default_pset;
u16 target_vector, target_speed;
u16 lnkctl2, vendor;
u8 div;
const u8 (*eq)[3];
const u8 (*ctle_tunings)[4];
uint static_ctle_mode;
int return_error = 0;
/* PCIe Gen3 is for the ASIC only */
......@@ -1089,6 +1126,9 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
div = 3;
eq = discrete_preliminary_eq;
default_pset = DEFAULT_DISCRETE_PSET;
ctle_tunings = discrete_ctle_tunings;
/* bit 0 - discrete on/off */
static_ctle_mode = pcie_ctle & 0x1;
} else {
/* 400mV, FS=29, LF = 9 */
fs = 29;
......@@ -1096,6 +1136,9 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
div = 1;
eq = integrated_preliminary_eq;
default_pset = DEFAULT_MCP_PSET;
ctle_tunings = integrated_ctle_tunings;
/* bit 1 - integrated on/off */
static_ctle_mode = (pcie_ctle >> 1) & 0x1;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
(fs <<
......@@ -1135,16 +1178,33 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
* step 5c: Program gasket interrupts
*/
/* set the Rx Bit Rate to REFCLK ratio */
write_gasket_interrupt(dd, 0, 0x0006, 0x0050);
write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
/* disable pCal for PCIe Gen3 RX equalization */
write_gasket_interrupt(dd, 1, 0x0026, 0x5b01);
/* select adaptive or static CTLE */
write_gasket_interrupt(dd, intnum++, 0x0026,
0x5b01 | (static_ctle_mode << 3));
/*
* Enable iCal for PCIe Gen3 RX equalization, and set which
* evaluation of RX_EQ_EVAL will launch the iCal procedure.
*/
write_gasket_interrupt(dd, 2, 0x0026, 0x5202);
write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
if (static_ctle_mode) {
/* apply static CTLE tunings */
u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
pcie_dc = ctle_tunings[pcie_pset][0];
pcie_lf = ctle_tunings[pcie_pset][1];
pcie_hf = ctle_tunings[pcie_pset][2];
pcie_bw = ctle_tunings[pcie_pset][3];
write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
}
/* terminate list */
write_gasket_interrupt(dd, 3, 0x0000, 0x0000);
write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
/*
* step 5d: program XMT margin
......
......@@ -1952,13 +1952,17 @@ int init_pervl_scs(struct hfi1_devdata *dd)
dd->vld[15].sc = sc_alloc(dd, SC_VL15,
dd->rcd[0]->rcvhdrqentsize, dd->node);
if (!dd->vld[15].sc)
goto nomem;
return -ENOMEM;
hfi1_init_ctxt(dd->vld[15].sc);
dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
dd->kernel_send_context = kmalloc_node(dd->num_send_contexts *
dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
sizeof(struct send_context *),
GFP_KERNEL, dd->node);
if (!dd->kernel_send_context)
goto freesc15;
dd->kernel_send_context[0] = dd->vld[15].sc;
for (i = 0; i < num_vls; i++) {
......@@ -2010,12 +2014,21 @@ int init_pervl_scs(struct hfi1_devdata *dd)
if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
goto nomem;
return 0;
nomem:
sc_free(dd->vld[15].sc);
for (i = 0; i < num_vls; i++)
for (i = 0; i < num_vls; i++) {
sc_free(dd->vld[i].sc);
dd->vld[i].sc = NULL;
}
for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
sc_free(dd->kernel_send_context[i + 1]);
kfree(dd->kernel_send_context);
dd->kernel_send_context = NULL;
freesc15:
sc_free(dd->vld[15].sc);
return -ENOMEM;
}
......
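Beyond switching the send-context array to kzalloc_node() (so unfilled slots read as NULL), the rework above splits the unwind so each failure point frees exactly what was allocated before it, with sc15 freed last via its own label. A reduced, compilable model of that staged-unwind pattern:

#include <errno.h>
#include <stdlib.h>

/* reduced model of the init_pervl_scs() unwind: free in reverse
 * order of allocation, jumping to the label matching how far we got */
static int setup(void **sc15, void **ctx_array)
{
	*sc15 = malloc(64);		/* stands in for the VL15 context */
	if (!*sc15)
		return -ENOMEM;

	*ctx_array = calloc(8, sizeof(void *));	/* zeroed, like kzalloc_node() */
	if (!*ctx_array)
		goto free_sc15;

	return 0;

free_sc15:
	free(*sc15);
	*sc15 = NULL;
	return -ENOMEM;
}

int main(void)
{
	void *sc15, *ctx_array;

	return setup(&sc15, &ctx_array) ? 1 : 0;
}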
......@@ -537,20 +537,6 @@ static void apply_tunings(
u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
u8 *cache = ppd->qsfp_info.cache;
/* Enable external device config if channel is limiting active */
read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, &config_data);
config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
dd_dev_err(
ppd->dd,
"%s: Failed to set enable external device config\n",
__func__);
config_data = 0; /* re-init */
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
......@@ -638,9 +624,13 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
if (ret)
return ret;
/*
* We'll change the QSFP memory contents from here on out, thus we set a
* flag here to remind ourselves to reset the QSFP module. This prevents
 * reuse of stale settings established in our previous pass through
 * this function.
*/
if (ppd->qsfp_info.reset_needed) {
reset_qsfp(ppd);
ppd->qsfp_info.reset_needed = 0;
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
} else {
ppd->qsfp_info.reset_needed = 1;
......
......@@ -52,6 +52,7 @@
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>
#include "hfi.h"
#include "qp.h"
......@@ -115,6 +116,66 @@ static const u16 credit_table[31] = {
32768 /* 1E */
};
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
.length = sizeof(struct ib_rdma_wr),
.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},
[IB_WR_RDMA_READ] = {
.length = sizeof(struct ib_rdma_wr),
.qpt_support = BIT(IB_QPT_RC),
.flags = RVT_OPERATION_ATOMIC,
},
[IB_WR_ATOMIC_CMP_AND_SWP] = {
.length = sizeof(struct ib_atomic_wr),
.qpt_support = BIT(IB_QPT_RC),
.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},
[IB_WR_ATOMIC_FETCH_AND_ADD] = {
.length = sizeof(struct ib_atomic_wr),
.qpt_support = BIT(IB_QPT_RC),
.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},
[IB_WR_RDMA_WRITE_WITH_IMM] = {
.length = sizeof(struct ib_rdma_wr),
.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},
[IB_WR_SEND] = {
.length = sizeof(struct ib_send_wr),
.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},
[IB_WR_SEND_WITH_IMM] = {
.length = sizeof(struct ib_send_wr),
.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},
[IB_WR_REG_MR] = {
.length = sizeof(struct ib_reg_wr),
.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
.flags = RVT_OPERATION_LOCAL,
},
[IB_WR_LOCAL_INV] = {
.length = sizeof(struct ib_send_wr),
.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
.flags = RVT_OPERATION_LOCAL,
},
[IB_WR_SEND_WITH_INV] = {
.length = sizeof(struct ib_send_wr),
.qpt_support = BIT(IB_QPT_RC),
},
};
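The table gives rdmavt one descriptor per work-request opcode: the WR structure size to copy, a bitmask of QP types allowed to post it, and flags for atomic/local handling. A standalone model of the admission check such a table enables, with the enums reduced to two opcodes and three QP types:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum qpt { QPT_RC, QPT_UC, QPT_UD };
enum wr  { WR_RDMA_READ, WR_SEND, WR_MAX };

struct post_parms { uint32_t qpt_support; };

static const struct post_parms parms[WR_MAX] = {
	[WR_RDMA_READ]	= { .qpt_support = BIT(QPT_RC) },
	[WR_SEND]	= { .qpt_support = BIT(QPT_RC) | BIT(QPT_UC) |
					   BIT(QPT_UD) },
};

/* mirrors the check a post_send path can make from the table */
static int opcode_allowed(enum wr op, enum qpt type)
{
	return !!(parms[op].qpt_support & BIT(type));
}

int main(void)
{
	printf("%d\n", opcode_allowed(WR_RDMA_READ, QPT_UD)); /* 0: rejected */
	printf("%d\n", opcode_allowed(WR_SEND, QPT_UD));      /* 1: allowed */
	return 0;
}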
static void flush_tx_list(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
......@@ -745,8 +806,9 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
priv->owner = qp;
priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
if (!priv->s_hdr) {
priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
rdi->dparms.node);
if (!priv->s_ahg) {
kfree(priv);
return ERR_PTR(-ENOMEM);
}
......@@ -759,7 +821,7 @@ void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
kfree(priv->s_hdr);
kfree(priv->s_ahg);
kfree(priv);
}
......
......@@ -54,6 +54,8 @@
extern unsigned int hfi1_qp_table_size;
extern const struct rvt_operation_params hfi1_post_parms[];
/*
* free_ahg - clear ahg from QP
*/
......@@ -61,7 +63,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
priv->s_hdr->ahgcount = 0;
priv->s_ahg->ahgcount = 0;
qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
......
......@@ -238,3 +238,6 @@ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
struct hfi1_asic_data;
int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
......@@ -477,6 +477,37 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
/*
* Local operations are processed immediately
* after all prior requests have completed
*/
if (wqe->wr.opcode == IB_WR_REG_MR ||
wqe->wr.opcode == IB_WR_LOCAL_INV) {
int local_ops = 0;
int err = 0;
if (qp->s_last != qp->s_cur)
goto bail;
if (++qp->s_cur == qp->s_size)
qp->s_cur = 0;
if (++qp->s_tail == qp->s_size)
qp->s_tail = 0;
if (!(wqe->wr.send_flags &
RVT_SEND_COMPLETION_ONLY)) {
err = rvt_invalidate_rkey(
qp,
wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
hfi1_send_complete(qp, wqe,
err ? IB_WC_LOC_PROT_ERR
: IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
qp->s_hdrwords = 0;
goto done_free_tx;
}
newreq = 1;
qp->s_psn = wqe->psn;
}
......@@ -491,6 +522,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
switch (wqe->wr.opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
......@@ -504,11 +536,17 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
} else {
} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
} else {
qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
/* Invalidate rkey comes after the BTH */
ohdr->u.ieth = cpu_to_be32(
wqe->wr.ex.invalidate_rkey);
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
......@@ -671,11 +709,16 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
} else {
} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
} else {
qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
/* invalidate data comes after the BTH */
ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
......@@ -1047,7 +1090,7 @@ void hfi1_rc_timeout(unsigned long arg)
ibp->rvp.n_rc_timeouts++;
qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
trace_hfi1_timeout(qp, qp->s_last_psn + 1);
restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
}
......@@ -1171,7 +1214,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
* If we were waiting for sends to complete before re-sending,
* and they are now complete, restart sending.
*/
trace_hfi1_rc_sendcomplete(qp, psn);
trace_hfi1_sendcomplete(qp, psn);
if (qp->s_flags & RVT_S_WAIT_PSN &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
qp->s_flags &= ~RVT_S_WAIT_PSN;
......@@ -1567,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
trace_hfi1_rc_ack(qp, psn);
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
......@@ -1782,7 +1825,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
u8 i, prev;
int old_req;
trace_hfi1_rc_rcv_error(qp, psn);
trace_hfi1_rcv_error(qp, psn);
if (diff > 0) {
/*
* Packet sequence error.
......@@ -2086,7 +2129,6 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
......@@ -2097,30 +2139,15 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
int diff;
struct ib_reth *reth;
unsigned long flags;
u32 bth1;
int ret, is_fecn = 0;
int copy_last = 0;
u32 rkey;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
return;
bth1 = be32_to_cpu(ohdr->bth[1]);
if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
if (bth1 & HFI1_BECN_SMASK) {
u16 rlid = qp->remote_ah_attr.dlid;
u32 lqpn, rqpn;
lqpn = qp->ibqp.qp_num;
rqpn = qp->remote_qpn;
process_becn(
ppd,
qp->remote_ah_attr.sl,
rlid, lqpn, rqpn,
IB_CC_SVCTYPE_RC);
}
is_fecn = bth1 & HFI1_FECN_SMASK;
}
is_fecn = process_ecn(qp, packet, false);
psn = be32_to_cpu(ohdr->bth[2]);
opcode = (bth0 >> 24) & 0xff;
......@@ -2154,7 +2181,8 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
case OP(SEND_MIDDLE):
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE))
opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
opcode == OP(SEND_LAST_WITH_INVALIDATE))
break;
goto nack_inv;
......@@ -2170,6 +2198,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
opcode == OP(RDMA_WRITE_MIDDLE) ||
opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
......@@ -2218,6 +2247,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
case OP(SEND_ONLY_WITH_INVALIDATE):
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto nack_op_err;
......@@ -2226,12 +2256,22 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
goto send_last_inv;
/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
rkey = be32_to_cpu(ohdr->u.ieth);
if (rvt_invalidate_rkey(qp, rkey))
goto no_immediate_data;
wc.ex.invalidate_rkey = rkey;
wc.wc_flags = IB_WC_WITH_INVALIDATE;
goto send_last;
case OP(RDMA_WRITE_LAST):
copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
/* fall through */
......
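On the receive side above, SEND_*_WITH_INVALIDATE pulls the rkey from the IETH and tries rvt_invalidate_rkey(); if that fails the payload is still delivered, just without the invalidate flag (the goto no_immediate_data path). A hedged sketch of what the receiving ULP then sees when polling its CQ (mark_mr_invalid() and process_recv() are hypothetical helpers):
/* Hedged sketch of consuming a remote-invalidate completion. */
static void drain_recv_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;
		if (wc.wc_flags & IB_WC_WITH_INVALIDATE)
			mark_mr_invalid(wc.ex.invalidate_rkey); /* hypothetical */
		process_recv(&wc);				/* hypothetical */
	}
}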
......@@ -372,6 +372,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
int ret;
int copy_last = 0;
u32 to;
int local_ops = 0;
rcu_read_lock();
......@@ -440,11 +441,31 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_sge.num_sge = wqe->wr.num_sge;
sqp->s_len = wqe->length;
switch (wqe->wr.opcode) {
case IB_WR_REG_MR:
goto send_comp;
case IB_WR_LOCAL_INV:
if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
if (rvt_invalidate_rkey(sqp,
wqe->wr.ex.invalidate_rkey))
send_status = IB_WC_LOC_PROT_ERR;
local_ops = 1;
}
goto send_comp;
case IB_WR_SEND_WITH_INV:
if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
wc.wc_flags = IB_WC_WITH_INVALIDATE;
wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
}
goto send;
case IB_WR_SEND_WITH_IMM:
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
send:
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
......@@ -583,6 +604,10 @@ static void ruc_loopback(struct rvt_qp *sqp)
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
hfi1_send_complete(sqp, wqe, send_status);
if (local_ops) {
atomic_dec(&sqp->local_ops_pending);
local_ops = 0;
}
goto again;
rnr_nak:
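The loopback path now completes IB_WR_REG_MR and IB_WR_LOCAL_INV locally, counting in-flight local invalidates in local_ops_pending so the counter can be dropped once the completion is generated above. For orientation, a hedged sketch of posting the local invalidate this path consumes (qp and mr are placeholders; standard verbs, not hfi1-specific):
/* Hedged sketch: a ULP invalidating its own MR via the send queue. */
static int post_local_inv(struct ib_qp *qp, struct ib_mr *mr)
{
	struct ib_send_wr wr = {}, *bad_wr;
	wr.opcode = IB_WR_LOCAL_INV;
	wr.ex.invalidate_rkey = mr->rkey;
	wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
	return ib_post_send(qp, &wr, &bad_wr);
}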
......@@ -683,10 +708,10 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)
/**
* build_ahg - create ahg in s_hdr
* build_ahg - create ahg in s_ahg
* @qp: a pointer to QP
* @npsn: the next PSN for the request/response
*
......@@ -708,19 +733,18 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
if (qp->s_ahgidx >= 0) {
qp->s_ahgpsn = npsn;
priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
priv->s_hdr->sde = priv->s_sde;
priv->s_hdr->ahgidx = qp->s_ahgidx;
priv->s_ahg->ahgidx = qp->s_ahgidx;
qp->s_flags |= RVT_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
if (qp->s_ahgidx >= 0) {
priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
priv->s_hdr->ahgidx = qp->s_ahgidx;
priv->s_hdr->ahgcount++;
priv->s_hdr->ahgdesc[0] =
priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
priv->s_ahg->ahgidx = qp->s_ahgidx;
priv->s_ahg->ahgcount++;
priv->s_ahg->ahgdesc[0] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16((u16)npsn),
BTH2_OFFSET,
......@@ -728,8 +752,8 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
16);
if ((npsn & 0xffff0000) !=
(qp->s_ahgpsn & 0xffff0000)) {
priv->s_hdr->ahgcount++;
priv->s_hdr->ahgdesc[1] =
priv->s_ahg->ahgcount++;
priv->s_ahg->ahgdesc[1] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16(
(u16)(npsn >> 16)),
......@@ -766,7 +790,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
}
lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
/*
* reset s_hdr/AHG fields
* reset s_ahg/AHG fields
*
* This insures that the ahgentry/ahgcount
* are at a non-AHG default to protect
......@@ -776,10 +800,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
* build_ahg() will modify as appropriate
* to use the AHG feature.
*/
priv->s_hdr->tx_flags = 0;
priv->s_hdr->ahgcount = 0;
priv->s_hdr->ahgidx = 0;
priv->s_hdr->sde = NULL;
priv->s_ahg->tx_flags = 0;
priv->s_ahg->ahgcount = 0;
priv->s_ahg->ahgidx = 0;
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
else
......@@ -890,7 +913,7 @@ void hfi1_do_send(struct rvt_qp *qp)
*/
if (hfi1_verbs_send(qp, &ps))
return;
/* Record that s_hdr is empty. */
/* Record that s_ahg is empty. */
qp->s_hdrwords = 0;
/* allow other tasks to run */
if (unlikely(time_after(jiffies, timeout))) {
......
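The s_hdr to s_ahg rename above reflects that the QP private data now carries only AHG (adaptive header generation) state: between the MIDDLE packets of a large request only the PSN in BTH2 changes, so build_ahg() queues one or two 16-bit patch descriptors instead of rebuilding the whole header. The packing below is a hypothetical illustration of the (data, dword index, start bit, width) tuple those sdma_build_ahg_descriptor() calls supply; the real bit layout lives in sdma.h and is not reproduced here:
/*
 * Hypothetical illustration only: pack the four values build_ahg()
 * passes into one u32 update descriptor.  The field layout is assumed
 * for illustration, not taken from the hfi1 sources.
 */
static u32 ahg_pack(u16 data, u8 dwindex, u8 startbit, u8 bits)
{
	return (u32)data | ((u32)dwindex << 16) |
	       ((u32)startbit << 21) | ((u32)bits << 26);
}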
......@@ -49,6 +49,7 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
......@@ -622,6 +623,27 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
static ssize_t show_sdma_affinity(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
return hfi1_get_sdma_affinity(dd, buf);
}
static ssize_t store_sdma_affinity(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
return hfi1_set_sdma_affinity(dd, buf, count);
}
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
......@@ -636,6 +658,8 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
......@@ -646,6 +670,7 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
&dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
......
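The new sdma_affinity attribute gives user space a read/write knob for the SDMA engine CPU affinity. A hedged sketch of reading it (the sysfs path follows the usual IB class-device layout and is an assumption, as is the hfi1_0 device name):
/* Hedged sketch; the device path is assumed, not guaranteed. */
#include <stdio.h>
int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/class/infiniband/hfi1_0/sdma_affinity", "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("sdma_affinity: %s", buf);
	fclose(f);
	return 0;
}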

/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_CTXTS_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ctxts
#define UCTXT_FMT \
"cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
"rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
TRACE_EVENT(hfi1_uctxtdata,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
TP_ARGS(dd, uctxt),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned int, ctxt)
__field(u32, credits)
__field(u64, hw_free)
__field(void __iomem *, piobase)
__field(u16, rcvhdrq_cnt)
__field(u64, rcvhdrq_phys)
__field(u32, eager_cnt)
__field(u64, rcvegr_phys)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
__entry->credits = uctxt->sc->credits;
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
__entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
__entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
__entry->eager_cnt = uctxt->egrbufs.alloced;
__entry->rcvegr_phys =
uctxt->egrbufs.rcvtids[0].phys;
),
TP_printk("[%s] ctxt %u " UCTXT_FMT,
__get_str(dev),
__entry->ctxt,
__entry->credits,
__entry->hw_free,
__entry->piobase,
__entry->rcvhdrq_cnt,
__entry->rcvhdrq_phys,
__entry->eager_cnt,
__entry->rcvegr_phys
)
);
#define CINFO_FMT \
"egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
TRACE_EVENT(hfi1_ctxt_info,
TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
unsigned int subctxt,
struct hfi1_ctxt_info cinfo),
TP_ARGS(dd, ctxt, subctxt, cinfo),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned int, ctxt)
__field(unsigned int, subctxt)
__field(u16, egrtids)
__field(u16, rcvhdrq_cnt)
__field(u16, rcvhdrq_size)
__field(u16, sdma_ring_size)
__field(u32, rcvegr_size)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->egrtids = cinfo.egrtids;
__entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
__entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
__entry->sdma_ring_size = cinfo.sdma_ring_size;
__entry->rcvegr_size = cinfo.rcvegr_size;
),
TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
__get_str(dev),
__entry->ctxt,
__entry->subctxt,
__entry->egrtids,
__entry->rcvegr_size,
__entry->rcvhdrq_cnt,
__entry->rcvhdrq_size,
__entry->sdma_ring_size
)
);
#endif /* __HFI1_TRACE_CTXTS_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_ctxts
#include <trace/define_trace.h>
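Once the driver is loaded, the hfi1_ctxts events defined here show up under tracefs like any other trace system. A hedged user-space sketch of enabling hfi1_uctxtdata (the mount point /sys/kernel/debug/tracing is the common default, an assumption here):
/* Hedged sketch: enable the hfi1_uctxtdata event through tracefs. */
#include <stdio.h>
int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/events/hfi1_ctxts/hfi1_uctxtdata/enable";
	FILE *f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);
	return 0;
}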
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_EXTRA_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_EXTRA_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
/*
* Note:
* This produces a REALLY ugly trace in the console output when the string is
* too long.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_dbg
#define MAX_MSG_LEN 512
DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(__string(function, function)
__dynamic_array(char, msg, MAX_MSG_LEN)
),
TP_fast_assign(__assign_str(function, function);
WARN_ON_ONCE(vsnprintf
(__get_dynamic_array(msg),
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >=
MAX_MSG_LEN);
),
TP_printk("(%s) %s",
__get_str(function),
__get_str(msg))
);
/*
* It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
* actual function to work and cannot be in a macro.
*/
#define __hfi1_trace_def(lvl) \
void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
\
DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
TP_PROTO(const char *function, struct va_format *vaf), \
TP_ARGS(function, vaf))
#define __hfi1_trace_fn(lvl) \
void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
{ \
struct va_format vaf = { \
.fmt = fmt, \
}; \
va_list args; \
\
va_start(args, fmt); \
vaf.va = &args; \
trace_hfi1_ ##lvl(func, &vaf); \
va_end(args); \
return; \
}
/*
* To create a new trace level simply define it below and as a __hfi1_trace_fn
* in trace.c. This will create all the hooks for calling
* hfi1_cdbg(LVL, fmt, ...); as well as take care of all
* the debugfs stuff.
*/
__hfi1_trace_def(PKT);
__hfi1_trace_def(PROC);
__hfi1_trace_def(SDMA);
__hfi1_trace_def(LINKVERB);
__hfi1_trace_def(DEBUG);
__hfi1_trace_def(SNOOP);
__hfi1_trace_def(CNTR);
__hfi1_trace_def(PIO);
__hfi1_trace_def(DC8051);
__hfi1_trace_def(FIRMWARE);
__hfi1_trace_def(RCVCTRL);
__hfi1_trace_def(TID);
__hfi1_trace_def(MMU);
__hfi1_trace_def(IOCTL);
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
#define hfi1_dbg(fmt, ...) \
hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
/*
* Define HFI1_EARLY_DBG at compile time or here to enable early trace
* messages. Do not check in an enablement for this.
*/
#ifdef HFI1_EARLY_DBG
#define hfi1_dbg_early(fmt, ...) \
trace_printk(fmt, ##__VA_ARGS__)
#else
#define hfi1_dbg_early(fmt, ...)
#endif
#endif /* __HFI1_TRACE_EXTRA_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_dbg
#include <trace/define_trace.h>
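Per the comment above, each __hfi1_trace_def(LVL) generates both a tracepoint and the hook behind hfi1_cdbg(LVL, fmt, ...). A short usage sketch (format strings and values are placeholders):
/* Usage sketch; engine_idx and ctxt are placeholder values. */
hfi1_cdbg(SDMA, "engine %u descriptor ring full", engine_idx);
hfi1_cdbg(TID, "ctxt %u: TID cache evict", ctxt);
hfi1_dbg("shorthand for the DEBUG level");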
#ifndef _TWSI_H
#define _TWSI_H
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
......@@ -46,20 +44,166 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#define HFI1_TWSI_NO_DEV 0xFF
struct hfi1_devdata;
/* Bit position of SDA/SCL pins in ASIC_QSFP* registers */
#define GPIO_SDA_NUM 1
#define GPIO_SCL_NUM 0
/* these functions must be called with qsfp_lock held */
int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target);
int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
void *buffer, int len);
int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
const void *buffer, int len);
#endif /* _TWSI_H */
#if !defined(__HFI1_TRACE_IBHDRS_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_IBHDRS_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ibhdrs
u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
#define show_lnh(lrh) \
__print_symbolic(lrh, \
lrh_name(LRH_BTH), \
lrh_name(LRH_GRH))
#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
#define BTH_PRN \
"op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
"f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
#define EHDR_PRN "%s"
DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
TP_PROTO(struct hfi1_devdata *dd,
struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
/* LRH */
__field(u8, vl)
__field(u8, lver)
__field(u8, sl)
__field(u8, lnh)
__field(u16, dlid)
__field(u16, len)
__field(u16, slid)
/* BTH */
__field(u8, opcode)
__field(u8, se)
__field(u8, m)
__field(u8, pad)
__field(u8, tver)
__field(u16, pkey)
__field(u8, f)
__field(u8, b)
__field(u32, qpn)
__field(u8, a)
__field(u32, psn)
/* extended headers */
__dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
),
TP_fast_assign(
struct hfi1_other_headers *ohdr;
DD_DEV_ASSIGN(dd);
/* LRH */
__entry->vl =
(u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
__entry->lver =
(u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
__entry->sl =
(u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
__entry->lnh =
(u8)(be16_to_cpu(hdr->lrh[0]) & 3);
__entry->dlid =
be16_to_cpu(hdr->lrh[1]);
/* allow for larger len */
__entry->len =
be16_to_cpu(hdr->lrh[2]);
__entry->slid =
be16_to_cpu(hdr->lrh[3]);
/* BTH */
if (__entry->lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
__entry->opcode =
(be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
__entry->se =
(be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
__entry->m =
(be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
__entry->pad =
(be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
__entry->tver =
(be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
__entry->pkey =
be32_to_cpu(ohdr->bth[0]) & 0xffff;
__entry->f =
(be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
HFI1_FECN_MASK;
__entry->b =
(be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
HFI1_BECN_MASK;
__entry->qpn =
be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
__entry->a =
(be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
/* allow for larger PSN */
__entry->psn =
be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
/* extended headers */
memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
ibhdr_exhdr_len(hdr));
),
TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
__get_str(dev),
/* LRH */
__entry->vl,
__entry->lver,
__entry->sl,
__entry->lnh, show_lnh(__entry->lnh),
__entry->dlid,
__entry->len,
__entry->slid,
/* BTH */
__entry->opcode, show_ib_opcode(__entry->opcode),
__entry->se,
__entry->m,
__entry->pad,
__entry->tver,
__entry->pkey,
__entry->f,
__entry->b,
__entry->qpn,
__entry->a,
__entry->psn,
/* extended headers */
__parse_ib_ehdrs(
__entry->opcode,
(void *)__get_dynamic_array(ehdrs))
)
);
DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
#endif /* __HFI1_TRACE_IBHDRS_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_ibhdrs
#include <trace/define_trace.h>
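Each DEFINE_EVENT above yields a callable tracepoint named trace_<event>(). A hedged sketch of the call-site shape for the receive path (example_rcv() is a hypothetical call site; dd and hdr are whatever the path already has in hand):
/* Hedged sketch; example_rcv() is hypothetical. */
static void example_rcv(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr)
{
	trace_input_ibhdr(dd, hdr);	/* generated by DEFINE_EVENT above */
	/* ... continue normal packet processing ... */
}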
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_MISC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_MISC_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_misc
TRACE_EVENT(hfi1_interrupt,
TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
int src),
TP_ARGS(dd, is_entry, src),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__array(char, buf, 64)
__field(int, src)
),
TP_fast_assign(DD_DEV_ASSIGN(dd)
is_entry->is_name(__entry->buf, 64,
src - is_entry->start);
__entry->src = src;
),
TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
__entry->src)
);
#endif /* __HFI1_TRACE_MISC_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_misc
#include <trace/define_trace.h>
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_RC_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rc
DECLARE_EVENT_CLASS(hfi1_rc_template,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u32, s_flags)
__field(u32, psn)
__field(u32, s_psn)
__field(u32, s_next_psn)
__field(u32, s_sending_psn)
__field(u32, s_sending_hpsn)
__field(u32, r_psn)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->psn = psn;
__entry->s_psn = qp->s_psn;
__entry->s_next_psn = qp->s_next_psn;
__entry->s_sending_psn = qp->s_sending_psn;
__entry->s_sending_hpsn = qp->s_sending_hpsn;
__entry->r_psn = qp->r_psn;
),
TP_printk(
"[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
__get_str(dev),
__entry->qpn,
__entry->s_flags,
__entry->psn,
__entry->s_psn,
__entry->s_next_psn,
__entry->s_sending_psn,
__entry->s_sending_hpsn,
__entry->r_psn
)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_sendcomplete,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_ack,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_timeout,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
#endif /* __HFI1_TRACE_RC_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_rc
#include <trace/define_trace.h>
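The template logs several PSN fields; PSNs are 24-bit sequence numbers that wrap, so comparisons like the cmp_msn()/cmp_psn() calls in the rc.c hunks earlier must be circular. A hedged sketch of the usual idiom (named psn_cmp here to avoid claiming the canonical definition, which lives in the rdmavt/hfi1 headers):
/*
 * Hedged sketch of circular 24-bit PSN comparison: sign-extend the
 * 24-bit difference so that <0 means a precedes b, >0 means a follows,
 * and 0 means equal.  Mirrors the usual rdmavt idiom; written here for
 * illustration.
 */
static inline int psn_cmp(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8 >> 8;
}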
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_RX_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u32 ctxt,
u64 eflags,
u32 etype,
u32 hlen,
u32 tlen,
u32 updegr,
u32 etail
),
TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u64, eflags)
__field(u32, ctxt)
__field(u32, etype)
__field(u32, hlen)
__field(u32, tlen)
__field(u32, updegr)
__field(u32, etail)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->eflags = eflags;
__entry->ctxt = ctxt;
__entry->etype = etype;
__entry->hlen = hlen;
__entry->tlen = tlen;
__entry->updegr = updegr;
__entry->etail = etail;
),
TP_printk(
"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
__get_str(dev),
__entry->ctxt,
__entry->eflags,
__entry->etype, show_packettype(__entry->etype),
__entry->hlen,
__entry->tlen,
__entry->updegr,
__entry->etail
)
);
TRACE_EVENT(hfi1_receive_interrupt,
TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
TP_ARGS(dd, ctxt),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, ctxt)
__field(u8, slow_path)
__field(u8, dma_rtail)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = ctxt;
if (dd->rcd[ctxt]->do_interrupt ==
&handle_receive_interrupt) {
__entry->slow_path = 1;
__entry->dma_rtail = 0xFF;
} else if (dd->rcd[ctxt]->do_interrupt ==
&handle_receive_interrupt_dma_rtail) {
__entry->dma_rtail = 1;
__entry->slow_path = 0;
} else if (dd->rcd[ctxt]->do_interrupt ==
&handle_receive_interrupt_nodma_rtail) {
__entry->dma_rtail = 0;
__entry->slow_path = 0;
}
),
TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
__get_str(dev),
__entry->ctxt,
__entry->slow_path,
__entry->dma_rtail
)
);
TRACE_EVENT(hfi1_exp_tid_reg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
u32 npages, unsigned long va, unsigned long pa,
dma_addr_t dma),
TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
TP_STRUCT__entry(
__field(unsigned int, ctxt)
__field(u16, subctxt)
__field(u32, rarr)
__field(u32, npages)
__field(unsigned long, va)
__field(unsigned long, pa)
__field(dma_addr_t, dma)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->rarr = rarr;
__entry->npages = npages;
__entry->va = va;
__entry->pa = pa;
__entry->dma = dma;
),
TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
__entry->ctxt,
__entry->subctxt,
__entry->rarr,
__entry->npages,
__entry->pa,
__entry->va,
__entry->dma
)
);
TRACE_EVENT(hfi1_exp_tid_unreg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
unsigned long va, unsigned long pa, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
TP_STRUCT__entry(
__field(unsigned int, ctxt)
__field(u16, subctxt)
__field(u32, rarr)
__field(u32, npages)
__field(unsigned long, va)
__field(unsigned long, pa)
__field(dma_addr_t, dma)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->rarr = rarr;
__entry->npages = npages;
__entry->va = va;
__entry->pa = pa;
__entry->dma = dma;
),
TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
__entry->ctxt,
__entry->subctxt,
__entry->rarr,
__entry->npages,
__entry->pa,
__entry->va,
__entry->dma
)
);
TRACE_EVENT(hfi1_exp_tid_inval,
TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
u32 npages, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
TP_STRUCT__entry(
__field(unsigned int, ctxt)
__field(u16, subctxt)
__field(unsigned long, va)
__field(u32, rarr)
__field(u32, npages)
__field(dma_addr_t, dma)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->va = va;
__entry->rarr = rarr;
__entry->npages = npages;
__entry->dma = dma;
),
TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
__entry->ctxt,
__entry->subctxt,
__entry->rarr,
__entry->npages,
__entry->va,
__entry->dma
)
);
TRACE_EVENT(hfi1_mmu_invalidate,
TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
unsigned long start, unsigned long end),
TP_ARGS(ctxt, subctxt, type, start, end),
TP_STRUCT__entry(
__field(unsigned int, ctxt)
__field(u16, subctxt)
__string(type, type)
__field(unsigned long, start)
__field(unsigned long, end)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__assign_str(type, type);
__entry->start = start;
__entry->end = end;
),
TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
__entry->ctxt,
__entry->subctxt,
__get_str(type),
__entry->start,
__entry->end
)
);
#define SNOOP_PRN \
"slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
"svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
TRACE_EVENT(snoop_capture,
TP_PROTO(struct hfi1_devdata *dd,
int hdr_len,
struct hfi1_ib_header *hdr,
int data_len,
void *data),
TP_ARGS(dd, hdr_len, hdr, data_len, data),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
__field(u16, slid)
__field(u16, dlid)
__field(u32, qpn)
__field(u8, opcode)
__field(u8, sl)
__field(u16, pkey)
__field(u32, hdr_len)
__field(u32, data_len)
__field(u8, lnh)
__dynamic_array(u8, raw_hdr, hdr_len)
__dynamic_array(u8, raw_pkt, data_len)
),
TP_fast_assign(
struct hfi1_other_headers *ohdr;
__entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
if (__entry->lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
DD_DEV_ASSIGN(dd);
__entry->slid = be16_to_cpu(hdr->lrh[3]);
__entry->dlid = be16_to_cpu(hdr->lrh[1]);
__entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
__entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
__entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
__entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
__entry->hdr_len = hdr_len;
__entry->data_len = data_len;
memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
memcpy(__get_dynamic_array(raw_pkt), data, data_len);
),
TP_printk(
"[%s] " SNOOP_PRN,
__get_str(dev),
__entry->slid,
__entry->dlid,
__entry->qpn,
__entry->opcode,
show_ib_opcode(__entry->opcode),
__entry->sl,
__entry->pkey,
__entry->hdr_len,
__entry->data_len
)
);
#endif /* __HFI1_TRACE_RX_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_rx
#include <trace/define_trace.h>
......@@ -63,14 +63,14 @@ struct hfi1_user_sdma_pkt_q {
struct hfi1_devdata *dd;
struct kmem_cache *txreq_cache;
struct user_sdma_request *reqs;
unsigned long *req_in_use;
struct iowait busy;
unsigned state;
wait_queue_head_t wait;
unsigned long unpinned;
struct rb_root sdma_rb_root;
u32 n_locked;
struct list_head evict;
spinlock_t evict_lock; /* protect evict and n_locked */
struct mmu_rb_handler *handler;
atomic_t n_locked;
struct mm_struct *mm;
};
struct hfi1_user_sdma_comp_q {
......
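The struct changes above swap the private rb tree and evict list for a shared mmu_rb_handler and make n_locked an atomic_t, retiring evict_lock. A hedged sketch of the lock-free accounting pattern the atomic counter enables (the limit handling is an assumption for illustration, not copied from user_sdma.c):
/* Hedged sketch: pinned-page accounting without a spinlock. */
static int account_pinned(struct hfi1_user_sdma_pkt_q *pq,
			  u32 npages, u32 limit)
{
	if (atomic_add_return(npages, &pq->n_locked) > limit) {
		atomic_sub(npages, &pq->n_locked);
		return -ENOMEM;
	}
	return 0;
}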