Commit f2d04231 authored by Robert Walsh, committed by Roland Dreier

IB/ipath: ipath_poll fixups and enhancements

Fix ipath_poll and enhance it so we can poll for urgent packets or
regular packets, and be notified when a header queue overflows.
Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent b506e1dc
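
Userspace selects the poll semantics through the driver's existing write()-based
command interface; the new IPATH_CMD_POLL_TYPE command carries the poll_type
bitmask added to struct ipath_cmd below. A minimal sketch of the userspace side,
assuming an already-open port file descriptor and a userspace copy of the ABI
header (the header name and helper function are illustrative, not part of this
commit):

/* Illustrative sketch, not from this commit: set the poll type on an open port fd. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include "ipath_common.h"	/* assumed userspace copy of the ABI header */

static int ipath_set_poll_type(int fd, uint16_t poll_type)
{
	struct ipath_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = IPATH_CMD_POLL_TYPE;	/* new command, value 28 */
	cmd.cmd.poll_type = poll_type;	/* e.g. IPATH_POLL_TYPE_URGENT */

	/* the driver parses commands written to the port file descriptor */
	return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}

Passing IPATH_POLL_TYPE_URGENT switches poll() to wait for urgent packets;
OR-ing in IPATH_POLL_TYPE_OVERFLOW additionally requests notification when the
receive header queue overflows.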
@@ -431,8 +431,15 @@ struct ipath_user_info {
#define IPATH_CMD_UNUSED_1 25
#define IPATH_CMD_UNUSED_2 26
#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
#define IPATH_CMD_MAX 27
#define IPATH_CMD_MAX 28
/*
* Poll types
*/
#define IPATH_POLL_TYPE_URGENT 0x01
#define IPATH_POLL_TYPE_OVERFLOW 0x02
struct ipath_port_info {
__u32 num_active; /* number of active units */
@@ -473,6 +480,8 @@ struct ipath_cmd {
__u16 part_key;
/* user address of __u32 bitmask of active slaves */
__u64 slave_mask_addr;
/* type of polling we want */
__u16 poll_type;
} cmd;
};
@@ -1341,65 +1341,98 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
return ret;
}
static unsigned int ipath_poll(struct file *fp,
struct poll_table_struct *pt)
static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
struct file *fp,
struct poll_table_struct *pt)
{
struct ipath_portdata *pd;
u32 head, tail;
int bit;
unsigned pollflag = 0;
struct ipath_devdata *dd;
pd = port_fp(fp);
if (!pd)
goto bail;
dd = pd->port_dd;
bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
set_bit(bit, &dd->ipath_rcvctrl);
if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
pollflag |= POLLERR;
clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
}
/*
* Before blocking, make sure that head is still == tail,
* reading from the chip, so we can be sure the interrupt
* enable has made it to the chip. If not equal, disable
* interrupt again and return immediately. This avoids races,
* and the overhead of the chip read doesn't matter much at
* this point, since we are waiting for something anyway.
*/
if (test_bit(IPATH_PORT_WAITING_URG, &pd->int_flag)) {
pollflag |= POLLIN | POLLRDNORM;
clear_bit(IPATH_PORT_WAITING_URG, &pd->int_flag);
}
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
if (!pollflag) {
set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
set_bit(IPATH_PORT_WAITING_OVERFLOW,
&pd->port_flag);
poll_wait(fp, &pd->port_wait, pt);
}
return pollflag;
}
static unsigned int ipath_poll_next(struct ipath_portdata *pd,
struct file *fp,
struct poll_table_struct *pt)
{
u32 head, tail;
unsigned pollflag = 0;
struct ipath_devdata *dd;
dd = pd->port_dd;
head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
pollflag |= POLLERR;
clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
}
if (tail == head) {
if (tail != head ||
test_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag)) {
pollflag |= POLLIN | POLLRDNORM;
clear_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag);
}
if (!pollflag) {
set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
set_bit(IPATH_PORT_WAITING_OVERFLOW,
&pd->port_flag);
set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
&dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
(void)ipath_write_ureg(dd, ur_rcvhdrhead,
dd->ipath_rhdrhead_intr_off
| head, pd->port_port);
poll_wait(fp, &pd->port_wait, pt);
ipath_write_ureg(dd, ur_rcvhdrhead,
dd->ipath_rhdrhead_intr_off | head,
pd->port_port);
if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
/* timed out, no packets received */
clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
pd->port_rcvwait_to++;
}
else
pollflag = POLLIN | POLLRDNORM;
}
else {
/* it's already happened; don't do wait_event overhead */
pollflag = POLLIN | POLLRDNORM;
pd->port_rcvnowait++;
poll_wait(fp, &pd->port_wait, pt);
}
clear_bit(bit, &dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
return pollflag;
}
static unsigned int ipath_poll(struct file *fp,
struct poll_table_struct *pt)
{
struct ipath_portdata *pd;
unsigned pollflag;
pd = port_fp(fp);
if (!pd)
pollflag = 0;
else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
pollflag = ipath_poll_urgent(pd, fp, pt);
else
pollflag = ipath_poll_next(pd, fp, pt);
bail:
return pollflag;
}
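
With the split above, ipath_poll() only dispatches: ports that selected
IPATH_POLL_TYPE_URGENT go through ipath_poll_urgent(), everyone else through
ipath_poll_next(); both report POLLIN | POLLRDNORM when a packet is available
and POLLERR when a header-queue overflow was latched in int_flag. A sketch of
how a userspace consumer might interpret those bits (illustrative only, not
part of the commit):

/* Illustrative poll loop, not from this commit. */
#include <poll.h>
#include <stdio.h>

static void wait_for_port_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLERR)	/* header queue overflowed */
			fprintf(stderr, "rcvhdrq overflow on port\n");
		if (pfd.revents & (POLLIN | POLLRDNORM))
			break;			/* packet data is ready */
	}
}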
@@ -2173,6 +2206,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
src = NULL;
dest = NULL;
break;
case IPATH_CMD_POLL_TYPE:
copy = sizeof(cmd.cmd.poll_type);
dest = &cmd.cmd.poll_type;
src = &ucmd->cmd.poll_type;
break;
default:
ret = -EINVAL;
goto bail;
@@ -2245,6 +2283,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
case IPATH_CMD_PIOAVAILUPD:
ret = ipath_force_pio_avail_update(pd->port_dd);
break;
case IPATH_CMD_POLL_TYPE:
pd->poll_type = cmd.cmd.poll_type;
break;
}
if (ret >= 0)
@@ -680,6 +680,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
chkerrpkts = 1;
dd->ipath_lastrcvhdrqtails[i] = tl;
pd->port_hdrqfull++;
if (test_bit(IPATH_PORT_WAITING_OVERFLOW,
&pd->port_flag)) {
clear_bit(
IPATH_PORT_WAITING_OVERFLOW,
&pd->port_flag);
set_bit(
IPATH_PORT_WAITING_OVERFLOW,
&pd->int_flag);
wake_up_interruptible(
&pd->port_wait);
}
}
}
}
@@ -877,14 +888,25 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
dd->ipath_i_rcvurg_mask);
for (i = 1; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
if (portr & (1 << i) && pd && pd->port_cnt &&
test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
clear_bit(IPATH_PORT_WAITING_RCV,
&pd->port_flag);
clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
&dd->ipath_rcvctrl);
wake_up_interruptible(&pd->port_wait);
rcvdint = 1;
if (portr & (1 << i) && pd && pd->port_cnt) {
if (test_bit(IPATH_PORT_WAITING_RCV,
&pd->port_flag)) {
clear_bit(IPATH_PORT_WAITING_RCV,
&pd->port_flag);
set_bit(IPATH_PORT_WAITING_RCV,
&pd->int_flag);
clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
&dd->ipath_rcvctrl);
wake_up_interruptible(&pd->port_wait);
rcvdint = 1;
} else if (test_bit(IPATH_PORT_WAITING_URG,
&pd->port_flag)) {
clear_bit(IPATH_PORT_WAITING_URG,
&pd->port_flag);
set_bit(IPATH_PORT_WAITING_URG,
&pd->int_flag);
wake_up_interruptible(&pd->port_wait);
}
}
}
if (rcvdint) {
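
Both interrupt-side changes (handle_errors() above for overflow, handle_urcv()
here for received and urgent packets) follow the same handshake with the poll
code: port_flag records that a poller armed a wait, int_flag records that the
event actually happened, and the wait queue is woken only if the corresponding
bit was armed. A distilled sketch of that pattern with generic names (an
illustration of the idea, not the driver's code):

/* Distilled two-flag wait/wake handshake, illustration only. */
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

#define WAITING_X 0

static unsigned long armed;	/* cf. pd->port_flag: a poller is waiting */
static unsigned long happened;	/* cf. pd->int_flag: the event occurred  */
static DECLARE_WAIT_QUEUE_HEAD(waitq);

/* poll() side: report the event if it happened, otherwise arm and sleep. */
static unsigned int sample_poll(struct file *fp, struct poll_table_struct *pt)
{
	unsigned int mask = 0;

	if (test_and_clear_bit(WAITING_X, &happened))
		mask |= POLLIN | POLLRDNORM;

	if (!mask) {
		set_bit(WAITING_X, &armed);
		poll_wait(fp, &waitq, pt);
	}
	return mask;
}

/* interrupt side: only signal pollers that actually armed the wait. */
static void sample_irq_event(void)
{
	if (test_and_clear_bit(WAITING_X, &armed)) {
		set_bit(WAITING_X, &happened);
		wake_up_interruptible(&waitq);
	}
}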
@@ -127,6 +127,8 @@ struct ipath_portdata {
u32 port_tidcursor;
/* next expected TID to check */
unsigned long port_flag;
/* what happened */
unsigned long int_flag;
/* WAIT_RCV that timed out, no interrupt */
u32 port_rcvwait_to;
/* WAIT_PIO that timed out, no interrupt */
@@ -155,6 +157,8 @@ struct ipath_portdata {
u32 userversion;
/* Bitmask of active slaves */
u32 active_slaves;
/* Type of packets or conditions we want to poll for */
u16 poll_type;
};
struct sk_buff;
@@ -754,6 +758,10 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
#define IPATH_PORT_WAITING_PIO 3
/* master has not finished initializing */
#define IPATH_PORT_MASTER_UNINIT 4
/* waiting for an urgent packet to arrive */
#define IPATH_PORT_WAITING_URG 5
/* waiting for a header overflow */
#define IPATH_PORT_WAITING_OVERFLOW 6
/* free up any allocated data at closes */
void ipath_free_data(struct ipath_portdata *dd);