Commit 4915a404 authored by Jakub Kicinski

Merge branch 'net-iucv-updates-2021-01-28'

Julian Wiedmann says:

====================
net/iucv: updates 2021-01-28

This reworks and simplifies the TX notification path in af_iucv, so that we
can send out SG skbs over TRANS_HIPER sockets. It also removes a noisy
WARN_ONCE() in the RX path. (A standalone sketch of the new accounting
follows the tags below.)
====================

Link: https://lore.kernel.org/r/20210128114108.39409-1-jwi@linux.ibm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 14a6daf3 2c3b4456
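
Before the diff: a minimal, self-contained userspace sketch of the accounting
pattern this series switches to, where an atomic in-flight counter replaces
the queue of per-skb clones that the HiperSockets TX path kept purely for
flow control. Everything here (struct tx_path, below_msglim(), try_send(),
tx_done()) is illustrative scaffolding built on C11 atomics, not kernel API;
only the inc-before-transmit / dec-on-completion / compare-against-msglim
flow mirrors the patched code.

/*
 * Sketch only: a userspace model of the skbs_in_xmit accounting.
 * All names are made up for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tx_path {
	atomic_int skbs_in_xmit;	/* buffers handed to the device, not yet completed */
	int msglim;			/* peer-imposed message limit */
};

/* Mirrors iucv_below_msglim(): may the socket place another message? */
static bool below_msglim(struct tx_path *p)
{
	return atomic_load(&p->skbs_in_xmit) < p->msglim;
}

/* Mirrors afiucv_hs_send(): count the buffer before transmitting,
 * and roll the count back if the device rejects it. */
static int try_send(struct tx_path *p, bool xmit_ok)
{
	if (!below_msglim(p))
		return -1;	/* a real sender would block/wait here */
	atomic_fetch_add(&p->skbs_in_xmit, 1);
	if (!xmit_ok) {
		atomic_fetch_sub(&p->skbs_in_xmit, 1);
		return -1;
	}
	return 0;
}

/* Mirrors the TX_NOTIFY_OK arm of afiucv_hs_callback_txnotify():
 * completion drops the count, which may re-open the message limit. */
static void tx_done(struct tx_path *p)
{
	atomic_fetch_sub(&p->skbs_in_xmit, 1);
}

int main(void)
{
	struct tx_path p = { .msglim = 2 };

	atomic_init(&p.skbs_in_xmit, 0);
	try_send(&p, true);
	try_send(&p, true);
	printf("below limit after 2 sends: %d\n", below_msglim(&p));	/* 0 */
	tx_done(&p);
	printf("below limit after 1 done:  %d\n", below_msglim(&p));	/* 1 */
	return 0;
}

The design point the series exploits: once nothing needs to find a queued
clone again (the old code matched completions by comparing skb_shinfo()),
the clone queue can go away entirely, which is what makes SG skbs safe to
send on TRANS_HIPER sockets.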
@@ -1409,10 +1409,12 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 	struct sk_buff *skb;
 
 	skb_queue_walk(&buf->skb_list, skb) {
+		struct sock *sk = skb->sk;
+
 		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
-		if (skb->sk && skb->sk->sk_family == PF_IUCV)
-			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+		if (sk && sk->sk_family == PF_IUCV)
+			iucv_sk(sk)->sk_txnotify(sk, notification);
 	}
 }
 
...
@@ -128,11 +128,12 @@ struct iucv_sock {
 	u8			flags;
 	u16			msglimit;
 	u16			msglimit_peer;
+	atomic_t		skbs_in_xmit;
 	atomic_t		msg_sent;
 	atomic_t		msg_recv;
 	atomic_t		pendings;
 	int			transport;
-	void			(*sk_txnotify)(struct sk_buff *skb,
+	void			(*sk_txnotify)(struct sock *sk,
 					       enum iucv_tx_notify n);
 };
...
@@ -89,7 +89,7 @@ static struct sock *iucv_accept_dequeue(struct sock *parent,
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
-static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
 
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
@@ -182,7 +182,7 @@ static inline int iucv_below_msglim(struct sock *sk)
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
 	if (iucv->transport == AF_IUCV_TRANS_IUCV)
-		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
 	else
 		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
 			(atomic_read(&iucv->pendings) <= 0));
@@ -211,7 +211,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 {
 	struct iucv_sock *iucv = iucv_sk(sock);
 	struct af_iucv_trans_hdr *phs_hdr;
-	struct sk_buff *nskb;
 	int err, confirm_recv = 0;
 
 	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
@@ -257,22 +256,16 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 			err = -EMSGSIZE;
 			goto err_free;
 		}
-		skb_trim(skb, skb->dev->mtu);
+		err = pskb_trim(skb, skb->dev->mtu);
+		if (err)
+			goto err_free;
 	}
 	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
-	__skb_header_release(skb);
-	nskb = skb_clone(skb, GFP_ATOMIC);
-	if (!nskb) {
-		err = -ENOMEM;
-		goto err_free;
-	}
 
-	skb_queue_tail(&iucv->send_skb_q, nskb);
+	atomic_inc(&iucv->skbs_in_xmit);
 	err = dev_queue_xmit(skb);
 	if (net_xmit_eval(err)) {
-		skb_unlink(nskb, &iucv->send_skb_q);
-		kfree_skb(nskb);
+		atomic_dec(&iucv->skbs_in_xmit);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
 		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
@@ -424,7 +417,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
-		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
 			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 				timeo = sk->sk_lingertime;
 			else
@@ -491,6 +484,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
 	atomic_set(&iucv->pendings, 0);
 	iucv->flags = 0;
 	iucv->msglimit = 0;
+	atomic_set(&iucv->skbs_in_xmit, 0);
 	atomic_set(&iucv->msg_sent, 0);
 	atomic_set(&iucv->msg_recv, 0);
 	iucv->path = NULL;
@@ -1004,7 +998,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		headroom = sizeof(struct af_iucv_trans_hdr) +
 			   LL_RESERVED_SPACE(iucv->hs_dev);
-		linear = len;
+		linear = min(len, PAGE_SIZE - headroom);
 	} else {
 		if (len < PAGE_SIZE) {
 			linear = len;
@@ -1055,6 +1049,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		}
 	} else { /* Classic VM IUCV transport */
 		skb_queue_tail(&iucv->send_skb_q, skb);
+		atomic_inc(&iucv->skbs_in_xmit);
 
 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
 		    skb->len <= 7) {
@@ -1063,6 +1058,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* on success: there is no message_complete callback */
 			/* for an IPRMDATA msg; remove skb from send queue */
 			if (err == 0) {
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				kfree_skb(skb);
 			}
@@ -1071,6 +1067,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* IUCV_IPRMDATA path flag is set... sever path */
 			if (err == 0x15) {
 				pr_iucv->path_sever(iucv->path, NULL);
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				err = -EPIPE;
 				goto fail;
@@ -1109,6 +1106,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		} else {
 			err = -EPIPE;
 		}
+
+		atomic_dec(&iucv->skbs_in_xmit);
 		skb_unlink(skb, &iucv->send_skb_q);
 		goto fail;
 	}
@@ -1748,10 +1747,14 @@ static void iucv_callback_txdone(struct iucv_path *path,
 {
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
-	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+	struct sk_buff_head *list;
 	struct sk_buff *list_skb;
+	struct iucv_sock *iucv;
 	unsigned long flags;
 
+	iucv = iucv_sk(sk);
+	list = &iucv->send_skb_q;
+
 	bh_lock_sock(sk);
 	spin_lock_irqsave(&list->lock, flags);
@@ -1761,8 +1764,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 			break;
 		}
 	}
-	if (this)
+
+	if (this) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		__skb_unlink(this, list);
+	}
+
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (this) {
@@ -1772,7 +1778,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}
@@ -2036,7 +2042,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 	char nullstring[8];
 
 	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
-		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
@@ -2132,73 +2137,40 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
  * transport
  **/
-static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
-					enum iucv_tx_notify n)
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
 {
-	struct sock *isk = skb->sk;
-	struct sock *sk = NULL;
-	struct iucv_sock *iucv = NULL;
-	struct sk_buff_head *list;
-	struct sk_buff *list_skb;
-	struct sk_buff *nskb;
-	unsigned long flags;
-
-	read_lock_irqsave(&iucv_sk_list.lock, flags);
-	sk_for_each(sk, &iucv_sk_list.head)
-		if (sk == isk) {
-			iucv = iucv_sk(sk);
-			break;
-		}
-	read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+	struct iucv_sock *iucv = iucv_sk(sk);
 
-	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+	if (sock_flag(sk, SOCK_ZAPPED))
 		return;
 
-	list = &iucv->send_skb_q;
-	spin_lock_irqsave(&list->lock, flags);
-	skb_queue_walk_safe(list, list_skb, nskb) {
-		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
-			switch (n) {
-			case TX_NOTIFY_OK:
-				__skb_unlink(list_skb, list);
-				kfree_skb(list_skb);
-				iucv_sock_wake_msglim(sk);
-				break;
-			case TX_NOTIFY_PENDING:
-				atomic_inc(&iucv->pendings);
-				break;
-			case TX_NOTIFY_DELAYED_OK:
-				__skb_unlink(list_skb, list);
-				atomic_dec(&iucv->pendings);
-				if (atomic_read(&iucv->pendings) <= 0)
-					iucv_sock_wake_msglim(sk);
-				kfree_skb(list_skb);
-				break;
-			case TX_NOTIFY_UNREACHABLE:
-			case TX_NOTIFY_DELAYED_UNREACHABLE:
-			case TX_NOTIFY_TPQFULL: /* not yet used */
-			case TX_NOTIFY_GENERALERROR:
-			case TX_NOTIFY_DELAYED_GENERALERROR:
-				__skb_unlink(list_skb, list);
-				kfree_skb(list_skb);
-				if (sk->sk_state == IUCV_CONNECTED) {
-					sk->sk_state = IUCV_DISCONN;
-					sk->sk_state_change(sk);
-				}
-				break;
-			}
-			break;
-		}
+	switch (n) {
+	case TX_NOTIFY_OK:
+		atomic_dec(&iucv->skbs_in_xmit);
+		iucv_sock_wake_msglim(sk);
+		break;
+	case TX_NOTIFY_PENDING:
+		atomic_inc(&iucv->pendings);
+		break;
+	case TX_NOTIFY_DELAYED_OK:
+		atomic_dec(&iucv->skbs_in_xmit);
+		if (atomic_dec_return(&iucv->pendings) <= 0)
+			iucv_sock_wake_msglim(sk);
+		break;
+	default:
+		atomic_dec(&iucv->skbs_in_xmit);
+		if (sk->sk_state == IUCV_CONNECTED) {
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
+		}
 	}
-	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
 		}
 	}
 }
 
 /*
...