Commit 403fa186 authored by David S. Miller

Merge branch 'iucv-next'

Karsten Graul says:

====================
net/iucv: updates 2021-08-09

Please apply the following iucv patches to netdev's net-next tree.

Remove the usage of register asm statements and replace deprecated
CPU-hotplug functions with their current counterparts.
Use consume_skb() instead of kfree_skb() to avoid flooding dropwatch
with false positives, plus 2 patches with cleanups.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 00335237 8c39ed48
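For context before the diff, the distinction behind the consume_skb() patches: kfree_skb() is meant for packets that are actually dropped and is what dropwatch reports, while consume_skb() frees a buffer that was handled successfully and raises no drop notification. A minimal sketch of the pattern, using a hypothetical completion handler that is not part of this series:

#include <linux/skbuff.h>

/* Hypothetical tx-completion handler illustrating the convention:
 * kfree_skb() only for genuine drops, consume_skb() for normal frees.
 */
static void example_tx_complete(struct sk_buff *skb, int err)
{
        if (err)
                kfree_skb(skb);         /* real drop: visible to dropwatch */
        else
                consume_skb(skb);       /* successful delivery: no drop event */
}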
net/iucv/af_iucv.c

@@ -1044,7 +1044,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                         if (err == 0) {
                                 atomic_dec(&iucv->skbs_in_xmit);
                                 skb_unlink(skb, &iucv->send_skb_q);
-                                kfree_skb(skb);
+                                consume_skb(skb);
                         }
 
                         /* this error should never happen since the */
@@ -1293,7 +1293,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                         }
                 }
 
-                kfree_skb(skb);
+                consume_skb(skb);
                 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                         atomic_inc(&iucv->msg_recv);
                         if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
@@ -1756,7 +1756,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
         spin_unlock_irqrestore(&list->lock, flags);
 
         if (this) {
-                kfree_skb(this);
+                consume_skb(this);
                 /* wake up any process waiting for sending */
                 iucv_sock_wake_msglim(sk);
         }
@@ -1903,17 +1903,17 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
 {
         struct iucv_sock *iucv = iucv_sk(sk);
 
-        if (!iucv)
-                goto out;
-        if (sk->sk_state != IUCV_BOUND)
-                goto out;
+        if (!iucv || sk->sk_state != IUCV_BOUND) {
+                kfree_skb(skb);
+                return NET_RX_SUCCESS;
+        }
+
         bh_lock_sock(sk);
         iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
         sk->sk_state = IUCV_CONNECTED;
         sk->sk_state_change(sk);
         bh_unlock_sock(sk);
-out:
-        kfree_skb(skb);
+        consume_skb(skb);
         return NET_RX_SUCCESS;
 }
@@ -1924,16 +1924,16 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
 {
         struct iucv_sock *iucv = iucv_sk(sk);
 
-        if (!iucv)
-                goto out;
-        if (sk->sk_state != IUCV_BOUND)
-                goto out;
+        if (!iucv || sk->sk_state != IUCV_BOUND) {
+                kfree_skb(skb);
+                return NET_RX_SUCCESS;
+        }
+
         bh_lock_sock(sk);
         sk->sk_state = IUCV_DISCONN;
         sk->sk_state_change(sk);
         bh_unlock_sock(sk);
-out:
-        kfree_skb(skb);
+        consume_skb(skb);
         return NET_RX_SUCCESS;
 }
@@ -1945,16 +1945,18 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
         struct iucv_sock *iucv = iucv_sk(sk);
 
         /* other end of connection closed */
-        if (!iucv)
-                goto out;
+        if (!iucv) {
+                kfree_skb(skb);
+                return NET_RX_SUCCESS;
+        }
+
         bh_lock_sock(sk);
         if (sk->sk_state == IUCV_CONNECTED) {
                 sk->sk_state = IUCV_DISCONN;
                 sk->sk_state_change(sk);
         }
         bh_unlock_sock(sk);
-out:
-        kfree_skb(skb);
+        consume_skb(skb);
         return NET_RX_SUCCESS;
 }
@@ -2107,7 +2109,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
         case (AF_IUCV_FLAG_WIN):
                 err = afiucv_hs_callback_win(sk, skb);
                 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
-                        kfree_skb(skb);
+                        consume_skb(skb);
                         break;
                 }
                 fallthrough;    /* and receive non-zero length data */
@@ -2262,21 +2264,11 @@ static struct packet_type iucv_packet_type = {
         .func = afiucv_hs_rcv,
 };
 
-static int afiucv_iucv_init(void)
-{
-        return pr_iucv->iucv_register(&af_iucv_handler, 0);
-}
-
-static void afiucv_iucv_exit(void)
-{
-        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
-}
-
 static int __init afiucv_init(void)
 {
         int err;
 
-        if (MACHINE_IS_VM) {
+        if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
                 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
                 if (unlikely(err)) {
                         WARN_ON(err);
@@ -2284,11 +2276,7 @@ static int __init afiucv_init(void)
                         goto out;
                 }
 
-                pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
-                if (!pr_iucv) {
-                        printk(KERN_WARNING "iucv_if lookup failed\n");
-                        memset(&iucv_userid, 0, sizeof(iucv_userid));
-                }
+                pr_iucv = &iucv_if;
         } else {
                 memset(&iucv_userid, 0, sizeof(iucv_userid));
                 pr_iucv = NULL;
@@ -2302,7 +2290,7 @@ static int __init afiucv_init(void)
                 goto out_proto;
 
         if (pr_iucv) {
-                err = afiucv_iucv_init();
+                err = pr_iucv->iucv_register(&af_iucv_handler, 0);
                 if (err)
                         goto out_sock;
         }
@@ -2316,23 +2304,19 @@ static int __init afiucv_init(void)
 
 out_notifier:
         if (pr_iucv)
-                afiucv_iucv_exit();
+                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
 out_sock:
         sock_unregister(PF_IUCV);
 out_proto:
         proto_unregister(&iucv_proto);
 out:
-        if (pr_iucv)
-                symbol_put(iucv_if);
         return err;
 }
 
 static void __exit afiucv_exit(void)
 {
-        if (pr_iucv) {
-                afiucv_iucv_exit();
-                symbol_put(iucv_if);
-        }
+        if (pr_iucv)
+                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+
         unregister_netdevice_notifier(&afiucv_netdev_notifier);
         dev_remove_pack(&iucv_packet_type);
net/iucv/iucv.c
@@ -286,19 +286,19 @@ static union iucv_param *iucv_param_irq[NR_CPUS];
  */
 static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
 {
-        register unsigned long reg0 asm ("0");
-        register unsigned long reg1 asm ("1");
-        int ccode;
+        int cc;
 
-        reg0 = command;
-        reg1 = (unsigned long)parm;
         asm volatile(
-                "       .long 0xb2f01000\n"
-                "       ipm     %0\n"
-                "       srl     %0,28\n"
-                : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
-                :  "m" (*parm) : "cc");
-        return ccode;
+                "       lgr     0,%[reg0]\n"
+                "       lgr     1,%[reg1]\n"
+                "       .long   0xb2f01000\n"
+                "       ipm     %[cc]\n"
+                "       srl     %[cc],28\n"
+                : [cc] "=&d" (cc), "+m" (*parm)
+                : [reg0] "d" ((unsigned long)command),
+                  [reg1] "d" ((unsigned long)parm)
+                : "cc", "0", "1");
+        return cc;
 }
 
 static inline int iucv_call_b2f0(int command, union iucv_param *parm)
@@ -319,19 +319,21 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm)
  */
 static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
 {
-        register unsigned long reg0 asm ("0");
-        register unsigned long reg1 asm ("1");
-        int ccode;
+        unsigned long reg1 = (unsigned long)param;
+        int cc;
 
-        reg0 = IUCV_QUERY;
-        reg1 = (unsigned long) param;
         asm volatile (
+                "       lghi    0,%[cmd]\n"
+                "       lgr     1,%[reg1]\n"
                 "       .long   0xb2f01000\n"
-                "       ipm     %0\n"
-                "       srl     %0,28\n"
-                : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
+                "       ipm     %[cc]\n"
+                "       srl     %[cc],28\n"
+                "       lgr     %[reg1],1\n"
+                : [cc] "=&d" (cc), [reg1] "+&d" (reg1)
+                : [cmd] "K" (IUCV_QUERY)
+                : "cc", "0", "1");
         *max_pathid = reg1;
-        return ccode;
+        return cc;
 }
 
 static int iucv_query_maxconn(void)
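The two hunks above drop the deprecated "register ... asm("0")" variables: the hard-coded GPRs 0 and 1 required by the 0xb2f01000 instruction are now loaded explicitly inside the asm body, listed as clobbers, and all operands are referenced by name. A minimal, hypothetical s390 sketch of the same idiom, independent of IUCV:

/* Hypothetical example: pass a value through hard register 0 using
 * named operands and an explicit clobber, instead of binding a C
 * variable to the register with "register unsigned long r0 asm("0")".
 */
static inline unsigned long pass_through_gpr0(unsigned long val)
{
        unsigned long out;

        asm volatile(
                "       lgr     0,%[in]\n"      /* load hard register 0 */
                "       lgr     %[out],0\n"     /* read it back */
                : [out] "=&d" (out)
                : [in] "d" (val)
                : "0");                         /* register 0 is clobbered */
        return out;
}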
@@ -500,14 +502,14 @@ static void iucv_setmask_mp(void)
 {
         int cpu;
 
-        get_online_cpus();
+        cpus_read_lock();
         for_each_online_cpu(cpu)
                 /* Enable all cpus with a declared buffer. */
                 if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
                     !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
                         smp_call_function_single(cpu, iucv_allow_cpu,
                                                  NULL, 1);
-        put_online_cpus();
+        cpus_read_unlock();
 }
 
 /**
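For context on the CPU-hotplug change in this hunk and the ones that follow: get_online_cpus()/put_online_cpus() were the deprecated names for taking the CPU-hotplug lock for reading, and cpus_read_lock()/cpus_read_unlock() are their current, semantically identical replacements. A minimal, hypothetical sketch of the pattern:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical helper: keep the set of online CPUs stable while
 * walking it, using the current hotplug read-lock API.
 */
static void example_walk_online_cpus(void (*fn)(int cpu))
{
        int cpu;

        cpus_read_lock();               /* was: get_online_cpus() */
        for_each_online_cpu(cpu)
                fn(cpu);
        cpus_read_unlock();             /* was: put_online_cpus() */
}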
@@ -540,7 +542,7 @@ static int iucv_enable(void)
         size_t alloc_size;
         int cpu, rc;
 
-        get_online_cpus();
+        cpus_read_lock();
         rc = -ENOMEM;
         alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
         iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
@@ -553,12 +555,12 @@ static int iucv_enable(void)
         if (cpumask_empty(&iucv_buffer_cpumask))
                 /* No cpu could declare an iucv buffer. */
                 goto out;
-        put_online_cpus();
+        cpus_read_unlock();
         return 0;
 out:
         kfree(iucv_path_table);
         iucv_path_table = NULL;
-        put_online_cpus();
+        cpus_read_unlock();
         return rc;
 }
@@ -571,11 +573,11 @@ static int iucv_enable(void)
  */
 static void iucv_disable(void)
 {
-        get_online_cpus();
+        cpus_read_lock();
         on_each_cpu(iucv_retrieve_cpu, NULL, 1);
         kfree(iucv_path_table);
         iucv_path_table = NULL;
-        put_online_cpus();
+        cpus_read_unlock();
 }
 
 static int iucv_cpu_dead(unsigned int cpu)
@@ -784,7 +786,7 @@ static int iucv_reboot_event(struct notifier_block *this,
         if (cpumask_empty(&iucv_irq_cpumask))
                 return NOTIFY_DONE;
 
-        get_online_cpus();
+        cpus_read_lock();
         on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
         preempt_disable();
         for (i = 0; i < iucv_max_pathid; i++) {
@@ -792,7 +794,7 @@ static int iucv_reboot_event(struct notifier_block *this,
                         iucv_sever_pathid(i, NULL);
         }
         preempt_enable();
-        put_online_cpus();
+        cpus_read_unlock();
         iucv_disable();
         return NOTIFY_DONE;
 }