Commit 3dd9c8b5 authored by David Howells

rxrpc: Remove the _bh annotation from all the spinlocks

None of the spinlocks in rxrpc need a _bh annotation now as the RCU
callback routines no longer take spinlocks and the bulk of the packet
wrangling code is now run in the I/O thread, not softirq context.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
parent 5e6ef4f1
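For context, a minimal sketch of the locking pattern this commit relies on. It is not taken from the rxrpc code; `struct example`, `queue_item_old()` and `queue_item_new()` are hypothetical names. The `_bh` variants are only needed while some user of a lock runs in softirq (bottom-half) context; once every user runs in thread context, the plain variants are sufficient.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical structure whose queue is protected by ->lock. */
struct example {
	spinlock_t		lock;
	struct list_head	queue;
};

/*
 * Old world: ->lock was also taken from softirq context, so a
 * process-context user had to disable local bottom halves while
 * holding it.  Otherwise a softirq arriving on the same CPU could
 * spin on a lock that CPU already holds and deadlock.
 */
static void queue_item_old(struct example *ex, struct list_head *item)
{
	spin_lock_bh(&ex->lock);	/* disables local BHs, then locks */
	list_add_tail(item, &ex->queue);
	spin_unlock_bh(&ex->lock);	/* unlocks, then re-enables BHs */
}

/*
 * New world: every user of ->lock runs in thread context (e.g. a
 * dedicated I/O kthread), so no softirq can interrupt a lock holder
 * and contend for the same lock; the cheaper plain variants suffice.
 */
static void queue_item_new(struct example *ex, struct list_head *item)
{
	spin_lock(&ex->lock);
	list_add_tail(item, &ex->queue);
	spin_unlock(&ex->lock);
}
```

Every hunk below applies exactly that substitution: `spin_lock_bh()`, `read/write_lock_bh()` and `write_seqlock_bh()` (and their unlock counterparts) become the plain variants, with no other logic change.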
@@ -359,9 +359,9 @@ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 
 	/* Make sure we're not going to call back into a kernel service */
 	if (call->notify_rx) {
-		spin_lock_bh(&call->notify_lock);
+		spin_lock(&call->notify_lock);
 		call->notify_rx = rxrpc_dummy_notify_rx;
-		spin_unlock_bh(&call->notify_lock);
+		spin_unlock(&call->notify_lock);
 	}
 
 	mutex_unlock(&call->user_mutex);
@@ -138,9 +138,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	spin_lock_bh(&rxnet->call_lock);
+	spin_lock(&rxnet->call_lock);
 	list_add_tail_rcu(&call->link, &rxnet->calls);
-	spin_unlock_bh(&rxnet->call_lock);
+	spin_unlock(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
@@ -188,8 +188,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 	/* Make sure that there aren't any incoming calls in progress before we
 	 * clear the preallocation buffers.
 	 */
-	spin_lock_bh(&rx->incoming_lock);
-	spin_unlock_bh(&rx->incoming_lock);
+	spin_lock(&rx->incoming_lock);
+	spin_unlock(&rx->incoming_lock);
 
 	head = b->peer_backlog_head;
 	tail = b->peer_backlog_tail;
@@ -101,9 +101,9 @@ void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
 		return;
 	}
 
-	spin_lock_bh(&local->ack_tx_lock);
+	spin_lock(&local->ack_tx_lock);
 	list_add_tail(&txb->tx_link, &local->ack_tx_queue);
-	spin_unlock_bh(&local->ack_tx_lock);
+	spin_unlock(&local->ack_tx_lock);
 	trace_rxrpc_send_ack(call, why, ack_reason, serial);
 	rxrpc_wake_up_io_thread(local);
@@ -354,9 +354,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	spin_lock_bh(&rxnet->call_lock);
+	spin_lock(&rxnet->call_lock);
 	list_add_tail_rcu(&call->link, &rxnet->calls);
-	spin_unlock_bh(&rxnet->call_lock);
+	spin_unlock(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -537,7 +537,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	del_timer_sync(&call->timer);
 
 	/* Make sure we don't get any more notifications */
-	write_lock_bh(&rx->recvmsg_lock);
+	write_lock(&rx->recvmsg_lock);
 
 	if (!list_empty(&call->recvmsg_link)) {
 		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
@@ -550,7 +550,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	call->recvmsg_link.next = NULL;
 	call->recvmsg_link.prev = NULL;
 
-	write_unlock_bh(&rx->recvmsg_lock);
+	write_unlock(&rx->recvmsg_lock);
 
 	if (put)
 		rxrpc_put_call(call, rxrpc_call_put_unnotify);
@@ -622,9 +622,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			spin_lock_bh(&rxnet->call_lock);
+			spin_lock(&rxnet->call_lock);
 			list_del_init(&call->link);
-			spin_unlock_bh(&rxnet->call_lock);
+			spin_unlock(&rxnet->call_lock);
 		}
 
 		rxrpc_cleanup_call(call);
@@ -706,7 +706,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
 	if (!list_empty(&rxnet->calls)) {
-		spin_lock_bh(&rxnet->call_lock);
+		spin_lock(&rxnet->call_lock);
 
 		while (!list_empty(&rxnet->calls)) {
 			call = list_entry(rxnet->calls.next,
@@ -721,12 +721,12 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 			       rxrpc_call_states[call->state],
 			       call->flags, call->events);
 
-			spin_unlock_bh(&rxnet->call_lock);
+			spin_unlock(&rxnet->call_lock);
 			cond_resched();
-			spin_lock_bh(&rxnet->call_lock);
+			spin_lock(&rxnet->call_lock);
 		}
 
-		spin_unlock_bh(&rxnet->call_lock);
+		spin_unlock(&rxnet->call_lock);
 	}
 
 	atomic_dec(&rxnet->nr_calls);
@@ -557,9 +557,9 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
 	trace_rxrpc_connect_call(call);
 
-	write_lock_bh(&call->state_lock);
+	write_lock(&call->state_lock);
 	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-	write_unlock_bh(&call->state_lock);
+	write_unlock(&call->state_lock);
 
 	/* Paired with the read barrier in rxrpc_connect_call().  This orders
 	 * cid and epoch in the connection wrt to call_id without the need to
@@ -198,9 +198,9 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);
 
 	/* generate a connection-level abort */
-	spin_lock_bh(&conn->state_lock);
+	spin_lock(&conn->state_lock);
 	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-		spin_unlock_bh(&conn->state_lock);
+		spin_unlock(&conn->state_lock);
 		_leave(" = 0 [already dead]");
 		return 0;
 	}
@@ -209,7 +209,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	conn->abort_code = abort_code;
 	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
 	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-	spin_unlock_bh(&conn->state_lock);
+	spin_unlock(&conn->state_lock);
 
 	msg.msg_name = &conn->peer->srx.transport;
 	msg.msg_namelen = conn->peer->srx.transport_len;
@@ -265,12 +265,12 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
 {
 	_enter("%p", call);
 	if (call) {
-		write_lock_bh(&call->state_lock);
+		write_lock(&call->state_lock);
 		if (call->state == RXRPC_CALL_SERVER_SECURING) {
 			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
 			rxrpc_notify_socket(call);
 		}
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 	}
 }
@@ -325,18 +325,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 			return ret;
 
 		spin_lock(&conn->bundle->channel_lock);
-		spin_lock_bh(&conn->state_lock);
+		spin_lock(&conn->state_lock);
 
 		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
 			conn->state = RXRPC_CONN_SERVICE;
-			spin_unlock_bh(&conn->state_lock);
+			spin_unlock(&conn->state_lock);
 			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
 				rxrpc_call_is_secure(
 					rcu_dereference_protected(
 						conn->channels[loop].call,
 						lockdep_is_held(&conn->bundle->channel_lock)));
 		} else {
-			spin_unlock_bh(&conn->state_lock);
+			spin_unlock(&conn->state_lock);
 		}
 
 		spin_unlock(&conn->bundle->channel_lock);
@@ -73,7 +73,7 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	struct rxrpc_conn_proto k = conn->proto;
 	struct rb_node **pp, *parent;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock(&peer->service_conn_lock);
 
 	pp = &peer->service_conns.rb_node;
 	parent = NULL;
@@ -94,14 +94,14 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	rb_insert_color(&conn->service_node, &peer->service_conns);
 
 conn_published:
 	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 	_leave(" = %d [new]", conn->debug_id);
 	return;
 
found_extant_conn:
 	if (refcount_read(&cursor->ref) == 0)
 		goto replace_old_connection;
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 	/* We should not be able to get here.  rxrpc_incoming_connection() is
 	 * called in a non-reentrant context, so there can't be a race to
 	 * insert a new connection.
@@ -195,8 +195,8 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
 {
 	struct rxrpc_peer *peer = conn->peer;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock(&peer->service_conn_lock);
 	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
 		rb_erase(&conn->service_node, &peer->service_conns);
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 }
@@ -669,10 +669,10 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 
 	peer = call->peer;
 	if (mtu < peer->maxdata) {
-		spin_lock_bh(&peer->lock);
+		spin_lock(&peer->lock);
 		peer->maxdata = mtu;
 		peer->mtu = mtu + peer->hdrsize;
-		spin_unlock_bh(&peer->lock);
+		spin_unlock(&peer->lock);
 	}
 
 	if (wake)
@@ -286,9 +286,9 @@ void rxrpc_transmit_ack_packets(struct rxrpc_local *local)
 	if (list_empty(&local->ack_tx_queue))
 		return;
 
-	spin_lock_bh(&local->ack_tx_lock);
+	spin_lock(&local->ack_tx_lock);
 	list_splice_tail_init(&local->ack_tx_queue, &queue);
-	spin_unlock_bh(&local->ack_tx_lock);
+	spin_unlock(&local->ack_tx_lock);
 
 	while (!list_empty(&queue)) {
 		struct rxrpc_txbuf *txb =
@@ -296,9 +296,9 @@ void rxrpc_transmit_ack_packets(struct rxrpc_local *local)
 		ret = rxrpc_send_ack_packet(local, txb);
 		if (ret < 0 && ret != -ECONNRESET) {
-			spin_lock_bh(&local->ack_tx_lock);
+			spin_lock(&local->ack_tx_lock);
 			list_splice_init(&queue, &local->ack_tx_queue);
-			spin_unlock_bh(&local->ack_tx_lock);
+			spin_unlock(&local->ack_tx_lock);
 			break;
 		}
@@ -121,10 +121,10 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
 	}
 
 	if (mtu < peer->mtu) {
-		spin_lock_bh(&peer->lock);
+		spin_lock(&peer->lock);
 		peer->mtu = mtu;
 		peer->maxdata = peer->mtu - peer->hdrsize;
-		spin_unlock_bh(&peer->lock);
+		spin_unlock(&peer->lock);
 	}
 }
@@ -237,7 +237,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 	time64_t keepalive_at;
 	int slot;
 
-	spin_lock_bh(&rxnet->peer_hash_lock);
+	spin_lock(&rxnet->peer_hash_lock);
 
 	while (!list_empty(collector)) {
 		peer = list_entry(collector->next,
@@ -248,7 +248,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 			continue;
 
 		if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
-			spin_unlock_bh(&rxnet->peer_hash_lock);
+			spin_unlock(&rxnet->peer_hash_lock);
 
 			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
 			slot = keepalive_at - base;
@@ -267,7 +267,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 			 */
 			slot += cursor;
 			slot &= mask;
-			spin_lock_bh(&rxnet->peer_hash_lock);
+			spin_lock(&rxnet->peer_hash_lock);
 			list_add_tail(&peer->keepalive_link,
 				      &rxnet->peer_keepalive[slot & mask]);
 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
@@ -275,7 +275,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
 	}
 
-	spin_unlock_bh(&rxnet->peer_hash_lock);
+	spin_unlock(&rxnet->peer_hash_lock);
 }
 
 /*
@@ -305,7 +305,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
 	 * second; the bucket at cursor + 1 goes at now + 1s and so
 	 * on...
 	 */
-	spin_lock_bh(&rxnet->peer_hash_lock);
+	spin_lock(&rxnet->peer_hash_lock);
 	list_splice_init(&rxnet->peer_keepalive_new, &collector);
 
 	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
@@ -317,7 +317,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
 	}
 
 	base = now;
-	spin_unlock_bh(&rxnet->peer_hash_lock);
+	spin_unlock(&rxnet->peer_hash_lock);
 
 	rxnet->peer_keepalive_base = base;
 	rxnet->peer_keepalive_cursor = cursor;
@@ -349,7 +349,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 			return NULL;
 		}
 
-		spin_lock_bh(&rxnet->peer_hash_lock);
+		spin_lock(&rxnet->peer_hash_lock);
 
 		/* Need to check that we aren't racing with someone else */
 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
@@ -362,7 +362,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 				      &rxnet->peer_keepalive_new);
 		}
 
-		spin_unlock_bh(&rxnet->peer_hash_lock);
+		spin_unlock(&rxnet->peer_hash_lock);
 
 		if (peer)
 			rxrpc_free_peer(candidate);
@@ -412,10 +412,10 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
 	ASSERT(hlist_empty(&peer->error_targets));
 
-	spin_lock_bh(&rxnet->peer_hash_lock);
+	spin_lock(&rxnet->peer_hash_lock);
 	hash_del_rcu(&peer->hash_link);
 	list_del_init(&peer->keepalive_link);
-	spin_unlock_bh(&rxnet->peer_hash_lock);
+	spin_unlock(&rxnet->peer_hash_lock);
 
 	rxrpc_free_peer(peer);
 }
@@ -36,16 +36,16 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
 	sk = &rx->sk;
 	if (rx && sk->sk_state < RXRPC_CLOSE) {
 		if (call->notify_rx) {
-			spin_lock_bh(&call->notify_lock);
+			spin_lock(&call->notify_lock);
 			call->notify_rx(sk, call, call->user_call_ID);
-			spin_unlock_bh(&call->notify_lock);
+			spin_unlock(&call->notify_lock);
 		} else {
-			write_lock_bh(&rx->recvmsg_lock);
+			write_lock(&rx->recvmsg_lock);
 			if (list_empty(&call->recvmsg_link)) {
 				rxrpc_get_call(call, rxrpc_call_get_notify_socket);
 				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
 			}
-			write_unlock_bh(&rx->recvmsg_lock);
+			write_unlock(&rx->recvmsg_lock);
 
 			if (!sock_flag(sk, SOCK_DEAD)) {
 				_debug("call %ps", sk->sk_data_ready);
@@ -87,9 +87,9 @@ bool rxrpc_set_call_completion(struct rxrpc_call *call,
 	bool ret = false;
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		write_lock_bh(&call->state_lock);
+		write_lock(&call->state_lock);
 		ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 	}
 	return ret;
 }
@@ -107,9 +107,9 @@ bool rxrpc_call_completed(struct rxrpc_call *call)
 	bool ret = false;
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		write_lock_bh(&call->state_lock);
+		write_lock(&call->state_lock);
 		ret = __rxrpc_call_completed(call);
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 	}
 	return ret;
 }
@@ -131,9 +131,9 @@ bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
 {
 	bool ret;
 
-	write_lock_bh(&call->state_lock);
+	write_lock(&call->state_lock);
 	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
-	write_unlock_bh(&call->state_lock);
+	write_unlock(&call->state_lock);
 	return ret;
 }
@@ -193,23 +193,23 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
 		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
 
-	write_lock_bh(&call->state_lock);
+	write_lock(&call->state_lock);
 
 	switch (call->state) {
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
 		__rxrpc_call_completed(call);
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 		break;
 
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
 		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 		rxrpc_propose_delay_ACK(call, serial,
 					rxrpc_propose_ack_processing_op);
 		break;
 	default:
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 		break;
 	}
 }
@@ -442,14 +442,14 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	/* Find the next call and dequeue it if we're not just peeking.  If we
 	 * do dequeue it, that comes with a ref that we will need to release.
 	 */
-	write_lock_bh(&rx->recvmsg_lock);
+	write_lock(&rx->recvmsg_lock);
 	l = rx->recvmsg_q.next;
 	call = list_entry(l, struct rxrpc_call, recvmsg_link);
 
 	if (!(flags & MSG_PEEK))
 		list_del_init(&call->recvmsg_link);
 	else
 		rxrpc_get_call(call, rxrpc_call_get_recvmsg);
-	write_unlock_bh(&rx->recvmsg_lock);
+	write_unlock(&rx->recvmsg_lock);
 
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0);
@@ -538,9 +538,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 error_requeue_call:
 	if (!(flags & MSG_PEEK)) {
-		write_lock_bh(&rx->recvmsg_lock);
+		write_lock(&rx->recvmsg_lock);
 		list_add(&call->recvmsg_link, &rx->recvmsg_q);
-		write_unlock_bh(&rx->recvmsg_lock);
+		write_unlock(&rx->recvmsg_lock);
 		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0);
 	} else {
 		rxrpc_put_call(call, rxrpc_call_put_recvmsg);
@@ -195,7 +195,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
 		_debug("________awaiting reply/ACK__________");
-		write_lock_bh(&call->state_lock);
+		write_lock(&call->state_lock);
 		switch (call->state) {
 		case RXRPC_CALL_CLIENT_SEND_REQUEST:
 			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
@@ -218,7 +218,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 		default:
 			break;
 		}
-		write_unlock_bh(&call->state_lock);
+		write_unlock(&call->state_lock);
 	}
 
 	if (poke)
@@ -357,10 +357,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 success:
 	ret = copied;
 	if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
-		read_lock_bh(&call->state_lock);
+		read_lock(&call->state_lock);
 		if (call->error < 0)
 			ret = call->error;
-		read_unlock_bh(&call->state_lock);
+		read_unlock(&call->state_lock);
 	}
 out:
 	call->tx_pending = txb;
@@ -725,9 +725,9 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
 				      notify_end_tx, &dropped_lock);
 		break;
 	case RXRPC_CALL_COMPLETE:
-		read_lock_bh(&call->state_lock);
+		read_lock(&call->state_lock);
 		ret = call->error;
-		read_unlock_bh(&call->state_lock);
+		read_unlock(&call->state_lock);
 		break;
 	default:
 		/* Request phase complete for this client call */