Commit 2fbafb82 authored by David S. Miller

Merge branch 'static-inlines'

Jakub Kicinski says:

====================
net: get rid of unused static inlines

I noticed a couple of unused static inline functions while
reviewing net/sched patches, so I ran a grep through all of
include/ and net/ to catch other cases. This set removes the
cases that look like obvious dead code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d9f393f4 5e4eca5d
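
A side note on why helpers like these go unnoticed: a static inline defined in a header emits no object code when nothing calls it and, unlike an unused non-inline static function in a .c file, normally draws no -Wunused-function warning, so only a tree-wide grep tends to turn it up. A minimal illustration with made-up names, not taken from this series:

/* example_header.h (hypothetical) */
static inline int example_unused_helper(int x)
{
	return x * 2;	/* no caller anywhere in the tree */
}

/* example_user.c (hypothetical): includes the header, never calls the helper,
 * and still builds without any -Wunused-function diagnostic.
 */
#include "example_header.h"

int example_other_work(void)
{
	return 0;
}
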
@@ -456,7 +456,7 @@ static const struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
int st_nci_vendor_cmds_init(struct nci_dev *ndev)
{
- return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds,
+ return nci_set_vendor_cmds(ndev, st_nci_vendor_cmds,
sizeof(st_nci_vendor_cmds));
}
EXPORT_SYMBOL(st_nci_vendor_cmds_init);
@@ -358,7 +358,7 @@ int st21nfca_vendor_cmds_init(struct nfc_hci_dev *hdev)
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
init_completion(&info->vendor_info.req_completion);
- return nfc_set_vendor_cmds(hdev->ndev, st21nfca_vendor_cmds,
- sizeof(st21nfca_vendor_cmds));
+ return nfc_hci_set_vendor_cmds(hdev, st21nfca_vendor_cmds,
+ sizeof(st21nfca_vendor_cmds));
}
EXPORT_SYMBOL(st21nfca_vendor_cmds_init);
@@ -371,19 +371,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
- static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
- {
- return NULL;
- }
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
@@ -66,11 +66,6 @@ static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
linkmode_clear_bit(nr, addr);
}
- static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
- {
- __change_bit(nr, addr);
- }
static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
return test_bit(nr, addr);
@@ -387,23 +387,6 @@ mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
speed_duplex == LPA_SGMII_10FULL);
}
- /**
- * mii_lpa_to_linkmode_adv_sgmii
- * @advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings when in SGMII mode.
- * Clears the old value of advertising.
- */
- static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
- u32 lpa)
- {
- linkmode_zero(lp_advertising);
- mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
- }
/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
@@ -135,15 +135,6 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
extack->cookie_len = sizeof(cookie);
}
- static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
- u32 cookie)
- {
- if (!extack)
- return;
- memcpy(extack->cookie, &cookie, sizeof(cookie));
- extack->cookie_len = sizeof(cookie);
- }
void netlink_kernel_release(struct sock *sk);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -3899,11 +3899,6 @@ static inline ktime_t net_timedelta(ktime_t t)
return ktime_sub(ktime_get_real(), t);
}
- static inline ktime_t net_invalid_timestamp(void)
- {
- return 0;
- }
static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
return skb_shinfo(skb)->meta_len;
@@ -23,11 +23,6 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
return (struct udphdr *)skb_transport_header(skb);
}
- static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
- {
- return (struct udphdr *)skb_inner_transport_header(skb);
- }
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
@@ -187,18 +187,12 @@ typedef struct {
typedef struct ax25_route {
struct ax25_route *next;
- refcount_t refcount;
ax25_address callsign;
struct net_device *dev;
ax25_digi *digipeat;
char ip_mode;
} ax25_route;
- static inline void ax25_hold_route(ax25_route *ax25_rt)
- {
- refcount_inc(&ax25_rt->refcount);
- }
void __ax25_put_route(ax25_route *ax25_rt);
extern rwlock_t ax25_route_lock;
@@ -213,12 +207,6 @@ static inline void ax25_route_lock_unuse(void)
read_unlock(&ax25_route_lock);
}
- static inline void ax25_put_route(ax25_route *ax25_rt)
- {
- if (refcount_dec_and_test(&ax25_rt->refcount))
- __ax25_put_route(ax25_rt);
- }
typedef struct {
char slave; /* slave_mode? */
struct timer_list slave_timer; /* timeout timer */
@@ -698,20 +698,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
return NULL;
}
- /* Caller must hold rcu_read_lock() for read */
- static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
- const u8 *mac)
- {
- struct list_head *iter;
- struct slave *tmp;
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
- return NULL;
- }
/* Caller must hold rcu_read_lock() for read */
static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
{
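
As context for the locking comment on the removed bond_slave_has_mac_rcu() above: any caller would have had to hold the RCU read lock across the lookup, roughly as in the sketch below (a hypothetical caller, not from the tree; the helper is being dropped precisely because no such caller exists):

/* hypothetical caller sketch, kernel context assumed */
#include <net/bonding.h>

static bool example_bond_mac_seen(struct bonding *bond, const u8 *mac)
{
	struct slave *slave;
	bool found;

	rcu_read_lock();	/* keeps the slave list stable while it is walked */
	slave = bond_slave_has_mac_rcu(bond, mac);
	found = slave != NULL;
	rcu_read_unlock();	/* slave must not be used past this point */

	return found;
}
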
@@ -63,12 +63,6 @@ static inline psched_time_t psched_get_time(void)
return PSCHED_NS2TICKS(ktime_get_ns());
}
- static inline psched_tdiff_t
- psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
- {
- return min(tv1 - tv2, bound);
- }
struct qdisc_watchdog {
u64 last_expires;
struct hrtimer timer;
@@ -518,11 +518,6 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
- static inline int qdisc_qlen_cpu(const struct Qdisc *q)
- {
- return this_cpu_ptr(q->cpu_qstats)->qlen;
- }
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
@@ -70,49 +70,6 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
- /* Slow-path computation of checksum. Socket is locked. */
- static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
- {
- const struct udp_sock *up = udp_sk(skb->sk);
- int cscov = up->len;
- __wsum csum = 0;
- if (up->pcflag & UDPLITE_SEND_CC) {
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket.
- * The special case "up->pcslen == 0" signifies full coverage.
- */
- if (up->pcslen < up->len) {
- if (0 < up->pcslen)
- cscov = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
- }
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
- }
- skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
- skb_queue_walk(&sk->sk_write_queue, skb) {
- const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
- csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
- if ((cscov -= len) <= 0)
- break;
- }
- return csum;
- }
/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
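
The "partial coverage" option referred to in the removed comment is configured from userspace through the UDP-Lite socket options; a sender would enable it roughly as below (an illustrative sketch following the kernel's UDP-Lite documentation, Documentation/networking/udplite.rst; error handling omitted):

/* userspace sketch: enable partial checksum coverage on a UDP-Lite socket */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136	/* UDP-Lite protocol number */
#endif
#define UDPLITE_SEND_CSCOV	10	/* sender checksum coverage option */

int example_udplite_partial_socket(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cscov = 20;	/* checksum covers only the first 20 octets of each datagram */

	/* a coverage of 0 means full coverage, matching the up->pcslen == 0 case above */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
	return fd;
}
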
@@ -111,7 +111,6 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
return -ENOMEM;
}
- refcount_set(&ax25_rt->refcount, 1);
ax25_rt->callsign = route->dest_addr;
ax25_rt->dev = ax25_dev->dev;
ax25_rt->digipeat = NULL;
@@ -160,12 +159,12 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
ax25cmp(&route->dest_addr, &s->callsign) == 0) {
if (ax25_route_list == s) {
ax25_route_list = s->next;
- ax25_put_route(s);
+ __ax25_put_route(s);
} else {
for (t = ax25_route_list; t != NULL; t = t->next) {
if (t->next == s) {
t->next = s->next;
- ax25_put_route(s);
+ __ax25_put_route(s);
break;
}
}
@@ -136,11 +136,6 @@ static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}
- static inline u64 max48(const u64 seq1, const u64 seq2)
- {
- return after48(seq1, seq2) ? seq1 : seq2;
- }
/**
* dccp_loss_count - Approximate the number of lost data packets in a burst loss
* @s1: last known sequence number before the loss ('hole')
@@ -259,11 +259,6 @@ static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct)
return ntohs(rct->sequence_nr);
}
- static inline u16 get_prp_lan_id(struct prp_rct *rct)
- {
- return ntohs(rct->lan_id_and_LSDU_size) >> 12;
- }
/* assume there is a valid rct */
static inline bool prp_check_lsdu_size(struct sk_buff *skb,
struct prp_rct *rct,
@@ -226,14 +226,6 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
m->hdr[w] |= htonl(val);
}
- static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
- {
- u32 temp = msg->hdr[a];
- msg->hdr[a] = msg->hdr[b];
- msg->hdr[b] = temp;
- }
/*
* Word 0
*/
@@ -480,11 +472,6 @@ static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
}
- static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
- {
- msg_set_bits(m, 1, 21, 0xf, 0);
- }
static inline u32 msg_lookup_scope(struct tipc_msg *m)
{
return msg_bits(m, 1, 19, 0x3);
@@ -800,11 +787,6 @@ static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
msg_set_word(m, 2, n);
}
- static inline u32 msg_bcgap_after(struct tipc_msg *m)
- {
- return msg_bits(m, 2, 16, 0xffff);
- }
static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 2, 16, 0xffff, n);
@@ -868,11 +850,6 @@ static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
msg_set_bits(m, 4, 0, 0xffff, n);
}
- static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
- {
- msg_set_bits(m, 4, 0, 0xffff, n);
- }
static inline u32 msg_bc_netid(struct tipc_msg *m)
{
return msg_word(m, 4);