Commit 36341de2 authored by David S. Miller

Merge branch 'net-driver-refcont_t'

Elena Reshetova says:

====================
networking drivers refcount_t conversions

Note: these are the last patches related to networking that perform
conversion of refcounters from atomic_t to refcount_t.
In contrast to the core network refcounter conversions that
were merged earlier, these are much more straightforward ones.

This series, for various networking drivers, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
Patches are based on top of net-next.
If there are no objections to the patches, please merge them via respective trees
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 86f540c1 e65f7ee3
...@@ -45,7 +45,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, ...@@ -45,7 +45,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
return NULL; return NULL;
} }
atomic_set(&cbq->refcnt, 1); refcount_set(&cbq->refcnt, 1);
atomic_inc(&dev->refcnt); atomic_inc(&dev->refcnt);
cbq->pdev = dev; cbq->pdev = dev;
...@@ -58,7 +58,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, ...@@ -58,7 +58,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
void cn_queue_release_callback(struct cn_callback_entry *cbq) void cn_queue_release_callback(struct cn_callback_entry *cbq)
{ {
if (!atomic_dec_and_test(&cbq->refcnt)) if (!refcount_dec_and_test(&cbq->refcnt))
return; return;
atomic_dec(&cbq->pdev->refcnt); atomic_dec(&cbq->pdev->refcnt);
......
...@@ -157,7 +157,7 @@ static int cn_call_callback(struct sk_buff *skb) ...@@ -157,7 +157,7 @@ static int cn_call_callback(struct sk_buff *skb)
spin_lock_bh(&dev->cbdev->queue_lock); spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) { list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) { if (cn_cb_equal(&i->id.id, &msg->id)) {
atomic_inc(&i->refcnt); refcount_inc(&i->refcnt);
cbq = i; cbq = i;
break; break;
} }
......
...@@ -96,7 +96,8 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ...@@ -96,7 +96,8 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
if (!ret) { if (!ret) {
ce = cte; ce = cte;
read_unlock_bh(&ctbl->lock); read_unlock_bh(&ctbl->lock);
goto found; refcount_inc(&ce->refcnt);
return 0;
} }
} }
read_unlock_bh(&ctbl->lock); read_unlock_bh(&ctbl->lock);
...@@ -108,7 +109,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ...@@ -108,7 +109,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
list_del(&ce->list); list_del(&ce->list);
INIT_LIST_HEAD(&ce->list); INIT_LIST_HEAD(&ce->list);
spin_lock_init(&ce->lock); spin_lock_init(&ce->lock);
atomic_set(&ce->refcnt, 0); refcount_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree); atomic_dec(&ctbl->nfree);
list_add_tail(&ce->list, &ctbl->hash_list[hash]); list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) { if (v6) {
...@@ -138,9 +139,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ...@@ -138,9 +139,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
return -ENOMEM; return -ENOMEM;
} }
write_unlock_bh(&ctbl->lock); write_unlock_bh(&ctbl->lock);
found: refcount_set(&ce->refcnt, 1);
atomic_inc(&ce->refcnt);
return 0; return 0;
} }
EXPORT_SYMBOL(cxgb4_clip_get); EXPORT_SYMBOL(cxgb4_clip_get);
...@@ -179,7 +178,7 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) ...@@ -179,7 +178,7 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
found: found:
write_lock_bh(&ctbl->lock); write_lock_bh(&ctbl->lock);
spin_lock_bh(&ce->lock); spin_lock_bh(&ce->lock);
if (atomic_dec_and_test(&ce->refcnt)) { if (refcount_dec_and_test(&ce->refcnt)) {
list_del(&ce->list); list_del(&ce->list);
INIT_LIST_HEAD(&ce->list); INIT_LIST_HEAD(&ce->list);
list_add_tail(&ce->list, &ctbl->ce_free_head); list_add_tail(&ce->list, &ctbl->ce_free_head);
...@@ -266,7 +265,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) ...@@ -266,7 +265,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
ip[0] = '\0'; ip[0] = '\0';
sprintf(ip, "%pISc", &ce->addr); sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip, seq_printf(seq, "%-25s %u\n", ip,
atomic_read(&ce->refcnt)); refcount_read(&ce->refcnt));
} }
} }
seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree)); seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
......
...@@ -10,9 +10,11 @@ ...@@ -10,9 +10,11 @@
* release for licensing terms and conditions. * release for licensing terms and conditions.
*/ */
#include <linux/refcount.h>
struct clip_entry { struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */ spinlock_t lock; /* Hold while modifying clip reference */
atomic_t refcnt; refcount_t refcnt;
struct list_head list; struct list_head list;
union { union {
struct sockaddr_in addr; struct sockaddr_in addr;
......
...@@ -1817,7 +1817,7 @@ static int mtk_open(struct net_device *dev) ...@@ -1817,7 +1817,7 @@ static int mtk_open(struct net_device *dev)
struct mtk_eth *eth = mac->hw; struct mtk_eth *eth = mac->hw;
/* we run 2 netdevs on the same dma ring so we only bring it up once */ /* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!atomic_read(&eth->dma_refcnt)) { if (!refcount_read(&eth->dma_refcnt)) {
int err = mtk_start_dma(eth); int err = mtk_start_dma(eth);
if (err) if (err)
...@@ -1827,8 +1827,10 @@ static int mtk_open(struct net_device *dev) ...@@ -1827,8 +1827,10 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->rx_napi); napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
refcount_set(&eth->dma_refcnt, 1);
} }
atomic_inc(&eth->dma_refcnt); else
refcount_inc(&eth->dma_refcnt);
phy_start(dev->phydev); phy_start(dev->phydev);
netif_start_queue(dev); netif_start_queue(dev);
...@@ -1868,7 +1870,7 @@ static int mtk_stop(struct net_device *dev) ...@@ -1868,7 +1870,7 @@ static int mtk_stop(struct net_device *dev)
phy_stop(dev->phydev); phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */ /* only shutdown DMA if this is the last user */
if (!atomic_dec_and_test(&eth->dma_refcnt)) if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0; return 0;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#ifndef MTK_ETH_H #ifndef MTK_ETH_H
#define MTK_ETH_H #define MTK_ETH_H
#include <linux/refcount.h>
#define MTK_QDMA_PAGE_SIZE 2048 #define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536 #define MTK_MAX_RX_LENGTH 1536
#define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN 0x3fff
...@@ -632,7 +634,7 @@ struct mtk_eth { ...@@ -632,7 +634,7 @@ struct mtk_eth {
struct regmap *pctl; struct regmap *pctl;
u32 chip_id; u32 chip_id;
bool hwlro; bool hwlro;
atomic_t dma_refcnt; refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring; struct mtk_tx_ring tx_ring;
struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct mtk_rx_ring rx_ring_qdma; struct mtk_rx_ring rx_ring_qdma;
......
...@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data) ...@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list); list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq); mcq->tasklet_ctx.comp(mcq);
if (atomic_dec_and_test(&mcq->refcount)) if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free); complete(&mcq->free);
if (time_after(jiffies, end)) if (time_after(jiffies, end))
break; break;
...@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) ...@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
* still arrive. * still arrive.
*/ */
if (list_empty_careful(&cq->tasklet_ctx.list)) { if (list_empty_careful(&cq->tasklet_ctx.list)) {
atomic_inc(&cq->refcount); refcount_inc(&cq->refcount);
kick = list_empty(&tasklet_ctx->list); kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
if (kick) if (kick)
...@@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, ...@@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->cons_index = 0; cq->cons_index = 0;
cq->arm_sn = 1; cq->arm_sn = 1;
cq->uar = uar; cq->uar = uar;
atomic_set(&cq->refcount, 1); refcount_set(&cq->refcount, 1);
init_completion(&cq->free); init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet; cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv = cq->tasklet_ctx.priv =
...@@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) ...@@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
priv->eq_table.eq[MLX4_EQ_ASYNC].irq) priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
if (atomic_dec_and_test(&cq->refcount)) if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free); complete(&cq->free);
wait_for_completion(&cq->free); wait_for_completion(&cq->free);
......
...@@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) ...@@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp = __mlx4_qp_lookup(dev, qpn); qp = __mlx4_qp_lookup(dev, qpn);
if (qp) if (qp)
atomic_inc(&qp->refcount); refcount_inc(&qp->refcount);
spin_unlock(&qp_table->lock); spin_unlock(&qp_table->lock);
...@@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) ...@@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp->event(qp, event_type); qp->event(qp, event_type);
if (atomic_dec_and_test(&qp->refcount)) if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free); complete(&qp->free);
} }
...@@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) ...@@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
if (err) if (err)
goto err_icm; goto err_icm;
atomic_set(&qp->refcount, 1); refcount_set(&qp->refcount, 1);
init_completion(&qp->free); init_completion(&qp->free);
return 0; return 0;
...@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove); ...@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{ {
if (atomic_dec_and_test(&qp->refcount)) if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free); complete(&qp->free);
wait_for_completion(&qp->free); wait_for_completion(&qp->free);
......
...@@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) ...@@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
rcu_read_unlock(); rcu_read_unlock();
if (srq) if (srq)
atomic_inc(&srq->refcount); refcount_inc(&srq->refcount);
else { else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return; return;
...@@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) ...@@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq->event(srq, event_type); srq->event(srq, event_type);
if (atomic_dec_and_test(&srq->refcount)) if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free); complete(&srq->free);
} }
...@@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, ...@@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
if (err) if (err)
goto err_radix; goto err_radix;
atomic_set(&srq->refcount, 1); refcount_set(&srq->refcount, 1);
init_completion(&srq->free); init_completion(&srq->free);
return 0; return 0;
...@@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) ...@@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
radix_tree_delete(&srq_table->tree, srq->srqn); radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock); spin_unlock_irq(&srq_table->lock);
if (atomic_dec_and_test(&srq->refcount)) if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free); complete(&srq->free);
wait_for_completion(&srq->free); wait_for_completion(&srq->free);
......
...@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data) ...@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) { tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list); list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq); mcq->tasklet_ctx.comp(mcq);
if (atomic_dec_and_test(&mcq->refcount)) if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free); complete(&mcq->free);
if (time_after(jiffies, end)) if (time_after(jiffies, end))
break; break;
...@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) ...@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive. * still arrive.
*/ */
if (list_empty_careful(&cq->tasklet_ctx.list)) { if (list_empty_careful(&cq->tasklet_ctx.list)) {
atomic_inc(&cq->refcount); refcount_inc(&cq->refcount);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
} }
spin_unlock_irqrestore(&tasklet_ctx->lock, flags); spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
...@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) ...@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
spin_lock(&table->lock); spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn); cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq)) if (likely(cq))
atomic_inc(&cq->refcount); refcount_inc(&cq->refcount);
spin_unlock(&table->lock); spin_unlock(&table->lock);
if (!cq) { if (!cq) {
...@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) ...@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
cq->comp(cq); cq->comp(cq);
if (atomic_dec_and_test(&cq->refcount)) if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free); complete(&cq->free);
} }
...@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) ...@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq = radix_tree_lookup(&table->tree, cqn); cq = radix_tree_lookup(&table->tree, cqn);
if (cq) if (cq)
atomic_inc(&cq->refcount); refcount_inc(&cq->refcount);
spin_unlock(&table->lock); spin_unlock(&table->lock);
...@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) ...@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq->event(cq, event_type); cq->event(cq, event_type);
if (atomic_dec_and_test(&cq->refcount)) if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free); complete(&cq->free);
} }
...@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0; cq->cons_index = 0;
cq->arm_sn = 0; cq->arm_sn = 0;
atomic_set(&cq->refcount, 1); refcount_set(&cq->refcount, 1);
init_completion(&cq->free); init_completion(&cq->free);
if (!cq->comp) if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet; cq->comp = mlx5_add_cq_to_tasklet;
...@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) ...@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn); synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq); mlx5_debug_cq_remove(dev, cq);
if (atomic_dec_and_test(&cq->refcount)) if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free); complete(&cq->free);
wait_for_completion(&cq->free); wait_for_completion(&cq->free);
......
...@@ -188,7 +188,7 @@ static void tree_init_node(struct fs_node *node, ...@@ -188,7 +188,7 @@ static void tree_init_node(struct fs_node *node,
void (*del_hw_func)(struct fs_node *), void (*del_hw_func)(struct fs_node *),
void (*del_sw_func)(struct fs_node *)) void (*del_sw_func)(struct fs_node *))
{ {
atomic_set(&node->refcount, 1); refcount_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->children);
init_rwsem(&node->lock); init_rwsem(&node->lock);
...@@ -200,7 +200,7 @@ static void tree_init_node(struct fs_node *node, ...@@ -200,7 +200,7 @@ static void tree_init_node(struct fs_node *node,
static void tree_add_node(struct fs_node *node, struct fs_node *parent) static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{ {
if (parent) if (parent)
atomic_inc(&parent->refcount); refcount_inc(&parent->refcount);
node->parent = parent; node->parent = parent;
/* Parent is the root */ /* Parent is the root */
...@@ -212,7 +212,7 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent) ...@@ -212,7 +212,7 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent)
static int tree_get_node(struct fs_node *node) static int tree_get_node(struct fs_node *node)
{ {
return atomic_add_unless(&node->refcount, 1, 0); return refcount_inc_not_zero(&node->refcount);
} }
static void nested_down_read_ref_node(struct fs_node *node, static void nested_down_read_ref_node(struct fs_node *node,
...@@ -220,7 +220,7 @@ static void nested_down_read_ref_node(struct fs_node *node, ...@@ -220,7 +220,7 @@ static void nested_down_read_ref_node(struct fs_node *node,
{ {
if (node) { if (node) {
down_read_nested(&node->lock, class); down_read_nested(&node->lock, class);
atomic_inc(&node->refcount); refcount_inc(&node->refcount);
} }
} }
...@@ -229,7 +229,7 @@ static void nested_down_write_ref_node(struct fs_node *node, ...@@ -229,7 +229,7 @@ static void nested_down_write_ref_node(struct fs_node *node,
{ {
if (node) { if (node) {
down_write_nested(&node->lock, class); down_write_nested(&node->lock, class);
atomic_inc(&node->refcount); refcount_inc(&node->refcount);
} }
} }
...@@ -237,19 +237,19 @@ static void down_write_ref_node(struct fs_node *node) ...@@ -237,19 +237,19 @@ static void down_write_ref_node(struct fs_node *node)
{ {
if (node) { if (node) {
down_write(&node->lock); down_write(&node->lock);
atomic_inc(&node->refcount); refcount_inc(&node->refcount);
} }
} }
static void up_read_ref_node(struct fs_node *node) static void up_read_ref_node(struct fs_node *node)
{ {
atomic_dec(&node->refcount); refcount_dec(&node->refcount);
up_read(&node->lock); up_read(&node->lock);
} }
static void up_write_ref_node(struct fs_node *node) static void up_write_ref_node(struct fs_node *node)
{ {
atomic_dec(&node->refcount); refcount_dec(&node->refcount);
up_write(&node->lock); up_write(&node->lock);
} }
...@@ -257,7 +257,7 @@ static void tree_put_node(struct fs_node *node) ...@@ -257,7 +257,7 @@ static void tree_put_node(struct fs_node *node)
{ {
struct fs_node *parent_node = node->parent; struct fs_node *parent_node = node->parent;
if (atomic_dec_and_test(&node->refcount)) { if (refcount_dec_and_test(&node->refcount)) {
if (node->del_hw_func) if (node->del_hw_func)
node->del_hw_func(node); node->del_hw_func(node);
if (parent_node) { if (parent_node) {
...@@ -280,8 +280,8 @@ static void tree_put_node(struct fs_node *node) ...@@ -280,8 +280,8 @@ static void tree_put_node(struct fs_node *node)
static int tree_remove_node(struct fs_node *node) static int tree_remove_node(struct fs_node *node)
{ {
if (atomic_read(&node->refcount) > 1) { if (refcount_read(&node->refcount) > 1) {
atomic_dec(&node->refcount); refcount_dec(&node->refcount);
return -EEXIST; return -EEXIST;
} }
tree_put_node(node); tree_put_node(node);
...@@ -1184,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte, ...@@ -1184,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
int i) int i)
{ {
for (; --i >= 0;) { for (; --i >= 0;) {
if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) { if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--; fte->dests_size--;
list_del(&handle->rule[i]->node.list); list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]); kfree(handle->rule[i]);
...@@ -1215,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte, ...@@ -1215,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte,
if (dest) { if (dest) {
rule = find_flow_rule(fte, dest + i); rule = find_flow_rule(fte, dest + i);
if (rule) { if (rule) {
atomic_inc(&rule->node.refcount); refcount_inc(&rule->node.refcount);
goto rule_found; goto rule_found;
} }
} }
...@@ -1466,7 +1466,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, ...@@ -1466,7 +1466,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
trace_mlx5_fs_set_fte(fte, false); trace_mlx5_fs_set_fte(fte, false);
for (i = 0; i < handle->num_rules; i++) { for (i = 0; i < handle->num_rules; i++) {
if (atomic_read(&handle->rule[i]->node.refcount) == 1) { if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
tree_add_node(&handle->rule[i]->node, &fte->node); tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]); trace_mlx5_fs_add_rule(handle->rule[i]);
} }
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#ifndef _MLX5_FS_CORE_ #ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_ #define _MLX5_FS_CORE_
#include <linux/refcount.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/rhashtable.h> #include <linux/rhashtable.h>
...@@ -84,7 +85,7 @@ struct fs_node { ...@@ -84,7 +85,7 @@ struct fs_node {
struct fs_node *root; struct fs_node *root;
/* lock the node for writing and traversing */ /* lock the node for writing and traversing */
struct rw_semaphore lock; struct rw_semaphore lock;
atomic_t refcount; refcount_t refcount;
bool active; bool active;
void (*del_hw_func)(struct fs_node *); void (*del_hw_func)(struct fs_node *);
void (*del_sw_func)(struct fs_node *); void (*del_sw_func)(struct fs_node *);
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/atomic.h> #include <linux/refcount.h>
#define SIXPACK_VERSION "Revision: 0.3.0" #define SIXPACK_VERSION "Revision: 0.3.0"
...@@ -120,7 +120,7 @@ struct sixpack { ...@@ -120,7 +120,7 @@ struct sixpack {
struct timer_list tx_t; struct timer_list tx_t;
struct timer_list resync_t; struct timer_list resync_t;
atomic_t refcnt; refcount_t refcnt;
struct semaphore dead_sem; struct semaphore dead_sem;
spinlock_t lock; spinlock_t lock;
}; };
...@@ -381,7 +381,7 @@ static struct sixpack *sp_get(struct tty_struct *tty) ...@@ -381,7 +381,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock); read_lock(&disc_data_lock);
sp = tty->disc_data; sp = tty->disc_data;
if (sp) if (sp)
atomic_inc(&sp->refcnt); refcount_inc(&sp->refcnt);
read_unlock(&disc_data_lock); read_unlock(&disc_data_lock);
return sp; return sp;
...@@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty) ...@@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
static void sp_put(struct sixpack *sp) static void sp_put(struct sixpack *sp)
{ {
if (atomic_dec_and_test(&sp->refcnt)) if (refcount_dec_and_test(&sp->refcnt))
up(&sp->dead_sem); up(&sp->dead_sem);
} }
...@@ -576,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty) ...@@ -576,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev; sp->dev = dev;
spin_lock_init(&sp->lock); spin_lock_init(&sp->lock);
atomic_set(&sp->refcnt, 1); refcount_set(&sp->refcnt, 1);
sema_init(&sp->dead_sem, 0); sema_init(&sp->dead_sem, 0);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
...@@ -670,7 +670,7 @@ static void sixpack_close(struct tty_struct *tty) ...@@ -670,7 +670,7 @@ static void sixpack_close(struct tty_struct *tty)
* We have now ensured that nobody can start using ap from now on, but * We have now ensured that nobody can start using ap from now on, but
* we have to wait for all existing users to finish. * we have to wait for all existing users to finish.
*/ */
if (!atomic_dec_and_test(&sp->refcnt)) if (!refcount_dec_and_test(&sp->refcnt))
down(&sp->dead_sem); down(&sp->dead_sem);
/* We must stop the queue to avoid potentially scribbling /* We must stop the queue to avoid potentially scribbling
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <crypto/aead.h> #include <crypto/aead.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h> #include <net/genetlink.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/gro_cells.h> #include <net/gro_cells.h>
...@@ -146,7 +147,7 @@ struct macsec_rx_sa { ...@@ -146,7 +147,7 @@ struct macsec_rx_sa {
struct macsec_key key; struct macsec_key key;
spinlock_t lock; spinlock_t lock;
u32 next_pn; u32 next_pn;
atomic_t refcnt; refcount_t refcnt;
bool active; bool active;
struct macsec_rx_sa_stats __percpu *stats; struct macsec_rx_sa_stats __percpu *stats;
struct macsec_rx_sc *sc; struct macsec_rx_sc *sc;
...@@ -171,7 +172,7 @@ struct macsec_rx_sc { ...@@ -171,7 +172,7 @@ struct macsec_rx_sc {
bool active; bool active;
struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN]; struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_rx_sc_stats __percpu *stats; struct pcpu_rx_sc_stats __percpu *stats;
atomic_t refcnt; refcount_t refcnt;
struct rcu_head rcu_head; struct rcu_head rcu_head;
}; };
...@@ -187,7 +188,7 @@ struct macsec_tx_sa { ...@@ -187,7 +188,7 @@ struct macsec_tx_sa {
struct macsec_key key; struct macsec_key key;
spinlock_t lock; spinlock_t lock;
u32 next_pn; u32 next_pn;
atomic_t refcnt; refcount_t refcnt;
bool active; bool active;
struct macsec_tx_sa_stats __percpu *stats; struct macsec_tx_sa_stats __percpu *stats;
struct rcu_head rcu; struct rcu_head rcu;
...@@ -314,7 +315,7 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) ...@@ -314,7 +315,7 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
if (!sa || !sa->active) if (!sa || !sa->active)
return NULL; return NULL;
if (!atomic_inc_not_zero(&sa->refcnt)) if (!refcount_inc_not_zero(&sa->refcnt))
return NULL; return NULL;
return sa; return sa;
...@@ -330,12 +331,12 @@ static void free_rx_sc_rcu(struct rcu_head *head) ...@@ -330,12 +331,12 @@ static void free_rx_sc_rcu(struct rcu_head *head)
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{ {
return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL; return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
} }
static void macsec_rxsc_put(struct macsec_rx_sc *sc) static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{ {
if (atomic_dec_and_test(&sc->refcnt)) if (refcount_dec_and_test(&sc->refcnt))
call_rcu(&sc->rcu_head, free_rx_sc_rcu); call_rcu(&sc->rcu_head, free_rx_sc_rcu);
} }
...@@ -350,7 +351,7 @@ static void free_rxsa(struct rcu_head *head) ...@@ -350,7 +351,7 @@ static void free_rxsa(struct rcu_head *head)
static void macsec_rxsa_put(struct macsec_rx_sa *sa) static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{ {
if (atomic_dec_and_test(&sa->refcnt)) if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_rxsa); call_rcu(&sa->rcu, free_rxsa);
} }
...@@ -361,7 +362,7 @@ static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) ...@@ -361,7 +362,7 @@ static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
if (!sa || !sa->active) if (!sa || !sa->active)
return NULL; return NULL;
if (!atomic_inc_not_zero(&sa->refcnt)) if (!refcount_inc_not_zero(&sa->refcnt))
return NULL; return NULL;
return sa; return sa;
...@@ -378,7 +379,7 @@ static void free_txsa(struct rcu_head *head) ...@@ -378,7 +379,7 @@ static void free_txsa(struct rcu_head *head)
static void macsec_txsa_put(struct macsec_tx_sa *sa) static void macsec_txsa_put(struct macsec_tx_sa *sa)
{ {
if (atomic_dec_and_test(&sa->refcnt)) if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_txsa); call_rcu(&sa->rcu, free_txsa);
} }
...@@ -1339,7 +1340,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, ...@@ -1339,7 +1340,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
rx_sa->active = false; rx_sa->active = false;
rx_sa->next_pn = 1; rx_sa->next_pn = 1;
atomic_set(&rx_sa->refcnt, 1); refcount_set(&rx_sa->refcnt, 1);
spin_lock_init(&rx_sa->lock); spin_lock_init(&rx_sa->lock);
return 0; return 0;
...@@ -1410,7 +1411,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) ...@@ -1410,7 +1411,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
rx_sc->sci = sci; rx_sc->sci = sci;
rx_sc->active = true; rx_sc->active = true;
atomic_set(&rx_sc->refcnt, 1); refcount_set(&rx_sc->refcnt, 1);
secy = &macsec_priv(dev)->secy; secy = &macsec_priv(dev)->secy;
rcu_assign_pointer(rx_sc->next, secy->rx_sc); rcu_assign_pointer(rx_sc->next, secy->rx_sc);
...@@ -1436,7 +1437,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, ...@@ -1436,7 +1437,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
} }
tx_sa->active = false; tx_sa->active = false;
atomic_set(&tx_sa->refcnt, 1); refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock); spin_lock_init(&tx_sa->lock);
return 0; return 0;
......
...@@ -69,7 +69,7 @@ struct asyncppp { ...@@ -69,7 +69,7 @@ struct asyncppp {
struct tasklet_struct tsk; struct tasklet_struct tsk;
atomic_t refcnt; refcount_t refcnt;
struct semaphore dead_sem; struct semaphore dead_sem;
struct ppp_channel chan; /* interface to generic ppp layer */ struct ppp_channel chan; /* interface to generic ppp layer */
unsigned char obuf[OBUFSIZE]; unsigned char obuf[OBUFSIZE];
...@@ -140,14 +140,14 @@ static struct asyncppp *ap_get(struct tty_struct *tty) ...@@ -140,14 +140,14 @@ static struct asyncppp *ap_get(struct tty_struct *tty)
read_lock(&disc_data_lock); read_lock(&disc_data_lock);
ap = tty->disc_data; ap = tty->disc_data;
if (ap != NULL) if (ap != NULL)
atomic_inc(&ap->refcnt); refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock); read_unlock(&disc_data_lock);
return ap; return ap;
} }
static void ap_put(struct asyncppp *ap) static void ap_put(struct asyncppp *ap)
{ {
if (atomic_dec_and_test(&ap->refcnt)) if (refcount_dec_and_test(&ap->refcnt))
up(&ap->dead_sem); up(&ap->dead_sem);
} }
...@@ -185,7 +185,7 @@ ppp_asynctty_open(struct tty_struct *tty) ...@@ -185,7 +185,7 @@ ppp_asynctty_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue); skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
atomic_set(&ap->refcnt, 1); refcount_set(&ap->refcnt, 1);
sema_init(&ap->dead_sem, 0); sema_init(&ap->dead_sem, 0);
ap->chan.private = ap; ap->chan.private = ap;
...@@ -234,7 +234,7 @@ ppp_asynctty_close(struct tty_struct *tty) ...@@ -234,7 +234,7 @@ ppp_asynctty_close(struct tty_struct *tty)
* our channel ops (i.e. ppp_async_send/ioctl) are in progress * our channel ops (i.e. ppp_async_send/ioctl) are in progress
* by the time it returns. * by the time it returns.
*/ */
if (!atomic_dec_and_test(&ap->refcnt)) if (!refcount_dec_and_test(&ap->refcnt))
down(&ap->dead_sem); down(&ap->dead_sem);
tasklet_kill(&ap->tsk); tasklet_kill(&ap->tsk);
......
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <net/slhc_vj.h> #include <net/slhc_vj.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/nsproxy.h> #include <linux/nsproxy.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
...@@ -84,7 +85,7 @@ struct ppp_file { ...@@ -84,7 +85,7 @@ struct ppp_file {
struct sk_buff_head xq; /* pppd transmit queue */ struct sk_buff_head xq; /* pppd transmit queue */
struct sk_buff_head rq; /* receive queue for pppd */ struct sk_buff_head rq; /* receive queue for pppd */
wait_queue_head_t rwait; /* for poll on reading /dev/ppp */ wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
atomic_t refcnt; /* # refs (incl /dev/ppp attached) */ refcount_t refcnt; /* # refs (incl /dev/ppp attached) */
int hdrlen; /* space to leave for headers */ int hdrlen; /* space to leave for headers */
int index; /* interface unit / channel number */ int index; /* interface unit / channel number */
int dead; /* unit/channel has been shut down */ int dead; /* unit/channel has been shut down */
...@@ -408,7 +409,7 @@ static int ppp_release(struct inode *unused, struct file *file) ...@@ -408,7 +409,7 @@ static int ppp_release(struct inode *unused, struct file *file)
unregister_netdevice(ppp->dev); unregister_netdevice(ppp->dev);
rtnl_unlock(); rtnl_unlock();
} }
if (atomic_dec_and_test(&pf->refcnt)) { if (refcount_dec_and_test(&pf->refcnt)) {
switch (pf->kind) { switch (pf->kind) {
case INTERFACE: case INTERFACE:
ppp_destroy_interface(PF_TO_PPP(pf)); ppp_destroy_interface(PF_TO_PPP(pf));
...@@ -881,7 +882,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, ...@@ -881,7 +882,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
mutex_lock(&pn->all_ppp_mutex); mutex_lock(&pn->all_ppp_mutex);
ppp = ppp_find_unit(pn, unit); ppp = ppp_find_unit(pn, unit);
if (ppp) { if (ppp) {
atomic_inc(&ppp->file.refcnt); refcount_inc(&ppp->file.refcnt);
file->private_data = &ppp->file; file->private_data = &ppp->file;
err = 0; err = 0;
} }
...@@ -896,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, ...@@ -896,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
spin_lock_bh(&pn->all_channels_lock); spin_lock_bh(&pn->all_channels_lock);
chan = ppp_find_channel(pn, unit); chan = ppp_find_channel(pn, unit);
if (chan) { if (chan) {
atomic_inc(&chan->file.refcnt); refcount_inc(&chan->file.refcnt);
file->private_data = &chan->file; file->private_data = &chan->file;
err = 0; err = 0;
} }
...@@ -1348,7 +1349,7 @@ static int ppp_dev_init(struct net_device *dev) ...@@ -1348,7 +1349,7 @@ static int ppp_dev_init(struct net_device *dev)
* that ppp_destroy_interface() won't run before the device gets * that ppp_destroy_interface() won't run before the device gets
* unregistered. * unregistered.
*/ */
atomic_inc(&ppp->file.refcnt); refcount_inc(&ppp->file.refcnt);
return 0; return 0;
} }
...@@ -1377,7 +1378,7 @@ static void ppp_dev_priv_destructor(struct net_device *dev) ...@@ -1377,7 +1378,7 @@ static void ppp_dev_priv_destructor(struct net_device *dev)
struct ppp *ppp; struct ppp *ppp;
ppp = netdev_priv(dev); ppp = netdev_priv(dev);
if (atomic_dec_and_test(&ppp->file.refcnt)) if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp); ppp_destroy_interface(ppp);
} }
...@@ -2676,7 +2677,7 @@ ppp_unregister_channel(struct ppp_channel *chan) ...@@ -2676,7 +2677,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
pch->file.dead = 1; pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait); wake_up_interruptible(&pch->file.rwait);
if (atomic_dec_and_test(&pch->file.refcnt)) if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch); ppp_destroy_channel(pch);
} }
...@@ -3046,7 +3047,7 @@ init_ppp_file(struct ppp_file *pf, int kind) ...@@ -3046,7 +3047,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
pf->kind = kind; pf->kind = kind;
skb_queue_head_init(&pf->xq); skb_queue_head_init(&pf->xq);
skb_queue_head_init(&pf->rq); skb_queue_head_init(&pf->rq);
atomic_set(&pf->refcnt, 1); refcount_set(&pf->refcnt, 1);
init_waitqueue_head(&pf->rwait); init_waitqueue_head(&pf->rwait);
} }
...@@ -3164,7 +3165,7 @@ ppp_connect_channel(struct channel *pch, int unit) ...@@ -3164,7 +3165,7 @@ ppp_connect_channel(struct channel *pch, int unit)
list_add_tail(&pch->clist, &ppp->channels); list_add_tail(&pch->clist, &ppp->channels);
++ppp->n_channels; ++ppp->n_channels;
pch->ppp = ppp; pch->ppp = ppp;
atomic_inc(&ppp->file.refcnt); refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp); ppp_unlock(ppp);
ret = 0; ret = 0;
...@@ -3195,7 +3196,7 @@ ppp_disconnect_channel(struct channel *pch) ...@@ -3195,7 +3196,7 @@ ppp_disconnect_channel(struct channel *pch)
if (--ppp->n_channels == 0) if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait); wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp); ppp_unlock(ppp);
if (atomic_dec_and_test(&ppp->file.refcnt)) if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp); ppp_destroy_interface(ppp);
err = 0; err = 0;
} }
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/refcount.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -72,7 +73,7 @@ struct syncppp { ...@@ -72,7 +73,7 @@ struct syncppp {
struct tasklet_struct tsk; struct tasklet_struct tsk;
atomic_t refcnt; refcount_t refcnt;
struct completion dead_cmp; struct completion dead_cmp;
struct ppp_channel chan; /* interface to generic ppp layer */ struct ppp_channel chan; /* interface to generic ppp layer */
}; };
...@@ -141,14 +142,14 @@ static struct syncppp *sp_get(struct tty_struct *tty) ...@@ -141,14 +142,14 @@ static struct syncppp *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock); read_lock(&disc_data_lock);
ap = tty->disc_data; ap = tty->disc_data;
if (ap != NULL) if (ap != NULL)
atomic_inc(&ap->refcnt); refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock); read_unlock(&disc_data_lock);
return ap; return ap;
} }
static void sp_put(struct syncppp *ap) static void sp_put(struct syncppp *ap)
{ {
if (atomic_dec_and_test(&ap->refcnt)) if (refcount_dec_and_test(&ap->refcnt))
complete(&ap->dead_cmp); complete(&ap->dead_cmp);
} }
...@@ -182,7 +183,7 @@ ppp_sync_open(struct tty_struct *tty) ...@@ -182,7 +183,7 @@ ppp_sync_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue); skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap); tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
atomic_set(&ap->refcnt, 1); refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead_cmp); init_completion(&ap->dead_cmp);
ap->chan.private = ap; ap->chan.private = ap;
...@@ -232,7 +233,7 @@ ppp_sync_close(struct tty_struct *tty) ...@@ -232,7 +233,7 @@ ppp_sync_close(struct tty_struct *tty)
* our channel ops (i.e. ppp_sync_send/ioctl) are in progress * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
* by the time it returns. * by the time it returns.
*/ */
if (!atomic_dec_and_test(&ap->refcnt)) if (!refcount_dec_and_test(&ap->refcnt))
wait_for_completion(&ap->dead_cmp); wait_for_completion(&ap->dead_cmp);
tasklet_kill(&ap->tsk); tasklet_kill(&ap->tsk);
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#define __CONNECTOR_H #define __CONNECTOR_H
#include <linux/atomic.h> #include <linux/refcount.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
...@@ -49,7 +49,7 @@ struct cn_callback_id { ...@@ -49,7 +49,7 @@ struct cn_callback_id {
struct cn_callback_entry { struct cn_callback_entry {
struct list_head callback_entry; struct list_head callback_entry;
atomic_t refcnt; refcount_t refcnt;
struct cn_queue_dev *pdev; struct cn_queue_dev *pdev;
struct cn_callback_id id; struct cn_callback_id id;
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <linux/cpu_rmap.h> #include <linux/cpu_rmap.h>
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
#include <linux/atomic.h> #include <linux/refcount.h>
#include <linux/timecounter.h> #include <linux/timecounter.h>
...@@ -751,7 +751,7 @@ struct mlx4_cq { ...@@ -751,7 +751,7 @@ struct mlx4_cq {
int cqn; int cqn;
unsigned vector; unsigned vector;
atomic_t refcount; refcount_t refcount;
struct completion free; struct completion free;
struct { struct {
struct list_head list; struct list_head list;
...@@ -768,7 +768,7 @@ struct mlx4_qp { ...@@ -768,7 +768,7 @@ struct mlx4_qp {
int qpn; int qpn;
atomic_t refcount; refcount_t refcount;
struct completion free; struct completion free;
u8 usage; u8 usage;
}; };
...@@ -781,7 +781,7 @@ struct mlx4_srq { ...@@ -781,7 +781,7 @@ struct mlx4_srq {
int max_gs; int max_gs;
int wqe_shift; int wqe_shift;
atomic_t refcount; refcount_t refcount;
struct completion free; struct completion free;
}; };
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/refcount.h>
struct mlx5_core_cq { struct mlx5_core_cq {
u32 cqn; u32 cqn;
...@@ -43,7 +43,7 @@ struct mlx5_core_cq { ...@@ -43,7 +43,7 @@ struct mlx5_core_cq {
__be32 *set_ci_db; __be32 *set_ci_db;
__be32 *arm_db; __be32 *arm_db;
struct mlx5_uars_page *uar; struct mlx5_uars_page *uar;
atomic_t refcount; refcount_t refcount;
struct completion free; struct completion free;
unsigned vector; unsigned vector;
unsigned int irqn; unsigned int irqn;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment