Commit ad2dcba0 authored by Chuhong Yuan, committed by David S. Miller

cxgb4: smt: Use normal int for refcount

All refcount operations are protected by spinlocks now, so the atomic
counter can be replaced by a plain int.

This patch depends on PATCH 1/2.
Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4a8937b8
...@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void) ...@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED; s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN); memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock); spin_lock_init(&s->smtab[i].lock);
atomic_set(&s->smtab[i].refcnt, 0); s->smtab[i].refcnt = 0;
} }
return s; return s;
} }
...@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac) ...@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end; struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) { for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
if (atomic_read(&e->refcnt) == 0) { if (e->refcnt == 0) {
if (!first_free) if (!first_free)
first_free = e; first_free = e;
} else { } else {
...@@ -97,7 +97,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac) ...@@ -97,7 +97,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
static void t4_smte_free(struct smt_entry *e) static void t4_smte_free(struct smt_entry *e)
{ {
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ if (e->refcnt == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED; e->state = SMT_STATE_UNUSED;
} }
} }
...@@ -110,7 +110,7 @@ static void t4_smte_free(struct smt_entry *e) ...@@ -110,7 +110,7 @@ static void t4_smte_free(struct smt_entry *e)
void cxgb4_smt_release(struct smt_entry *e) void cxgb4_smt_release(struct smt_entry *e)
{ {
spin_lock_bh(&e->lock); spin_lock_bh(&e->lock);
if (atomic_dec_and_test(&e->refcnt)) if ((--e->refcnt) == 0)
t4_smte_free(e); t4_smte_free(e);
spin_unlock_bh(&e->lock); spin_unlock_bh(&e->lock);
} }
...@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf, ...@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac); e = find_or_alloc_smte(s, smac);
if (e) { if (e) {
spin_lock(&e->lock); spin_lock(&e->lock);
if (!atomic_read(&e->refcnt)) { if (!e->refcnt) {
atomic_set(&e->refcnt, 1); e->refcnt = 1;
e->state = SMT_STATE_SWITCHING; e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf; e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN); memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e); write_smt_entry(adap, e);
} else { } else {
atomic_inc(&e->refcnt); ++e->refcnt;
} }
spin_unlock(&e->lock); spin_unlock(&e->lock);
} }
......
...@@ -59,7 +59,7 @@ struct smt_entry { ...@@ -59,7 +59,7 @@ struct smt_entry {
u16 idx; u16 idx;
u16 pfvf; u16 pfvf;
u8 src_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN];
atomic_t refcnt; int refcnt;
spinlock_t lock; /* protect smt entry add,removal */ spinlock_t lock; /* protect smt entry add,removal */
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment