net: convert inet_peer.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows us to avoid accidental
refcounter overflows that might lead to use-after-free
situations.

This conversion requires an overall +1 on the whole
refcounting scheme: refcount_t reserves 0 for the "freed"
state, so the tree's formerly implicit reference is now
counted explicitly and every state shifts up by one
(queued for deletion: -1 becomes 0; in the tree with no
users: 0 becomes 1; N external users: N becomes N + 1).
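
The two conditional operations the conversion leans on,
refcount_inc_not_zero() and refcount_dec_if_one(), can be
modelled in userspace with C11 atomics. The sketch below is an
illustration only, not the kernel's lib/refcount.c (which in
addition saturates and WARNs on overflow instead of wrapping):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint refs; } refcount_t;

/* Take a reference only if the object is still alive (refs != 0). */
static bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		if (old == 0)		/* already queued for deletion */
			return false;
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old + 1));

	return true;
}

/* Drop the reference only if we hold the last one (refs == 1). */
static bool refcount_dec_if_one(refcount_t *r)
{
	unsigned int expected = 1;

	return atomic_compare_exchange_strong(&r->refs, &expected, 0);
}

int main(void)
{
	refcount_t peer = { 1 };	/* freshly linked: tree holds the only ref */

	printf("lookup: %d\n", refcount_inc_not_zero(&peer));	/* 1; refs is now 2 */
	printf("gc:     %d\n", refcount_dec_if_one(&peer));	/* 0; still in use */
	return 0;
}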

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1cc9a98b59 (parent 574a602087)
Author:    Reshetova, Elena
Date:      2017-06-30 13:07:54 +03:00
Committer: David S. Miller

2 files changed, 11 insertions(+), 11 deletions(-)

include/net/inetpeer.h

@@ -46,7 +46,7 @@ struct inet_peer {
 		struct rcu_head		gc_rcu;
 	};
 	/*
-	 * Once inet_peer is queued for deletion (refcnt == -1), following field
+	 * Once inet_peer is queued for deletion (refcnt == 0), following field
 	 * is not available: rid
 	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
@@ -60,7 +60,7 @@ struct inet_peer {
 	/* following fields might be frequently dirtied */
 	__u32			dtime;	/* the time of last use of not referenced entries */
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 };

 struct inet_peer_base {

net/ipv4/inetpeer.c

@@ -115,7 +115,7 @@ static void inetpeer_gc_worker(struct work_struct *work)
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

-		if (!atomic_read(&p->refcnt)) {
+		if (refcount_read(&p->refcnt) == 1) {
 			list_del(&p->gc_list);
 			kmem_cache_free(peer_cachep, p);
 		}
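
Note: the flipped test is the overall +1 at work. The gc worker may
free an entry once only the list's own reference remains, which now
reads as 1 where the old code looked for 0. A descriptive gloss on
the changed line:

	/* refcount == 1: only the gc list's reference remains, i.e. the
	 * old scheme's refcnt == 0, "no users, ready to be freed"
	 */
	if (refcount_read(&p->refcnt) == 1) {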
@@ -202,10 +202,11 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
-			 * deleted (refcnt=-1)
+			 * deleted (refcnt=0)
 			 */
-			if (!atomic_add_unless(&u->refcnt, 1, -1))
+			if (!refcount_inc_not_zero(&u->refcnt)) {
 				u = NULL;
+			}
 			return u;
 		}
 		if (cmp == -1)
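
This hunk is the usual conditional-get in an RCU lookup: a reader can
race with deletion, so it must refuse to take a reference once the
entry is dead. atomic_add_unless(&u->refcnt, 1, -1) meant "increment
unless the value is the deletion marker -1"; with the marker moved to
0, refcount_inc_not_zero() says the same thing directly. The generic
shape of the pattern, with illustrative names that are not from this
file:

	rcu_read_lock();
	obj = rcu_dereference(head);		/* may be unlinked concurrently */
	if (obj && !refcount_inc_not_zero(&obj->refcnt))
		obj = NULL;			/* lost the race: entry is dying */
	rcu_read_unlock();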
@@ -382,11 +383,10 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	while (stackptr > stack) {
 		stackptr--;
 		p = rcu_deref_locked(**stackptr, base);
-		if (atomic_read(&p->refcnt) == 0) {
+		if (refcount_read(&p->refcnt) == 1) {
 			smp_rmb();
 			delta = (__u32)jiffies - p->dtime;
-			if (delta >= ttl &&
-			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+			if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
 				p->gc_next = gchead;
 				gchead = p;
 			}
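
The two-line cmpxchg condition was the "claim for deletion" step:
exactly one CPU may move an unused entry to the dead state, so only
one CPU frees it. refcount_dec_if_one() performs the same single
atomic transition under the shifted states, 1 -> 0, and fails if any
other reference appeared in the meantime. Roughly, with claimed as an
illustrative local:

	/* before: claim for deletion by moving 0 -> -1 */
	claimed = atomic_cmpxchg(&p->refcnt, 0, -1) == 0;

	/* after: the same claim under the +1 scheme, 1 -> 0 */
	claimed = refcount_dec_if_one(&p->refcnt);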
@@ -432,7 +432,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 relookup:
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		refcount_inc(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
 		return p;
 	}
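
The unconditional refcount_inc() is safe at this point because
base->lock is held for writing, so the entry cannot reach its dead
state concurrently. The conversion still buys protection here; a
descriptive gloss on the changed line:

	refcount_inc(&p->refcnt);	/* WARNs and saturates if it ever sees 0,
					 * where atomic_inc() would silently wrap
					 */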
@@ -444,7 +444,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
 		p->daddr = *daddr;
-		atomic_set(&p->refcnt, 1);
+		refcount_set(&p->refcnt, 2);
 		atomic_set(&p->rid, 0);
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
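
The initial value moves from 1 to 2 for the same reason as every
other shift in this patch: the fresh entry is linked into the AVL
tree and simultaneously handed back to the caller, and the tree's
share is now counted explicitly. In other words:

	refcount_set(&p->refcnt, 2);	/* one ref for the tree + one for the caller */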
@@ -468,7 +468,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
 	smp_mb__before_atomic();
-	atomic_dec(&p->refcnt);
+	refcount_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
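
inet_putpeer() only drops the caller's reference and never frees; the
actual free happens later, in inet_peer_gc() via refcount_dec_if_one()
or in the gc worker once only the tree's reference remains. A typical
caller, sketched under the assumption that the inet_getpeer_v4()
helper keeps its 2017-era signature (base, v4 address, vif, create),
with net, skb and vif as illustrative locals:

	struct inet_peer *peer;

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (peer) {
		/* ... consult or update peer->rate_tokens and friends ... */
		inet_putpeer(peer);	/* drop our ref; gc reaps the entry later */
	}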