dma-buf: Restart reservation_object_get_fences_rcu() after writes
In order to be completely generic, we have to double check the read seqlock after acquiring a reference to the fence. If the driver is allocating fences from a SLAB_DESTROY_BY_RCU (or similar) freelist, then within an RCU grace period a fence may be freed and reallocated. The RCU read-side critical section does not prevent this reallocation; instead we have to inspect the reservation's seqlock to double check whether the fences were reassigned while we were acquiring our reference.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20160829070834.22296-7-chris@chris-wilson.co.uk
parent 4be0542073
commit fedf54132d
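The structure the patch moves the function to is the familiar RCU-plus-seqcount retry loop: snapshot the fence pointers inside rcu_read_lock(), take references with a get-unless-zero style acquire (fence_get_rcu() in the patch), and only trust those references if the object's seqcount has not advanced in the meantime. Below is a minimal sketch of that pattern on an invented object; struct guarded, struct item, item_cache and item_release() are illustrative stand-ins and not code from this series.

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: one RCU-protected slot whose seqcount is bumped by
 * the writer every time the slot is reassigned. */
struct item {
	struct kref ref;
};

struct guarded {
	seqcount_t seq;
	struct item __rcu *slot;
};

static struct kmem_cache *item_cache;	/* assumed: created with SLAB_DESTROY_BY_RCU */

static void item_release(struct kref *ref)
{
	/* Memory is type-stable: it may be handed out again as another
	 * item before the RCU grace period ends. */
	kmem_cache_free(item_cache, container_of(ref, struct item, ref));
}

static struct item *guarded_get_item(struct guarded *g)
{
	struct item *it;
	unsigned int seq;
	bool retry;

	do {
		retry = false;

		rcu_read_lock();
		seq = read_seqcount_begin(&g->seq);

		it = rcu_dereference(g->slot);
		if (it && !kref_get_unless_zero(&it->ref)) {
			it = NULL;	/* object was being freed, retry */
			retry = true;
		}

		/* Even with a reference in hand, the object may have been
		 * freed and recycled for a different user while we acquired
		 * it; only the seqcount tells us whether it still belongs
		 * to @g. */
		if (it && read_seqcount_retry(&g->seq, seq)) {
			kref_put(&it->ref, item_release);
			it = NULL;
			retry = true;
		}

		rcu_read_unlock();
	} while (retry);

	return it;
}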
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -280,18 +280,24 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      unsigned *pshared_count,
 				      struct fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct fence **shared = NULL;
+	struct fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
 		unsigned seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
+
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				fence_put(shared[i]);
+			fence_put(fence_excl);
+			goto unlock;
+		}
 
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
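For reference, this is how a caller might consume the snapshot the reworked function returns: on success the caller owns one reference to every fence handed back, plus the krealloc()ed shared array, and must release them all. example_collect_fences() below is a hypothetical caller written for illustration only, not part of this patch.

#include <linux/fence.h>
#include <linux/reservation.h>
#include <linux/slab.h>

/* Hypothetical caller: snapshot all fences on @obj, then drop the
 * references and the array the snapshot handed us. */
static int example_collect_fences(struct reservation_object *obj)
{
	struct fence *excl;
	struct fence **shared;
	unsigned int shared_count, i;
	int ret;

	ret = reservation_object_get_fences_rcu(obj, &excl, &shared_count,
						&shared);
	if (ret)
		return ret;	/* only fails with -ENOMEM */

	/* ... wait on or inspect the snapshot here ... */

	for (i = 0; i < shared_count; i++)
		fence_put(shared[i]);
	kfree(shared);		/* NULL when shared_count was 0 */
	fence_put(excl);	/* fence_put() ignores a NULL fence */

	return 0;
}

Since kfree(NULL) and fence_put(NULL) are both no-ops, the cleanup path does not need to special-case an empty snapshot.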