dma-buf: Restart reservation_object_get_fences_rcu() after writes

In order to be completely generic, we have to double check the read
seqlock after acquiring a reference to the fence. If the driver is
allocating fences from a SLAB_DESTROY_BY_RCU, or similar freelist, then
within an RCU grace period a fence may be freed and reallocated. The RCU
read side critical section does not prevent this reallocation, instead
we have to inspect the reservation's seqlock to double check if the
fences have been reassigned as we were acquiring our reference.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20160829070834.22296-7-chris@chris-wilson.co.uk
Author: Chris Wilson, 2016-08-29 08:08:30 +01:00 (committed by Sumit Semwal)
Parent commit: 4be0542073
Commit: fedf54132d

View File

@@ -280,18 +280,24 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
unsigned *pshared_count, unsigned *pshared_count,
struct fence ***pshared) struct fence ***pshared)
{ {
unsigned shared_count = 0; struct fence **shared = NULL;
unsigned retry = 1; struct fence *fence_excl;
struct fence **shared = NULL, *fence_excl = NULL; unsigned int shared_count;
int ret = 0; int ret = 1;
while (retry) { do {
struct reservation_object_list *fobj; struct reservation_object_list *fobj;
unsigned seq; unsigned seq;
unsigned int i;
seq = read_seqcount_begin(&obj->seq); shared_count = i = 0;
rcu_read_lock(); rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !fence_get_rcu(fence_excl))
goto unlock;
fobj = rcu_dereference(obj->fence); fobj = rcu_dereference(obj->fence);
if (fobj) { if (fobj) {
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
} }
ret = -ENOMEM; ret = -ENOMEM;
shared_count = 0;
break; break;
} }
shared = nshared; shared = nshared;
memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count; shared_count = fobj->shared_count;
} else
shared_count = 0;
fence_excl = rcu_dereference(obj->fence_excl);
retry = read_seqcount_retry(&obj->seq, seq);
if (retry)
goto unlock;
if (!fence_excl || fence_get_rcu(fence_excl)) {
unsigned i;
for (i = 0; i < shared_count; ++i) { for (i = 0; i < shared_count; ++i) {
if (fence_get_rcu(shared[i])) shared[i] = rcu_dereference(fobj->shared[i]);
continue; if (!fence_get_rcu(shared[i]))
break;
/* uh oh, refcount failed, abort and retry */
while (i--)
fence_put(shared[i]);
if (fence_excl) {
fence_put(fence_excl);
fence_excl = NULL;
}
retry = 1;
break;
} }
} else }
retry = 1;
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
while (i--)
fence_put(shared[i]);
fence_put(fence_excl);
goto unlock;
}
ret = 0;
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
} } while (ret);
*pshared_count = shared_count;
if (shared_count) if (!shared_count) {
*pshared = shared;
else {
*pshared = NULL;
kfree(shared); kfree(shared);
shared = NULL;
} }
*pshared_count = shared_count;
*pshared = shared;
*pfence_excl = fence_excl; *pfence_excl = fence_excl;
return ret; return ret;