drm/radeon: add error handling to fence_wait_empty_locked

Instead of returning the error, handle it directly, and while at it fix
the comments about the ring lock.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
 			  struct radeon_fence **fences,
 			  bool intr);
@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/* caller must hold ring lock */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
|
Reference in New Issue
Block a user