drm/amdgpu: remove process_job callback from the scheduler
Just free the resources immediately after submitting the job.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
commit 1886d1a9ca
parent 258f3f99d5
committed by Alex Deucher
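The change described in the commit message moves all cleanup into the backend's run_job callback: the job's resources are freed right after the IBs are submitted, and the scheduler core only consumes the returned fence instead of invoking a separate process_job step. As a minimal sketch of that contract (illustrative only, it restates what the amdgpu hunk below does; example_run_job is a placeholder name and error reporting is trimmed):

/*
 * Sketch of a run_job backend under the new contract: submit the IBs,
 * free the job's resources immediately, return the hardware fence
 * (or NULL on error).
 */
static struct fence *example_run_job(struct amd_sched_job *job)
{
	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
	struct amdgpu_fence *fence = NULL;
	int r;

	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev, sched_job->num_ibs,
			       sched_job->ibs, sched_job->base.owner);
	if (!r)
		fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);

	/* Clean up right away; there is no later process_job callback. */
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	fence_put(&sched_job->base.s_fence->base);
	kfree(sched_job);

	return fence ? &fence->base : NULL;
}

With cleanup folded into run_job, the scheduler's main loop (the amd_sched_main hunk further down) only needs the returned fence to arm its completion callback via fence_add_callback().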
@@ -35,8 +35,8 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
 
 static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
+	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_job *sched_job;
-	struct amdgpu_fence *fence;
 	int r;
 
 	if (!job) {
@@ -49,41 +49,26 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 			       sched_job->num_ibs,
 			       sched_job->ibs,
 			       sched_job->base.owner);
-	if (r)
+	if (r) {
+		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
+	}
+
 	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
+err:
 	if (sched_job->free_job)
 		sched_job->free_job(sched_job);
 
 	mutex_unlock(&sched_job->job_lock);
-	return &fence->base;
-
-err:
-	DRM_ERROR("Run job error\n");
-	mutex_unlock(&sched_job->job_lock);
-	job->sched->ops->process_job(job);
-	return NULL;
-}
-
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
-	struct amdgpu_job *sched_job;
-
-	if (!job) {
-		DRM_ERROR("job is null\n");
-		return;
-	}
-	sched_job = (struct amdgpu_job *)job;
-	/* after processing job, free memory */
 	fence_put(&sched_job->base.s_fence->base);
 	kfree(sched_job);
+	return fence ? &fence->base : NULL;
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_sched_dependency,
 	.run_job = amdgpu_sched_run_job,
-	.process_job = amdgpu_sched_process_job
 };
 
 int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -354,7 +354,6 @@ static int amd_sched_main(void *param)
 		s_fence = job->s_fence;
 		atomic_inc(&sched->hw_rq_count);
 		fence = sched->ops->run_job(job);
-		sched->ops->process_job(job);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
@@ -93,7 +93,6 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
 	struct fence *(*dependency)(struct amd_sched_job *job);
 	struct fence *(*run_job)(struct amd_sched_job *job);
-	void (*process_job)(struct amd_sched_job *job);
 };
 
 /**