Skip to content

Commit

Permalink
drm/scheduler: remove full_recover from drm_sched_start
Browse files Browse the repository at this point in the history
This was basically just another one of amdgpu's hacks. The parameter
allowed restarting the scheduler without turning fence signaling on
again.

That this is absolutely not a good idea should be obvious by now,
since the fences will then just sit there and never signal.

While at it, clean up the code a bit.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Matthew Brost <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
  • Loading branch information
ChristianKoenigAMD authored and boogieeeee committed Dec 21, 2024
1 parent e35577d commit d4c1137
Show file tree
Hide file tree
Showing 8 changed files with 16 additions and 25 deletions.
4 changes: 2 additions & 2 deletions drivers/gpu/drm/etnaviv/etnaviv_sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,12 +71,12 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job

drm_sched_resubmit_jobs(&gpu->sched);

drm_sched_start(&gpu->sched, true);
drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}

Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/lima/lima_sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
lima_pm_idle(ldev);

drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
drm_sched_start(&pipe->base);

return DRM_GPU_SCHED_STAT_NOMINAL;
}
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/panfrost/panfrost_job.c
Original file line number Diff line number Diff line change
Expand Up @@ -696,7 +696,7 @@ panfrost_reset(struct panfrost_device *pfdev,

/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_start(&pfdev->js->queue[i].sched, true);
drm_sched_start(&pfdev->js->queue[i].sched);

/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/panthor/panthor_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -826,7 +826,7 @@ static void panthor_vm_stop(struct panthor_vm *vm)

static void panthor_vm_start(struct panthor_vm *vm)
{
drm_sched_start(&vm->sched, true);
drm_sched_start(&vm->sched);
}

/**
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/panthor/panthor_sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -2538,7 +2538,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);

drm_sched_start(&queue->scheduler, true);
drm_sched_start(&queue->scheduler);
}

static void panthor_group_stop(struct panthor_group *group)
Expand Down
25 changes: 8 additions & 17 deletions drivers/gpu/drm/scheduler/sched_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -674,13 +674,11 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
* @full_recovery: proceed with complete sched restart
*
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
void drm_sched_start(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
int r;

/*
* Locking the list is not required here as the sched thread is parked
Expand All @@ -692,24 +690,17 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)

atomic_add(s_job->credits, &sched->credit_count);

if (!full_recovery)
if (!fence) {
drm_sched_job_done(s_job, -ECANCELED);
continue;
}

if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(s_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
r);
} else
drm_sched_job_done(s_job, -ECANCELED);
if (dma_fence_add_callback(fence, &s_job->cb,
drm_sched_job_done_cb))
drm_sched_job_done(s_job, fence->error);
}

if (full_recovery)
drm_sched_start_timeout_unlocked(sched);

drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/v3d/v3d_sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)

/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
drm_sched_start(&v3d->queue[q].sched, true);
drm_sched_start(&v3d->queue[q].sched);
}

mutex_unlock(&v3d->reset_lock);
Expand Down
2 changes: 1 addition & 1 deletion include/drm/gpu_scheduler.h
Original file line number Diff line number Diff line change
Expand Up @@ -579,7 +579,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
Expand Down

0 comments on commit d4c1137

Please sign in to comment.