author     Dave Airlie <airlied@redhat.com>  2017-06-16 09:54:02 +1000
committer  Dave Airlie <airlied@redhat.com>  2017-06-16 09:56:53 +1000
commit     04d4fb5fa63876d8e7cf67f2788aecfafc6a28a7 (patch)
tree       92aec67d7b5a1359baff1a508d381234f046743e /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent     bfda9aa15317838ddb259406027ef9911a1dffbc (diff)
parent     a1924005a2e9bfcc4e217b4acd0a4f2421969040 (diff)
Merge branch 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux into drm-next
New radeon and amdgpu features for 4.13:
- Lots of Vega10 bug fixes
- Preliminary Raven support
- KIQ support for compute rings
- MEC queue management rework from Andres
- Audio support for DCE6
- SR-IOV improvements
- Improved module parameters for controlling radeon vs amdgpu support
for SI and CIK
- Bug fixes
- General code cleanups
[airlied: the drmP.h header dropped from one file was still needed and the build broke]
* 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux: (362 commits)
drm/amdgpu: Fix compiler warnings
drm/amdgpu: vm_update_ptes remove code duplication
drm/amd/amdgpu: Port VCN over to new SOC15 macros
drm/amd/amdgpu: Port PSP v10.0 over to new SOC15 macros
drm/amd/amdgpu: Port PSP v3.1 over to new SOC15 macros
drm/amd/amdgpu: Port NBIO v7.0 driver over to new SOC15 macros
drm/amd/amdgpu: Port NBIO v6.1 driver over to new SOC15 macros
drm/amd/amdgpu: Port UVD 7.0 over to new SOC15 macros
drm/amd/amdgpu: Port MMHUB over to new SOC15 macros
drm/amd/amdgpu: Cleanup gfxhub read-modify-write patterns
drm/amd/amdgpu: Port GFXHUB over to new SOC15 macros
drm/amd/amdgpu: Add offset variant to SOC15 macros
drm/amd/powerplay: add avfs control for Vega10
drm/amdgpu: add virtual display support for raven
drm/amdgpu/gfx9: fix compute ring doorbell index
drm/amd/amdgpu: Rename KIQ ring to avoid spaces
drm/amd/amdgpu: gfx9 tidy ups (v2)
drm/amdgpu: add contiguous flag in ucode bo create
drm/amdgpu: fix missed gpu info firmware when cache firmware during S3
drm/amdgpu: export test ib debugfs interface
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  42
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 7570f2439a11..3d641e10e6b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
           job->base.sched->name,
           atomic_read(&job->ring->fence_drv.last_seq),
           job->ring->fence_drv.sync_seq);
-        amdgpu_gpu_reset(job->adev);
+
+        if (amdgpu_sriov_vf(job->adev))
+                amdgpu_sriov_gpu_reset(job->adev, job);
+        else
+                amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -57,9 +61,10 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
         (*job)->vm = vm;
         (*job)->ibs = (void *)&(*job)[1];
         (*job)->num_ibs = num_ibs;
-        (*job)->need_pipeline_sync = false;
 
         amdgpu_sync_create(&(*job)->sync);
+        amdgpu_sync_create(&(*job)->dep_sync);
+        amdgpu_sync_create(&(*job)->sched_sync);
 
         return 0;
 }
@@ -98,6 +103,8 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 
         dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
+        amdgpu_sync_free(&job->dep_sync);
+        amdgpu_sync_free(&job->sched_sync);
         kfree(job);
 }
 
@@ -107,6 +114,8 @@ void amdgpu_job_free(struct amdgpu_job *job)
 
         dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
+        amdgpu_sync_free(&job->dep_sync);
+        amdgpu_sync_free(&job->sched_sync);
         kfree(job);
 }
 
@@ -138,11 +147,18 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
         struct amdgpu_job *job = to_amdgpu_job(sched_job);
         struct amdgpu_vm *vm = job->vm;
 
-        struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
+        struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+        int r;
 
+        if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+                r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
+                if (r)
+                        DRM_ERROR("Error adding fence to sync (%d)\n", r);
+        }
+        if (!fence)
+                fence = amdgpu_sync_get_fence(&job->sync);
         while (fence == NULL && vm && !job->vm_id) {
                 struct amdgpu_ring *ring = job->ring;
-                int r;
 
                 r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                       &job->base.s_fence->finished,
@@ -153,9 +169,6 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
                 fence = amdgpu_sync_get_fence(&job->sync);
         }
 
-        if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
-                job->need_pipeline_sync = true;
-
         return fence;
 }
 
@@ -163,6 +176,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
         struct dma_fence *fence = NULL;
         struct amdgpu_job *job;
+        struct amdgpu_fpriv *fpriv = NULL;
         int r;
 
         if (!sched_job) {
@@ -174,10 +188,16 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
         BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
         trace_amdgpu_sched_run_job(job);
-        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
-        if (r)
-                DRM_ERROR("Error scheduling IBs (%d)\n", r);
-
+        if (job->vm)
+                fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
+        /* skip ib schedule when vram is lost */
+        if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+                DRM_ERROR("Skip scheduling IBs!\n");
+        else {
+                r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+                if (r)
+                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
+        }
         /* if gpu reset, hw fence will be replaced here */
         dma_fence_put(job->fence);
         job->fence = dma_fence_get(fence);
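
Note on the reworked amdgpu_job_dependency() hunk above: explicit dependencies now come out of dep_sync first; a dependency the scheduler may already satisfy on its own (amd_sched_dependency_optimized) is additionally recorded in sched_sync so a pipeline sync can still be issued later, which replaces the old per-job need_pipeline_sync flag; only when dep_sync is empty does the code fall back to the implicit sync container. The standalone userspace sketch below only illustrates that ordering under those assumptions; fake_fence, sync_list and dependency_optimized are invented stand-ins, not the amdgpu API.

/*
 * Minimal userspace sketch of the dependency ordering in the hunk above.
 * All names here are invented for illustration; the real code operates on
 * struct amdgpu_sync and dma_fence objects inside the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_fence { int context; int seqno; };

struct sync_list {
        struct fake_fence *fences[8];   /* fixed-size for the sketch, no bounds checks */
        int count;
};

/* Pop the next pending fence, if any (stand-in for amdgpu_sync_get_fence). */
static struct fake_fence *sync_get_fence(struct sync_list *s)
{
        return s->count ? s->fences[--s->count] : NULL;
}

static void sync_add_fence(struct sync_list *s, struct fake_fence *f)
{
        s->fences[s->count++] = f;
}

/* Stand-in for amd_sched_dependency_optimized(): the scheduler can skip an
 * explicit wait when the dependency comes from its own context. */
static bool dependency_optimized(struct fake_fence *f, int sched_context)
{
        return f && f->context == sched_context;
}

/* Mirrors the order of checks: explicit deps first, optimized deps parked
 * for a later pipeline sync, then the implicit sync container. */
static struct fake_fence *job_dependency(struct sync_list *dep_sync,
                                         struct sync_list *sched_sync,
                                         struct sync_list *sync,
                                         int sched_context)
{
        struct fake_fence *fence = sync_get_fence(dep_sync);

        if (dependency_optimized(fence, sched_context))
                sync_add_fence(sched_sync, fence);      /* pipeline sync later */

        if (!fence)
                fence = sync_get_fence(sync);

        return fence;
}

int main(void)
{
        struct sync_list dep_sync = { .count = 0 };
        struct sync_list sched_sync = { .count = 0 };
        struct sync_list sync = { .count = 0 };
        struct fake_fence same_ring  = { .context = 1, .seqno = 42 };
        struct fake_fence other_ring = { .context = 2, .seqno = 7 };

        sync_add_fence(&dep_sync, &same_ring);   /* explicit dependency */
        sync_add_fence(&sync, &other_ring);      /* implicit dependency */

        for (;;) {
                struct fake_fence *f = job_dependency(&dep_sync, &sched_sync,
                                                      &sync, 1);
                if (!f)
                        break;
                printf("wait on fence ctx=%d seq=%d\n", f->context, f->seqno);
        }
        printf("fences held back for pipeline sync: %d\n", sched_sync.count);
        return 0;
}

Built with any C compiler, the sketch pops the explicit dependency first, falls back to the implicit container, and ends with one fence parked in sched_sync, mirroring the flow of the reworked function.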