author     Andrey Grodzovsky <andrey.grodzovsky@amd.com>   2017-10-10 16:50:17 -0400
committer  Alex Deucher <alexander.deucher@amd.com>        2017-10-19 15:26:59 -0400
commit     0ae94444c08a0adf2fab4aab26be0646ee445a19 (patch)
tree       d5bc47553183dfe5ec29a23fe3e7d78f1906fc2d
parent     ad864d243826cedc53404a1c0db7d1e38ddceb84 (diff)
download   linux-0ae94444c08a0adf2fab4aab26be0646ee445a19.tar.gz
           linux-0ae94444c08a0adf2fab4aab26be0646ee445a19.tar.xz
drm/amdgpu: Move old fence waiting before reservation lock is acquired v2
Helps avoid a deadlock during GPU reset. A mutex is added to amdgpu_ctx to preserve the order of fences on a ring.

v2: Put the waiting logic in a separate function in amdgpu_ctx.c.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
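For context, a minimal userspace-style sketch of the ordering idea follows. Each context keeps a small ring buffer of per-ring fences; before a submission may reuse a slot, the fence that previously occupied it must have signaled, and with this patch that wait happens before any reservation locks are taken, so the GPU-reset path (which needs those locks) cannot deadlock against a submission blocked on an unsignaled fence. The names fake_fence, fake_fence_wait and SCHED_JOBS below are illustrative stand-ins for this sketch, not the real dma_fence/amdgpu API.

    #include <stdint.h>
    #include <stdbool.h>

    #define SCHED_JOBS 16               /* power of two, like amdgpu_sched_jobs */

    /* Stand-in for struct dma_fence: just a "signaled" flag here. */
    struct fake_fence {
        bool signaled;
    };

    /* Stand-in for dma_fence_wait_timeout(): real code would sleep here. */
    static int fake_fence_wait(struct fake_fence *f)
    {
        return f->signaled ? 0 : -1;
    }

    struct ctx_ring {
        uint64_t sequence;              /* next sequence number to hand out */
        struct fake_fence *fences[SCHED_JOBS];
    };

    /*
     * Wait for the fence whose slot the next submission will overwrite.
     * Called before any reservation locks are held, so a stuck GPU (and
     * the reset path that wants those locks) cannot deadlock against a
     * submission that is blocked waiting on an unsignaled fence.
     */
    static int wait_prev_fence(struct ctx_ring *cring)
    {
        unsigned idx = cring->sequence & (SCHED_JOBS - 1);
        struct fake_fence *other = cring->fences[idx];

        if (other)
            return fake_fence_wait(other);
        return 0;
    }

    int main(void)
    {
        struct ctx_ring cring = { .sequence = 0 };
        return wait_prev_fence(&cring); /* empty slot: nothing to wait for */
    }

In the patch itself this role is played by amdgpu_ctx_wait_prev_fence(), called from amdgpu_cs_ib_fill() before the reservation lock is acquired, while amdgpu_ctx_add_fence() now only checks (BUG_ON) that the slot's old fence has already signaled.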
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  | 30
3 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 951c8db01412..76033e2cdba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -738,6 +738,7 @@ struct amdgpu_ctx {
bool preamble_presented;
enum amd_sched_priority init_priority;
enum amd_sched_priority override_priority;
+ struct mutex lock;
};
struct amdgpu_ctx_mgr {
@@ -760,9 +761,12 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
/*
* file private structure
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9166d5e1e557..5de092eab0fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -90,6 +90,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -737,8 +739,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
- if (parser->ctx)
+ if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
+ }
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
@@ -895,9 +899,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
r = amdgpu_ring_parse_cs(ring, p, j);
if (r)
return r;
-
}
-
j++;
}
@@ -985,7 +987,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return 0;
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a78b03f65c69..4309820658c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -67,6 +67,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (!ctx->fences)
return -ENOMEM;
+ mutex_init(&ctx->lock);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
@@ -126,6 +128,8 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+ mutex_destroy(&ctx->lock);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -296,12 +300,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
- if (other) {
- signed long r;
- r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
- }
+ if (other)
+ BUG_ON(!dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -372,6 +372,24 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
}
}
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+ unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+ struct dma_fence *other = cring->fences[idx];
+
+ if (other) {
+ signed long r;
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);