drm/virtio: move remaining virtio_gpu_notify calls

Move all remaining virtio_gpu_notify() calls from virtio_gpu_cmd_*
to the callers, for consistency reasons.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20200214125535.26349-7-kraxel@redhat.com
Author: Gerd Hoffmann <kraxel@redhat.com>
Date:   2020-02-14 13:55:35 +01:00
Parent: 234489ea55
Commit: 97452907ec

 5 files changed, 9 insertions(+), 9 deletions(-)
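
For orientation, the calling convention after this patch looks roughly like the
sketch below. This is an illustrative example only, not code from the tree:
example_caller() is a made-up name, and the helpers it uses are the ones touched
by the hunks that follow. The point is that virtio_gpu_cmd_*() now only queues
the command, and the caller decides when to kick the host, so several commands
can be batched before a single notify.

#include "virtgpu_drv.h"        /* virtio_gpu_* types and helpers used below */

/* Illustrative sketch only: example_caller() does not exist in the driver. */
static void example_caller(struct virtio_gpu_device *vgdev,
                           struct virtio_gpu_fpriv *vfpriv,
                           struct virtio_gpu_object_array *objs)
{
        /* queue the command; the helper no longer notifies the host */
        virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);

        /* further virtio_gpu_cmd_*() calls could be queued here */

        /* one explicit kick for everything queued above */
        virtio_gpu_notify(vgdev);
}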

--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c

@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
         virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
                                                objs);
+        virtio_gpu_notify(vgdev);
         return 0;
 }
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
         virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
                                                objs);
+        virtio_gpu_notify(vgdev);
 }
 
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)

--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c

@@ -158,6 +158,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
         virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                               vfpriv->ctx_id, buflist, out_fence);
+        virtio_gpu_notify(vgdev);
         return 0;
 
 out_unresv:
@@ -314,6 +315,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                 (vgdev, vfpriv->ctx_id, offset, args->level,
                  &args->box, objs, fence);
         dma_fence_put(&fence->f);
+        virtio_gpu_notify(vgdev);
         return 0;
 
 err_unlock:
@@ -446,6 +448,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
         /* not in cache - need to talk to hw */
         virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                   &cache_ent);
+        virtio_gpu_notify(vgdev);
 
 copy_exit:
         ret = wait_event_timeout(vgdev->resp_wq,

--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c

@@ -61,6 +61,7 @@ static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
                 return handle;
         handle += 1;
         virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
+        virtio_gpu_notify(vgdev);
         return handle;
 }
@@ -68,6 +69,7 @@ static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id)
 {
         virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+        virtio_gpu_notify(vgdev);
         ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
 }
@@ -93,6 +95,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
         }
         for (i = 0; i < num_capsets; i++) {
                 virtio_gpu_cmd_get_capset_info(vgdev, i);
+                virtio_gpu_notify(vgdev);
                 ret = wait_event_timeout(vgdev->resp_wq,
                                          vgdev->capsets[i].id > 0, 5 * HZ);
                 if (ret == 0) {

--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c

@@ -88,6 +88,7 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
         if (bo->created) {
                 virtio_gpu_cmd_unref_resource(vgdev, bo);
+                virtio_gpu_notify(vgdev);
                 /* completion handler calls virtio_gpu_cleanup_object() */
                 return;
         }

--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -544,7 +544,6 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
         vbuf->resp_cb_data = bo;
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -798,7 +797,6 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
         cmd_p->capset_index = cpu_to_le32(idx);
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
         return 0;
 }
@@ -874,7 +872,6 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
         cmd_p->capset_version = cpu_to_le32(version);
         *cache_p = cache_ent;
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
         return 0;
 }
@@ -922,7 +919,6 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
         strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
         cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
 }
 
 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -937,7 +933,6 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
         cmd_p->hdr.ctx_id = cpu_to_le32(id);
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
 }
 
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -956,7 +951,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
         cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
         cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -975,7 +969,6 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
         cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
         cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
         virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-        virtio_gpu_notify(vgdev);
 }
 
 void
@@ -1067,7 +1060,6 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
         cmd_p->level = cpu_to_le32(level);
         virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-        virtio_gpu_notify(vgdev);
 }
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1091,7 +1083,6 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
         cmd_p->size = cpu_to_le32(data_size);
         virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-        virtio_gpu_notify(vgdev);
 }
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,