dma-buf: Remove range-based flush
This patch removes the range-based information used for optimizations in begin_cpu_access and end_cpu_access: no user or implementation relies on range-based flushing. The consensus is that if we ever want something like this again (or something more robust, using 2D/3D sub-range regions), the upcoming dma-buf sync ioctl can provide it.

Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1450820214-12509-3-git-send-email-tiago.vignatti@intel.com
commit 831e9da7dc (parent bfe981a095)
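The sync ioctl referred to in the commit message later landed upstream as DMA_BUF_IOCTL_SYNC. For context only (this interface is not part of this patch), a minimal userspace sketch of bracketing CPU access to an mmap'ed dma-buf fd with it:

#include <linux/dma-buf.h>	/* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */
#include <sys/ioctl.h>

/* Bracket CPU access: call with start != 0 before touching the mmap'ed
 * pages, and with start == 0 once done. Returns the ioctl result.
 */
static int dmabuf_sync_cpu_access(int fd, int start)
{
	struct dma_buf_sync sync = {
		.flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
			 DMA_BUF_SYNC_RW,
	};

	return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
}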
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps:
 
    Interface:
       int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-				   size_t start, size_t len,
 				   enum dma_data_direction direction)
 
    This allows the exporter to ensure that the memory is actually available for
    cpu access - the exporter might need to allocate or swap-in and pin the
    backing storage. The exporter also needs to ensure that cpu access is
-   coherent for the given range and access direction. The range and access
-   direction can be used by the exporter to optimize the cache flushing, i.e.
-   access outside of the range or with a different direction (read instead of
-   write) might return stale or even bogus data (e.g. when the exporter needs to
-   copy the data to temporary storage).
+   coherent for the access direction. The direction can be used by the exporter
+   to optimize the cache flushing, i.e. access with a different direction (read
+   instead of write) might return stale or even bogus data (e.g. when the
+   exporter needs to copy the data to temporary storage).
 
    This step might fail, e.g. in oom conditions.
 
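To illustrate the new prepare step from an importer's point of view, a minimal sketch (my_importer_map is a hypothetical helper; needs <linux/dma-buf.h> and <linux/err.h>):

/* Prepare the buffer for CPU reads, then map its first page. As noted
 * above, begin_cpu_access may fail, e.g. in oom conditions, because the
 * exporter may have to allocate or swap in and pin backing storage.
 */
static void *my_importer_map(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ERR_PTR(ret);

	/* CPU access is now coherent for the DMA_FROM_DEVICE direction. */
	return dma_buf_kmap(dmabuf, 0);
}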
@@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps:
 
 3. Finish access
 
-   When the importer is done accessing the range specified in begin_cpu_access,
-   it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of any dma_buf kmap calls
-   after end_cpu_access is undefined.
+   When the importer is done accessing the CPU, it needs to announce this to
+   the exporter (to facilitate cache flushing and unpinning of any pinned
+   resources). The result of any dma_buf kmap calls after end_cpu_access is
+   undefined.
 
    Interface:
       void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-				  size_t start, size_t len,
 				  enum dma_data_direction dir);
 
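And the matching finish step, again as a hypothetical importer helper (my_importer_unmap); since kmap results are undefined after end_cpu_access, the page is unmapped first:

static void my_importer_unmap(struct dma_buf *dmabuf, void *vaddr)
{
	dma_buf_kunmap(dmabuf, 0, vaddr);

	/* Let the exporter flush caches and unpin any pinned resources. */
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}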
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -539,13 +539,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  * preparations. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:	[in]	buffer to prepare cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
  * @direction:	[in]	length of range for cpu access.
  *
  * Can return negative error values, returns 0 on success.
  */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 			     enum dma_data_direction direction)
 {
 	int ret = 0;
 
@@ -554,8 +552,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
 		return -EINVAL;
 
 	if (dmabuf->ops->begin_cpu_access)
-		ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
-							len, direction);
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
 	return ret;
 }
@@ -567,19 +564,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * actions. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:	[in]	buffer to complete cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
  * @direction:	[in]	length of range for cpu access.
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 			    enum dma_data_direction direction)
 {
 	WARN_ON(!dmabuf);
 
 	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+		dmabuf->ops->end_cpu_access(dmabuf, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -196,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 	return -EINVAL;
 }
 
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -79,7 +79,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)
 
 
 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
@@ -94,7 +94,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
 }
 
 static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	omap_gem_put_pages(obj);
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
 	if (ufb->obj->base.import_attach) {
 		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-					       0, ufb->obj->base.size,
 					       DMA_FROM_DEVICE);
 		if (ret)
 			goto unlock;
@@ -425,7 +424,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
 	if (ufb->obj->base.import_attach) {
 		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-				       0, ufb->obj->base.size,
 				       DMA_FROM_DEVICE);
 	}
 
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1057,8 +1057,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 {
 }
 
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-					size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
@@ -1076,8 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
 	return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-				       size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				       enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
 	if (offset > dma_buf->size || size > dma_buf->size - offset)
 		return -EINVAL;
 
-	ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+	ret = dma_buf_begin_cpu_access(dma_buf, dir);
 	if (ret)
 		return ret;
 
@@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
 		copy_offset = 0;
 	}
 err:
-	dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+	dma_buf_end_cpu_access(dma_buf, dir);
 	return ret;
 }
 
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  *		      caches and allocate backing storage (if not yet done)
- *		      respectively pin the objet into memory.
+ *		      respectively pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  *		 space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
-	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-				enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-			       enum dma_data_direction);
+	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			    enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
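Putting the new dma_buf_ops signatures together, a sketch of an exporter wiring up the range-less hooks (my_exporter_* names are hypothetical, and the mandatory hooks such as map_dma_buf, unmap_dma_buf and release are elided):

static int my_exporter_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	/* Pin backing storage and make CPU caches coherent for direction. */
	return 0;
}

static void my_exporter_end_cpu_access(struct dma_buf *dmabuf,
				       enum dma_data_direction direction)
{
	/* Flush CPU caches and unpin the backing storage. */
}

static const struct dma_buf_ops my_exporter_dma_buf_ops = {
	/* .map_dma_buf = ..., .unmap_dma_buf = ..., .release = ..., */
	.begin_cpu_access = my_exporter_begin_cpu_access,
	.end_cpu_access = my_exporter_end_cpu_access,
};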