file_inode(f)->i_mapping is f->f_mapping
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 93c76a3d43
parent affda48410
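The rationale for the conversion: when a file is opened, the VFS sets file->f_mapping from the inode's i_mapping, so going through file_inode() only adds a redundant dereference. Below is a minimal, self-contained user-space sketch of that invariant; the structures and the open_file() helper are simplified stand-ins for illustration, not actual kernel code.

#include <assert.h>

/* Simplified stand-ins for the kernel structures involved. */
struct address_space { int dummy; };

struct inode {
        struct address_space *i_mapping;
};

struct file {
        struct inode *f_inode;
        struct address_space *f_mapping;
};

/* Mirrors file_inode(): just return the file's inode pointer. */
static struct inode *file_inode(const struct file *f)
{
        return f->f_inode;
}

/* Models what the VFS does at open time: f_mapping is copied from the inode. */
static void open_file(struct file *f, struct inode *inode,
                      struct address_space *mapping)
{
        inode->i_mapping = mapping;
        f->f_inode = inode;
        f->f_mapping = inode->i_mapping;
}

int main(void)
{
        struct address_space mapping;
        struct inode inode;
        struct file f;

        open_file(&f, &inode, &mapping);

        /* The identity this commit relies on: both expressions name the
         * same address_space, the second with one less pointer chase. */
        assert(file_inode(&f)->i_mapping == f.f_mapping);
        return 0;
}

With that invariant in place, each hunk below is a pure simplification: the same mapping is used, just reached directly through the struct file.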
@@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,

         obj->dev_addr = DMA_ERROR_CODE;

-        mapping = file_inode(obj->obj.filp)->i_mapping;
+        mapping = obj->obj.filp->f_mapping;
         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

         DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
@@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
         if (sg_alloc_table(sgt, count, GFP_KERNEL))
                 goto free_sgt;

-        mapping = file_inode(dobj->obj.filp)->i_mapping;
+        mapping = dobj->obj.filp->f_mapping;

         for_each_sg(sgt->sgl, sg, count, i) {
                 struct page *page;
@@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
         int i, npages;

         /* This is the shared memory object that backs the GEM resource */
-        mapping = file_inode(obj->filp)->i_mapping;
+        mapping = obj->filp->f_mapping;

         /* We already BUG_ON() for non-page-aligned sizes in
          * drm_gem_object_init(), so we should never hit this unless
@@ -660,7 +660,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
                  * why this is required _and_ expected if you're
                  * going to pin these pages.
                  */
-                mapping = file_inode(obj->filp)->i_mapping;
+                mapping = obj->filp->f_mapping;
                 mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
         }

@@ -151,7 +151,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 static int
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+        struct address_space *mapping = obj->base.filp->f_mapping;
         char *vaddr = obj->phys_handle->vaddr;
         struct sg_table *st;
         struct scatterlist *sg;
@@ -218,7 +218,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                 obj->dirty = 0;

         if (obj->dirty) {
-                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+                struct address_space *mapping = obj->base.filp->f_mapping;
                 char *vaddr = obj->phys_handle->vaddr;
                 int i;

@@ -2155,7 +2155,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
         if (obj->base.filp == NULL)
                 return;

-        mapping = file_inode(obj->base.filp)->i_mapping,
+        mapping = obj->base.filp->f_mapping,
         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }

@@ -2271,7 +2271,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
          *
          * Fail silently without starting the shrinker
          */
-        mapping = file_inode(obj->base.filp)->i_mapping;
+        mapping = obj->base.filp->f_mapping;
         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
         gfp |= __GFP_NORETRY | __GFP_NOWARN;
         sg = st->sgl;
@@ -4522,7 +4522,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                 mask |= __GFP_DMA32;
         }

-        mapping = file_inode(obj->base.filp)->i_mapping;
+        mapping = obj->base.filp->f_mapping;
         mapping_set_gfp_mask(mapping, mask);

         i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -1406,7 +1406,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                 if (ret)
                         goto err_free;

-                mapping = file_inode(obj->filp)->i_mapping;
+                mapping = obj->filp->f_mapping;
                 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
         }

@@ -298,7 +298,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
         swap_storage = ttm->swap_storage;
         BUG_ON(swap_storage == NULL);

-        swap_space = file_inode(swap_storage)->i_mapping;
+        swap_space = swap_storage->f_mapping;

         for (i = 0; i < ttm->num_pages; ++i) {
                 from_page = shmem_read_mapping_page(swap_space, i);
@@ -347,7 +347,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
         } else
                 swap_storage = persistent_swap_storage;

-        swap_space = file_inode(swap_storage)->i_mapping;
+        swap_space = swap_storage->f_mapping;

         for (i = 0; i < ttm->num_pages; ++i) {
                 from_page = ttm->pages[i];
@@ -128,7 +128,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
         struct file *file = iocb->ki_filp;
         struct address_space *mapping = file->f_mapping;
-        struct inode *inode = file_inode(file)->i_mapping->host;
+        struct inode *inode = mapping->host;
         size_t count = iov_iter_count(iter);
         ssize_t ret;

@@ -126,7 +126,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
         struct file *file = iocb->ki_filp;
         struct address_space *mapping = file->f_mapping;
-        struct inode *inode = file_inode(file)->i_mapping->host;
+        struct inode *inode = mapping->host;
         size_t count = iov_iter_count(iter);
         ssize_t ret;

@@ -729,7 +729,7 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
         struct page *page;

         for (;;) {
-                page = read_cache_page(file_inode(desc->file)->i_mapping,
+                page = read_cache_page(desc->file->f_mapping,
                         desc->page_index, (filler_t *)nfs_readdir_filler, desc);
                 if (IS_ERR(page) || grab_page(page))
                         break;
@@ -2426,7 +2426,7 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
 static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
         struct file *file = iocb->ki_filp;
-        struct inode *inode = file_inode(file)->i_mapping->host;
+        struct inode *inode = file->f_mapping->host;
         struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
         get_block_t *get_block;

@@ -3287,7 +3287,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         address = address & huge_page_mask(h);
         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
                         vma->vm_pgoff;
-        mapping = file_inode(vma->vm_file)->i_mapping;
+        mapping = vma->vm_file->f_mapping;

         /*
          * Take the mapping lock for the duration of the table walk. As