forked from luck/tmp_suning_uos_patched
6e58e79db8
generic_file_aio_read() was looping over the target iovec, with loop over (source) pages nested inside that. Just set an iov_iter up and pass *that* to do_generic_file_aio_read(). With copy_page_to_iter() doing all work of mapping and copying a page to iovec and advancing iov_iter. Switch shmem_file_aio_read() to the same and kill file_read_actor(), while we are at it. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
94 lines
2.5 KiB
C
94 lines
2.5 KiB
C
/*
|
|
* Berkeley style UIO structures - Alan Cox 1994.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*/
|
|
#ifndef __LINUX_UIO_H
|
|
#define __LINUX_UIO_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <uapi/linux/uio.h>
|
|
|
|
struct page;
|
|
|
|
/*
 * Kernel-space analogue of struct iovec: same base/length pair, but
 * iov_base refers to kernel memory, never a userland address.
 */
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
|
|
|
|
/*
 * Iterator state over an array of iovecs.  Tracks the current segment
 * (iov), how far into that segment we have advanced (iov_offset), and
 * the total bytes still to be transferred (count).
 */
struct iov_iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* number of segments left, starting at *iov */
	size_t iov_offset;		/* bytes already consumed within *iov */
	size_t count;			/* total bytes remaining across all segments */
};
|
|
|
|
/*
 * Total number of bytes covered by an iovec array.
 *
 * NOTE that it is not safe to call this until every segment length has
 * been validated: the individual lengths can overflow a size_t when
 * summed, so unvalidated input may wrap silently.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	const struct iovec *end = iov + nr_segs;
	size_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}
	return total;
}
|
|
|
|
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
|
|
{
|
|
return (struct iovec) {
|
|
.iov_base = iter->iov->iov_base + iter->iov_offset,
|
|
.iov_len = min(iter->count,
|
|
iter->iov->iov_len - iter->iov_offset),
|
|
};
|
|
}
|
|
|
|
/*
 * Walk an iov_iter one segment at a time.  Each pass sets @iov to the
 * current (offset-adjusted, count-clamped) segment via iov_iter_iovec()
 * — the comma-operator `, 1` keeps the assignment from terminating the
 * loop — then advances the iterator past it.  Stops when (iter).count
 * hits zero.  @iter is modified; pass a copy if the caller needs the
 * original position afterwards.
 */
#define iov_for_each(iov, iter, start)				\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))
|
|
|
|
/* Trim an iovec array to cover at most @to bytes; returns the new segment
 * count (presumably ≤ @nr_segs — see the definition for exact semantics). */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);

/* Copy user data described by @i into @page at @offset; the _atomic
 * variant is the one usable where sleeping is not allowed (NOTE(review):
 * inferred from the kernel's usual naming — confirm at the definitions). */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
/* Move the iterator forward by @bytes, crossing segment boundaries. */
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
/* Bytes available in just the current segment. */
size_t iov_iter_single_seg_count(const struct iov_iter *i);
/* Map and copy @bytes of @page (from @offset) out to the iovecs,
 * advancing @i — does all the per-segment work for file-read paths. */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
		struct iov_iter *i);
|
|
|
|
static inline void iov_iter_init(struct iov_iter *i,
|
|
const struct iovec *iov, unsigned long nr_segs,
|
|
size_t count, size_t written)
|
|
{
|
|
i->iov = iov;
|
|
i->nr_segs = nr_segs;
|
|
i->iov_offset = 0;
|
|
i->count = count + written;
|
|
|
|
iov_iter_advance(i, written);
|
|
}
|
|
|
|
/* Total number of bytes remaining in the iterator. */
static inline size_t iov_iter_count(struct iov_iter *i)
{
	return i->count;
}
|
|
|
|
/* Copy @len bytes between kernel buffer @kdata and the iovec @iov.
 * @iov is non-const, so the array is presumably advanced in place as it
 * is consumed; return convention (0 vs. negative on fault) is not
 * visible here — check the definitions before relying on it. */
int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
|
|
|
|
#endif
|