IB/core: Replace ib_umem's offset field with a full address
In order to allow umems that do not pin memory, we need the umem to keep track of its region's address. This makes the offset field redundant, and so this patch removes it.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit 406f9e5fa9
parent 968e78dd96
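Why the offset field is redundant: once the umem records the full start address, the intra-page offset is just the address's low bits, so it can be recomputed on demand instead of being stored. A minimal user-space sketch of that relationship (plain C, 4 KiB pages assumed; not kernel code):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumed page size */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x7f3a12345678UL;	/* arbitrary unaligned address */

	/* Old scheme: compute and store the offset at registration time. */
	unsigned long stored_offset = addr & ~PAGE_MASK;

	/* New scheme: store addr itself; derive the offset when needed. */
	unsigned long derived_offset = addr & (PAGE_SIZE - 1);

	assert(stored_offset == derived_offset);
	printf("offset = 0x%lx\n", derived_offset);	/* prints 0x678 */
	return 0;
}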
@@ -103,7 +103,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->context = context;
 	umem->length = size;
-	umem->offset = addr & ~PAGE_MASK;
+	umem->address = addr;
 	umem->page_size = PAGE_SIZE;
 	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	/*
@@ -132,7 +132,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (!vma_list)
 		umem->hugetlb = 0;
 
-	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
+	npages = ib_umem_num_pages(umem);
 
 	down_write(&current->mm->mmap_sem);
 
@@ -246,7 +246,7 @@ void ib_umem_release(struct ib_umem *umem)
 	if (!mm)
 		goto out;
 
-	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+	diff = ib_umem_num_pages(umem);
 
 	/*
 	 * We may be called with the mm's mmap_sem already held. This
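Both page-count rewrites above are behavior-preserving: ib_umem_start() is page-aligned, so (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT reduces to the old PAGE_ALIGN(length + offset) >> PAGE_SHIFT. A hedged user-space check of the equivalence (plain C, 4 KiB pages assumed; the helper names below are local stand-ins, not the kernel's):

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Old computation, as removed from ib_umem_get()/ib_umem_release(). */
static size_t old_num_pages(unsigned long addr, size_t length)
{
	unsigned long offset = addr & (PAGE_SIZE - 1);
	return PAGE_ALIGN(length + offset) >> PAGE_SHIFT;
}

/* New computation, mirroring ib_umem_num_pages(). */
static size_t new_num_pages(unsigned long addr, size_t length)
{
	unsigned long start = addr & ~(PAGE_SIZE - 1);	/* like ib_umem_start() */
	unsigned long end = PAGE_ALIGN(addr + length);	/* like ib_umem_end() */
	return (end - start) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long addr;
	size_t len;

	/* Compare the two formulas over many unaligned regions. */
	for (addr = 0; addr < 2 * PAGE_SIZE; addr += 37)
		for (len = 1; len < 3 * PAGE_SIZE; len += 41)
			assert(old_num_pages(addr, len) == new_num_pages(addr, len));
	return 0;
}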
@@ -476,7 +476,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				 c2mr->umem->page_size,
 				 i,
 				 length,
-				 c2mr->umem->offset,
+				 ib_umem_offset(c2mr->umem),
 				 &kva,
 				 c2_convert_access(acc),
 				 c2mr);
@@ -399,7 +399,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	pginfo.num_kpages = num_kpages;
 	pginfo.num_hwpages = num_hwpages;
 	pginfo.u.usr.region = e_mr->umem;
-	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
+	pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
 	pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
@@ -214,7 +214,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.user_base = start;
 	mr->mr.iova = virt_addr;
 	mr->mr.length = length;
-	mr->mr.offset = umem->offset;
+	mr->mr.offset = ib_umem_offset(umem);
 	mr->mr.access_flags = mr_access_flags;
 	mr->mr.max_segs = n;
 	mr->umem = umem;
@@ -2341,9 +2341,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
 					" offset = %u, page size = %u.\n",
 					(unsigned long int)start, (unsigned long int)virt, (u32)length,
-					region->offset, region->page_size);
+					ib_umem_offset(region), region->page_size);
 
-			skip_pages = ((u32)region->offset) >> 12;
+			skip_pages = ((u32)ib_umem_offset(region)) >> 12;
 
 			if (ib_copy_from_udata(&req, udata, sizeof(req))) {
 				ib_umem_release(region);
@@ -2408,7 +2408,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			region_length -= skip_pages << 12;
 			for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
 				skip_pages = 0;
-				if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
+				if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
 					goto enough_pages;
 				if ((page_count&0x01FF) == 0) {
 					if (page_count >= 1024 * 512) {
@@ -805,7 +805,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto umem_err;
 
 	mr->hwmr.pbe_size = mr->umem->page_size;
-	mr->hwmr.fbo = mr->umem->offset;
+	mr->hwmr.fbo = ib_umem_offset(mr->umem);
 	mr->hwmr.va = usr_addr;
 	mr->hwmr.len = len;
 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -258,7 +258,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.user_base = start;
 	mr->mr.iova = virt_addr;
 	mr->mr.length = length;
-	mr->mr.offset = umem->offset;
+	mr->mr.offset = ib_umem_offset(umem);
 	mr->mr.access_flags = mr_access_flags;
 	mr->mr.max_segs = n;
 	mr->umem = umem;
@@ -42,7 +42,7 @@ struct ib_ucontext;
 struct ib_umem {
 	struct ib_ucontext *context;
 	size_t length;
-	int offset;
+	unsigned long address;
 	int page_size;
 	int writable;
 	int hugetlb;
@@ -55,6 +55,29 @@ struct ib_umem {
 	int npages;
 };
 
+/* Returns the offset of the umem start relative to the first page. */
+static inline int ib_umem_offset(struct ib_umem *umem)
+{
+	return umem->address & ((unsigned long)umem->page_size - 1);
+}
+
+/* Returns the first page of an ODP umem. */
+static inline unsigned long ib_umem_start(struct ib_umem *umem)
+{
+	return umem->address - ib_umem_offset(umem);
+}
+
+/* Returns the address of the page after the last one of an ODP umem. */
+static inline unsigned long ib_umem_end(struct ib_umem *umem)
+{
+	return PAGE_ALIGN(umem->address + umem->length);
+}
+
+static inline size_t ib_umem_num_pages(struct ib_umem *umem)
+{
+	return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
+}
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
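To make the new helpers concrete, consider a hypothetical region at address 0x1234 with length 0x3000 on 4 KiB pages: the offset is 0x234, the first page starts at 0x1000, the page after the last one begins at 0x5000, and the region spans 4 pages. A small user-space sketch mirroring the inline helpers (stand-in struct and names, assumed values; not kernel code):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Stripped-down stand-in for struct ib_umem (illustration only). */
struct umem {
	unsigned long address;
	size_t length;
};

static unsigned long umem_offset(const struct umem *u)
{
	return u->address & (PAGE_SIZE - 1);	/* like ib_umem_offset() */
}

static unsigned long umem_start(const struct umem *u)
{
	return u->address - umem_offset(u);	/* like ib_umem_start() */
}

static unsigned long umem_end(const struct umem *u)
{
	return PAGE_ALIGN(u->address + u->length);	/* like ib_umem_end() */
}

static size_t umem_num_pages(const struct umem *u)
{
	return (umem_end(u) - umem_start(u)) >> PAGE_SHIFT;
}

int main(void)
{
	struct umem u = { .address = 0x1234, .length = 0x3000 };

	printf("offset    = 0x%lx\n", umem_offset(&u));	/* 0x234 */
	printf("start     = 0x%lx\n", umem_start(&u));	/* 0x1000 */
	printf("end       = 0x%lx\n", umem_end(&u));	/* 0x5000 */
	printf("num_pages = %zu\n", umem_num_pages(&u));	/* 4 */
	return 0;
}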