2007-10-22 07:41:48 +08:00
|
|
|
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */
|
|
|
|
|
|
|
|
#ifndef _IOVA_H_
|
|
|
|
#define _IOVA_H_
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
|
|
|
|
/* iova structure */
|
|
|
|
struct iova {
|
|
|
|
struct rb_node node;
|
iommu/iova: introduce per-cpu caching to iova allocation
IOVA allocation has two problems that impede high-throughput I/O.
First, it can do a linear search over the allocated IOVA ranges.
Second, the rbtree spinlock that serializes IOVA allocations becomes
contended.
Address these problems by creating an API for caching allocated IOVA
ranges, so that the IOVA allocator isn't accessed frequently. This
patch adds a per-CPU cache, from which CPUs can alloc/free IOVAs
without taking the rbtree spinlock. The per-CPU caches are backed by
a global cache, to avoid invoking the (linear-time) IOVA allocator
without needing to make the per-CPU cache size excessive. This design
is based on magazines, as described in "Magazines and Vmem: Extending
the Slab Allocator to Many CPUs and Arbitrary Resources" (currently
available at https://www.usenix.org/legacy/event/usenix01/bonwick.html)
Adding caching on top of the existing rbtree allocator maintains the
property that IOVAs are densely packed in the IO virtual address space,
which is important for keeping IOMMU page table usage low.
To keep the cache size reasonable, we bound the IOVA space a CPU can
cache by 32 MiB (we cache a bounded number of IOVA ranges, and only
ranges of size <= 128 KiB). The shared global cache is bounded at
4 MiB of IOVA space.
Signed-off-by: Omer Peleg <omer@cs.technion.ac.il>
[mad@cs.technion.ac.il: rebased, cleaned up and reworded the commit message]
Signed-off-by: Adam Morrison <mad@cs.technion.ac.il>
Reviewed-by: Shaohua Li <shli@fb.com>
Reviewed-by: Ben Serebrin <serebrin@google.com>
[dwmw2: split out VT-d part into a separate patch]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2016-04-20 16:34:11 +08:00
|
|
|
unsigned long pfn_hi; /* Highest allocated pfn */
|
|
|
|
unsigned long pfn_lo; /* Lowest allocated pfn */
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Opaque per-CPU IOVA cache types; defined in the IOVA allocator .c file. */
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
|
|
|
|
|
|
|
|
struct iova_rcache {
|
|
|
|
spinlock_t lock;
|
|
|
|
unsigned long depot_size;
|
|
|
|
struct iova_magazine *depot[MAX_GLOBAL_MAGS];
|
|
|
|
struct iova_cpu_rcache __percpu *cpu_rcaches;
|
2007-10-22 07:41:48 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/* holds all the iova translations for a domain */
|
|
|
|
struct iova_domain {
|
|
|
|
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
|
|
|
|
struct rb_root rbroot; /* iova domain rbtree root */
|
|
|
|
struct rb_node *cached32_node; /* Save last alloced node */
|
2015-01-13 01:51:16 +08:00
|
|
|
unsigned long granule; /* pfn granularity for this domain */
|
2015-01-13 01:51:15 +08:00
|
|
|
unsigned long start_pfn; /* Lower limit for this domain */
|
2008-02-06 17:36:23 +08:00
|
|
|
unsigned long dma_32bit_pfn;
|
iommu/iova: introduce per-cpu caching to iova allocation
IOVA allocation has two problems that impede high-throughput I/O.
First, it can do a linear search over the allocated IOVA ranges.
Second, the rbtree spinlock that serializes IOVA allocations becomes
contended.
Address these problems by creating an API for caching allocated IOVA
ranges, so that the IOVA allocator isn't accessed frequently. This
patch adds a per-CPU cache, from which CPUs can alloc/free IOVAs
without taking the rbtree spinlock. The per-CPU caches are backed by
a global cache, to avoid invoking the (linear-time) IOVA allocator
without needing to make the per-CPU cache size excessive. This design
is based on magazines, as described in "Magazines and Vmem: Extending
the Slab Allocator to Many CPUs and Arbitrary Resources" (currently
available at https://www.usenix.org/legacy/event/usenix01/bonwick.html)
Adding caching on top of the existing rbtree allocator maintains the
property that IOVAs are densely packed in the IO virtual address space,
which is important for keeping IOMMU page table usage low.
To keep the cache size reasonable, we bound the IOVA space a CPU can
cache by 32 MiB (we cache a bounded number of IOVA ranges, and only
ranges of size <= 128 KiB). The shared global cache is bounded at
4 MiB of IOVA space.
Signed-off-by: Omer Peleg <omer@cs.technion.ac.il>
[mad@cs.technion.ac.il: rebased, cleaned up and reworded the commit message]
Signed-off-by: Adam Morrison <mad@cs.technion.ac.il>
Reviewed-by: Shaohua Li <shli@fb.com>
Reviewed-by: Ben Serebrin <serebrin@google.com>
[dwmw2: split out VT-d part into a separate patch]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2016-04-20 16:34:11 +08:00
|
|
|
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
|
2007-10-22 07:41:48 +08:00
|
|
|
};
|
|
|
|
|
2014-07-11 14:19:36 +08:00
|
|
|
static inline unsigned long iova_size(struct iova *iova)
|
|
|
|
{
|
|
|
|
return iova->pfn_hi - iova->pfn_lo + 1;
|
|
|
|
}
|
|
|
|
|
2015-01-13 01:51:16 +08:00
|
|
|
static inline unsigned long iova_shift(struct iova_domain *iovad)
|
|
|
|
{
|
|
|
|
return __ffs(iovad->granule);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long iova_mask(struct iova_domain *iovad)
|
|
|
|
{
|
|
|
|
return iovad->granule - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Byte offset of @iova within its granule. */
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}
|
|
|
|
|
|
|
|
/* Round @size up to a whole number of granules. */
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}
|
|
|
|
|
|
|
|
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
|
|
|
|
{
|
|
|
|
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Convert a DMA address to its containing pfn in this domain. */
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
|
|
|
|
|
2017-03-21 03:11:28 +08:00
|
|
|
#ifdef CONFIG_IOMMU_IOVA
|
2015-07-13 19:31:28 +08:00
|
|
|
/* Real IOVA allocator API; implementations live in the IOVA allocator .c file. */
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn,
			bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
				   struct iova *iova, unsigned long pfn_lo,
				   unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
|
2017-03-21 03:11:28 +08:00
|
|
|
#else
|
|
|
|
static inline int iova_cache_get(void)
|
|
|
|
{
|
|
|
|
return -ENOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: nothing to release. */
static inline void iova_cache_put(void)
{
}
|
|
|
|
|
|
|
|
static inline struct iova *alloc_iova_mem(void)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void free_iova_mem(struct iova *iova)
{
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}
|
|
|
|
|
|
|
|
static inline struct iova *alloc_iova(struct iova_domain *iovad,
|
|
|
|
unsigned long size,
|
|
|
|
unsigned long limit_pfn,
|
|
|
|
bool size_aligned)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: 0 signals allocation failure. */
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
{
	return 0;
}
|
|
|
|
|
|
|
|
static inline struct iova *reserve_iova(struct iova_domain *iovad,
|
|
|
|
unsigned long pfn_lo,
|
|
|
|
unsigned long pfn_hi)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
{
}
|
|
|
|
|
|
|
|
static inline struct iova *find_iova(struct iova_domain *iovad,
|
|
|
|
unsigned long pfn)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void put_iova_domain(struct iova_domain *iovad)
{
}
|
|
|
|
|
|
|
|
static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
|
|
|
|
struct iova *iova,
|
|
|
|
unsigned long pfn_lo,
|
|
|
|
unsigned long pfn_hi)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Stub when CONFIG_IOMMU_IOVA is disabled: no-op. */
static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
|
|
|
|
#endif
|
2007-10-22 07:41:48 +08:00
|
|
|
|
|
|
|
#endif
|