Merge branch 'iommu/guest-msi' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/core
commit 93fa6cf60a
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -12,3 +12,15 @@ Description:	/sys/kernel/iommu_groups/ contains a number of sub-
 		file if the IOMMU driver has chosen to register a more
 		common name for the group.
 Users:
+
+What:		/sys/kernel/iommu_groups/reserved_regions
+Date:		January 2017
+KernelVersion:	v4.11
+Contact:	Eric Auger <eric.auger@redhat.com>
+Description:	/sys/kernel/iommu_groups/reserved_regions list IOVA
+		regions that are reserved. Not necessarily all
+		reserved regions are listed. This is typically used to
+		output direct-mapped, MSI, non mappable regions. Each
+		region is described on a single line: the 1st field is
+		the base IOVA, the second is the end IOVA and the third
+		field describes the type of the region.
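For illustration, not part of the commit: with the software MSI window the SMMU drivers below register (base 0x8000000, length 0x100000), a group's reserved_regions file would be expected to contain a line such as

	0x0000000008000000 0x00000000080fffff msi

following the base-IOVA / end-IOVA / type format described above; the exact values are hypothetical and depend on the IOMMU driver and platform.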
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3161,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 	return false;
 }
 
-static void amd_iommu_get_dm_regions(struct device *dev,
-				     struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+				       struct list_head *head)
 {
+	struct iommu_resv_region *region;
 	struct unity_map_entry *entry;
 	int devid;
 
@@ -3172,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-		struct iommu_dm_region *region;
+		size_t length;
+		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
-		region = kzalloc(sizeof(*region), GFP_KERNEL);
+		length = entry->address_end - entry->address_start;
+		if (entry->prot & IOMMU_PROT_IR)
+			prot |= IOMMU_READ;
+		if (entry->prot & IOMMU_PROT_IW)
+			prot |= IOMMU_WRITE;
+
+		region = iommu_alloc_resv_region(entry->address_start,
+						 length, prot,
+						 IOMMU_RESV_DIRECT);
 		if (!region) {
 			pr_err("Out of memory allocating dm-regions for %s\n",
 				dev_name(dev));
 			return;
 		}
-
-		region->start = entry->address_start;
-		region->length = entry->address_end - entry->address_start;
-		if (entry->prot & IOMMU_PROT_IR)
-			region->prot |= IOMMU_READ;
-		if (entry->prot & IOMMU_PROT_IW)
-			region->prot |= IOMMU_WRITE;
-
 		list_add_tail(&region->list, head);
 	}
 
+	region = iommu_alloc_resv_region(MSI_RANGE_START,
+					 MSI_RANGE_END - MSI_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
+
+	region = iommu_alloc_resv_region(HT_RANGE_START,
+					 HT_RANGE_END - HT_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
 				     struct list_head *head)
 {
-	struct iommu_dm_region *entry, *next;
+	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list)
 		kfree(entry);
 }
 
-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
 				      struct iommu_domain *domain,
-				      struct iommu_dm_region *region)
+				      struct iommu_resv_region *region)
 {
 	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
 	unsigned long start, end;
@@ -3230,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
 	.device_group = amd_iommu_device_group,
-	.get_dm_regions = amd_iommu_get_dm_regions,
-	.put_dm_regions = amd_iommu_put_dm_regions,
-	.apply_dm_region = amd_iommu_apply_dm_region,
+	.get_resv_regions = amd_iommu_get_resv_regions,
+	.put_resv_regions = amd_iommu_put_resv_regions,
+	.apply_resv_region = amd_iommu_apply_resv_region,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -412,6 +412,9 @@
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US	100
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -1372,8 +1375,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1883,6 +1884,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1898,6 +1922,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -281,6 +281,9 @@ enum arm_smmu_s2cr_privcfg {
 
 #define FSYNR0_WNR			(1 << 4)
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
@@ -1371,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		 * requests.
 		 */
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1549,6 +1550,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1564,6 +1588,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
-struct iommu_dma_cookie {
-	struct iova_domain	iovad;
-	struct list_head	msi_page_list;
-	spinlock_t		msi_lock;
+enum iommu_dma_cookie_type {
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
 };
 
+struct iommu_dma_cookie {
+	enum iommu_dma_cookie_type	type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain	iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t		msi_iova;
+	};
+	struct list_head		msi_page_list;
+	spinlock_t			msi_lock;
+};
+
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return cookie->iovad.granule;
+	return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return &cookie->iovad;
+	return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+	struct iommu_dma_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie) {
+		spin_lock_init(&cookie->msi_lock);
+		INIT_LIST_HEAD(&cookie->msi_page_list);
+		cookie->type = type;
+	}
+	return cookie;
 }
 
 int iommu_dma_init(void)
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-	struct iommu_dma_cookie *cookie;
-
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-	if (!cookie)
+	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+	if (!domain->iova_cookie)
 		return -ENOMEM;
 
-	spin_lock_init(&cookie->msi_lock);
-	INIT_LIST_HEAD(&cookie->msi_page_list);
-	domain->iova_cookie = cookie;
 	return 0;
 }
 EXPORT_SYMBOL(iommu_get_dma_cookie);
 
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	struct iommu_dma_cookie *cookie;
+
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
+	if (domain->iova_cookie)
+		return -EEXIST;
+
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
+	if (!cookie)
+		return -ENOMEM;
+
+	cookie->msi_iova = base;
+	domain->iova_cookie = cookie;
+	return 0;
+}
+EXPORT_SYMBOL(iommu_get_msi_cookie);
+
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!cookie)
 		return;
 
-	if (cookie->iovad.granule)
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = cookie_iovad(domain);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
 
-	if (!iovad)
-		return -ENODEV;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -EINVAL;
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+	size_t size = cookie_msi_granule(cookie);
 
-	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	msi_addr &= ~(phys_addr_t)(size - 1);
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
@@ -675,13 +740,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-	if (!iova)
-		goto out_free_page;
-
 	msi_page->phys = msi_addr;
-	msi_page->iova = iova_dma_addr(iovad, iova);
-	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+	if (iovad) {
+		iova = __alloc_iova(domain, size, dma_get_mask(dev));
+		if (!iova)
+			goto out_free_page;
+		msi_page->iova = iova_dma_addr(iovad, iova);
+	} else {
+		msi_page->iova = cookie->msi_iova;
+		cookie->msi_iova += size;
+	}
+
+	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
 		goto out_free_iova;
 
 	INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +759,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	__free_iova(iovad, iova);
+	if (iovad)
+		__free_iova(iovad, iova);
+	else
+		cookie->msi_iova -= size;
 out_free_page:
 	kfree(msi_page);
 	return NULL;
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 		msg->data = ~0U;
 	} else {
 		msg->address_hi = upper_32_bits(msi_page->iova);
-		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo &= cookie_msi_granule(cookie) - 1;
 		msg->address_lo += lower_32_bits(msi_page->iova);
 	}
 }
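For illustration, not part of the commit: a minimal sketch of how a caller that manages its own IOVA space (as the VFIO hunk further down does) might consume the new MSI cookie interface; the function name and doorbell base below are hypothetical.

	/* Sketch only: prepare a caller-owned unmanaged domain for MSI remapping. */
	static int example_prepare_msi(struct iommu_domain *domain)
	{
		/* Hypothetical base of an IOVA region the caller has reserved */
		dma_addr_t base = 0x8000000;
		int ret;

		/* Fails with -EINVAL unless the domain is IOMMU_DOMAIN_UNMANAGED */
		ret = iommu_get_msi_cookie(domain, base);
		if (ret)
			return ret;

		/*
		 * iommu_dma_map_msi_msg() will now allocate doorbell pages
		 * linearly from base upwards, PAGE_SIZE at a time.
		 */
		return 0;
	}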
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
 	u64	end_address;		/* reserved end address */
 	struct dmar_dev_scope *devices;	/* target devices */
 	int	devices_cnt;		/* target device count */
+	struct iommu_resv_region *resv;	/* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -4246,27 +4247,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
+	int prot = DMA_PTE_READ|DMA_PTE_WRITE;
 	struct dmar_rmrr_unit *rmrru;
+	size_t length;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
-		return -ENOMEM;
+		goto out;
 
 	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	length = rmrr->end_address - rmrr->base_address + 1;
+	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+					      IOMMU_RESV_DIRECT);
+	if (!rmrru->resv)
+		goto free_rmrru;
+
 	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
-	if (rmrru->devices_cnt && rmrru->devices == NULL) {
-		kfree(rmrru);
-		return -ENOMEM;
-	}
+	if (rmrru->devices_cnt && rmrru->devices == NULL)
+		goto free_all;
 
 	list_add(&rmrru->list, &dmar_rmrr_units);
 
 	return 0;
+free_all:
+	kfree(rmrru->resv);
+free_rmrru:
+	kfree(rmrru);
+out:
+	return -ENOMEM;
 }
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4480,6 +4494,7 @@ static void intel_iommu_free_dmars(void)
 	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
 		list_del(&rmrru->list);
 		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru->resv);
 		kfree(rmrru);
 	}
 
@@ -5203,6 +5218,45 @@ static void intel_iommu_remove_device(struct device *dev)
 	iommu_device_unlink(iommu->iommu_dev, dev);
 }
 
+static void intel_iommu_get_resv_regions(struct device *device,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *reg;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *i_dev;
+	int i;
+
+	rcu_read_lock();
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, i_dev) {
+			if (i_dev != device)
+				continue;
+
+			list_add_tail(&rmrr->resv->list, head);
+		}
+	}
+	rcu_read_unlock();
+
+	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+				      0, IOMMU_RESV_RESERVED);
+	if (!reg)
+		return;
+	list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list) {
+		if (entry->type == IOMMU_RESV_RESERVED)
+			kfree(entry);
+	}
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 #define MAX_NR_PASID_BITS	(20)
 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
@@ -5333,19 +5387,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
 static const struct iommu_ops intel_iommu_ops = {
 	.capable	= intel_iommu_capable,
 	.domain_alloc	= intel_iommu_domain_alloc,
 	.domain_free	= intel_iommu_domain_free,
 	.attach_dev	= intel_iommu_attach_device,
 	.detach_dev	= intel_iommu_detach_device,
 	.map		= intel_iommu_map,
 	.unmap		= intel_iommu_unmap,
 	.map_sg		= default_iommu_map_sg,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.add_device	= intel_iommu_add_device,
 	.remove_device	= intel_iommu_remove_device,
+	.get_resv_regions = intel_iommu_get_resv_regions,
+	.put_resv_regions = intel_iommu_put_resv_regions,
 	.device_group	= pci_device_group,
 	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
 			 const char *buf, size_t count);
 };
 
+static const char * const iommu_group_resv_type_string[] = {
+	[IOMMU_RESV_DIRECT]	= "direct",
+	[IOMMU_RESV_RESERVED]	= "reserved",
+	[IOMMU_RESV_MSI]	= "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
 	__ATTR(_name, _mode, _show, _store)
@@ -133,8 +139,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 	return sprintf(buf, "%s\n", group->name);
 }
 
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+				    struct list_head *regions)
+{
+	struct iommu_resv_region *region;
+	phys_addr_t start = new->start;
+	phys_addr_t end = new->start + new->length - 1;
+	struct list_head *pos = regions->next;
+
+	while (pos != regions) {
+		struct iommu_resv_region *entry =
+			list_entry(pos, struct iommu_resv_region, list);
+		phys_addr_t a = entry->start;
+		phys_addr_t b = entry->start + entry->length - 1;
+		int type = entry->type;
+
+		if (end < a) {
+			goto insert;
+		} else if (start > b) {
+			pos = pos->next;
+		} else if ((start >= a) && (end <= b)) {
+			if (new->type == type)
+				goto done;
+			else
+				pos = pos->next;
+		} else {
+			if (new->type == type) {
+				phys_addr_t new_start = min(a, start);
+				phys_addr_t new_end = max(b, end);
+
+				list_del(&entry->list);
+				entry->start = new_start;
+				entry->length = new_end - new_start + 1;
+				iommu_insert_resv_region(entry, regions);
+			} else {
+				pos = pos->next;
+			}
+		}
+	}
+insert:
+	region = iommu_alloc_resv_region(new->start, new->length,
+					 new->prot, new->type);
+	if (!region)
+		return -ENOMEM;
+
+	list_add_tail(&region->list, pos);
+done:
+	return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+				 struct list_head *group_resv_regions)
+{
+	struct iommu_resv_region *entry;
+	int ret;
+
+	list_for_each_entry(entry, dev_resv_regions, list) {
+		ret = iommu_insert_resv_region(entry, group_resv_regions);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+				 struct list_head *head)
+{
+	struct iommu_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		struct list_head dev_resv_regions;
+
+		INIT_LIST_HEAD(&dev_resv_regions);
+		iommu_get_resv_regions(device->dev, &dev_resv_regions);
+		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+		iommu_put_resv_regions(device->dev, &dev_resv_regions);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+					     char *buf)
+{
+	struct iommu_resv_region *region, *next;
+	struct list_head group_resv_regions;
+	char *str = buf;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+
+	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+			       (long long int)region->start,
+			       (long long int)(region->start +
+						region->length - 1),
+			       iommu_group_resv_type_string[region->type]);
+		kfree(region);
+	}
+
+	return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+			iommu_group_show_resv_regions, NULL);
 
 static void iommu_group_release(struct kobject *kobj)
 {
 	struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +341,11 @@ struct iommu_group *iommu_group_alloc(void)
 	 */
 	kobject_put(&group->kobj);
 
+	ret = iommu_group_create_file(group,
+				      &iommu_group_attr_reserved_regions);
+	if (ret)
+		return ERR_PTR(ret);
+
 	pr_debug("Allocated group %d\n", group->id);
 
 	return group;
@@ -318,7 +452,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 					      struct device *dev)
 {
 	struct iommu_domain *domain = group->default_domain;
-	struct iommu_dm_region *entry;
+	struct iommu_resv_region *entry;
 	struct list_head mappings;
 	unsigned long pg_size;
 	int ret = 0;
@@ -331,18 +465,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
-	iommu_get_dm_regions(dev, &mappings);
+	iommu_get_resv_regions(dev, &mappings);
 
 	/* We need to consider overlapping regions for different devices */
 	list_for_each_entry(entry, &mappings, list) {
 		dma_addr_t start, end, addr;
 
-		if (domain->ops->apply_dm_region)
-			domain->ops->apply_dm_region(dev, domain, entry);
+		if (domain->ops->apply_resv_region)
+			domain->ops->apply_resv_region(dev, domain, entry);
 
 		start = ALIGN(entry->start, pg_size);
 		end   = ALIGN(entry->start + entry->length, pg_size);
 
+		if (entry->type != IOMMU_RESV_DIRECT)
+			continue;
+
 		for (addr = start; addr < end; addr += pg_size) {
 			phys_addr_t phys_addr;
 
@@ -358,7 +495,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	}
 
 out:
-	iommu_put_dm_regions(dev, &mappings);
+	iommu_put_resv_regions(dev, &mappings);
 
 	return ret;
 }
@@ -1559,20 +1696,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->get_dm_regions)
-		ops->get_dm_regions(dev, list);
+	if (ops && ops->get_resv_regions)
+		ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->put_dm_regions)
-		ops->put_dm_regions(dev, list);
+	if (ops && ops->put_resv_regions)
+		ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+						  size_t length,
+						  int prot, int type)
+{
+	struct iommu_resv_region *region;
+
+	region = kzalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return NULL;
+
+	INIT_LIST_HEAD(&region->list);
+	region->start = start;
+	region->length = length;
+	region->prot = prot;
+	region->type = type;
+	return region;
 }
 
 /* Request that a device is direct mapped by the IOMMU */
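For illustration, not part of the commit: a minimal sketch of a consumer of the newly exported iommu_get_group_resv_regions(), mirroring what the VFIO hunk below does; the function name is hypothetical. Note that the returned entries are copies owned by the caller, which must free them.

	/* Sketch only: test whether a group advertises an MSI reserved region. */
	static bool example_group_has_msi(struct iommu_group *group)
	{
		struct iommu_resv_region *region, *next;
		struct list_head regions;
		bool found = false;

		INIT_LIST_HEAD(&regions);
		iommu_get_group_resv_regions(group, &regions);

		list_for_each_entry_safe(region, next, &regions, list) {
			if (region->type & IOMMU_RESV_MSI)
				found = true;
			kfree(region);	/* caller owns the merged list */
		}
		return found;
	}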
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1642,6 +1642,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 
 	inner_domain->parent = its_parent;
 	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
 	info->ops = &its_msi_domain_ops;
 	info->data = its;
 	inner_domain->host_data = info;
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,8 @@
 #include <linux/workqueue.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>
 
 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
 	return NULL;
 }
 
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+				    phys_addr_t *base)
+{
+	struct list_head group_resv_regions;
+	struct iommu_resv_region *region, *next;
+	bool ret = false;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+	list_for_each_entry(region, &group_resv_regions, list) {
+		if (region->type & IOMMU_RESV_MSI) {
+			*base = region->start;
+			ret = true;
+			goto out;
+		}
+	}
+out:
+	list_for_each_entry_safe(region, next, &group_resv_regions, list)
+		kfree(region);
+	return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	struct vfio_domain *domain, *d;
 	struct bus_type *bus = NULL, *mdev_bus;
 	int ret;
+	bool resv_msi, msi_remap;
+	phys_addr_t resv_msi_base;
 
 	mutex_lock(&iommu->lock);
 
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_domain;
 
+	resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
 	INIT_LIST_HEAD(&domain->group_list);
 	list_add(&group->next, &domain->group_list);
 
-	if (!allow_unsafe_interrupts &&
-	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+	msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+				iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+	if (!allow_unsafe_interrupts && !msi_remap) {
 		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
 		       __func__);
 		ret = -EPERM;
@@ -1302,6 +1332,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_detach;
 
+	if (resv_msi && iommu_get_msi_cookie(domain->domain, resv_msi_base))
+		goto out_detach;
+
 	list_add(&domain->next, &iommu->domain_list);
 
 	mutex_unlock(&iommu->lock);
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 	return -ENODEV;
 }
 
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
 
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,25 @@ enum iommu_attr {
 	DOMAIN_ATTR_MAX,
 };
 
+/* These are the possible reserved region types */
+#define IOMMU_RESV_DIRECT	(1 << 0)
+#define IOMMU_RESV_RESERVED	(1 << 1)
+#define IOMMU_RESV_MSI		(1 << 2)
+
 /**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
  */
-struct iommu_dm_region {
+struct iommu_resv_region {
 	struct list_head	list;
 	phys_addr_t		start;
 	size_t			length;
 	int			prot;
+	int			type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -150,9 +157,9 @@ struct iommu_dm_region {
  * @device_group: find iommu group for a particular device
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
  * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +191,12 @@ struct iommu_ops {
 	int (*domain_set_attr)(struct iommu_domain *domain,
 			       enum iommu_attr attr, void *data);
 
-	/* Request/Free a list of direct mapping requirements for a device */
-	void (*get_dm_regions)(struct device *dev, struct list_head *list);
-	void (*put_dm_regions)(struct device *dev, struct list_head *list);
-	void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
-				struct iommu_dm_region *region);
+	/* Request/Free a list of reserved regions for a device */
+	void (*get_resv_regions)(struct device *dev, struct list_head *list);
+	void (*put_resv_regions)(struct device *dev, struct list_head *list);
+	void (*apply_resv_region)(struct device *dev,
+				  struct iommu_domain *domain,
+				  struct iommu_resv_region *region);
 
 	/* Window handling functions */
 	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -233,9 +241,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+					struct list_head *head);
 
 extern int iommu_attach_group(struct iommu_domain *domain,
 			      struct iommu_group *group);
@@ -443,16 +455,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
 
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
 
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+					       struct list_head *head)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
 	return -ENODEV;
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
 	/* Irq domain is an IPI domain with single virq */
 	IRQ_DOMAIN_FLAG_IPI_SINGLE	= (1 << 3),
 
+	/* Irq domain implements MSIs */
+	IRQ_DOMAIN_FLAG_MSI		= (1 << 4),
+
+	/* Irq domain implements MSI remapping */
+	IRQ_DOMAIN_FLAG_MSI_REMAP	= (1 << 5),
+
 	/*
 	 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
 	 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 void *host_data);
 extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 						   enum irq_domain_bus_token bus_token);
+extern bool irq_domain_check_msi_remap(void);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
 				  irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+}
+
+extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+
 #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 static inline void irq_domain_activate_irq(struct irq_data *data) { }
 static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return false;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool
+irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #else /* CONFIG_IRQ_DOMAIN */
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 }
 EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 
+/**
+ * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
+ * IRQ remapping
+ *
+ * Return: false if any MSI irq domain does not support IRQ remapping,
+ * true otherwise (including if there is no MSI irq domain)
+ */
+bool irq_domain_check_msi_remap(void)
+{
+	struct irq_domain *h;
+	bool ret = true;
+
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(h, &irq_domain_list, link) {
+		if (irq_domain_is_msi(h) &&
+		    !irq_domain_hierarchical_is_msi_remap(h)) {
+			ret = false;
+			break;
+		}
+	}
+	mutex_unlock(&irq_domain_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
+
 /**
  * irq_set_default_host() - Set a "default" irq domain
  * @domain: default domain pointer
@@ -1392,6 +1417,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
 	if (domain->ops->alloc)
 		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
 }
+
+/**
+ * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
+ * parent has MSI remapping support
+ * @domain: domain pointer
+ */
+bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+	for (; domain; domain = domain->parent) {
+		if (irq_domain_is_msi_remap(domain))
+			return true;
+	}
+	return false;
+}
 #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 /**
  * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
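For illustration, not part of the commit: the contract between the two new helpers, as a sketch; the example_* names are hypothetical. An MSI controller marks its domain the way the ITS hunk above does, and a consumer such as VFIO then asks whether every MSI domain in the system remaps.

	/* Sketch only: an irqchip whose hardware isolates MSI writes advertises it... */
	static void example_mark_domain(struct irq_domain *domain)
	{
		domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
	}

	/*
	 * ...and a consumer treats interrupts as safe only if no MSI irq
	 * domain lacks remapping support.
	 */
	static bool example_msi_is_safe(void)
	{
		return irq_domain_check_msi_remap();
	}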
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		msi_domain_update_chip_ops(info);
 
-	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
-					   &msi_domain_ops, info);
+	return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
+					   fwnode, &msi_domain_ops, info);
 }
 
 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,