Merge git://git.infradead.org/~dwmw2/iommu-2.6.31
* git://git.infradead.org/~dwmw2/iommu-2.6.31:
  intel-iommu: Fix one last ia64 build problem in Pass Through Support
  VT-d: support the device IOTLB
  VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
  VT-d: add device IOTLB invalidation support
  VT-d: parse ATSR in DMA Remapping Reporting Structure
  PCI: handle Virtual Function ATS enabling
  PCI: support the ATS capability
  intel-iommu: dmar_set_interrupt return error value
  intel-iommu: Tidy up iommu->gcmd handling
  intel-iommu: Fix tiny theoretical race in write-buffer flush.
  intel-iommu: Clean up handling of "caching mode" vs. IOTLB flushing.
  intel-iommu: Clean up handling of "caching mode" vs. context flushing.
  VT-d: fix invalid domain id for KVM context flush
  Fix !CONFIG_DMAR build failure introduced by Intel IOMMU Pass Through Support
  Intel IOMMU Pass Through Support

Fix up trivial conflicts in drivers/pci/{intel-iommu.c,intr_remapping.c}
commit 687d680985
@@ -1006,6 +1006,7 @@ and is between 256 and 4096 characters. It is defined in the file
nomerge
forcesac
soft
pt [x86, IA64]

io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
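With this documentation change, pass-through mode can be requested from the boot loader. A minimal example (a GRUB-style kernel command line is assumed here; the boot loader itself is not part of this diff):

    iommu=pt

The option only takes effect when every VT-d unit advertises the Pass Through capability (ecap bit 6, tested via ecap_pass_through() later in this diff); init_dmars() clears iommu_pass_through otherwise.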
@@ -9,6 +9,11 @@ extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
#ifdef CONFIG_DMAR
extern int iommu_pass_through;
#else
#define iommu_pass_through (0)
#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
@@ -32,6 +32,8 @@ int force_iommu __read_mostly = 1;
int force_iommu __read_mostly;
#endif

int iommu_pass_through;

/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)

void __init pci_swiotlb_init(void)
{
if (!iommu_detected) {
if (!iommu_detected || iommu_pass_through) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;

/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
@@ -32,6 +32,8 @@ int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

int iommu_pass_through;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

@@ -209,6 +211,10 @@ static __init int iommu_setup(char *p)
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
if (!strncmp(p, "pt", 2)) {
iommu_pass_through = 1;
return 1;
}
#endif

gart_parse_options(p);
@@ -71,7 +71,8 @@ void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
iommu_pass_through)
swiotlb = 1;
#endif
if (swiotlb_force)
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
}
return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;

atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
if (!atsru)
return -ENOMEM;

atsru->hdr = hdr;
atsru->include_all = atsr->flags & 0x1;

list_add(&atsru->list, &dmar_atsr_units);

return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
int rc;
struct acpi_dmar_atsr *atsr;

if (atsru->include_all)
return 0;

atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
rc = dmar_parse_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt, &atsru->devices,
atsr->segment);
if (rc || !atsru->devices_cnt) {
list_del(&atsru->list);
kfree(atsru);
}

return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
int i;
struct pci_bus *bus;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;

list_for_each_entry(atsru, &dmar_atsr_units, list) {
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (atsr->segment == pci_domain_nr(dev->bus))
goto found;
}

return 0;

found:
for (bus = dev->bus; bus; bus = bus->parent) {
struct pci_dev *bridge = bus->self;

if (!bridge || !bridge->is_pcie ||
bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;

if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
for (i = 0; i < atsru->devices_cnt; i++)
if (atsru->devices[i] == bridge)
return 1;
break;
}
}

if (atsru->include_all)
return 1;

return 0;
}
#endif

static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;

switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd = (struct acpi_dmar_hardware_unit *)header;
drhd = container_of(header, struct acpi_dmar_hardware_unit,
header);
printk (KERN_INFO PREFIX
"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
drhd->flags, (unsigned long long)drhd->address);
"DRHD base: %#016Lx flags: %#x\n",
(unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = (struct acpi_dmar_reserved_memory *)header;

rmrr = container_of(header, struct acpi_dmar_reserved_memory,
header);
printk (KERN_INFO PREFIX
"RMRR base: 0x%016Lx end: 0x%016Lx\n",
"RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
case ACPI_DMAR_TYPE_ATSR:
atsr = container_of(header, struct acpi_dmar_atsr, header);
printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
break;
}
}

@@ -361,6 +445,11 @@ parse_dmar_table(void)
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
ret = dmar_parse_one_atsr(entry_header);
#endif
break;
default:
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;

list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}

list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;
}
}
#endif

@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");

if (list_empty(&dmar_atsr_units))
printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;

iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)

@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
"Cannot get a valid agaw for iommu (seq_id = %d)\n",
"Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
printk(KERN_ERR
"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;
iommu->msagaw = msagaw;

/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
while (qi->desc_status[qi->free_tail] == QI_DONE) {
while (qi->desc_status[qi->free_tail] == QI_DONE ||
qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;

@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
int head;
int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;

if (qi->desc_status[wait_index] == QI_ABORT)
return -EAGAIN;

fault = readl(iommu->reg + DMAR_FSTS_REG);

/*

@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
if ((head >> 4) == index) {
if ((head >> DMAR_IQ_SHIFT) == index) {
printk(KERN_ERR "VT-d detected invalid descriptor: "
"low=%llx, high=%llx\n",
(unsigned long long)qi->desc[index].low,
(unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
}

/*
* If ITE happens, all pending wait_desc commands are aborted.
* No new descriptors are fetched until the ITE is cleared.
*/
if (fault & DMA_FSTS_ITE) {
head = readl(iommu->reg + DMAR_IQH_REG);
head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
head |= 1;
tail = readl(iommu->reg + DMAR_IQT_REG);
tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

do {
if (qi->desc_status[head] == QI_IN_USE)
qi->desc_status[head] = QI_ABORT;
head = (head - 2 + QI_LENGTH) % QI_LENGTH;
} while (head != tail);

if (qi->desc_status[wait_index] == QI_ABORT)
return -EAGAIN;
}

if (fault & DMA_FSTS_ICE)
writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

return 0;
}

@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
int rc = 0;
int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;

@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

hw = qi->desc;

restart:
rc = 0;

spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);

@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
* update the HW tail register indicating the presence of
* new descriptors.
*/
writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

while (qi->desc_status[wait_index] != QI_DONE) {
/*

@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
*/
rc = qi_check_fault(iommu, index);
if (rc)
goto out;
break;

spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
out:
qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

qi->desc_status[index] = QI_DONE;

reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);

if (rc == -EAGAIN)
goto restart;

return rc;
}

@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
u64 type, int non_present_entry_flush)
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
u64 type)
{
struct qi_desc desc;

if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}

desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;

return qi_submit_sync(&desc, iommu);
qi_submit_sync(&desc, iommu);
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type,
int non_present_entry_flush)
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;

struct qi_desc desc;
int ih = 0;

if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}

if (cap_write_drain(iommu->cap))
dw = 1;

@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);

return qi_submit_sync(&desc, iommu);
qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
u64 addr, unsigned mask)
{
struct qi_desc desc;

if (mask) {
BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
} else
desc.high = QI_DEV_IOTLB_ADDR(addr);

if (qdep >= QI_DEV_IOTLB_MAX_INVS)
qdep = 0;

desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
QI_DIOTLB_TYPE;

qi_submit_sync(&desc, iommu);
}

/*
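A short sketch of the size encoding that qi_flush_dev_iotlb() uses above (illustrative only; it assumes the usual 4KiB VT-d page, i.e. VTD_PAGE_SHIFT == 12, and the QI_DEV_IOTLB_* macros added to intel-iommu.h later in this diff):

    /* To invalidate 1 << mask pages starting at a mask-aligned address,
     * the bits below the range boundary are set to ones and the S bit
     * (QI_DEV_IOTLB_SIZE) marks the descriptor as a range invalidation.
     */
    u64 addr = 0x100000;                  /* base, 4KiB-page aligned   */
    unsigned mask = 2;                    /* 1 << 2 = 4 pages          */
    addr |= (1 << (12 + mask - 1)) - 1;   /* addr == 0x101fff          */
    /* desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; */

With mask == 0 the S bit stays clear and a single translation at addr is invalidated instead.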
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
cpu_relax();

iommu->gcmd &= ~DMA_GCMD_QIE;

writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,

@@ -804,7 +958,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
*/
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
u32 cmd, sts;
u32 sts;
unsigned long flags;
struct q_inval *qi = iommu->qi;

@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)

dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
set_irq_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
return 0;
return ret;
}

ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
@@ -53,6 +53,8 @@

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)

@@ -131,8 +133,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
context->lo &= (((u64)-1) << 2) | 1;
}

#define CONTEXT_TT_MULTI_LEVEL 0

static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{

@@ -256,6 +256,7 @@ struct device_domain_info {
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};

@@ -401,17 +402,13 @@ void free_iova_mem(struct iova *iova)

static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
* "SAGAW" may be different across iommus, use a default agaw, and
* get a supported less agaw for iommus that don't support the default agaw.
*/
int iommu_calculate_agaw(struct intel_iommu *iommu)
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;

sagaw = cap_sagaw(iommu->cap);
for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;

@@ -420,6 +417,24 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return agaw;
}

/*
* Calculate max SAGAW for each iommu.
*/
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
* calculate agaw for each iommu.
* "SAGAW" may be different across iommus, use a default agaw, and
* get a supported less agaw for iommus that don't support the default agaw.
*/
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
@@ -809,7 +824,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
u32 cmd, sts;
u32 sts;
unsigned long flag;

addr = iommu->root_entry;

@@ -817,12 +832,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

cmd = iommu->gcmd | DMA_GCMD_SRTP;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts);
readl, (sts & DMA_GSTS_RTPS), sts);

spin_unlock_irqrestore(&iommu->register_lock, flag);
}
@@ -834,39 +848,25 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)

if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
val = iommu->gcmd | DMA_GCMD_WBF;

spin_lock_irqsave(&iommu->register_lock, flag);
writel(val, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val);
readl, (!(val & DMA_GSTS_WBFS)), val);

spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u64 type,
int non_present_entry_flush)
static void __iommu_flush_context(struct intel_iommu *iommu,
u16 did, u16 source_id, u8 function_mask,
u64 type)
{
u64 val = 0;
unsigned long flag;

/*
* In the non-present entry flush case, if hardware doesn't cache
* non-present entry we do nothing and if hardware cache non-present
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}

switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;

@@ -891,33 +891,16 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);

spin_unlock_irqrestore(&iommu->register_lock, flag);

/* flush context entry will implicitly flush write buffer */
return 0;
}

/* return value determine if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
int non_present_entry_flush)
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;

/*
* In the non-present entry flush case, if hardware doesn't cache
* non-present entry we do nothing and if hardware cache non-present
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
if (non_present_entry_flush) {
if (!cap_caching_mode(iommu->cap))
return 1;
else
did = 0;
}

switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -965,37 +948,101 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
/* flush iotlb entry will implicitly flush write buffer */
return 0;
}

static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
static struct device_domain_info *iommu_support_dev_iotlb(
struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
unsigned int mask;
int found = 0;
unsigned long flags;
struct device_domain_info *info;
struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

if (!ecap_dev_iotlb_support(iommu->ecap))
return NULL;

if (!iommu->qi)
return NULL;

spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link)
if (info->bus == bus && info->devfn == devfn) {
found = 1;
break;
}
spin_unlock_irqrestore(&device_domain_lock, flags);

if (!found || !info->dev)
return NULL;

if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
return NULL;

if (!dmar_find_matched_atsr_unit(info->dev))
return NULL;

info->iommu = iommu;

return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
if (!info)
return;

pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
if (!info->dev || !pci_ats_enabled(info->dev))
return;

pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;

spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (!info->dev || !pci_ats_enabled(info->dev))
continue;

sid = info->bus << 8 | info->devfn;
qdep = pci_ats_queue_depth(info->dev);
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));

BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);

/* Fallback to domain selective flush if no PSI support */
if (!cap_pgsel_inv(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH,
non_present_entry_flush);

/*
* Fallback to domain selective flush if no PSI support or the size is
* too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
mask = ilog2(__roundup_pow_of_two(pages));
/* Fallback to domain selective flush if size is too big */
if (mask > cap_max_amask_val(iommu->cap))
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH, non_present_entry_flush);

return iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH,
non_present_entry_flush);
if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
else
iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH);
if (did)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1021,13 +1068,13 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
unsigned long flags;

spin_lock_irqsave(&iommu->register_lock, flags);
writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts);
readl, (sts & DMA_GSTS_TES), sts);

iommu->gcmd |= DMA_GCMD_TE;
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}

@@ -1043,7 +1090,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)

/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts);
readl, (!(sts & DMA_GSTS_TES)), sts);

spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
@@ -1325,8 +1372,8 @@ static void domain_exit(struct dmar_domain *domain)
free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
int segment, u8 bus, u8 devfn)
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;

@@ -1336,10 +1383,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
unsigned long ndomains;
int id;
int agaw;
struct device_domain_info *info = NULL;

pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

BUG_ON(!domain->pgd);
BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
translation != CONTEXT_TT_MULTI_LEVEL);

iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)

@@ -1399,21 +1450,44 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}

context_set_domain_id(context, id);
context_set_address_width(context, iommu->agaw);
context_set_address_root(context, virt_to_phys(pgd));
context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);

if (translation != CONTEXT_TT_PASS_THROUGH) {
info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
translation = info ? CONTEXT_TT_DEV_IOTLB :
CONTEXT_TT_MULTI_LEVEL;
}
/*
* In pass through mode, AW must be programmed to indicate the largest
* AGAW value supported by hardware. And ASR is ignored by hardware.
*/
if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
context_set_address_width(context, iommu->msagaw);
else {
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, iommu->agaw);
}

context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));

/* it's a non-present to present mapping */
if (iommu->flush.flush_context(iommu, domain->id,
(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL, 1))
/*
* It's a non-present to present mapping. If hardware doesn't cache
* non-present entry we only need to flush the write-buffer. If the
* _does_ cache non-present entries, then it does so in the special
* domain #0, which we have to flush:
*/
if (cap_caching_mode(iommu->cap)) {
iommu->flush.flush_context(iommu, 0,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
else
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

}
iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);

spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1500,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
int translation)
{
int ret;
struct pci_dev *tmp, *parent;

ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
pdev->bus->number, pdev->devfn);
pdev->bus->number, pdev->devfn,
translation);
if (ret)
return ret;

@@ -1446,7 +1522,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
ret = domain_context_mapping_one(domain,
pci_domain_nr(parent->bus),
parent->bus->number,
parent->devfn);
parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;

@@ -1454,12 +1530,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
tmp->subordinate->number, 0);
tmp->subordinate->number, 0,
translation);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->bus),
tmp->bus->number,
tmp->devfn);
tmp->devfn,
translation);
}

static int domain_context_mapped(struct pci_dev *pdev)

@@ -1540,9 +1618,8 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)

clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL, 0);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)

@@ -1561,6 +1638,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);

iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1756,7 +1834,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
goto error;

/* context entry init */
ret = domain_context_mapping(domain, pdev);
ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret)
return 0;
error:

@@ -1857,6 +1935,23 @@ static inline void iommu_prepare_isa(void)
}
#endif /* !CONFIG_DMAR_FLPY_WA */

/* Initialize each context entry as pass through.*/
static int __init init_context_pass_through(void)
{
struct pci_dev *pdev = NULL;
struct dmar_domain *domain;
int ret;

for_each_pci_dev(pdev) {
domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
ret = domain_context_mapping(domain, pdev,
CONTEXT_TT_PASS_THROUGH);
if (ret)
return ret;
}
return 0;
}

static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -1864,6 +1959,7 @@ static int __init init_dmars(void)
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
int pass_through = 1;

/*
* for each drhd

@@ -1917,7 +2013,15 @@ static int __init init_dmars(void)
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto error;
}
if (!ecap_pass_through(iommu->ecap))
pass_through = 0;
}
if (iommu_pass_through)
if (!pass_through) {
printk(KERN_INFO
"Pass Through is not supported by hardware.\n");
iommu_pass_through = 0;
}

/*
* Start from the sane iommu hardware state.

@@ -1973,35 +2077,56 @@ static int __init init_dmars(void)
}

/*
* For each rmrr
* for each dev attached to rmrr
* do
* locate drhd for dev, alloc domain for dev
* allocate free domain
* allocate page table entries for rmrr
* if context not allocated for bus
* allocate and init context
* set present in root table for this bus
* init context with domain, translation etc
* endfor
* endfor
* If pass through is set and enabled, context entries of all pci
* devices are intialized by pass through translation type.
*/
for_each_rmrr_units(rmrr) {
for (i = 0; i < rmrr->devices_cnt; i++) {
pdev = rmrr->devices[i];
/* some BIOS lists non-exist devices in DMAR table */
if (!pdev)
continue;
ret = iommu_prepare_rmrr_dev(rmrr, pdev);
if (ret)
printk(KERN_ERR
"IOMMU: mapping reserved region failed\n");
if (iommu_pass_through) {
ret = init_context_pass_through();
if (ret) {
printk(KERN_ERR "IOMMU: Pass through init failed.\n");
iommu_pass_through = 0;
}
}

iommu_prepare_gfx_mapping();
/*
* If pass through is not set or not enabled, setup context entries for
* identity mappings for rmrr, gfx, and isa.
*/
if (!iommu_pass_through) {
/*
* For each rmrr
* for each dev attached to rmrr
* do
* locate drhd for dev, alloc domain for dev
* allocate free domain
* allocate page table entries for rmrr
* if context not allocated for bus
* allocate and init context
* set present in root table for this bus
* init context with domain, translation etc
* endfor
* endfor
*/
for_each_rmrr_units(rmrr) {
for (i = 0; i < rmrr->devices_cnt; i++) {
pdev = rmrr->devices[i];
/*
* some BIOS lists non-exist devices in DMAR
* table.
*/
if (!pdev)
continue;
ret = iommu_prepare_rmrr_dev(rmrr, pdev);
if (ret)
printk(KERN_ERR
"IOMMU: mapping reserved region failed\n");
}
}

iommu_prepare_isa();
iommu_prepare_gfx_mapping();

iommu_prepare_isa();
}

/*
* for each drhd
@@ -2023,10 +2148,8 @@ static int __init init_dmars(void)

iommu_set_root_entry(iommu);

iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
0);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
0);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);

ret = iommu_enable_translation(iommu);

@@ -2112,7 +2235,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)

/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(pdev))) {
ret = domain_context_mapping(domain, pdev);
ret = domain_context_mapping(domain, pdev,
CONTEXT_TT_MULTI_LEVEL);
if (ret) {
printk(KERN_ERR
"Domain context map for %s failed",

@@ -2173,10 +2297,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
if (ret)
goto error;

/* it's a non-present to present mapping */
ret = iommu_flush_iotlb_psi(iommu, domain->id,
start_paddr, size >> VTD_PAGE_SHIFT, 1);
if (ret)
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, 0, start_paddr,
size >> VTD_PAGE_SHIFT);
else
iommu_flush_write_buffer(iommu);

return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2335,22 @@ static void flush_unmaps(void)
if (!iommu)
continue;

if (deferred_flush[i].next) {
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]);
}
deferred_flush[i].next = 0;
if (!deferred_flush[i].next)
continue;

iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) {
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];

mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
mask = ilog2(mask >> VTD_PAGE_SHIFT);
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
iova->pfn_lo << PAGE_SHIFT, mask);
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
}

list_size = 0;

@@ -2291,9 +2423,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
if (iommu_flush_iotlb_psi(iommu,
domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
iommu_flush_write_buffer(iommu);
iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {

@@ -2384,9 +2515,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);

if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
size >> VTD_PAGE_SHIFT, 0))
iommu_flush_write_buffer(iommu);
iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
size >> VTD_PAGE_SHIFT);

/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2478,10 +2608,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
offset += size;
}

/* it's a non-present to present mapping */
if (iommu_flush_iotlb_psi(iommu, domain->id,
start_addr, offset >> VTD_PAGE_SHIFT, 1))
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, 0, start_addr,
offset >> VTD_PAGE_SHIFT);
else
iommu_flush_write_buffer(iommu);

return nelems;
}

@@ -2640,9 +2773,9 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu);

iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL, 0);
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}

@@ -2657,9 +2790,9 @@ static void iommu_flush_all(void)

for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL, 0);
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
DMA_TLB_GLOBAL_FLUSH);
}
}

@@ -2782,7 +2915,7 @@ int __init intel_iommu_init(void)
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
if (no_iommu || swiotlb || dmar_disabled)
if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
return -ENODEV;

iommu_init_mempool();

@@ -2802,7 +2935,15 @@ int __init intel_iommu_init(void)

init_timer(&unmap_timer);
force_iommu = 1;
dma_ops = &intel_dma_ops;

if (!iommu_pass_through) {
printk(KERN_INFO
"Multi-level page-table translation for DMAR.\n");
dma_ops = &intel_dma_ops;
} else
printk(KERN_INFO
"DMAR: Pass through translation for DMAR.\n");

init_iommu_sysfs();

register_iommu(&intel_iommu_ops);

@@ -2888,6 +3029,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);

iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);

@@ -2938,6 +3080,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)

spin_unlock_irqrestore(&device_domain_lock, flags1);

iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);

@@ -3142,11 +3285,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}

ret = domain_context_mapping(dmar_domain, pdev);
ret = vm_domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;

ret = vm_domain_add_dev_info(dmar_domain, pdev);
ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
return ret;
}

@@ -409,7 +409,7 @@ int free_irte(int irq)
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
u32 cmd, sts;
u32 sts;
unsigned long flags;

addr = virt_to_phys((void *)iommu->ir_table->base);

@@ -420,9 +420,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

/* Set interrupt-remapping table pointer */
cmd = iommu->gcmd | DMA_GCMD_SIRTP;
iommu->gcmd |= DMA_GCMD_SIRTP;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);

@@ -437,9 +436,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
spin_lock_irqsave(&iommu->register_lock, flags);

/* Enable interrupt-remapping */
cmd = iommu->gcmd | DMA_GCMD_IRE;
iommu->gcmd |= DMA_GCMD_IRE;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
@@ -5,6 +5,7 @@
*
* PCI Express I/O Virtualization (IOV) support.
* Single Root IOV 1.0
* Address Translation Service 1.0
*/

#include <linux/pci.h>

@@ -492,10 +493,10 @@ static int sriov_init(struct pci_dev *dev, int pos)

if (pdev)
iov->dev = pci_dev_get(pdev);
else {
else
iov->dev = dev;
mutex_init(&iov->lock);
}

mutex_init(&iov->lock);

dev->sriov = iov;
dev->is_physfn = 1;

@@ -515,11 +516,11 @@ static void sriov_release(struct pci_dev *dev)
{
BUG_ON(dev->sriov->nr_virtfn);

if (dev == dev->sriov->dev)
mutex_destroy(&dev->sriov->lock);
else
if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);

mutex_destroy(&dev->sriov->lock);

kfree(dev->sriov);
dev->sriov = NULL;
}
@@ -681,3 +682,145 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);

static int ats_alloc_one(struct pci_dev *dev, int ps)
{
int pos;
u16 cap;
struct pci_ats *ats;

pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
return -ENODEV;

ats = kzalloc(sizeof(*ats), GFP_KERNEL);
if (!ats)
return -ENOMEM;

ats->pos = pos;
ats->stu = ps;
pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
PCI_ATS_MAX_QDEP;
dev->ats = ats;

return 0;
}

static void ats_free_one(struct pci_dev *dev)
{
kfree(dev->ats);
dev->ats = NULL;
}

/**
* pci_enable_ats - enable the ATS capability
* @dev: the PCI device
* @ps: the IOMMU page shift
*
* Returns 0 on success, or negative on failure.
*/
int pci_enable_ats(struct pci_dev *dev, int ps)
{
int rc;
u16 ctrl;

BUG_ON(dev->ats && dev->ats->is_enabled);

if (ps < PCI_ATS_MIN_STU)
return -EINVAL;

if (dev->is_physfn || dev->is_virtfn) {
struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

mutex_lock(&pdev->sriov->lock);
if (pdev->ats)
rc = pdev->ats->stu == ps ? 0 : -EINVAL;
else
rc = ats_alloc_one(pdev, ps);

if (!rc)
pdev->ats->ref_cnt++;
mutex_unlock(&pdev->sriov->lock);
if (rc)
return rc;
}

if (!dev->is_physfn) {
rc = ats_alloc_one(dev, ps);
if (rc)
return rc;
}

ctrl = PCI_ATS_CTRL_ENABLE;
if (!dev->is_virtfn)
ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

dev->ats->is_enabled = 1;

return 0;
}

/**
* pci_disable_ats - disable the ATS capability
* @dev: the PCI device
*/
void pci_disable_ats(struct pci_dev *dev)
{
u16 ctrl;

BUG_ON(!dev->ats || !dev->ats->is_enabled);

pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
ctrl &= ~PCI_ATS_CTRL_ENABLE;
pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

dev->ats->is_enabled = 0;

if (dev->is_physfn || dev->is_virtfn) {
struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

mutex_lock(&pdev->sriov->lock);
pdev->ats->ref_cnt--;
if (!pdev->ats->ref_cnt)
ats_free_one(pdev);
mutex_unlock(&pdev->sriov->lock);
}

if (!dev->is_physfn)
ats_free_one(dev);
}

/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
* @dev: the PCI device
*
* Returns the queue depth on success, or negative on failure.
*
* The ATS spec uses 0 in the Invalidate Queue Depth field to
* indicate that the function can accept 32 Invalidate Request.
* But here we use the `real' values (i.e. 1~32) for the Queue
* Depth; and 0 indicates the function shares the Queue with
* other functions (doesn't exclusively own a Queue).
*/
int pci_ats_queue_depth(struct pci_dev *dev)
{
int pos;
u16 cap;

if (dev->is_virtfn)
return 0;

if (dev->ats)
return dev->ats->qdep;

pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
return -ENODEV;

pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);

return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
PCI_ATS_MAX_QDEP;
}
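For orientation, a sketch of how the new API is consumed (the wrapper function below is hypothetical; pci_enable_ats(), pci_ats_queue_depth(), qi_flush_dev_iotlb() and pci_disable_ats() are the real calls, and the intel-iommu.c hunks above use them in exactly this pattern via iommu_enable_dev_iotlb() and iommu_flush_dev_iotlb()):

    /* Illustrative only: enable ATS, invalidate one translation, disable. */
    static void ats_example(struct intel_iommu *iommu, struct pci_dev *dev)
    {
    	u16 sid, qdep;

    	if (pci_enable_ats(dev, VTD_PAGE_SHIFT))    /* STU = 4KiB pages */
    		return;

    	sid  = dev->bus->number << 8 | dev->devfn;  /* requester/source id */
    	qdep = pci_ats_queue_depth(dev);            /* invalidate queue depth */
    	qi_flush_dev_iotlb(iommu, sid, qdep, 0, 0); /* single page at address 0 */

    	pci_disable_ats(dev);
    }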
@@ -229,6 +229,15 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};

/* Address Translation Service */
struct pci_ats {
int pos; /* capability position */
int stu; /* Smallest Translation Unit */
int qdep; /* Invalidate Queue Depth */
int ref_cnt; /* Physical Function reference count */
int is_enabled:1; /* Enable bit is set */
};

#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);

@@ -236,6 +245,20 @@ extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);

extern int pci_enable_ats(struct pci_dev *dev, int ps);
extern void pci_disable_ats(struct pci_dev *dev);
extern int pci_ats_queue_depth(struct pci_dev *dev);
/**
* pci_ats_enabled - query the ATS status
* @dev: the PCI device
*
* Returns 1 if ATS capability is enabled, or 0 if not.
*/
static inline int pci_ats_enabled(struct pci_dev *dev)
{
return dev->ats && dev->ats->is_enabled;
}
#else
static inline int pci_iov_init(struct pci_dev *dev)
{

@@ -257,6 +280,22 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}

static inline int pci_enable_ats(struct pci_dev *dev, int ps)
{
return -ENODEV;
}
static inline void pci_disable_ats(struct pci_dev *dev)
{
}
static inline int pci_ats_queue_depth(struct pci_dev *dev)
{
return -ENODEV;
}
static inline int pci_ats_enabled(struct pci_dev *dev)
{
return 0;
}
#endif /* CONFIG_PCI_IOV */

#endif /* DRIVERS_PCI_H */
@@ -13,6 +13,10 @@
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)

#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2

struct intel_iommu;
struct dmar_domain;
struct root_entry;

@@ -21,11 +25,16 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);

#ifdef CONFIG_DMAR
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
#endif

extern int dmar_disabled;
@@ -188,6 +188,15 @@ struct dmar_rmrr_unit {

#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)

struct dmar_atsr_unit {
struct list_head list; /* list of ATSR units */
struct acpi_dmar_header *hdr; /* ACPI header */
struct pci_dev **devices; /* target devices */
int devices_cnt; /* target device count */
u8 include_all:1; /* include all ports */
};

/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
@@ -53,6 +53,7 @@
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */

@@ -120,8 +121,10 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */

@@ -197,6 +200,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */

@@ -225,7 +230,8 @@ do { \
enum {
QI_FREE,
QI_IN_USE,
QI_DONE
QI_DONE,
QI_ABORT
};

#define QI_CC_TYPE 0x1

@@ -254,6 +260,12 @@ enum {
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32

struct qi_desc {
u64 low, high;
};

@@ -280,10 +292,10 @@ struct ir_table {
#endif

struct iommu_flush {
int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
u64 type, int non_present_entry_flush);
int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type, int non_present_entry_flush);
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
};

enum {

@@ -302,6 +314,7 @@ struct intel_iommu {
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */

@@ -329,6 +342,7 @@ static inline void __iommu_flush_cache(
}

extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);

@@ -337,11 +351,12 @@ extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type, int non_present_entry_flush);
extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type,
int non_present_entry_flush);
extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
u64 addr, unsigned mask);

extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

@@ -196,6 +196,7 @@ struct pci_cap_saved_state {
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
struct pci_ats;

/*
* The pci_dev structure is used to describe PCI devices.

@@ -293,6 +294,7 @@ struct pci_dev {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
struct pci_ats *ats; /* Address Translation Service */
#endif
};

@@ -502,6 +502,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16

/* Advanced Error Reporting */
@@ -620,6 +621,15 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */

/* Address Translation Service */
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */

/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */