forked from luck/tmp_suning_uos_patched
[SPARC64]: Fix for Niagara memory corruption.
On some sun4v systems, after netboot the ethernet controller and its DMA mappings can be left active. The net result is that the kernel can end up using memory the ethernet controller will continue to DMA into, resulting in corruption. To deal with this, we are more careful about importing IOMMU translations which OBP has left in the IO-TLB. If the mapping maps into an area the firmware claimed was free and available memory for the kernel to use, we demap that IOMMU entry instead of importing it. This is going to cause the network chip to take a PCI master abort on the next DMA it attempts, if it has been left going like this. All tests show that this is handled properly by the PCI layer and the e1000 drivers. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
486ad10a7e
commit
c2a5a46be4
|
@ -988,8 +988,13 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
|
|||
HV_PCI_TSBID(0, i),
|
||||
&io_attrs, &ra);
|
||||
if (ret == HV_EOK) {
|
||||
cnt++;
|
||||
__set_bit(i, arena->map);
|
||||
if (page_in_phys_avail(ra)) {
|
||||
pci_sun4v_iommu_demap(devhandle,
|
||||
HV_PCI_TSBID(0, i), 1);
|
||||
} else {
|
||||
cnt++;
|
||||
__set_bit(i, arena->map);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1062,9 +1067,9 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
|
|||
iommu->arena.limit = num_tsb_entries;
|
||||
|
||||
sz = probe_existing_entries(pbm, iommu);
|
||||
|
||||
printk("%s: TSB entries [%lu], existing mapings [%lu]\n",
|
||||
pbm->name, num_tsb_entries, sz);
|
||||
if (sz)
|
||||
printk("%s: Imported %lu TSB entries from OBP\n",
|
||||
pbm->name, sz);
|
||||
}
|
||||
|
||||
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
|
||||
|
|
|
@ -1396,7 +1396,7 @@ static void __init taint_real_pages(void)
|
|||
while (old_start < old_end) {
|
||||
int n;
|
||||
|
||||
for (n = 0; pavail_rescan_ents; n++) {
|
||||
for (n = 0; n < pavail_rescan_ents; n++) {
|
||||
unsigned long new_start, new_end;
|
||||
|
||||
new_start = pavail_rescan[n].phys_addr;
|
||||
|
@ -1418,6 +1418,32 @@ static void __init taint_real_pages(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Return 1 if the physical address @paddr lies in memory the kernel may
 * own: a range the firmware reported as free/available (the rescanned
 * pavail table), the kernel image itself, or (when configured) the
 * initial ramdisk.  Return 0 otherwise.
 */
int __init page_in_phys_avail(unsigned long paddr)
{
	int ent;

	/* Compare at page granularity. */
	paddr &= PAGE_MASK;

	/* Firmware-reported available physical memory ranges. */
	for (ent = 0; ent < pavail_rescan_ents; ent++) {
		unsigned long base = pavail_rescan[ent].phys_addr;
		unsigned long limit = base + pavail_rescan[ent].reg_size;

		if (paddr >= base && paddr < limit)
			return 1;
	}

	/* The kernel image occupies [kern_base, kern_base + kern_size). */
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;

#ifdef CONFIG_BLK_DEV_INITRD
	/* The initrd, with its end rounded up to a full page. */
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned long codepages, datapages, initpages;
|
||||
|
|
|
@ -756,6 +756,8 @@ extern unsigned long *sparc64_valid_addr_bitmap;
|
|||
#define kern_addr_valid(addr) \
|
||||
(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
|
||||
|
||||
extern int page_in_phys_avail(unsigned long paddr);
|
||||
|
||||
extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
|
||||
unsigned long pfn,
|
||||
unsigned long size, pgprot_t prot);
|
||||
|
|
Loading…
Reference in New Issue
Block a user