| author | Robin Murphy <robin.murphy@arm.com> | 2016-09-12 17:14:00 +0100 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2016-09-16 09:34:22 +0100 |
| commit | fade1ec055dc6b6373e7487906b7899b41d0c46f | |
| tree | d8583918947bb5936ecf6e26f8b22598cca7e604 /drivers/iommu | |
| parent | 44bb7e243bd4b4e5c79de2452cd9762582f58925 | |
iommu/dma: Avoid PCI host bridge windows
With our DMA ops enabled for PCI devices, we should avoid allocating
IOVAs which a host bridge might misinterpret as peer-to-peer DMA,
leading to faults, corruption or other badness. To be safe, punch out holes
for all of the relevant host bridge's windows when initialising a DMA
domain for a PCI device.
CC: Marek Szyprowski <m.szyprowski@samsung.com>
CC: Inki Dae <inki.dae@samsung.com>
Reported-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
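
The reservation logic added below works in IOVA page-frame numbers: each bridge window is first translated from a CPU physical address to a bus address by subtracting window->offset, and then shifted down by the IOVA granule. A minimal, standalone sketch of that arithmetic, using made-up window, offset and granule values (not taken from any real platform):

```c
#include <stdio.h>

/*
 * Toy illustration (plain userspace C, not kernel code) of the
 * window-to-IOVA-PFN arithmetic used by iova_reserve_pci_windows()
 * in the diff below. All values are made up for the example.
 */
int main(void)
{
	unsigned long long res_start = 0x80100000ULL; /* CPU address of window start */
	unsigned long long res_end   = 0x801fffffULL; /* CPU address of window end */
	unsigned long long offset    = 0x80000000ULL; /* CPU-to-bus address offset */
	unsigned int granule_shift   = 12;            /* 4KiB IOVA granule */

	/* Subtract the offset to get bus addresses, then convert to PFNs. */
	unsigned long long lo = (res_start - offset) >> granule_shift;
	unsigned long long hi = (res_end - offset) >> granule_shift;

	/* These PFNs are what reserve_iova() would be asked to punch out. */
	printf("reserve IOVA PFNs 0x%llx..0x%llx\n", lo, hi);
	return 0;
}
```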
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/dma-iommu.c | 25 |
1 file changed, 24 insertions(+), 1 deletion(-)
```diff
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4329d18080cf..c5ab8667e6f2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -27,6 +27,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
@@ -103,18 +104,38 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL(iommu_put_dma_cookie);
 
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+		struct iova_domain *iovad)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+	struct resource_entry *window;
+	unsigned long lo, hi;
+
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM &&
+		    resource_type(window->res) != IORESOURCE_IO)
+			continue;
+
+		lo = iova_pfn(iovad, window->res->start - window->offset);
+		hi = iova_pfn(iovad, window->res->end - window->offset);
+		reserve_iova(iovad, lo, hi);
+	}
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
  * @size: Size of IOVA space
+ * @dev: Device the domain is being initialised for
  *
  * @base and @size should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev)
 {
 	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long order, base_pfn, end_pfn;
@@ -152,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
 		iovad->dma_32bit_pfn = end_pfn;
 	} else {
 		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+		if (dev && dev_is_pci(dev))
+			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 	}
 	return 0;
 }
```
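
Not shown in this diff is the matching call-site update: iommu_dma_init_domain() now takes the client device so the window reservation can be applied. A hypothetical caller, with a made-up function name and aperture (purely illustrative, not part of this patch), might look like:

```c
#include <linux/device.h>
#include <linux/dma-iommu.h>

/*
 * Hypothetical caller sketch: an IOMMU driver's DMA-domain setup path
 * forwards the client device so that iova_reserve_pci_windows() above
 * can punch out the host bridge windows. Function name and aperture
 * values are illustrative only.
 */
static int example_setup_dma_domain(struct iommu_domain *domain,
				    struct device *dev)
{
	dma_addr_t base = 0;	/* illustrative start of the IOVA aperture */
	u64 size = 1ULL << 32;	/* illustrative 4GB of IOVA space */

	/* Passing @dev is what enables the PCI window reservation. */
	return iommu_dma_init_domain(domain, base, size, dev);
}
```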