@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
/*
* Plug in direct dma map ops.
*/
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
}
}

-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
if (IS_ENABLED(CONFIG_CPU_V7M)) {
/*
@@ -1712,11 +1712,15 @@ void arm_iommu_detach_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping;
+ u64 dma_base = 0, size = 1ULL << 32;
+ if (dev->dma_range_map) {
+ dma_base = dma_range_map_min(dev->dma_range_map);
+ size = dma_range_map_max(dev->dma_range_map) - dma_base;
+ }

mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -1747,8 +1751,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
{
}
@@ -1756,8 +1759,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */

-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1776,8 +1778,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (dev->dma_ops)
return;
- if (iommu)
- arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
+ if (device_iommu_mapped(dev))
+ arm_setup_iommu_dma_ops(dev);
xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true;
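The old if (iommu) test becomes device_iommu_mapped(), so the iommu_ops pointer no longer needs to be threaded through arch_setup_dma_ops(). Roughly, that helper (from include/linux/device.h) only checks whether the IOMMU core has attached the device to an IOMMU group; the definition below is shown purely to illustrate the new check.

static inline bool device_iommu_mapped(struct device *dev)
{
	/* Set up by the IOMMU core when the device is attached to a group */
	return dev->iommu_group != NULL;
}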
@@ -46,8 +46,7 @@ void arch_teardown_dma_ops(struct device *dev)
}
#endif

-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
int cls = cache_line_size_of_cpu();
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
dev->dma_coherent = coherent;
}
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
}

-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
TAINT_CPU_OUT_OF_SPEC,
@@ -1640,8 +1640,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- arch_setup_dma_ops(dev, 0, U64_MAX,
- iommu, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -484,11 +484,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
void hv_setup_dma_ops(struct device *dev, bool coherent)
{
- /*
- * Hyper-V does not offer a vIOMMU in the guest
- * VM, so pass 0/NULL for the IOMMU settings
- */
- arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+ arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
@@ -96,7 +96,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
struct device_node *bus_np;
- u64 dma_start = 0;
u64 mask, end = 0;
bool coherent;
int ret;
@@ -118,7 +117,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
return ret == -ENODEV ? 0 : ret;
} else {
/* Determine the overall bounds of all DMA regions */
- dma_start = dma_range_map_min(map);
end = dma_range_map_max(map);
}
@@ -167,7 +165,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
- arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, iommu, coherent);
+ arch_setup_dma_ops(dev, coherent);
if (!iommu)
of_dma_set_restricted_buffer(dev, np);
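The net effect in the OF path: the bus DMA window still reaches the architecture code, but via dev->dma_range_map (which of_dma_configure_id() assigns from map earlier in the function) rather than explicit dma_base/size arguments. A condensed, hypothetical sketch of the resulting contract follows; example_dma_configure() is illustrative only, and the real function also handles masks, deferred probe and restricted buffers.

static void example_dma_configure(struct device *dev,
				  const struct bus_dma_region *map,
				  bool coherent)
{
	/* Attach the firmware-described DMA window to the device first... */
	dev->dma_range_map = map;

	/*
	 * ...then the arch hook only needs to be told about coherency; it can
	 * look up the window from dev->dma_range_map itself, as the ARM IOMMU
	 * path above now does.
	 */
	arch_setup_dma_ops(dev, coherent);
}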
@@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
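With the stub above, generic code can keep calling the hook unconditionally on every architecture. A trivial, hypothetical caller under the new two-argument prototype (mybus_setup_dma() is illustrative only):

static void mybus_setup_dma(struct device *dev, bool coherent)
{
	/*
	 * Only architectures selecting CONFIG_ARCH_HAS_SETUP_DMA_OPS do real
	 * per-device setup here; elsewhere this compiles to a no-op.
	 */
	arch_setup_dma_ops(dev, coherent);
}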