@@ -597,19 +597,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
-static bool dev_is_untrusted(struct device *dev)
-{
- return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
-}
-
-static bool dev_use_swiotlb(struct device *dev, size_t size,
- enum dma_data_direction dir)
-{
- return IS_ENABLED(CONFIG_SWIOTLB) &&
- (dev_is_untrusted(dev) ||
- dma_kmalloc_needs_bounce(dev, size, dir));
-}
-
static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
@@ -964,8 +964,8 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
-static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
- struct device *dev)
+enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
+ struct device *dev)
{
enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>
+#include <linux/pci.h>
struct cma;
struct iommu_ops;
@@ -348,6 +349,19 @@ static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
+static inline bool dev_is_untrusted(struct device *dev)
+{
+ return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
+static inline bool dev_use_swiotlb(struct device *dev, size_t size,
+ enum dma_data_direction dir)
+{
+ return IS_ENABLED(CONFIG_SWIOTLB) &&
+ (dev_is_untrusted(dev) ||
+ dma_kmalloc_needs_bounce(dev, size, dir));
+}
+
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
@@ -514,6 +528,8 @@ struct pci_p2pdma_map_state {
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
struct scatterlist *sg);
+enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
+ struct device *dev);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
@@ -521,6 +537,11 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
{
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_type(struct dev_pagemap *pgmap, struct device *dev)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
#endif /* CONFIG_PCI_P2PDMA */
#endif /* _LINUX_DMA_MAP_OPS_H */
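
With pci_p2pdma_map_type() now declared here (and stubbed out for !CONFIG_PCI_P2PDMA), callers outside drivers/pci/p2pdma.c can dispatch on the P2P mapping type. A hedged sketch of such a dispatch, not part of the patch; the enum values are the existing ones from <linux/dma-map-ops.h>, everything around them is assumed:

	switch (pci_p2pdma_map_type(pgmap, dev)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		/* Peer-to-peer traffic is routed below the host bridge,
		 * so no IOMMU translation is involved. */
		break;
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		/* Traffic crosses the host bridge and is translated like
		 * host memory, so an IOVA-based mapping can cover it. */
		break;
	default:
		/* PCI_P2PDMA_MAP_NOT_SUPPORTED, which is also what the
		 * stub above returns when CONFIG_PCI_P2PDMA is disabled. */
		break;
	}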
@@ -100,6 +100,11 @@ struct dma_iova_attrs {
unsigned long attrs;
};
+struct dma_iova_state {
+ struct dma_iova_attrs *iova;
+ struct dma_memory_type *type;
+};
+
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -178,6 +183,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt);
void dma_get_memory_type(struct page *page, struct dma_memory_type *type);
+bool dma_can_use_iova(struct dma_iova_state *state, size_t size);
#else /* CONFIG_HAS_DMA */
static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
{
@@ -319,6 +325,10 @@ static inline void dma_get_memory_type(struct page *page,
struct dma_memory_type *type)
{
}
+static inline bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
+{
+ return false;
+}
#endif /* CONFIG_HAS_DMA */
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
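
An aside on the new struct: dma_iova_state pairs the caller's IOVA attributes with the classified memory type so that dma_can_use_iova(), implemented in the next hunk, can inspect both through one handle. A minimal caller-side sketch of populating it, assuming the dma_iova_attrs members (dev, size, dir) that the next hunk dereferences; the helper name and its parameters are hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical helper: classify the backing page and bind it, together
 * with the prepared IOVA attributes, into one state handle. */
static void example_fill_state(struct dma_iova_state *state,
			       struct dma_iova_attrs *iova,
			       struct dma_memory_type *type,
			       struct page *page)
{
	dma_get_memory_type(page, type);	/* normal vs P2P backing */
	state->iova = iova;
	state->type = type;
}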
@@ -968,3 +968,35 @@ void dma_free_iova(struct dma_iova_attrs *iova)
ops->free_iova(dev, iova->addr, iova->size);
}
EXPORT_SYMBOL_GPL(dma_free_iova);
+
+/**
+ * dma_can_use_iova - check if the device can use the IOVA-based DMA API path
+ * @state: IOVA state
+ * @size: size of the buffer
+ *
+ * Return %true if the mapping can use the IOVA path: the device is not
+ * forced to bounce through SWIOTLB, the DMA ops provide IOVA allocation,
+ * and the memory type can be mapped through the IOMMU; else %false.
+ */
+bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
+{
+ struct device *dev = state->iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ struct dma_memory_type *type = state->type;
+ enum pci_p2pdma_map_type map;
+
+ if (is_swiotlb_force_bounce(dev) ||
+ dev_use_swiotlb(dev, size, state->iova->dir))
+ return false;
+
+ if (dma_map_direct(dev, ops) || !ops->alloc_iova)
+ return false;
+
+ if (type->type == DMA_MEMORY_TYPE_P2P) {
+ map = pci_p2pdma_map_type(type->p2p_pgmap, dev);
+ return map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+ }
+
+ return type->type == DMA_MEMORY_TYPE_NORMAL;
+}
+EXPORT_SYMBOL_GPL(dma_can_use_iova);
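
To illustrate how dma_can_use_iova() is meant to gate the fast path, here is a hedged caller-side sketch, not part of the patch: classify the buffer, probe the IOVA path, and only then commit to an allocation. The function name and the DMA_MAPPING_ERROR fallback convention are assumptions:

#include <linux/dma-mapping.h>

static dma_addr_t example_map_fast(struct device *dev, struct page *page,
				   size_t size, enum dma_data_direction dir)
{
	struct dma_iova_attrs iova = {
		.dev  = dev,
		.size = size,
		.dir  = dir,
	};
	struct dma_memory_type type;
	struct dma_iova_state state = { .iova = &iova, .type = &type };

	dma_get_memory_type(page, &type);
	if (!dma_can_use_iova(&state, size))
		return DMA_MAPPING_ERROR;	/* fall back to dma_map_page() */

	if (dma_alloc_iova(&iova))
		return DMA_MAPPING_ERROR;

	/* ... pages would be linked into the range at iova.addr here ... */
	return iova.addr;
}

Note the ordering: the check runs before any IOVA is allocated, so callers that must bounce through SWIOTLB never pay for an allocation they cannot use.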