@@ -132,6 +132,7 @@ struct dma_map_ops {
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
unsigned long (*get_merge_boundary)(struct device *dev);
+ void *(*get_virt_addr)(struct device *dev, dma_addr_t dma_handle); /* reverse-map dma_handle to its kernel vaddr; NULL if the implementation cannot */
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
@@ -442,6 +443,13 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev) /* true when the *_desc helpers below can recover the original vaddr */
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ return dma_is_direct(ops) || (ops && ops->get_virt_addr); /* direct mapping, or ops provide a reverse lookup */
+}
+
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -458,6 +466,14 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs); /* unmaps, then returns the originally-mapped vaddr (NULL if not recoverable) */
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs); /* unmaps, then returns the originally-mapped page (NULL if not recoverable) */
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir); /* syncs for CPU, then returns the mapped vaddr (NULL if not recoverable) */
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
@@ -534,6 +550,27 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
}
+
+static inline void *
+dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return NULL; /* !CONFIG_HAS_DMA stub: nothing was mapped, so no vaddr to return */
+}
+
+static inline struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return NULL; /* !CONFIG_HAS_DMA stub */
+}
+
+static inline void *
+dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ return NULL; /* !CONFIG_HAS_DMA stub */
+}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
@@ -578,6 +615,11 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
}
+
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev)
+{
+ return false; /* !CONFIG_HAS_DMA: unmap-by-address is never available */
+}
#endif /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -610,10 +652,13 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single_desc(d, a, s, r) \
+ dma_unmap_single_attrs_desc(d, a, s, r, 0) /* no-attrs convenience wrapper */
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_unmap_page_desc(d, a, s, r) dma_unmap_page_attrs_desc(d, a, s, r, 0) /* no-attrs convenience wrapper */
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
@@ -345,6 +345,58 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
EXPORT_SYMBOL(dma_free_attrs);
+/*
+ * Resolve the kernel virtual address that was originally mapped to @addr:
+ * trivial arithmetic for the direct-mapping case, delegated to the mapping
+ * ops' ->get_virt_addr() otherwise.  Returns NULL when the mapping cannot
+ * be reversed (e.g. an IOMMU implementation without ->get_virt_addr()).
+ */
+static void *dma_addr_to_virt(struct device *dev, dma_addr_t addr)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_is_direct(ops))
+		return phys_to_virt(dma_to_phys(dev, addr));
+	if (ops && ops->get_virt_addr)
+		return ops->get_virt_addr(dev, addr);
+	return NULL;
+}
+
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+			  enum dma_data_direction dir, unsigned long attrs)
+{
+	void *ptr = dma_addr_to_virt(dev, addr);
+
+	dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+
+	return ptr ? virt_to_page(ptr) : NULL;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_page_attrs_desc);
+
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir,
+				  unsigned long attrs)
+{
+	void *ptr = dma_addr_to_virt(dev, addr);
+
+	dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_single_attrs_desc);
+
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+				   size_t size, enum dma_data_direction dir)
+{
+	void *ptr = dma_addr_to_virt(dev, addr);
+
+	dma_sync_single_for_cpu(dev, addr, size, dir);
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_sync_single_for_cpu_desc);
+
int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);