@@ -76,8 +76,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
/*
* Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
- if (gem->import_attach) {
- struct dma_buf *buf = gem->import_attach->dmabuf;
+ if (obj->dma_buf) {
+ struct dma_buf *buf = obj->dma_buf;
map->attach = dma_buf_attach(buf, dev);
if (IS_ERR(map->attach)) {
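With the buffer object recording the imported dma_buf itself, tegra_bo_pin() can attach the buffer to whichever device actually needs access instead of reusing the import-time attachment. Below is a minimal sketch of that per-device attach-and-map idiom, using only the core dma-buf API; the helper name is hypothetical and not part of the driver:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

/* Hypothetical helper: attach @buf to @dev and map it to an sg_table. */
static struct sg_table *sketch_map_dmabuf(struct dma_buf *buf,
                                          struct device *dev,
                                          struct dma_buf_attachment **attachp)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(buf, dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
        if (IS_ERR(sgt)) {
                /* Roll back the attachment if the mapping failed. */
                dma_buf_detach(buf, attach);
                return sgt;
        }

        *attachp = attach;
        return sgt;
}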
@@ -184,8 +184,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
if (obj->vaddr)
return obj->vaddr;
- if (obj->gem.import_attach) {
- ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
+ if (obj->dma_buf) {
+ ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
if (ret < 0)
return ERR_PTR(ret);
@@ -208,8 +208,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
if (obj->vaddr)
return;
- if (obj->gem.import_attach)
- return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
+ if (obj->dma_buf)
+ return dma_buf_vunmap_unlocked(obj->dma_buf, &map);
vunmap(addr);
}
@@ -465,27 +465,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
if (IS_ERR(bo))
return bo;
- attach = dma_buf_attach(buf, drm->dev);
- if (IS_ERR(attach)) {
- err = PTR_ERR(attach);
- goto free;
- }
-
- get_dma_buf(buf);
+ /*
+ * If we need to use the IOMMU API to map the dma-buf into the internally
+ * managed domain, map it to the DRM device first to get an sg_table.
+ */
+ if (tegra->domain) {
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free;
+ }
- bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
- if (IS_ERR(bo->sgt)) {
- err = PTR_ERR(bo->sgt);
- goto detach;
- }
+ bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
- if (tegra->domain) {
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
+
+ bo->gem.import_attach = attach;
}
- bo->gem.import_attach = attach;
+ get_dma_buf(buf);
+ bo->dma_buf = buf;
return bo;
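The import path now only creates an up-front attachment when the driver manages its own IOMMU domain, since mapping into that domain requires an sg_table; in the DMA API case, attachment is deferred to pin time. A condensed, hypothetical sketch of this control flow follows (the iommu_api flag stands in for the tegra->domain check; none of these names are part of the driver):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/types.h>

/* Hypothetical condensation of the two import strategies. */
static int sketch_import(struct dma_buf *buf, struct device *dev, bool iommu_api,
                         struct dma_buf_attachment **attachp, struct sg_table **sgtp)
{
        *attachp = NULL;
        *sgtp = NULL;

        if (iommu_api) {
                /* IOMMU API: an sg_table is needed now to map into the domain. */
                struct dma_buf_attachment *attach = dma_buf_attach(buf, dev);
                struct sg_table *sgt;

                if (IS_ERR(attach))
                        return PTR_ERR(attach);

                sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
                if (IS_ERR(sgt)) {
                        dma_buf_detach(buf, attach);
                        return PTR_ERR(sgt);
                }

                *attachp = attach;
                *sgtp = sgt;
        }

        /* Both paths keep a reference on the dma_buf itself. */
        get_dma_buf(buf);
        return 0;
}

Keeping get_dma_buf() outside the conditional makes the teardown in tegra_bo_free_object() uniform: dma_buf_put() runs for every import, while the attachment cleanup only runs in the IOMMU case.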
@@ -516,20 +521,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
dev_name(mapping->dev));
}
- if (tegra->domain)
+ if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
- struct dma_buf *dmabuf = gem->import_attach->dmabuf;
-
- dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
- DMA_TO_DEVICE);
- dma_buf_detach(dmabuf, gem->import_attach);
- dma_buf_put(dmabuf);
- } else {
- tegra_bo_free(gem->dev, bo);
+ if (gem->import_attach) {
+ dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ }
}
+ tegra_bo_free(gem->dev, bo);
+
+ if (bo->dma_buf)
+ dma_buf_put(bo->dma_buf);
+
drm_gem_object_release(gem);
kfree(bo);
}
@@ -32,6 +32,26 @@ struct tegra_bo_tiling {
enum tegra_bo_sector_layout sector_layout;
};
+/*
+ * How memory is referenced within a tegra_bo:
+ *
+ * Buffer source  | Mapping API(*) | Fields
+ * ---------------+----------------+---------------
+ * Allocated here | DMA API        | iova (IOVA mapped to drm->dev), vaddr (CPU VA)
+ *
+ * Allocated here | IOMMU API      | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev),
+ *                |                | iova/size (Mapped to domain)
+ *
+ * Imported       | DMA API        | dma_buf (Imported dma_buf)
+ *
+ * Imported       | IOMMU API      | dma_buf (Imported dma_buf),
+ *                |                | gem->import_attach (Attachment on drm->dev),
+ *                |                | sgt (Mapped to drm->dev),
+ *                |                | iova/size (Mapped to domain)
+ *
+ * (*) If tegra->domain is set, i.e. the TegraDRM IOMMU domain is directly managed
+ *     through the IOMMU API, this is the IOMMU API; otherwise it is the DMA API.
+ */
struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
@@ -39,6 +59,7 @@ struct tegra_bo {
struct sg_table *sgt;
dma_addr_t iova;
void *vaddr;
+ struct dma_buf *dma_buf;
struct drm_mm_node *mm;
unsigned long num_pages;
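The table implies a simple runtime invariant: dma_buf marks an import, and gem.import_attach is only set when that import was mapped through the IOMMU API, while allocated buffers are told apart by whether the pages array (referenced by the table as pages/num_pages) is populated. A hedged illustration built on those fields; the helper and the include path are assumptions, not part of the patch:

#include "gem.h" /* the driver's header declaring struct tegra_bo (assumed path) */

/* Hypothetical debug helper mapping a tegra_bo to a row of the table above. */
static const char *sketch_bo_backing(const struct tegra_bo *bo)
{
        if (bo->dma_buf)
                return bo->gem.import_attach ?
                       "imported, IOMMU API (attach + sgt + iova)" :
                       "imported, DMA API (dma_buf reference only)";

        return bo->pages ? "allocated, IOMMU API (pages + sgt + iova)" :
                           "allocated, DMA API (iova + vaddr)";
}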