Message ID | 20200416165725.206741-1-maxg@mellanox.com |
---|---|
State | New |
Headers | show |
Series | [1/2] powerpc/dma: Define map/unmap mmio resource callbacks | expand |
> dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs); > } > > +static dma_addr_t dma_iommu_map_resource(struct device *dev, > + phys_addr_t phys_addr, size_t size, > + enum dma_data_direction dir, > + unsigned long attrs) > +{ > + struct pci_dev *pdev = to_pci_dev(dev); > + struct pci_controller *phb = pci_bus_to_host(pdev->bus); > + > + if (dma_iommu_map_bypass(dev, attrs)) { > + if (phb->controller_ops.dma_map_resource && > + phb->controller_ops.dma_map_resource(pdev, phys_addr, size, > + dir)) > + return DMA_MAPPING_ERROR; > + return dma_direct_map_resource(dev, phys_addr, size, > + dir, attrs); > + } > + return DMA_MAPPING_ERROR; Just a nitpick, but to me, returning errors early would be a much more logical flow. Also if the callback just decides if the mapping is possible in bypass mode it should have that in the name: struct pci_controller_ops *ops = &phb->controller_ops; if (!dma_iommu_map_bypass(dev, attrs) || !ops->dma_can_direct_map_resource || !ops->dma_can_direct_map_resource(pdev, phys_addr, size, dir)) return DMA_MAPPING_ERROR; return dma_direct_map_resource(dev, phys_addr, size, dir, attrs); > + if (dma_iommu_map_bypass(dev, attrs)) { > + if (phb->controller_ops.dma_unmap_resource) > + phb->controller_ops.dma_unmap_resource(pdev, dma_handle, > + size, dir); > + } This can do a little early return as well, coupled with a WARN_ON_ONCE as we should never end up in the unmap path for a setup where the map didn't work.
On 4/17/2020 9:55 AM, Christoph Hellwig wrote: >> dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs); >> } >> >> +static dma_addr_t dma_iommu_map_resource(struct device *dev, >> + phys_addr_t phys_addr, size_t size, >> + enum dma_data_direction dir, >> + unsigned long attrs) >> +{ >> + struct pci_dev *pdev = to_pci_dev(dev); >> + struct pci_controller *phb = pci_bus_to_host(pdev->bus); >> + >> + if (dma_iommu_map_bypass(dev, attrs)) { >> + if (phb->controller_ops.dma_map_resource && >> + phb->controller_ops.dma_map_resource(pdev, phys_addr, size, >> + dir)) >> + return DMA_MAPPING_ERROR; >> + return dma_direct_map_resource(dev, phys_addr, size, >> + dir, attrs); >> + } >> + return DMA_MAPPING_ERROR; > Just a nitpick, but to me, returning errors early would be a much more > logical flow. Also if the callback just decides if the mapping is > possible in bypass mode it should have that in the name: > > struct pci_controller_ops *ops = &phb->controller_ops; > > if (!dma_iommu_map_bypass(dev, attrs) || > !ops->dma_can_direct_map_resource || > !ops->dma_can_direct_map_resource(pdev, phys_addr, size, dir)) > return DMA_MAPPING_ERROR; > return dma_direct_map_resource(dev, phys_addr, size, dir, attrs); maybe call it dma_direct_map_resource/dma_direct_unmap_resource for symmetry ? do you mean (small logic change): if (!dma_iommu_map_bypass(dev, attrs) || !ops->dma_direct_map_resource || ops->dma_direct_map_resource(pdev, phys_addr, size, dir)) return DMA_MAPPING_ERROR; return dma_direct_map_resource(dev, phys_addr, size, dir, attrs); > >> + if (dma_iommu_map_bypass(dev, attrs)) { >> + if (phb->controller_ops.dma_unmap_resource) >> + phb->controller_ops.dma_unmap_resource(pdev, dma_handle, >> + size, dir); >> + } > This can do a little early return as well, coupled with a WARN_ON_ONCE > as we should never end up in the unmap path for a setup where the map didn't > work.
how about: struct pci_controller_ops *ops = &phb->controller_ops; if (dma_iommu_map_bypass(dev, attrs) && ops->dma_direct_unmap_resource) ops->dma_direct_unmap_resource(pdev, dma_handle, size, dir);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 69f4cb3..d14b4ee 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -44,6 +44,12 @@ struct pci_controller_ops { #endif void (*shutdown)(struct pci_controller *hose); + int (*dma_map_resource)(struct pci_dev *pdev, + phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir); + void (*dma_unmap_resource)(struct pci_dev *pdev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir); }; /* diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index e486d1d..dc53acc 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -108,6 +108,39 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs); } +static dma_addr_t dma_iommu_map_resource(struct device *dev, + phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *phb = pci_bus_to_host(pdev->bus); + + if (dma_iommu_map_bypass(dev, attrs)) { + if (phb->controller_ops.dma_map_resource && + phb->controller_ops.dma_map_resource(pdev, phys_addr, size, + dir)) + return DMA_MAPPING_ERROR; + return dma_direct_map_resource(dev, phys_addr, size, + dir, attrs); + } + return DMA_MAPPING_ERROR; +} + +static void dma_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *phb = pci_bus_to_host(pdev->bus); + + if (dma_iommu_map_bypass(dev, attrs)) { + if (phb->controller_ops.dma_unmap_resource) + phb->controller_ops.dma_unmap_resource(pdev, dma_handle, + size, dir); + } +} + static bool dma_iommu_bypass_supported(struct device *dev, u64 mask) { struct pci_dev *pdev = to_pci_dev(dev); @@ -199,6 
+232,8 @@ extern void dma_iommu_sync_sg_for_device(struct device *dev, .free = dma_iommu_free_coherent, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, + .map_resource = dma_iommu_map_resource, + .unmap_resource = dma_iommu_unmap_resource, .dma_supported = dma_iommu_dma_supported, .map_page = dma_iommu_map_page, .unmap_page = dma_iommu_unmap_page,