Message ID | 20200911170738.82818-11-leobras.c@gmail.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | DDW Indirect Mapping | expand |
Context | Check | Description |
---|---|---|
snowpatch_ozlabs/apply_patch | success | Successfully applied on branch powerpc/merge (4b552a4cbf286ff9dcdab19153f3c1c7d1680fab) |
snowpatch_ozlabs/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 224 lines checked |
snowpatch_ozlabs/needsstable | success | Patch has no Fixes tags |
On 12/09/2020 03:07, Leonardo Bras wrote: > Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org, > > Add a new helper _iommu_table_setparms(), and use it in > iommu_table_setparms() and iommu_table_setparms_lpar() to avoid duplicated > code. > > Also, setting tbl->it_ops was happening outsite iommu_table_setparms*(), > so move it to the new helper. Since we need the iommu_table_ops to be > declared before used, move iommu_table_lpar_multi_ops and > iommu_table_pseries_ops to before their respective iommu_table_setparms*(). > > The tce_exchange_pseries() also had to be moved up, since it's used in > iommu_table_lpar_multi_ops.xchg_no_kill. Use forward declarations (preferred) or make a separate patch for moving chunks (I do not see much point). > > Signed-off-by: Leonardo Bras <leobras.c@gmail.com> > --- > arch/powerpc/platforms/pseries/iommu.c | 149 ++++++++++++------------- > 1 file changed, 72 insertions(+), 77 deletions(-) > > diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c > index 510ccb0521af..abd36b257725 100644 > --- a/arch/powerpc/platforms/pseries/iommu.c > +++ b/arch/powerpc/platforms/pseries/iommu.c > @@ -495,12 +495,62 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, > return rc; > } > > +#ifdef CONFIG_IOMMU_API > +static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned > + long *tce, enum dma_data_direction *direction, > + bool realmode) > +{ > + long rc; > + unsigned long ioba = (unsigned long)index << tbl->it_page_shift; > + unsigned long flags, oldtce = 0; > + u64 proto_tce = iommu_direction_to_tce_perm(*direction); > + unsigned long newtce = *tce | proto_tce; > + > + spin_lock_irqsave(&tbl->large_pool.lock, flags); > + > + rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce); > + if (!rc) > + rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce); > + > + if (!rc) { > + *direction = iommu_tce_direction(oldtce); > + *tce = oldtce & ~(TCE_PCI_READ | 
TCE_PCI_WRITE); > + } > + > + spin_unlock_irqrestore(&tbl->large_pool.lock, flags); > + > + return rc; > +} > +#endif > + > static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, > unsigned long num_pfn, void *arg) > { > return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); > } > > +static inline void _iommu_table_setparms(struct iommu_table *tbl, unsigned long busno, > + unsigned long liobn, unsigned long win_addr, > + unsigned long window_size, unsigned long page_shift, > + unsigned long base, struct iommu_table_ops *table_ops) > +{ > + tbl->it_busno = busno; > + tbl->it_index = liobn; > + tbl->it_offset = win_addr >> page_shift; > + tbl->it_size = window_size >> page_shift; > + tbl->it_page_shift = page_shift; > + tbl->it_base = base; > + tbl->it_blocksize = 16; > + tbl->it_type = TCE_PCI; > + tbl->it_ops = table_ops; > +} > + > +struct iommu_table_ops iommu_table_pseries_ops = { > + .set = tce_build_pSeries, > + .clear = tce_free_pSeries, > + .get = tce_get_pseries > +}; > + > static void iommu_table_setparms(struct pci_controller *phb, > struct device_node *dn, > struct iommu_table *tbl) > @@ -509,8 +559,13 @@ static void iommu_table_setparms(struct pci_controller *phb, > const unsigned long *basep; > const u32 *sizep; > > - node = phb->dn; > + /* Test if we are going over 2GB of DMA space */ > + if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { > + udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > + panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > + } s/0x80000000ul/2*SZ_1G/ but more to the point - why this check? QEMU can create windows at 0 and as big as the VM requested. And I am pretty sure I can construct QEMU command line such as it won't have MMIO32 at all and a 4GB default DMA window. 
> > + node = phb->dn; > basep = of_get_property(node, "linux,tce-base", NULL); > sizep = of_get_property(node, "linux,tce-size", NULL); > if (basep == NULL || sizep == NULL) { > @@ -519,33 +574,25 @@ static void iommu_table_setparms(struct pci_controller *phb, > return; > } > > - tbl->it_base = (unsigned long)__va(*basep); > + _iommu_table_setparms(tbl, phb->bus->number, 0, phb->dma_window_base_cur, > + phb->dma_window_size, IOMMU_PAGE_SHIFT_4K, > + (unsigned long)__va(*basep), &iommu_table_pseries_ops); > > if (!is_kdump_kernel()) > memset((void *)tbl->it_base, 0, *sizep); > > - tbl->it_busno = phb->bus->number; > - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; > - > - /* Units of tce entries */ > - tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift; > - > - /* Test if we are going over 2GB of DMA space */ > - if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { > - udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > - panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > - } > - > phb->dma_window_base_cur += phb->dma_window_size; > - > - /* Set the tce table size - measured in entries */ > - tbl->it_size = phb->dma_window_size >> tbl->it_page_shift; > - > - tbl->it_index = 0; > - tbl->it_blocksize = 16; > - tbl->it_type = TCE_PCI; > } > > +struct iommu_table_ops iommu_table_lpar_multi_ops = { > + .set = tce_buildmulti_pSeriesLP, > +#ifdef CONFIG_IOMMU_API > + .xchg_no_kill = tce_exchange_pseries, > +#endif > + .clear = tce_freemulti_pSeriesLP, > + .get = tce_get_pSeriesLP > +}; > + > /* > * iommu_table_setparms_lpar > * > @@ -557,28 +604,17 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb, > struct iommu_table_group *table_group, > const __be32 *dma_window) > { > - unsigned long offset, size; > + unsigned long offset, size, liobn; > > - of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); > + of_parse_dma_window(dn, dma_window, &liobn, &offset, &size); > > - tbl->it_busno = 
phb->bus->number; > - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; > - tbl->it_base = 0; > - tbl->it_blocksize = 16; > - tbl->it_type = TCE_PCI; > - tbl->it_offset = offset >> tbl->it_page_shift; > - tbl->it_size = size >> tbl->it_page_shift; > + _iommu_table_setparms(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, 0, > + &iommu_table_lpar_multi_ops); > > table_group->tce32_start = offset; > table_group->tce32_size = size; > } > > -struct iommu_table_ops iommu_table_pseries_ops = { > - .set = tce_build_pSeries, > - .clear = tce_free_pSeries, > - .get = tce_get_pseries > -}; > - > static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) > { > struct device_node *dn; > @@ -647,7 +683,6 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) > tbl = pci->table_group->tables[0]; > > iommu_table_setparms(pci->phb, dn, tbl); > - tbl->it_ops = &iommu_table_pseries_ops; > iommu_init_table(tbl, pci->phb->node, 0, 0); > > /* Divide the rest (1.75GB) among the children */ > @@ -658,43 +693,6 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) > pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size); > } > > -#ifdef CONFIG_IOMMU_API > -static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned > - long *tce, enum dma_data_direction *direction, > - bool realmode) > -{ > - long rc; > - unsigned long ioba = (unsigned long) index << tbl->it_page_shift; > - unsigned long flags, oldtce = 0; > - u64 proto_tce = iommu_direction_to_tce_perm(*direction); > - unsigned long newtce = *tce | proto_tce; > - > - spin_lock_irqsave(&tbl->large_pool.lock, flags); > - > - rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce); > - if (!rc) > - rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce); > - > - if (!rc) { > - *direction = iommu_tce_direction(oldtce); > - *tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE); > - } > - > - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); > - > - return rc; > -} > -#endif > - > -struct 
iommu_table_ops iommu_table_lpar_multi_ops = { > - .set = tce_buildmulti_pSeriesLP, > -#ifdef CONFIG_IOMMU_API > - .xchg_no_kill = tce_exchange_pseries, > -#endif > - .clear = tce_freemulti_pSeriesLP, > - .get = tce_get_pSeriesLP > -}; > - > static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) > { > struct iommu_table *tbl; > @@ -729,7 +727,6 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) > tbl = ppci->table_group->tables[0]; > iommu_table_setparms_lpar(ppci->phb, pdn, tbl, > ppci->table_group, dma_window); > - tbl->it_ops = &iommu_table_lpar_multi_ops; > iommu_init_table(tbl, ppci->phb->node, 0, 0); > iommu_register_group(ppci->table_group, > pci_domain_nr(bus), 0); > @@ -758,7 +755,6 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) > PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node); > tbl = PCI_DN(dn)->table_group->tables[0]; > iommu_table_setparms(phb, dn, tbl); > - tbl->it_ops = &iommu_table_pseries_ops; > iommu_init_table(tbl, phb->node, 0, 0); > set_iommu_table_base(&dev->dev, tbl); > return; > @@ -1385,7 +1381,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) > tbl = pci->table_group->tables[0]; > iommu_table_setparms_lpar(pci->phb, pdn, tbl, > pci->table_group, dma_window); > - tbl->it_ops = &iommu_table_lpar_multi_ops; > iommu_init_table(tbl, pci->phb->node, 0, 0); > iommu_register_group(pci->table_group, > pci_domain_nr(pci->phb->bus), 0); >
On Tue, 2020-09-29 at 13:56 +1000, Alexey Kardashevskiy wrote: > > On 12/09/2020 03:07, Leonardo Bras wrote: > > Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org, > > > > Add a new helper _iommu_table_setparms(), and use it in > > iommu_table_setparms() and iommu_table_setparms_lpar() to avoid duplicated > > code. > > > > Also, setting tbl->it_ops was happening outsite iommu_table_setparms*(), > > so move it to the new helper. Since we need the iommu_table_ops to be > > declared before used, move iommu_table_lpar_multi_ops and > > iommu_table_pseries_ops to before their respective iommu_table_setparms*(). > > > > The tce_exchange_pseries() also had to be moved up, since it's used in > > iommu_table_lpar_multi_ops.xchg_no_kill. > > > Use forward declarations (preferred) or make a separate patch for moving > chunks (I do not see much point). Fixed :) > > @@ -509,8 +559,13 @@ static void iommu_table_setparms(struct pci_controller *phb, > > const unsigned long *basep; > > const u32 *sizep; > > - node = phb->dn; > > + /* Test if we are going over 2GB of DMA space */ > > > > > > > > + if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { > > + udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > > + panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > > + } > > > s/0x80000000ul/2*SZ_1G/ Done! > > but more to the point - why this check? QEMU can create windows at 0 and > as big as the VM requested. And I am pretty sure I can construct QEMU > command line such as it won't have MMIO32 at all and a 4GB default DMA > window. > Oh, the diff was a little strange here. I did not add this snippet, it was already in that function, but since I created the helper, the diff made it look like I introduced this piece of code. Please take a look at the diff snippet below. (These same lines were already there.) 
> > @@ -519,33 +574,25 @@ static void iommu_table_setparms(struct pci_controller *phb, > > return; > > } > > > > - tbl->it_base = (unsigned long)__va(*basep); > > > > > > > > + _iommu_table_setparms(tbl, phb->bus->number, 0, phb->dma_window_base_cur, > > + phb->dma_window_size, IOMMU_PAGE_SHIFT_4K, > > + (unsigned long)__va(*basep), &iommu_table_pseries_ops); > > if (!is_kdump_kernel()) > > > > > > > > memset((void *)tbl->it_base, 0, *sizep); > > > > - tbl->it_busno = phb->bus->number; > > - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; > > - > > - /* Units of tce entries */ > > - tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift; > > - > > - /* Test if we are going over 2GB of DMA space */ > > - if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { > > - udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > > - panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); > > - } > > - > > phb->dma_window_base_cur += phb->dma_window_size; > > - > > - /* Set the tce table size - measured in entries */ > > - tbl->it_size = phb->dma_window_size >> tbl->it_page_shift; > > - > > - tbl->it_index = 0; > > - tbl->it_blocksize = 16; > > - tbl->it_type = TCE_PCI; > > } > > Thanks for reviewing, Alexey!
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 510ccb0521af..abd36b257725 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -495,12 +495,62 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, return rc; } +#ifdef CONFIG_IOMMU_API +static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned + long *tce, enum dma_data_direction *direction, + bool realmode) +{ + long rc; + unsigned long ioba = (unsigned long)index << tbl->it_page_shift; + unsigned long flags, oldtce = 0; + u64 proto_tce = iommu_direction_to_tce_perm(*direction); + unsigned long newtce = *tce | proto_tce; + + spin_lock_irqsave(&tbl->large_pool.lock, flags); + + rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce); + if (!rc) + rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce); + + if (!rc) { + *direction = iommu_tce_direction(oldtce); + *tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE); + } + + spin_unlock_irqrestore(&tbl->large_pool.lock, flags); + + return rc; +} +#endif + static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, unsigned long num_pfn, void *arg) { return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); } +static inline void _iommu_table_setparms(struct iommu_table *tbl, unsigned long busno, + unsigned long liobn, unsigned long win_addr, + unsigned long window_size, unsigned long page_shift, + unsigned long base, struct iommu_table_ops *table_ops) +{ + tbl->it_busno = busno; + tbl->it_index = liobn; + tbl->it_offset = win_addr >> page_shift; + tbl->it_size = window_size >> page_shift; + tbl->it_page_shift = page_shift; + tbl->it_base = base; + tbl->it_blocksize = 16; + tbl->it_type = TCE_PCI; + tbl->it_ops = table_ops; +} + +struct iommu_table_ops iommu_table_pseries_ops = { + .set = tce_build_pSeries, + .clear = tce_free_pSeries, + .get = tce_get_pseries +}; + static void iommu_table_setparms(struct pci_controller *phb, struct 
device_node *dn, struct iommu_table *tbl) @@ -509,8 +559,13 @@ static void iommu_table_setparms(struct pci_controller *phb, const unsigned long *basep; const u32 *sizep; - node = phb->dn; + /* Test if we are going over 2GB of DMA space */ + if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { + udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); + panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); + } + node = phb->dn; basep = of_get_property(node, "linux,tce-base", NULL); sizep = of_get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { @@ -519,33 +574,25 @@ static void iommu_table_setparms(struct pci_controller *phb, return; } - tbl->it_base = (unsigned long)__va(*basep); + _iommu_table_setparms(tbl, phb->bus->number, 0, phb->dma_window_base_cur, + phb->dma_window_size, IOMMU_PAGE_SHIFT_4K, + (unsigned long)__va(*basep), &iommu_table_pseries_ops); if (!is_kdump_kernel()) memset((void *)tbl->it_base, 0, *sizep); - tbl->it_busno = phb->bus->number; - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; - - /* Units of tce entries */ - tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift; - - /* Test if we are going over 2GB of DMA space */ - if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { - udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); - panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n"); - } - phb->dma_window_base_cur += phb->dma_window_size; - - /* Set the tce table size - measured in entries */ - tbl->it_size = phb->dma_window_size >> tbl->it_page_shift; - - tbl->it_index = 0; - tbl->it_blocksize = 16; - tbl->it_type = TCE_PCI; } +struct iommu_table_ops iommu_table_lpar_multi_ops = { + .set = tce_buildmulti_pSeriesLP, +#ifdef CONFIG_IOMMU_API + .xchg_no_kill = tce_exchange_pseries, +#endif + .clear = tce_freemulti_pSeriesLP, + .get = tce_get_pSeriesLP +}; + /* * iommu_table_setparms_lpar * @@ -557,28 +604,17 @@ static void 
iommu_table_setparms_lpar(struct pci_controller *phb, struct iommu_table_group *table_group, const __be32 *dma_window) { - unsigned long offset, size; + unsigned long offset, size, liobn; - of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); + of_parse_dma_window(dn, dma_window, &liobn, &offset, &size); - tbl->it_busno = phb->bus->number; - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; - tbl->it_base = 0; - tbl->it_blocksize = 16; - tbl->it_type = TCE_PCI; - tbl->it_offset = offset >> tbl->it_page_shift; - tbl->it_size = size >> tbl->it_page_shift; + _iommu_table_setparms(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, 0, + &iommu_table_lpar_multi_ops); table_group->tce32_start = offset; table_group->tce32_size = size; } -struct iommu_table_ops iommu_table_pseries_ops = { - .set = tce_build_pSeries, - .clear = tce_free_pSeries, - .get = tce_get_pseries -}; - static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) { struct device_node *dn; @@ -647,7 +683,6 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) tbl = pci->table_group->tables[0]; iommu_table_setparms(pci->phb, dn, tbl); - tbl->it_ops = &iommu_table_pseries_ops; iommu_init_table(tbl, pci->phb->node, 0, 0); /* Divide the rest (1.75GB) among the children */ @@ -658,43 +693,6 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size); } -#ifdef CONFIG_IOMMU_API -static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned - long *tce, enum dma_data_direction *direction, - bool realmode) -{ - long rc; - unsigned long ioba = (unsigned long) index << tbl->it_page_shift; - unsigned long flags, oldtce = 0; - u64 proto_tce = iommu_direction_to_tce_perm(*direction); - unsigned long newtce = *tce | proto_tce; - - spin_lock_irqsave(&tbl->large_pool.lock, flags); - - rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce); - if (!rc) - rc = plpar_tce_put((u64)tbl->it_index, ioba, 
newtce); - - if (!rc) { - *direction = iommu_tce_direction(oldtce); - *tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE); - } - - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); - - return rc; -} -#endif - -struct iommu_table_ops iommu_table_lpar_multi_ops = { - .set = tce_buildmulti_pSeriesLP, -#ifdef CONFIG_IOMMU_API - .xchg_no_kill = tce_exchange_pseries, -#endif - .clear = tce_freemulti_pSeriesLP, - .get = tce_get_pSeriesLP -}; - static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) { struct iommu_table *tbl; @@ -729,7 +727,6 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) tbl = ppci->table_group->tables[0]; iommu_table_setparms_lpar(ppci->phb, pdn, tbl, ppci->table_group, dma_window); - tbl->it_ops = &iommu_table_lpar_multi_ops; iommu_init_table(tbl, ppci->phb->node, 0, 0); iommu_register_group(ppci->table_group, pci_domain_nr(bus), 0); @@ -758,7 +755,6 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node); tbl = PCI_DN(dn)->table_group->tables[0]; iommu_table_setparms(phb, dn, tbl); - tbl->it_ops = &iommu_table_pseries_ops; iommu_init_table(tbl, phb->node, 0, 0); set_iommu_table_base(&dev->dev, tbl); return; @@ -1385,7 +1381,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) tbl = pci->table_group->tables[0]; iommu_table_setparms_lpar(pci->phb, pdn, tbl, pci->table_group, dma_window); - tbl->it_ops = &iommu_table_lpar_multi_ops; iommu_init_table(tbl, pci->phb->node, 0, 0); iommu_register_group(pci->table_group, pci_domain_nr(pci->phb->bus), 0);
Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org, Add a new helper _iommu_table_setparms(), and use it in iommu_table_setparms() and iommu_table_setparms_lpar() to avoid duplicated code. Also, setting tbl->it_ops was happening outside iommu_table_setparms*(), so move it to the new helper. Since we need the iommu_table_ops to be declared before being used, move iommu_table_lpar_multi_ops and iommu_table_pseries_ops to before their respective iommu_table_setparms*(). The tce_exchange_pseries() also had to be moved up, since it's used in iommu_table_lpar_multi_ops.xchg_no_kill. Signed-off-by: Leonardo Bras <leobras.c@gmail.com> --- arch/powerpc/platforms/pseries/iommu.c | 149 ++++++++++++------------- 1 file changed, 72 insertions(+), 77 deletions(-)