@@ -353,7 +353,7 @@ void do_softirq_own_stack(void)
irq_hw_number_t virq_to_hw(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
- return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
+ return WARN_ON(!irq_data) ? 0 : READ_ONCE(irq_data->hwirq);
}
EXPORT_SYMBOL_GPL(virq_to_hw);
@@ -1986,7 +1986,7 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data);
struct pnv_phb *phb = hose->private_data;
- return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
+ return opal_pci_msi_eoi(phb->opal_id, READ_ONCE(d->parent_data->hwirq));
}
/*
@@ -2162,11 +2162,11 @@ static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
struct pnv_phb *phb = hose->private_data;
int rc;
- rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq,
+ rc = __pnv_pci_ioda_msi_setup(phb, pdev, READ_ONCE(d->hwirq),
entry->pci.msi_attrib.is_64, msg);
if (rc)
dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n",
- entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
+ entry->pci.msi_attrib.is_64 ? "64" : "32", data_race(d->hwirq), rc);
}
/*
@@ -2184,7 +2184,7 @@ static void pnv_msi_eoi(struct irq_data *d)
* since it is translated into a vector number in
* OPAL, use that directly.
*/
- WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq));
+ WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, READ_ONCE(d->hwirq)));
}
irq_chip_eoi_parent(d);
@@ -2263,9 +2263,9 @@ static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct pnv_phb *phb = hose->private_data;
pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn,
- virq, d->hwirq, nr_irqs);
+ virq, data_race(d->hwirq), nr_irqs);
- msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, READ_ONCE(d->hwirq), nr_irqs);
/* XIVE domain is cleared through ->msi_free() */
}
@@ -452,7 +452,7 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d)
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
- return d->hwirq;
+ return READ_ONCE(d->hwirq);
}
/**
@@ -549,7 +549,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
"virq%i doesn't exist; cannot disassociate\n", irq))
return;
- hwirq = irq_data->hwirq;
+ hwirq = READ_ONCE(irq_data->hwirq);
mutex_lock(&domain->root->mutex);
@@ -948,7 +948,7 @@ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
if (irq_domain_is_nomap(domain)) {
if (hwirq < domain->hwirq_max) {
data = irq_domain_get_irq_data(domain, hwirq);
- if (data && data->hwirq == hwirq)
+ if (data && READ_ONCE(data->hwirq) == hwirq)
desc = irq_data_to_desc(data);
if (irq && desc)
*irq = hwirq;
KCSAN revealed that while irq_data entries are written either from behind a mutex or otherwise atomically, accesses to irq_data->hwirq can occur asynchronously, without volatile annotation. Mark these accesses with READ_ONCE to avoid unfortunate compiler reorderings and remove KCSAN warnings. Signed-off-by: Rohan McLure <rmclure@linux.ibm.com> --- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 12 ++++++------ include/linux/irq.h | 2 +- kernel/irq/irqdomain.c | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-)