[PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts
Pali Rohár
pali@kernel.org
Fri May 6 11:55:46 PDT 2022
On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> On Fri, 06 May 2022 19:30:51 +0100,
> Pali Rohár <pali@kernel.org> wrote:
> >
> > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > On Fri, 06 May 2022 14:40:25 +0100,
> > > Pali Rohár <pali@kernel.org> wrote:
> > > >
> > > > MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
> > > > another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
> > > > domain for accessing this IRQ hierarchy.
> > > >
> > > > Signed-off-by: Pali Rohár <pali@kernel.org>
> > > > ---
> > > > drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
> > > > 1 file changed, 210 insertions(+), 3 deletions(-)
> > > >
> > > > diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
> > > > index ebd76ea1c69b..71578b65f5c8 100644
> > > > --- a/drivers/irqchip/irq-armada-370-xp.c
> > > > +++ b/drivers/irqchip/irq-armada-370-xp.c
> > > > @@ -117,6 +117,8 @@
> > > > /* Registers relative to main_int_base */
> > > > #define ARMADA_370_XP_INT_CONTROL (0x00)
> > > > #define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS (0x20)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS (0x24)
> > > > #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
> > > > #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
> > > > #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
> > > > @@ -130,6 +132,8 @@
> > > > #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
> > > > #define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
> > > > #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF (0x50)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF (0x54)
> > > > #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
> > > > #define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
> > > >
> > > > @@ -146,6 +150,8 @@
> > > > static void __iomem *per_cpu_int_base;
> > > > static void __iomem *main_int_base;
> > > > static struct irq_domain *armada_370_xp_mpic_domain;
> > > > +static struct irq_domain *armada_370_xp_soc_err_domain;
> > > > +static unsigned int soc_err_irq_num_regs;
> > > > static u32 doorbell_mask_reg;
> > > > static int parent_irq;
> > > > #ifdef CONFIG_PCI_MSI
> > > > @@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
> > > > static phys_addr_t msi_doorbell_addr;
> > > > #endif
> > > >
> > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > +
> > > > static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > {
> > > > if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > armada_370_xp_irq_unmask(data);
> > > > }
> > > >
> > > > + /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > + for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > + struct irq_data *data;
> > > > + int virq;
> > > > +
> > > > + virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > + if (virq == 0)
> > > > + continue;
> > > > +
> > > > + data = irq_get_irq_data(virq);
> > > > +
> > > > + if (!irq_percpu_is_enabled(virq))
> > > > + continue;
> > > > +
> > > > + armada_370_xp_soc_err_irq_unmask(data);
> > > > + }
> > >
> > > So you do this loop and all these lookups, both here and in the resume
> > > function (duplicated code!) just to be able to call the unmask
> > > function? This would be better served by two straight writes of the
> > > mask register, which you'd conveniently save on suspend.
> > >
> > > Yes, you have only duplicated the existing logic. But surely there is
> > > something better to do.
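> > >
> > > Something along these lines would do (a rough, untested sketch; the
> > > helper names are made up, the register names are taken from this
> > > patch, and the save/restore still has to run on each CPU because the
> > > mask registers are banked per CPU):
> > >
> > > static u32 soc_err_mask[2];
> > >
> > > static void armada_370_xp_soc_err_save(void)
> > > {
> > > 	/* Snapshot both SoC Error mask registers before suspend */
> > > 	soc_err_mask[0] = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
> > > 	soc_err_mask[1] = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
> > > }
> > >
> > > static void armada_370_xp_soc_err_restore(void)
> > > {
> > > 	/* Two straight writes instead of iterating over every hwirq */
> > > 	writel(soc_err_mask[0], per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
> > > 	writel(soc_err_mask[1], per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
> > > }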
> >
> > Yes, I just used the existing logic.
> >
> > I'm not rewriting the driver or doing a big refactor of it, as that is
> > not in the scope of the PCIe AER interrupt support.
>
> Fair enough. By the same logic, I'm not taking any change to the
> driver until it is put into better shape. Your call.
If you are the maintainer of this code, then it is expected that _you_
move the current code into the _better shape_ you wrote about and
expect, and then show us exactly, with examples, how new changes to
this driver should look.
> > > > +
> > > > + /* Unmask summary SoC Error Interrupt */
> > > > + if (soc_err_irq_num_regs > 0)
> > > > + writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> > > > +
> > > > ipi_resume();
> > > > }
> > > >
> > > > @@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
> > > > static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
> > > > unsigned int virq, irq_hw_number_t hw)
> > > > {
> > > > - /* IRQs 0 and 1 cannot be mapped, they are handled internally */
> > > > - if (hw <= 1)
> > > > + /* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
> > > > + if (hw <= 1 || hw == 4)
> > > > return -EINVAL;
> > > >
> > > > armada_370_xp_irq_mask(irq_get_irq_data(virq));
> > > > @@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
> > > > .xlate = irq_domain_xlate_onecell,
> > > > };
> > > >
> > > > +static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
> > > > +
> > > > +static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
> > > > +{
> > > > + irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > > + u32 reg, mask;
> > > > +
> > > > + reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > > + : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > > +
> > > > + raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > > + mask = readl(per_cpu_int_base + reg);
> > > > + mask &= ~BIT(hwirq % 32);
> > > > + writel(mask, per_cpu_int_base + reg);
> > > > + raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > > +}
> > > > +
> > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
> > > > +{
> > > > + irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > > + u32 reg, mask;
> > > > +
> > > > + reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > > + : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > > +
> > > > + raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > > + mask = readl(per_cpu_int_base + reg);
> > > > + mask |= BIT(hwirq % 32);
> > > > + writel(mask, per_cpu_int_base + reg);
> > > > + raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > > +}
> > > > +
> > > > +static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
> > > > +{
> > > > + struct irq_data *d = par;
> > > > + armada_370_xp_soc_err_irq_mask(d);
> > > > + return 0;
> > > > +}
> > > > +
> > > > +static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
> > > > +{
> > > > + struct irq_data *d = par;
> > > > + armada_370_xp_soc_err_irq_unmask(d);
> > > > + return 0;
> > > > +}
> > > > +
> > > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > > + const struct cpumask *mask,
> > > > + bool force)
> > > > +{
> > > > + unsigned int cpu;
> > > > +
> > > > + cpus_read_lock();
> > > > +
> > > > + /* First disable IRQ on all cores */
> > > > + for_each_online_cpu(cpu)
> > > > + smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > > +
> > > > + /* Select a single core from the affinity mask which is online */
> > > > + cpu = cpumask_any_and(mask, cpu_online_mask);
> > > > + smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > > +
> > > > + cpus_read_unlock();
> > > > +
> > > > + irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > > +
> > > > + return IRQ_SET_MASK_OK;
> > > > +}
> > >
> > > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > > affinity? /me rolls eyes...
> >
> > Yes, they are per-CPU interrupts. But masking or unmasking a particular
> > interrupt for a specific CPU is possible only from that CPU. CPU 0 just
> > cannot move an interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> > interrupt, and CPU 1 has to unmask it.
>
> And that's no different from other per-CPU interrupts that have the
> exact same requirements. NAK to this sort of hack.
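>
> For reference, the usual consumer-side pattern for a per-CPU interrupt
> looks roughly like this (sketch only; it assumes the irqchip sets these
> interrupts up with irq_set_percpu_devid(), and the irq number, handler
> and percpu cookie names below are made up):
>
> static irqreturn_t soc_err_handler(int irq, void *dev_id)
> {
> 	/* per-CPU handler, runs on the CPU that took the error */
> 	return IRQ_HANDLED;
> }
>
> static void soc_err_enable_on_cpu(void *info)
> {
> 	/* enable_percpu_irq() only acts on the CPU it runs on */
> 	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
> }
>
> static int soc_err_setup(unsigned int irq, void __percpu *pcpu_cookie)
> {
> 	int err = request_percpu_irq(irq, soc_err_handler, "soc-error",
> 				     pcpu_cookie);
> 	if (!err)
> 		on_each_cpu(soc_err_enable_on_cpu, &irq, true);
> 	return err;
> }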
You forgot to mention in your previous email how to do it, right? So we
are waiting...