[PATCH v14 11/18] irqchip: Add RISC-V incoming MSI controller early driver

Björn Töpel bjorn at kernel.org
Thu Feb 22 05:13:33 PST 2024


Anup Patel <apatel at ventanamicro.com> writes:

> diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
> new file mode 100644
> index 000000000000..0c19ffb9ca3e
> --- /dev/null
> +++ b/drivers/irqchip/irq-riscv-imsic-state.c
> @@ -0,0 +1,870 @@

[...]

> +static void __imsic_local_sync(struct imsic_local_priv *lpriv)
> +{
> +	struct imsic_local_config *mlocal;
> +	struct imsic_vector *vec, *mvec;
> +	int i;
> +
> +	lockdep_assert_held(&lpriv->lock);
> +
> +	/* This pairs with the barrier in __imsic_remote_sync(). */
> +	smp_mb();

I'm trying to figure out why this barrier is needed. All the updates are
done under the spinlock. If there are ordering constraints that I'm
missing, please document them.
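
To spell out the pattern I see (a minimal sketch, using
imsic_vector_mask() further down as the writer side):

	/* Writer side, e.g. imsic_vector_mask(): */
	raw_spin_lock(&lpriv->lock);
	WRITE_ONCE(vec->enable, false);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	raw_spin_unlock(&lpriv->lock);		/* release */

	/* Reader side, this function, with lpriv->lock held: */
	lockdep_assert_held(&lpriv->lock);	/* acquire done by the caller */
	for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1)
		/* ... */;

The unlock/lock transition on the same lock already orders the writer's
stores before the reads here, which is why the smp_mb() looks redundant
to me.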

> +
> +	for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
> +		if (!i || i == IMSIC_IPI_ID)
> +			goto skip;
> +		vec = &lpriv->vectors[i];
> +
> +		if (READ_ONCE(vec->enable))
> +			__imsic_id_set_enable(i);
> +		else
> +			__imsic_id_clear_enable(i);
> +
> +		/*
> +		 * If the ID was being moved to a new ID on some other CPU
> +		 * then we can get a MSI during the movement so check the
> +		 * ID pending bit and re-trigger the new ID on other CPU
> +		 * using MMIO write.
> +		 */
> +		mvec = READ_ONCE(vec->move);
> +		WRITE_ONCE(vec->move, NULL);

mvec = xchg(&vec->move, NULL) ?
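
That would collapse the load and the clear into a single atomic swap
(vec->move is a pointer, so xchg() works on it):

	/* Atomically fetch the pending move target and clear it. */
	mvec = xchg(&vec->move, NULL);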

> +		if (mvec && mvec != vec) {
> +			if (__imsic_id_read_clear_pending(i)) {
> +				mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
> +				writel_relaxed(mvec->local_id, mlocal->msi_va);
> +			}
> +
> +			imsic_vector_free(&lpriv->vectors[i]);
> +		}
> +
> +skip:
> +		bitmap_clear(lpriv->dirty_bitmap, i, 1);
> +	}
> +}
> +
> +void imsic_local_sync_all(void)
> +{
> +	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
> +	unsigned long flags;
> +
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +	bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
> +	__imsic_local_sync(lpriv);
> +	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
> +}
> +
> +void imsic_local_delivery(bool enable)
> +{
> +	if (enable) {
> +		imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
> +		imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
> +		return;
> +	}
> +
> +	imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
> +	imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
> +}
> +
> +#ifdef CONFIG_SMP
> +static void imsic_local_timer_callback(struct timer_list *timer)
> +{
> +	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
> +	unsigned long flags;
> +
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +	__imsic_local_sync(lpriv);
> +	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
> +}
> +
> +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
> +{
> +	lockdep_assert_held(&lpriv->lock);
> +
> +	/*
> +	 * Ensure that changes to vector enable, vector move and
> +	 * dirty bitmap are visible to the target CPU.

...in which case the spinlock(s) should be enough, no?

> +	 *
> +	 * This pairs with the barrier in __imsic_local_sync().
> +	 */
> +	smp_mb();
> +
> +	/*
> +	 * We schedule a timer on the target CPU if the target CPU is not
> +	 * same as the current CPU. An offline CPU will unconditionally
> +	 * synchronize IDs through imsic_starting_cpu() when the
> +	 * CPU is brought up.
> +	 */
> +	if (cpu_online(cpu)) {
> +		if (cpu == smp_processor_id()) {
> +			__imsic_local_sync(lpriv);
> +			return;
> +		}

Maybe move this if-clause out of the cpu_online() check, and only keep
something like
  if (cpu_online(cpu) && !timer_pending(&lpriv->timer)) { ... }
inside the CONFIG_SMP guard (see the sketch below)...

> +
> +		if (!timer_pending(&lpriv->timer)) {
> +			lpriv->timer.expires = jiffies + 1;
> +			add_timer_on(&lpriv->timer, cpu);
> +		}
> +	}
> +}
> +#else
> +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
> +{
> +	lockdep_assert_held(&lpriv->lock);
> +	__imsic_local_sync(lpriv);
> +}
> +#endif

...where we can get rid of the special !SMP variant of this function
altogether.
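
A rough sketch of what I have in mind (untested; on !SMP the
cpu == smp_processor_id() check is always true, so the timer leg can
stay behind the guard):

	static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
	{
		lockdep_assert_held(&lpriv->lock);

		if (cpu == smp_processor_id()) {
			__imsic_local_sync(lpriv);
			return;
		}

	#ifdef CONFIG_SMP
		/*
		 * An offline CPU will sync unconditionally via
		 * imsic_starting_cpu() when it is brought up.
		 */
		if (cpu_online(cpu) && !timer_pending(&lpriv->timer)) {
			lpriv->timer.expires = jiffies + 1;
			add_timer_on(&lpriv->timer, cpu);
		}
	#endif
	}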

> +
> +void imsic_vector_mask(struct imsic_vector *vec)
> +{
> +	struct imsic_local_priv *lpriv;
> +
> +	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
> +	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
> +		return;
> +
> +	/*
> +	 * This function is called through Linux irq subsystem with
> +	 * irqs disabled so no need to save/restore irq flags.
> +	 */
> +
> +	raw_spin_lock(&lpriv->lock);
> +
> +	vec->enable = false;

Should be a WRITE_ONCE() to keep the checkers (e.g. KCSAN) happy, since
__imsic_local_sync() reads this with READ_ONCE().
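
i.e. something like:

	/* Pairs with READ_ONCE(vec->enable) in __imsic_local_sync(). */
	WRITE_ONCE(vec->enable, false);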

> +	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
> +	__imsic_remote_sync(lpriv, vec->cpu);
> +
> +	raw_spin_unlock(&lpriv->lock);
> +}
> +
> +void imsic_vector_unmask(struct imsic_vector *vec)
> +{
> +	struct imsic_local_priv *lpriv;
> +
> +	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
> +	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
> +		return;
> +
> +	/*
> +	 * This function is called through Linux irq subsystem with
> +	 * irqs disabled so no need to save/restore irq flags.
> +	 */
> +
> +	raw_spin_lock(&lpriv->lock);
> +
> +	vec->enable = true;

Ditto for the store above.

> +	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
> +	__imsic_remote_sync(lpriv, vec->cpu);
> +
> +	raw_spin_unlock(&lpriv->lock);
> +}
> +
> +static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
> +				     bool new_enable, struct imsic_vector *new_move)
> +{
> +	unsigned long flags;
> +	bool enabled;
> +
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +
> +	/* Update enable and move details */
> +	enabled = READ_ONCE(vec->enable);
> +	WRITE_ONCE(vec->enable, new_enable);

Again, xchg() would be easier to read.
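
Something like (with the caveat that vec->enable would need a type that
riscv's xchg() supports, i.e. 32 or 64 bits wide):

	/* Atomically publish the new enable state and fetch the old one. */
	enabled = xchg(&vec->enable, new_enable);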


Björn
