[PATCH v3 15/15] blk-mq: use hk cpus only when isolcpus=io_queue is enabled
Ming Lei
ming.lei at redhat.com
Fri Aug 9 08:23:58 PDT 2024
On Tue, Aug 06, 2024 at 02:06:47PM +0200, Daniel Wagner wrote:
> When isolcpus=io_queue is enabled, all hardware queues should run on
> the housekeeping CPUs only, so ignore the affinity mask provided by
> the driver. Also, we can't use blk_mq_map_queues because it maps all
> CPUs to the first hctx unless a CPU matches the affinity the hctx is
> set to, e.g. with 8 CPUs and an isolcpus=io_queue,2-3,6-7 config:
>
> queue mapping for /dev/nvme0n1
> hctx0: default 2 3 4 6 7
> hctx1: default 5
> hctx2: default 0
> hctx3: default 1
>
> PCI name is 00:05.0: nvme0n1
> irq 57 affinity 0-1 effective 1 is_managed:0 nvme0q0
> irq 58 affinity 4 effective 4 is_managed:1 nvme0q1
> irq 59 affinity 5 effective 5 is_managed:1 nvme0q2
> irq 60 affinity 0 effective 0 is_managed:1 nvme0q3
> irq 61 affinity 1 effective 1 is_managed:1 nvme0q4
>
> whereas with blk_mq_hk_map_queues we get:
>
> queue mapping for /dev/nvme0n1
> hctx0: default 2 4
> hctx1: default 3 5
> hctx2: default 0 6
> hctx3: default 1 7
>
> PCI name is 00:05.0: nvme0n1
> irq 56 affinity 0-1 effective 1 is_managed:0 nvme0q0
> irq 61 affinity 4 effective 4 is_managed:1 nvme0q1
> irq 62 affinity 5 effective 5 is_managed:1 nvme0q2
> irq 63 affinity 0 effective 0 is_managed:1 nvme0q3
> irq 64 affinity 1 effective 1 is_managed:1 nvme0q4
>
> Signed-off-by: Daniel Wagner <dwagner at suse.de>
> ---
> block/blk-mq-cpumap.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 56 insertions(+)
>
> diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
> index c1277763aeeb..7e026c2ffa02 100644
> --- a/block/blk-mq-cpumap.c
> +++ b/block/blk-mq-cpumap.c
> @@ -60,11 +60,64 @@ unsigned int blk_mq_num_online_queues(unsigned int max_queues)
> }
> EXPORT_SYMBOL_GPL(blk_mq_num_online_queues);
>
> +static bool blk_mq_hk_map_queues(struct blk_mq_queue_map *qmap)
> +{
> + struct cpumask *hk_masks;
> + cpumask_var_t isol_mask;
> +
> + unsigned int queue, cpu;
> +
> + if (!housekeeping_enabled(HK_TYPE_IO_QUEUE))
> + return false;
> +
> + /* map housekeeping cpus to matching hardware context */
> + hk_masks = group_cpus_evenly(qmap->nr_queues);
> + if (!hk_masks)
> + goto fallback;
> +
> + for (queue = 0; queue < qmap->nr_queues; queue++) {
> + for_each_cpu(cpu, &hk_masks[queue])
> + qmap->mq_map[cpu] = qmap->queue_offset + queue;
> + }
> +
> + kfree(hk_masks);
> +
> + /* map isolcpus to hardware context */
> + if (!alloc_cpumask_var(&isol_mask, GFP_KERNEL))
> + goto fallback;
> +
> + queue = 0;
> + cpumask_andnot(isol_mask,
> + cpu_possible_mask,
> + housekeeping_cpumask(HK_TYPE_IO_QUEUE));
> +
> + for_each_cpu(cpu, isol_mask) {
> + qmap->mq_map[cpu] = qmap->queue_offset + queue;
> + queue = (queue + 1) % qmap->nr_queues;
> + }
> +
> + free_cpumask_var(isol_mask);
> +
> + return true;
> +
> +fallback:
> + /* map all cpus to hardware context ignoring any affinity */
> + queue = 0;
> + for_each_possible_cpu(cpu) {
> + qmap->mq_map[cpu] = qmap->queue_offset + queue;
> + queue = (queue + 1) % qmap->nr_queues;
> + }
> + return true;
> +}
> +
> void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
> {
> const struct cpumask *masks;
> unsigned int queue, cpu;
>
> + if (blk_mq_hk_map_queues(qmap))
> + return;
> +
> masks = group_cpus_evenly(qmap->nr_queues);
> if (!masks) {
> for_each_possible_cpu(cpu)
> @@ -118,6 +171,9 @@ void blk_mq_dev_map_queues(struct blk_mq_queue_map *qmap,
> const struct cpumask *mask;
> unsigned int queue, cpu;
>
> + if (blk_mq_hk_map_queues(qmap))
> + return;
> +
> for (queue = 0; queue < qmap->nr_queues; queue++) {
> mask = get_queue_affinity(dev_data, dev_off, queue);
> if (!mask)
From the above implementation, "isolcpus=io_queue" is really just an
optimization of "isolcpus=managed_irq"; there is no essential
difference between the two.
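
E.g. on the 8-CPU box from the commit message above, the two setups
differ only in the flag name on the kernel command line:

    isolcpus=io_queue,2-3,6-7
    isolcpus=managed_irq,2-3,6-7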
I'd instead suggest optimizing 'isolcpus=managed_irq' directly, such as:
- reduce nr_queues or numgrps for group_cpus_evenly() according to the
  housekeeping CPU mask
- spread the housekeeping & isolated CPU masks evenly over each queue;
  the existing two-stage spread can be reused for that (rough sketch
  below)
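
Something along these lines (an untested sketch only: the helper name
blk_mq_map_hk_evenly() is made up, and a real version would need the
same fallback handling as the existing mapping code; it only relies on
the existing group_cpus_evenly() and housekeeping_cpumask() interfaces):

static bool blk_mq_map_hk_evenly(struct blk_mq_queue_map *qmap)
{
        const struct cpumask *hk_mask =
                        housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
        unsigned int numgrps, queue, cpu;
        struct cpumask *masks;

        /* never create more groups than there are housekeeping CPUs */
        numgrps = min_t(unsigned int, qmap->nr_queues,
                        cpumask_weight(hk_mask));
        if (!numgrps)
                return false;

        masks = group_cpus_evenly(numgrps);
        if (!masks)
                return false;           /* let the caller fall back */

        /* stage 1: housekeeping CPUs keep their evenly spread group */
        for (queue = 0; queue < numgrps; queue++)
                for_each_cpu_and(cpu, &masks[queue], hk_mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;

        /* stage 2: isolated CPUs go round-robin over the same groups */
        queue = 0;
        for_each_cpu_andnot(cpu, cpu_possible_mask, hk_mask) {
                qmap->mq_map[cpu] = qmap->queue_offset + queue;
                queue = (queue + 1) % numgrps;
        }

        kfree(masks);
        return true;
}

The point being that the managed_irq housekeeping mask alone is enough
to drive the mapping, without introducing a new HK_TYPE_IO_QUEUE
housekeeping type.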
thanks,
Ming