[PATCH 1/2] block: add max_open_zones to blk-sysfs
Damien Le Moal
Damien.LeMoal at wdc.com
Fri Jul 3 00:56:30 EDT 2020
On 2020/07/02 21:37, Niklas Cassel wrote:
> On Tue, Jun 30, 2020 at 01:49:41AM +0000, Damien Le Moal wrote:
>> On 2020/06/16 19:28, Niklas Cassel wrote:
>>> diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
>>> index c08f6281b614..af156529f3b6 100644
>>> --- a/drivers/nvme/host/zns.c
>>> +++ b/drivers/nvme/host/zns.c
>>> @@ -82,6 +82,7 @@ int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
>>>
>>> q->limits.zoned = BLK_ZONED_HM;
>>> blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
>>> + blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
>>> free_data:
>>> kfree(id);
>>> return status;
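
A side note while we are here: the +1 above is needed because MOR (Maximum
Open Resources) is a 0's based value in the ZNS specification. It also does
the right thing for the "no limit" encoding: a MOR of 0xFFFFFFFF wraps
around to 0, which is exactly the value this series uses to mean "no limit"
in sysfs. A standalone sketch of the conversion, for illustration only (the
helper name is made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: convert a 0's based ZNS resource field (MOR/MAR)
 * to the queue limit convention, where 0 means "no limit".
 */
static uint32_t zns_limit_from_0s_based(uint32_t field)
{
	/* 0xFFFFFFFF ("no limit" per the ZNS spec) wraps to 0 here. */
	return field + 1;
}

int main(void)
{
	printf("%u\n", zns_limit_from_0s_based(13));         /* 14 */
	printf("%u\n", zns_limit_from_0s_based(0xFFFFFFFF)); /* 0 */
	return 0;
}
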
>>> diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
>>> index 183a20720da9..aa3564139b40 100644
>>> --- a/drivers/scsi/sd_zbc.c
>>> +++ b/drivers/scsi/sd_zbc.c
>>> @@ -717,6 +717,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
>>> /* The drive satisfies the kernel restrictions: set it up */
>>> blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
>>> blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
>>> + if (sdkp->zones_max_open == U32_MAX)
>>> + blk_queue_max_open_zones(q, 0);
>>> + else
>>> + blk_queue_max_open_zones(q, sdkp->zones_max_open);
>>
>> This is correct only for host-managed drives. Host-aware models define the
>> "OPTIMAL NUMBER OF OPEN SEQUENTIAL WRITE PREFERRED ZONES" instead of a maximum
>> number of open sequential write required zones.
>>
>> Since the standard does not explicitly define what the maximum number of
>> open sequential write required zones should be for a host-aware drive, I
>> would suggest always setting the max_open_zones value to 0 for host-aware
>> disks.
>
> Isn't this already the case?
>
> At least according to the comments:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/sd_zbc.c?h=v5.8-rc3#n555
>
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/sd_zbc.c?h=v5.8-rc3#n561
>
> We seem to set
>
> sdkp->zones_max_open = 0;
>
> for host-aware, and
>
> sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
>
> for host-managed.
>
> So the blk_queue_max_open_zones(q, sdkp->zones_max_open) call in
> sd_zbc_read_zones() should already export this new sysfs property
> as 0 for host-aware disks.
Oh, yes! You are absolutely right. I forgot about that code :)
Please disregard this comment.
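
For reference, the code behind those two links looks roughly like this
(paraphrasing from v5.8-rc3, not an exact quote):

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

So host-aware disks do end up with zones_max_open == 0, and the new sysfs
attribute will correctly report 0 ("no limit") for them.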
>
>
> Kind regards,
> Niklas
>
>>
>>> nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
>>>
>>> /* READ16/WRITE16 is mandatory for ZBC disks */
>>> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
>>> index 8fd900998b4e..2f332f00501d 100644
>>> --- a/include/linux/blkdev.h
>>> +++ b/include/linux/blkdev.h
>>> @@ -520,6 +520,7 @@ struct request_queue {
>>> unsigned int nr_zones;
>>> unsigned long *conv_zones_bitmap;
>>> unsigned long *seq_zones_wlock;
>>> + unsigned int max_open_zones;
>>> #endif /* CONFIG_BLK_DEV_ZONED */
>>>
>>> /*
>>> @@ -729,6 +730,17 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
>>> return true;
>>> return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
>>> }
>>> +
>>> +static inline void blk_queue_max_open_zones(struct request_queue *q,
>>> + unsigned int max_open_zones)
>>> +{
>>> + q->max_open_zones = max_open_zones;
>>> +}
>>> +
>>> +static inline unsigned int queue_max_open_zones(const struct request_queue *q)
>>> +{
>>> + return q->max_open_zones;
>>> +}
>>> #else /* CONFIG_BLK_DEV_ZONED */
>>> static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
>>> {
>>> @@ -744,6 +756,14 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q,
>>> {
>>> return 0;
>>> }
>>> +static inline void blk_queue_max_open_zones(struct request_queue *q,
>>> + unsigned int max_open_zones)
>>> +{
>>> +}
>>
>> Why is this one necessary? For the !CONFIG_BLK_DEV_ZONED case, no driver should
>> ever call this function.
>
> Will remove in v2.
>
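
For completeness, since the subject line says blk-sysfs: I assume the sysfs
side of this patch boils down to a read-only attribute along these lines in
block/blk-sysfs.c (a sketch, not the actual hunk):

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static struct queue_sysfs_entry queue_max_open_zones_entry = {
	.attr = {.name = "max_open_zones", .mode = 0444 },
	.show = queue_max_open_zones_show,
};

plus the matching entry in the queue attribute list, so that userspace can
read /sys/block/<disk>/queue/max_open_zones.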
--
Damien Le Moal
Western Digital Research