[PATCH] nvme: convert little endian NVME definitions to native format

Tokunori Ikegami ikegami.t at gmail.com
Tue Sep 17 07:31:48 PDT 2024


Yes, I just confirmed that the definition value is actually in native byte order 
on the big-endian environment, as you mentioned.
I'm very sorry — my first check was incorrect. So let me drop the patch.

Regards,
Ikegami

On 2024/09/17 3:12, Caleb Sander wrote:
> On Mon, Sep 16, 2024 at 9:41 AM Tokunori Ikegami <ikegami.t at gmail.com> wrote:
>> For example, I think the definition below is not in native byte order.
>>       NVME_CTRL_ONCS_TIMESTAMP        = 1 << 6,
> Why not? CPUs assume native byte order for all arithmetic operations.
> The fact that you could print this value implies it must be
> represented in native byte order. Non-native endianness is only
> relevant when interacting with byte-addressable data external to the
> CPU (storage, network, etc.).
>
>> On 2024/09/17 1:12, Caleb Sander wrote:
>>> Why is this necessary? All these values obtained from the Identify
>>> Controller data structure are already stored in the CPU's native byte
>>> order. So testing them against native byte order constants looks
>>> correct.
>>>
>>> ctrl->oacs = le16_to_cpu(id->oacs);
>>> ctrl->oncs = le16_to_cpu(id->oncs);
>>> ctrl->mtfa = le16_to_cpu(id->mtfa);
>>> ctrl->oaes = le32_to_cpu(id->oaes);
>>> // ...
>>> ctrl->ctratt = le32_to_cpu(id->ctratt);
>>>
>>> On Mon, Sep 16, 2024 at 8:46 AM Tokunori Ikegami <ikegami.t at gmail.com> wrote:
>>>> This is to compare the definitions correctly with the converted values.
>>>>
>>>> Signed-off-by: Tokunori Ikegami <ikegami.t at gmail.com>
>>>> ---
>>>>    drivers/nvme/host/core.c | 20 ++++++++++----------
>>>>    drivers/nvme/host/nvme.h |  4 ++--
>>>>    drivers/nvme/host/pci.c  |  2 +-
>>>>    3 files changed, 13 insertions(+), 13 deletions(-)
>>>>
>>>> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
>>>> index ca9959a8fb9e..5e26546e811a 100644
>>>> --- a/drivers/nvme/host/core.c
>>>> +++ b/drivers/nvme/host/core.c
>>>> @@ -1273,7 +1273,7 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
>>>>            * command completion can postpone sending a keep alive command
>>>>            * by up to twice the delay between runs.
>>>>            */
>>>> -       if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
>>>> +       if (ctrl->ctratt & le32_to_cpu(NVME_CTRL_ATTR_TBKAS))
>>>>                   delay /= 2;
>>>>           return delay;
>>>>    }
>>>> @@ -1343,7 +1343,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
>>>>
>>>>           ctrl->ka_last_check_time = jiffies;
>>>>
>>>> -       if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
>>>> +       if ((ctrl->ctratt & le32_to_cpu(NVME_CTRL_ATTR_TBKAS)) && comp_seen) {
>>>>                   dev_dbg(ctrl->device,
>>>>                           "reschedule traffic based keep-alive timer\n");
>>>>                   ctrl->comp_seen = false;
>>>> @@ -1699,7 +1699,7 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
>>>>
>>>>    static void nvme_enable_aen(struct nvme_ctrl *ctrl)
>>>>    {
>>>> -       u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
>>>> +       u32 result, supported_aens = ctrl->oaes & le32_to_cpu(NVME_AEN_SUPPORTED);
>>>>           int status;
>>>>
>>>>           if (!supported_aens)
>>>> @@ -1829,7 +1829,7 @@ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
>>>>           if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
>>>>                   lim->max_hw_discard_sectors =
>>>>                           nvme_lba_to_sect(ns->head, ctrl->dmrsl);
>>>> -       else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
>>>> +       else if (ctrl->oncs & le16_to_cpu(NVME_CTRL_ONCS_DSM))
>>>>                   lim->max_hw_discard_sectors = UINT_MAX;
>>>>           else
>>>>                   lim->max_hw_discard_sectors = 0;
>>>> @@ -1913,7 +1913,7 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
>>>>           if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
>>>>                   return;
>>>>
>>>> -       if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
>>>> +       if (nvm && (ctrl->ctratt & le32_to_cpu(NVME_CTRL_ATTR_ELBAS))) {
>>>>                   nvme_configure_pi_elbas(head, id, nvm);
>>>>           } else {
>>>>                   head->pi_size = sizeof(struct t10_pi_tuple);
>>>> @@ -2137,7 +2137,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
>>>>           }
>>>>           lbaf = nvme_lbaf_index(id->flbas);
>>>>
>>>> -       if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
>>>> +       if (ns->ctrl->ctratt & le32_to_cpu(NVME_CTRL_ATTR_ELBAS)) {
>>>>                   ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
>>>>                   if (ret < 0)
>>>>                           goto out;
>>>> @@ -2341,7 +2341,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
>>>>
>>>>    static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
>>>>    {
>>>> -       if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
>>>> +       if (ctrl->oacs & le16_to_cpu(NVME_CTRL_OACS_SEC_SUPP)) {
>>>>                   if (!ctrl->opal_dev)
>>>>                           ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
>>>>                   else if (was_suspended)
>>>> @@ -2520,7 +2520,7 @@ static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
>>>>           __le64 ts;
>>>>           int ret;
>>>>
>>>> -       if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
>>>> +       if (!(ctrl->oncs & le16_to_cpu(NVME_CTRL_ONCS_TIMESTAMP)))
>>>>                   return 0;
>>>>
>>>>           ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
>>>> @@ -2541,7 +2541,7 @@ static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
>>>>           /* Don't bother enabling the feature if retry delay is not reported */
>>>>           if (ctrl->crdt[0])
>>>>                   acre = NVME_ENABLE_ACRE;
>>>> -       if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
>>>> +       if (ctrl->ctratt & le32_to_cpu(NVME_CTRL_ATTR_ELBAS))
>>>>                   lbafee = NVME_ENABLE_LBAFEE;
>>>>
>>>>           if (!acre && !lbafee)
>>>> @@ -3109,7 +3109,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
>>>>            * controllers max_hw_sectors value, which is based on the MDTS field
>>>>            * and possibly other limiting factors.
>>>>            */
>>>> -       if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
>>>> +       if ((ctrl->oncs & le16_to_cpu(NVME_CTRL_ONCS_WRITE_ZEROES)) &&
>>>>               !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
>>>>                   ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
>>>>           else
>>>> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
>>>> index 313a4f978a2c..25886b9d2796 100644
>>>> --- a/drivers/nvme/host/nvme.h
>>>> +++ b/drivers/nvme/host/nvme.h
>>>> @@ -860,9 +860,9 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
>>>>                   struct nvme_ns_head *head)
>>>>    {
>>>>           return head->shared ||
>>>> -               (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
>>>> +               (ctrl->oacs & le16_to_cpu(NVME_CTRL_OACS_NS_MNGT_SUPP)) ||
>>>>                   (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
>>>> -               (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
>>>> +               (ctrl->ctratt & le32_to_cpu(NVME_CTRL_CTRATT_NVM_SETS));
>>>>    }
>>>>
>>>>    /*
>>>> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
>>>> index 7990c3f22ecf..f8fa424fa952 100644
>>>> --- a/drivers/nvme/host/pci.c
>>>> +++ b/drivers/nvme/host/pci.c
>>>> @@ -249,7 +249,7 @@ static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
>>>>    {
>>>>           unsigned int mem_size = nvme_dbbuf_size(dev);
>>>>
>>>> -       if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
>>>> +       if (!(dev->ctrl.oacs & le16_to_cpu(NVME_CTRL_OACS_DBBUF_SUPP)))
>>>>                   return;
>>>>
>>>>           if (dev->dbbuf_dbs) {
>>>> --
>>>> 2.43.0
>>>>
>>>>



More information about the Linux-nvme mailing list