[PATCH v6 2/4] firmware: ti_sci: add support for restoring IRQs during resume
Thomas Richard
thomas.richard at bootlin.com
Tue May 5 06:16:17 PDT 2026
On 5/5/26 1:32 PM, Nishanth Menon wrote:
> On 14:21-20260427, Thomas Richard (TI) wrote:
>
> [..]
>
>> /**
>> * ti_sci_set_irq() - Helper api to configure the irq route between the
>> * requested source and destination
>> @@ -2324,15 +2363,43 @@ static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
>> u16 dst_host_irq, u16 ia_id, u16 vint,
>> u16 global_event, u8 vint_status_bit, u8 s_host)
>> {
>> + struct ti_sci_info *info = handle_to_ti_sci_info(handle);
>> + struct ti_sci_msg_req_manage_irq *desc;
>> + struct ti_sci_irq *irq;
>> + int ret;
>> +
>> pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
>> __func__, valid_params, src_id, src_index,
>> dst_id, dst_host_irq, ia_id, vint, global_event,
>> vint_status_bit);
>>
>> - return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
>> - dst_id, dst_host_irq, ia_id, vint,
>> - global_event, vint_status_bit, s_host,
>> - TI_SCI_MSG_SET_IRQ);
>> + ret = ti_sci_manage_irq(handle, valid_params, src_id, src_index,
>> + dst_id, dst_host_irq, ia_id, vint,
>> + global_event, vint_status_bit, s_host,
>> + TI_SCI_MSG_SET_IRQ);
>> +
>> + if (ret || !(info->fw_caps & MSG_FLAG_CAPS_LPM_IRQ_CONTEXT_LOST))
>> + return ret;
>> +
>> + irq = kzalloc_obj(*irq, GFP_KERNEL);
>> + if (!irq)
>> + return -ENOMEM;
>
> Do we need to handle cleanup of ti_sci_manage_irq if the allocation fails?
Yes, cleanup is needed to keep the hash list and the allocated IRQs consistent.
>
>> +
>> + desc = &irq->desc;
>> + desc->valid_params = valid_params;
>> + desc->src_id = src_id;
>> + desc->src_index = src_index;
>> + desc->dst_id = dst_id;
>> + desc->dst_host_irq = dst_host_irq;
>> + desc->ia_id = ia_id;
>> + desc->vint = vint;
>> + desc->global_event = global_event;
>> + desc->vint_status_bit = vint_status_bit;
>> + desc->secondary_host = s_host;
>> +
>> + hash_add(info->irqs, &irq->node, ti_sci_irq_hash(desc));
>
> No locking? set_irq can be invoked in parallel paths, no?
> Further, shouldn't we check if the same src_id and src_index is already
> present before adding to hash list?
The call to ti_sci_manage_irq(TI_SCI_MSG_SET_IRQ) acts as the lock. If it
succeeds, we have to add the IRQ to the hash list.
Can set_irq() and free_irq() be invoked in parallel paths? In this case
maybe I should add a lock for set_irq() and free_irq().
>
>> +
>> + return 0;
>> }
>>
>> /**
>> @@ -2358,15 +2425,46 @@ static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
>> u16 dst_host_irq, u16 ia_id, u16 vint,
>> u16 global_event, u8 vint_status_bit, u8 s_host)
>> {
>> + struct ti_sci_info *info = handle_to_ti_sci_info(handle);
>> + struct ti_sci_msg_req_manage_irq irq_desc;
>> + struct ti_sci_irq *this_irq;
>> + struct hlist_node *tmp_node;
>> + int ret;
>> +
>> pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
>> __func__, valid_params, src_id, src_index,
>> dst_id, dst_host_irq, ia_id, vint, global_event,
>> vint_status_bit);
>>
>> - return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
>> - dst_id, dst_host_irq, ia_id, vint,
>> - global_event, vint_status_bit, s_host,
>> - TI_SCI_MSG_FREE_IRQ);
>> + ret = ti_sci_manage_irq(handle, valid_params, src_id, src_index,
>> + dst_id, dst_host_irq, ia_id, vint,
>> + global_event, vint_status_bit, s_host,
>> + TI_SCI_MSG_FREE_IRQ);
>> +
>> + if (ret || !(info->fw_caps & MSG_FLAG_CAPS_LPM_IRQ_CONTEXT_LOST))
>> + return ret;
>> +
>> + irq_desc.valid_params = valid_params;
>> + irq_desc.src_id = src_id;
>> + irq_desc.src_index = src_index;
>> + irq_desc.dst_id = dst_id;
>> + irq_desc.dst_host_irq = dst_host_irq;
>> + irq_desc.ia_id = ia_id;
>> + irq_desc.vint = vint;
>> + irq_desc.global_event = global_event;
>> + irq_desc.vint_status_bit = vint_status_bit;
>> + irq_desc.secondary_host = s_host;
>> +
>> + hash_for_each_possible_safe(info->irqs, this_irq, tmp_node, node,
>> + ti_sci_irq_hash(&irq_desc)) {
>> + if (ti_sci_irq_equal(&irq_desc, &this_irq->desc)) {
>> + hlist_del(&this_irq->node);
>> + kfree(this_irq);
>> + return 0;
>> + }
>> + }
>> +
>
> We should ideally not be here, correct? Add a dev_warn?
Yes, I will add a dev_warn there.
>
>> + return 0;
>> }
>>
>> /**
>> @@ -3847,7 +3945,10 @@ static int ti_sci_suspend_noirq(struct device *dev)
>> static int ti_sci_resume_noirq(struct device *dev)
>> {
>> struct ti_sci_info *info = dev_get_drvdata(dev);
>> - int ret = 0;
>> + struct ti_sci_msg_req_manage_irq *irq_desc;
>> + struct ti_sci_irq *irq;
>> + struct hlist_node *tmp_node;
>> + int ret = 0, i;
>> u32 source;
>> u64 time;
>> u8 pin;
>> @@ -3859,6 +3960,32 @@ static int ti_sci_resume_noirq(struct device *dev)
>> return ret;
>> }
>>
>> + switch (pm_suspend_target_state) {
>> + case PM_SUSPEND_MEM:
>> + if (info->fw_caps & MSG_FLAG_CAPS_LPM_IRQ_CONTEXT_LOST) {
>> + hash_for_each_safe(info->irqs, i, tmp_node, irq, node) {
>> + irq_desc = &irq->desc;
>> + ret = ti_sci_manage_irq(&info->handle,
>> + irq_desc->valid_params,
>> + irq_desc->src_id,
>> + irq_desc->src_index,
>> + irq_desc->dst_id,
>> + irq_desc->dst_host_irq,
>> + irq_desc->ia_id,
>> + irq_desc->vint,
>> + irq_desc->global_event,
>> + irq_desc->vint_status_bit,
>> + irq_desc->secondary_host,
>> + TI_SCI_MSG_SET_IRQ);
>> + if (ret)
>> + return ret;
>
> Do you want to attempt to restore the rest of the entries rather than give
> up on the first fail? Maybe just log the error for debug and attempt the
> rest?
In this case, if I get more than one error, what value should I return?
Best Regards,
Thomas
More information about the linux-arm-kernel
mailing list