[PATCH 2/3] Provide and use an always inline version of finish_task_switch
Xie Yuanbin
qq570070308 at gmail.com
Fri Oct 24 11:35:40 PDT 2025
finish_task_switch() is called on every context switch, so inlining it
into its hot-path caller can bring a small performance benefit.
Add an always-inline variant, finish_task_switch_ainline(), to be called
on the context-switch path, and keep the original out-of-line version for
the remaining callers, to limit the impact on code size.
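
For illustration only, a minimal standalone sketch of the pattern used
here (generic names, not the actual scheduler code): the hot path calls
the __always_inline variant directly, while a small out-of-line wrapper
keeps an ordinary function for every other caller. In the kernel,
__always_inline comes from the compiler attribute headers; it is defined
locally below only so the sketch compiles on its own.

#include <stdio.h>

/* Standalone stand-in; the kernel provides this in its compiler headers. */
#ifndef __always_inline
#define __always_inline inline __attribute__((__always_inline__))
#endif

/* Hot-path variant: forced inline into its few performance-critical callers. */
static __always_inline int do_work_ainline(int x)
{
	return x * 2;	/* stands in for the real function body */
}

/* Out-of-line wrapper: one ordinary copy for all remaining callers,
 * so text size does not grow at every call site. */
static int do_work(int x)
{
	return do_work_ainline(x);
}

int main(void)
{
	printf("%d\n", do_work(21));	/* prints 42 */
	return 0;
}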
Signed-off-by: Xie Yuanbin <qq570070308 at gmail.com>
---
kernel/sched/core.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1842285eac1e..6cb3f57c4d35 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5069,21 +5069,21 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*
* The context switch have flipped the stack from under us and restored the
* local variables which were saved when this task called schedule() in the
* past. 'prev == current' is still correct but we need to recalculate this_rq
* because prev may have moved to another CPU.
*/
-static struct rq *finish_task_switch(struct task_struct *prev)
+static __always_inline struct rq *finish_task_switch_ainline(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
struct mm_struct *mm = rq->prev_mm;
unsigned int prev_state;
/*
* The previous task will have left us with a preempt_count of 2
* because it left us after:
*
@@ -5153,20 +5153,25 @@ static struct rq *finish_task_switch(struct task_struct *prev)
/* Task is done with its stack. */
put_task_stack(prev);
put_task_struct_rcu_user(prev);
}
return rq;
}
+static struct rq *finish_task_switch(struct task_struct *prev)
+{
+ return finish_task_switch_ainline(prev);
+}
+
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage __visible void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
/*
* New tasks start with FORK_PREEMPT_COUNT, see there and
* finish_task_switch() for details.
@@ -5247,21 +5252,21 @@ context_switch(struct rq *rq, struct task_struct *prev,
/* switch_mm_cid() requires the memory barriers above. */
switch_mm_cid(rq, prev, next);
prepare_lock_switch(rq, next, rf);
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
- return finish_task_switch(prev);
+ return finish_task_switch_ainline(prev);
}
/*
* nr_running and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, total number of context switches performed since bootup.
*/
unsigned int nr_running(void)
{
--
2.51.0