[PATCH v2] riscv: make update_mmu_cache to support asid

Jinyu Tang tjytimi at 163.com
Tue Sep 6 03:57:56 PDT 2022


Thanks for your guidance.^ ^

Sincerely yours,
Jinyu

At 2022-09-04 21:49:31, Conor.Dooley at microchip.com wrote:
>On 04/09/2022 14:37, Jinyu Tang wrote:
>> The `update_mmu_cache` function in riscv flushes the tlb cache without asid
>
>FWIW, when referring to functions please put the () at the end.
>Makes the changelog more natural to read. You do not need to make
>a v3 for that though.
>
>Thanks,
>Conor.
>
>> information now, which will flush tlbs in other tasks' address space
>> even if the processor supports asid. So add a new function
>> `flush_tlb_local_one_page` to flush one local page whether the processor
>> supports asid or not, for cases that need to flush one local page, such
>> as the function `update_mmu_cache`.
>> 
>> Signed-off-by: Jinyu Tang <tjytimi at 163.com>
>> ---
>> RFC V1 -> V2 : 
>> 1. Rebased on PATCH9 of the IPI improvement series, as Anup Patel
>> suggested. 
>> 2. Made the commit log clearer.
>> 
>>  arch/riscv/include/asm/pgtable.h  |  2 +-
>>  arch/riscv/include/asm/tlbflush.h |  2 ++
>>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>>  3 files changed, 14 insertions(+), 1 deletion(-)
>> 
>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>> index 7ec936910a96..09ccefa6b6c7 100644
>> --- a/arch/riscv/include/asm/pgtable.h
>> +++ b/arch/riscv/include/asm/pgtable.h
>> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>>  	 */
>> -	local_flush_tlb_page(address);
>> +	flush_tlb_local_one_page(vma, address);
>>  }
>>  
>>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
>> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
>> index 801019381dea..120aeb1c6ecf 100644
>> --- a/arch/riscv/include/asm/tlbflush.h
>> +++ b/arch/riscv/include/asm/tlbflush.h
>> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>>  void flush_tlb_all(void);
>>  void flush_tlb_mm(struct mm_struct *mm);
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  		     unsigned long end);
>> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  
>>  #define flush_tlb_all() local_flush_tlb_all()
>>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
>> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>>  
>>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>>  		unsigned long start, unsigned long end)
>> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
>> index 27a7db8eb2c4..0843e1baaf34 100644
>> --- a/arch/riscv/mm/tlbflush.c
>> +++ b/arch/riscv/mm/tlbflush.c
>> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>>  		local_flush_tlb_all_asid(asid);
>>  }
>>  
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
>> +{
>> +	if (static_branch_unlikely(&use_asid_allocator)) {
>> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
>> +
>> +		local_flush_tlb_page_asid(addr, asid);
>> +	} else {
>> +		local_flush_tlb_page(addr);
>> +	}
>> +}
>> +
>>  static void __ipi_flush_tlb_all(void *info)
>>  {
>>  	local_flush_tlb_all();




More information about the linux-riscv mailing list