[RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

guanxuetao at mprc.pku.edu.cn
Mon Dec 10 00:20:30 EST 2012


> The purpose of iterating over vmlist is to find the vm area with a
> specific virtual address. find_vm_area() is provided for exactly this
> purpose and is more efficient, because it uses an rbtree.
> So use it instead.
>
> Cc: Chris Metcalf <cmetcalf at tilera.com>
> Cc: Guan Xuetao <gxt at mprc.pku.edu.cn>
> Cc: Thomas Gleixner <tglx at linutronix.de>
> Cc: Ingo Molnar <mingo at redhat.com>
> Cc: "H. Peter Anvin" <hpa at zytor.com>
> Signed-off-by: Joonsoo Kim <js1304 at gmail.com>

For UniCore32 bits:
Acked-by: Guan Xuetao <gxt at mprc.pku.edu.cn>
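
For readers who want the background on the efficiency claim in the
commit message above: find_vm_area() resolves the address through the
vmap_area rbtree (vmap_area_root, keyed by virtual address) instead of
walking the linear vmlist. Roughly, it boils down to the following
simplified sketch (not the exact mm/vmalloc.c code of this kernel
version):

	/* O(log n) rbtree lookup instead of an O(n) vmlist walk;
	 * find_vmap_area() takes the vmap tree lock internally, so
	 * no explicit vmlist_lock is needed at the call site. */
	struct vm_struct *find_vm_area(const void *addr)
	{
		struct vmap_area *va;

		va = find_vmap_area((unsigned long)addr);
		if (va && va->flags & VM_VM_AREA)
			return va->vm;

		return NULL;
	}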

>
> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
> index de0de0c..862782d 100644
> --- a/arch/tile/mm/pgtable.c
> +++ b/arch/tile/mm/pgtable.c
> @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
>  	   in parallel. Reuse of the virtual address is prevented by
>  	   leaving it in the global lists until we're done with it.
>  	   cpa takes care of the direct mappings. */
> -	read_lock(&vmlist_lock);
> -	for (p = vmlist; p; p = p->next) {
> -		if (p->addr == addr)
> -			break;
> -	}
> -	read_unlock(&vmlist_lock);
> +	p = find_vm_area((void *)addr);
>
>  	if (!p) {
>  		pr_err("iounmap: bad address %p\n", addr);
> diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
> index b7a6055..13068ee 100644
> --- a/arch/unicore32/mm/ioremap.c
> +++ b/arch/unicore32/mm/ioremap.c
> @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
>  void __uc32_iounmap(volatile void __iomem *io_addr)
>  {
>  	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
> -	struct vm_struct **p, *tmp;
> +	struct vm_struct *vm;
>
>  	/*
>  	 * If this is a section based mapping we need to handle it
> @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
>  	 * all the mappings before the area can be reclaimed
>  	 * by someone else.
>  	 */
> -	write_lock(&vmlist_lock);
> -	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
> -		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
> -			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
> -				unmap_area_sections((unsigned long)tmp->addr,
> -						    tmp->size);
> -			}
> -			break;
> -		}
> -	}
> -	write_unlock(&vmlist_lock);
> +	vm = find_vm_area(addr);
> +	if (vm && (vm->flags & VM_IOREMAP) &&
> +		(vm->flags & VM_UNICORE_SECTION_MAPPING))
> +		unmap_area_sections((unsigned long)vm->addr, vm->size);
>
>  	vunmap(addr);
>  }
> diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
> index 78fe3f1..9a1e658 100644
> --- a/arch/x86/mm/ioremap.c
> +++ b/arch/x86/mm/ioremap.c
> @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
>  	   in parallel. Reuse of the virtual address is prevented by
>  	   leaving it in the global lists until we're done with it.
>  	   cpa takes care of the direct mappings. */
> -	read_lock(&vmlist_lock);
> -	for (p = vmlist; p; p = p->next) {
> -		if (p->addr == (void __force *)addr)
> -			break;
> -	}
> -	read_unlock(&vmlist_lock);
> +	p = find_vm_area((void __force *)addr);
>
>  	if (!p) {
>  		printk(KERN_ERR "iounmap: bad address %p\n", addr);
> --
> 1.7.9.5
>
