v6_lock needs to be a real spinlock on preempt-rt. Convert it to a
raw_spinlock. No change for !RT kernels.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/mm/copypage-v6.c |   10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

Index: linux-2.6-tip/arch/arm/mm/copypage-v6.c
===================================================================
--- linux-2.6-tip.orig/arch/arm/mm/copypage-v6.c
+++ linux-2.6-tip/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address	(0xffff8000)
 #define to_address	(0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
@@ -96,7 +96,7 @@ static void v6_copy_user_highpage_aliasi
 	 * Now copy the page using the same cache colour as the
 	 * pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -109,7 +109,7 @@ static void v6_copy_user_highpage_aliasi
 
 	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -129,13 +129,13 @@ static void v6_clear_user_highpage_alias
 	 * Now clear the page using the same cache colour as
 	 * the pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
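
For reference, the conversion follows the usual spinlock_t -> raw_spinlock_t
pattern: on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, while a
raw_spinlock_t remains a true spinning lock; on !RT kernels both map to the
same implementation, which is why the patch is a no-op there. A minimal
sketch of that pattern, with hypothetical names and not part of this patch:

#include <linux/spinlock.h>

/* Plays the role of v6_lock: must stay a spinning lock on RT. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	/* Stand-in for the set_pte_ext()/flush_tlb/copy_page sequence
	 * that v6_lock serializes in copypage-v6.c. */
	raw_spin_lock(&example_lock);
	/* ... short, non-sleeping work under the lock ... */
	raw_spin_unlock(&example_lock);
}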