[PATCH] ARM: Stricten arguments to virt_to_pfn()

Linus Walleij linus.walleij at linaro.org
Mon May 16 05:16:07 PDT 2022


The kernel convention is that the argument passed to virt_to_pfn()
(virtual address to page frame number) needs to be an unsigned long,
but ARM has avoided this by including a cast to (unsigned long)
inside the definition of virt_to_pfn() itself. This creates problems
if we want to make virt_to_pfn() an actual function with a typed argument,
so fix up the offending call sites.

Signed-off-by: Linus Walleij <linus.walleij at linaro.org>
---
 arch/arm/include/asm/dma-direct.h | 2 +-
 arch/arm/include/asm/memory.h     | 4 ++--
 arch/arm/kernel/hibernate.c       | 4 ++--
 arch/arm/mm/kasan_init.c          | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index 77fcb7ee5ec9..1f386059a3f0 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -28,7 +28,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 {
 	if (dev)
-		return pfn_to_dma(dev, virt_to_pfn(addr));
+		return pfn_to_dma(dev, virt_to_pfn((unsigned long)addr));
 
 	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index f673e13e0f94..aed49981a449 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -393,9 +393,9 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn((unsigned long)(kaddr)))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(virt_to_pfn(kaddr)))
+				 && pfn_valid(virt_to_pfn((unsigned long)(kaddr))))
 
 #endif
 
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index 2373020af965..e46691bfd9bc 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -25,8 +25,8 @@
 
 int pfn_is_nosave(unsigned long pfn)
 {
-	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
-	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+	unsigned long nosave_begin_pfn = virt_to_pfn((unsigned long)&__nosave_begin);
+	unsigned long nosave_end_pfn = virt_to_pfn((unsigned long)(&__nosave_end - 1));
 
 	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
 }
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 29caee9c79ce..4f70e3f09c02 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -58,7 +58,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 				return;
 			}
 			memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
-			entry = pfn_pte(virt_to_pfn(p),
+			entry = pfn_pte(virt_to_pfn((unsigned long)p),
 					__pgprot(pgprot_val(PAGE_KERNEL)));
 		} else if (pte_none(READ_ONCE(*ptep))) {
 			/*
@@ -68,7 +68,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 			 * will work on a scratch area until we can set up the
 			 * proper KASan shadow memory.
 			 */
-			entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
+			entry = pfn_pte(virt_to_pfn((unsigned long)kasan_early_shadow_page),
 					__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
 		} else {
 			/*
@@ -282,7 +282,7 @@ void __init kasan_init(void)
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
 			   &kasan_early_shadow_pte[i],
-			   pfn_pte(virt_to_pfn(kasan_early_shadow_page),
+			   pfn_pte(virt_to_pfn((unsigned long)kasan_early_shadow_page),
 				__pgprot(pgprot_val(PAGE_KERNEL)
 					 | L_PTE_RDONLY)));
 
-- 
2.35.1




More information about the linux-arm-kernel mailing list