[PATCH] arm64/io: add constant-argument check
Mark Rutland
mark.rutland at arm.com
Wed May 29 04:14:22 PDT 2024
On Tue, May 28, 2024 at 02:08:38PM +0200, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd at arndb.de>
>
> In some configurations __const_iowrite32_copy() does not get inlined
> and gcc runs into the BUILD_BUG():
>
> In file included from <command-line>:
> In function '__const_memcpy_toio_aligned32',
> inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:203:3,
> inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:199:20:
> include/linux/compiler_types.h:487:45: error: call to '__compiletime_assert_538' declared with attribute error: BUILD_BUG failed
> 487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
> | ^
> include/linux/compiler_types.h:468:25: note: in definition of macro '__compiletime_assert'
> 468 | prefix ## suffix(); \
> | ^~~~~~
> include/linux/compiler_types.h:487:9: note: in expansion of macro '_compiletime_assert'
> 487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
> | ^~~~~~~~~~~~~~~~~~~
> include/linux/build_bug.h:39:37: note: in expansion of macro 'compiletime_assert'
> 39 | #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
> | ^~~~~~~~~~~~~~~~~~
> include/linux/build_bug.h:59:21: note: in expansion of macro 'BUILD_BUG_ON_MSG'
> 59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
> | ^~~~~~~~~~~~~~~~
> arch/arm64/include/asm/io.h:193:17: note: in expansion of macro 'BUILD_BUG'
> 193 | BUILD_BUG();
> | ^~~~~~~~~
>
> Add a check to ensure that the argument is in fact a constant before
> calling into __const_memcpy_toio_aligned32().
>
> Fixes: ead79118dae6 ("arm64/io: Provide a WC friendly __iowriteXX_copy()")
> Signed-off-by: Arnd Bergmann <arnd at arndb.de>
> ---
> arch/arm64/include/asm/io.h | 3 ++-
> 1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
> index 4ff0ae3f6d66..44913f227060 100644
> --- a/arch/arm64/include/asm/io.h
> +++ b/arch/arm64/include/asm/io.h
> @@ -199,7 +199,8 @@ void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
> static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
> size_t count)
> {
> - if (count == 8 || count == 4 || count == 2 || count == 1) {
> + if (__builtin_constant_p(count) &&
> + (count == 8 || count == 4 || count == 2 || count == 1)) {
> __const_memcpy_toio_aligned32(to, from, count);
> dgh();
> } else {
I don't think this is the right fix.
The idea was that this was checked in __iowrite32_copy(), which does:
#define __iowrite32_copy(to, from, count)		\
	(__builtin_constant_p(count) ?			\
	 __const_iowrite32_copy(to, from, count) :	\
	 __iowrite32_copy_full(to, from, count))
... and so __const_iowrite32_copy() should really be marked as __always_inline,
and likewise __const_memcpy_toio_aligned32(), to guarantee that both get
inlined so that __const_memcpy_toio_aligned32() actually sees a constant.
The same reasoning applies to __const_iowrite64_copy() and
__const_memcpy_toio_aligned64().
Checking for a constant in __const_iowrite32_copy() doesn't guarantee
that __const_memcpy_toio_aligned32() is inlined and will actually see a
constant.
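
To make that concrete, here is a minimal stand-alone sketch of the pattern
(the names copy32/copy_const/copy_full are made up for illustration, and
__builtin_unreachable() stands in for the kernel's BUILD_BUG()); the
always_inline attribute on the inner helper is what guarantees its switch
is evaluated against the caller's literal count:

#include <stddef.h>

#define my_always_inline inline __attribute__((__always_inline__))

/* Out-of-line fallback for a non-constant count. */
static void copy_full(volatile unsigned int *to, const unsigned int *from,
		      size_t count)
{
	while (count--)
		*to++ = *from++;
}

/*
 * Constant-count fast path: because this is always_inline, the switch is
 * evaluated with the caller's literal count, so the "impossible" default
 * arm is discarded at compile time.
 */
static my_always_inline void copy_const(volatile unsigned int *to,
					const unsigned int *from, size_t count)
{
	switch (count) {
	case 2:
		to[0] = from[0];
		to[1] = from[1];
		break;
	case 1:
		to[0] = from[0];
		break;
	default:
		__builtin_unreachable();	/* BUILD_BUG() in the kernel */
	}
}

/* Caller-facing macro: dispatch on whether count is a compile-time constant. */
#define copy32(to, from, count)			\
	(__builtin_constant_p(count) ?		\
	 copy_const(to, from, count) :		\
	 copy_full(to, from, count))

If copy_const() were merely static inline, the compiler would be free to
emit it out of line, at which point count is an ordinary runtime argument
and the default arm can no longer be eliminated; that is exactly the
failure mode in the report above.
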
Does the diff below work for you?
Mark.
---->8----
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4ff0ae3f6d669..f4350aae92d5d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* emit the large TLP from the CPU.
*/
-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
- const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -196,8 +197,8 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__const_iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
__const_memcpy_toio_aligned32(to, from, count);
@@ -212,8 +213,9 @@ static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
__const_iowrite32_copy(to, from, count) : \
__iowrite32_copy_full(to, from, count))
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
- const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -255,8 +257,8 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__const_iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
__const_memcpy_toio_aligned64(to, from, count);
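
With both helpers __always_inline, a hypothetical caller like the one below
(names made up, not from the tree) should collapse to the unrolled stores
followed by the DGH, with no out-of-line copy left for the BUILD_BUG() to
fire in:

/* Hypothetical caller, for illustration only. */
static void queue_push_desc(void __iomem *dst, const u32 desc[8])
{
	/* count is the literal 8, so this takes the constant path. */
	__iowrite32_copy(dst, desc, 8);
}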