[PATCH 5/9] tlsf: decouple maximum allocation size from sizeof(size_t)

Ahmad Fatoum a.fatoum at pengutronix.de
Tue Oct 4 08:54:03 PDT 2022


The previous commit ensures that the first block is aligned to
ALIGN_SIZE, and the implementation already rounds sizes up to
multiples of ALIGN_SIZE.
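
For reference, a minimal sketch of the power-of-two rounding involved;
tlsf.c implements the same pattern in its align_up() helper:

    /* Round x up to the next multiple of align; align must be a
     * power of two. */
    static size_t align_up(size_t x, size_t align)
    {
            return (x + (align - 1)) & ~(align - 1);
    }

    /* e.g. align_up(13, 8) == 16 and align_up(16, 8) == 16 */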

However, each block starts with a size_t holding the block size. On
systems with sizeof(size_t) == 4, this means that even if ALIGN_SIZE
were 8, we would still end up with an unaligned buffer.
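
A minimal sketch of the problem, with hypothetical addresses and
assuming ALIGN_SIZE == 8 on a system with a 4-byte size_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* pretend a block's size field sits at an 8-aligned address */
            uintptr_t size_field = 0x1000;
            /* user data begins directly after the size field, so with
             * a 4-byte size_t it starts at 0x1004 ... */
            uintptr_t user = size_field + 4; /* sizeof(size_t) == 4 */
            /* ... which is only 4-byte aligned: */
            printf("%lu\n", (unsigned long)(user % 8)); /* prints 4 */
            return 0;
    }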

The straightforward fix is to increase the TLSF per-block overhead to
8 bytes per allocation, even on 32-bit systems; that way alignment is
maintained naturally. Prepare for this by replacing all references to
the size_t type of the block size field with a new tlsf_size_t
typedef.
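
With the typedef in place, the actual widening later reduces to
changing its definition, e.g. (a sketch, not the final definition):

    /* hypothetical follow-up: an unconditionally 64-bit block size,
     * so block_header_overhead == sizeof(tlsf_size_t) == 8 on 32-bit
     * systems as well, keeping user data 8-byte aligned */
    typedef uint64_t tlsf_size_t;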

Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
 common/tlsf.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/common/tlsf.c b/common/tlsf.c
index 83d469ae0a25..16682435e492 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -95,10 +95,12 @@ enum tlsf_private
 #define tlsf_static_assert(exp) \
 	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
 
+typedef size_t tlsf_size_t;
+
 /* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
 tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
-tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
-tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
+tlsf_static_assert(sizeof(tlsf_size_t) * CHAR_BIT >= 32);
+tlsf_static_assert(sizeof(tlsf_size_t) * CHAR_BIT <= 64);
 
 /* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
 tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
@@ -126,7 +128,7 @@ typedef struct block_header_t
 	struct block_header_t* prev_phys_block;
 
 	/* The size of this block, excluding the block header. */
-	size_t size;
+	tlsf_size_t size;
 
 	/* Next and previous free blocks. */
 	struct block_header_t* next_free;
@@ -147,7 +149,7 @@ static const size_t block_header_prev_free_bit = 1 << 1;
 ** The prev_phys_block field is stored *inside* the previous free block.
 */
 static const size_t block_header_shift = offsetof(block_header_t, size);
-static const size_t block_header_overhead = sizeof(size_t);
+static const size_t block_header_overhead = sizeof(tlsf_size_t);
 
 /* User data starts directly after the size field in a used block. */
 static const size_t block_start_offset =
@@ -989,7 +991,7 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
 	{
 		void* ptr = block_to_ptr(block);
 		void* aligned = align_ptr(ptr, align);
-		size_t gap = tlsf_cast(size_t,
+		tlsf_size_t gap = tlsf_cast(tlsf_size_t,
 			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
 
 		/* If gap size is too small, offset to next aligned boundary. */
@@ -1001,7 +1003,7 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
 				tlsf_cast(tlsfptr_t, aligned) + offset);
 
 			aligned = align_ptr(next_aligned, align);
-			gap = tlsf_cast(size_t,
+			gap = tlsf_cast(tlsf_size_t,
 				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
 		}
 
-- 
2.30.2
