[PATCH v4 08/16] arm64/mm: Split asid_inits in 2 parts
Shameer Kolothum
shameerali.kolothum.thodi at huawei.com
Wed Apr 14 12:23:04 BST 2021
From: Julien Grall <julien.grall at arm.com>
Move the common initialization of the ASID allocator out into a
separate function, so that it can be reused by other callers.
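
With the common setup factored out, a later user can create its own
allocator instance without duplicating the bitmap handling. A minimal
sketch of such a caller (hypothetical; vmid_info and vmids_init are
illustrative names only, not part of this patch):

/* Hypothetical second user of the common init path (illustration only) */
static struct asid_info vmid_info;

static int vmids_init(void)
{
	/* e.g. a 16-bit ID space that does not need the pinned map */
	if (asid_allocator_init(&vmid_info, 16, false))
		return -ENOMEM;

	return 0;
}
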
Signed-off-by: Julien Grall <julien.grall at arm.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi at huawei.com>
---
v3 --> v4:
 - Dropped asid_per_ctxt and added pinned ASID map init.
---
arch/arm64/mm/context.c | 44 +++++++++++++++++++++++++++++++----------
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8af54e06f5bc..041c3c5e0216 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -412,26 +412,50 @@ static int asids_update_limit(void)
 }
 arch_initcall(asids_update_limit);
 
-static int asids_init(void)
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of ASIDs available
+ * @pinned: Support for Pinned ASIDs
+ */
+static int asid_allocator_init(struct asid_info *info, u32 bits, bool pinned)
 {
-	struct asid_info *info = &asid_info;
+	info->bits = bits;
 
-	info->bits = get_cpu_asid_bits();
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more ASID than CPUs. ASID #0 is always reserved.
+	 */
+	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
 	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
 	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
 			    sizeof(*info->map), GFP_KERNEL);
 	if (!info->map)
-		panic("Failed to allocate bitmap for %lu ASIDs\n",
-		      NUM_CTXT_ASIDS(info));
+		return -ENOMEM;
 
 	info->map_idx = 1;
-	info->active = &active_asids;
-	info->reserved = &reserved_asids;
 	raw_spin_lock_init(&info->lock);
 
-	info->pinned_map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
-				   sizeof(*info->pinned_map), GFP_KERNEL);
-	info->nr_pinned_asids = 0;
+	if (pinned) {
+		info->pinned_map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
+					   sizeof(*info->pinned_map), GFP_KERNEL);
+		info->nr_pinned_asids = 0;
+	}
+
+	return 0;
+}
+
+static int asids_init(void)
+{
+	struct asid_info *info = &asid_info;
+
+	if (asid_allocator_init(info, get_cpu_asid_bits(), true))
+		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+		      NUM_CTXT_ASIDS(info));
+
+	info->active = &active_asids;
+	info->reserved = &reserved_asids;
 
 	/*
 	 * We cannot call set_reserved_asid_bits() here because CPU
--
2.17.1