[RFC PATCH 5/6] kvm: arm64: Fix constant-pool users in hyp

David Brazdil dbrazdil at google.com
Wed Dec 9 08:01:45 EST 2020


Hey, realized I never replied to this...

On Tue, Nov 24, 2020 at 03:08:20PM +0100, Ard Biesheuvel wrote:
> On Thu, 19 Nov 2020 at 17:26, David Brazdil <dbrazdil at google.com> wrote:
> >
> > Hyp code used to use absolute addressing via a constant pool to obtain
> > the kernel VA of 3 symbols - panic, __hyp_panic_string and
> > __kvm_handle_stub_hvc. This used to work because the kernel would
> > relocate the addresses in the constant pool to kernel VA at boot and hyp
> > would simply load them from there.
> >
> > Now that relocations are fixed up to point to hyp VAs, this does not
> > work any longer. Rework the helpers to convert hyp VA to kernel VA / PA
> > as needed.
> >
> 
> Ok, so the reason for the problem is that the literal exists inside
> the HYP text, and all literals are fixed up using the HYP mapping,
> even if they don't point to something that is mapped at HYP. Would it
> make sense to simply disregard literals that point outside of the HYP
> VA mapping?

That would be possible - I'm about to post a v1 with build-time generation of
these relocs, and kernel symbols are easy to recognize there because they're
undefined in that ELF object. But I do worry that it would confuse people a
lot. And if somebody referenced a kernel symbol in C (maybe not a use case,
but...), they would get different results depending on which addressing mode
the compiler picked.
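
Just to illustrate what I mean by "recognizable" - this is not the actual
tool from the series, only a hypothetical libelf sketch: walk the hyp
object's RELA sections and flag relocations whose target symbol is
undefined there (i.e. lives in the kernel proper):

	/* Hypothetical example, not the real build tool: list relocations in
	 * a hyp object whose target symbol is undefined there. */
	#include <err.h>
	#include <fcntl.h>
	#include <gelf.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		Elf_Scn *scn = NULL;
		Elf *elf;
		int fd;

		if (argc != 2)
			errx(1, "usage: %s <hyp.o>", argv[0]);
		if (elf_version(EV_CURRENT) == EV_NONE)
			errx(1, "libelf initialization failed");
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			err(1, "open %s", argv[1]);
		elf = elf_begin(fd, ELF_C_READ, NULL);
		if (!elf)
			errx(1, "elf_begin: %s", elf_errmsg(-1));

		while ((scn = elf_nextscn(elf, scn)) != NULL) {
			Elf_Data *data, *symtab;
			GElf_Shdr shdr;
			size_t i, nr;

			if (!gelf_getshdr(scn, &shdr) || shdr.sh_type != SHT_RELA)
				continue;
			data = elf_getdata(scn, NULL);
			symtab = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
			nr = shdr.sh_size / shdr.sh_entsize;

			for (i = 0; i < nr; i++) {
				GElf_Rela rela;
				GElf_Sym sym;

				if (!gelf_getrela(data, i, &rela) ||
				    !gelf_getsym(symtab, (int)GELF_R_SYM(rela.r_info), &sym))
					continue;
				/* Undefined here == defined in the kernel proper. */
				if (sym.st_shndx == SHN_UNDEF)
					printf("kernel symbol reloc at %#llx\n",
					       (unsigned long long)rela.r_offset);
			}
		}

		elf_end(elf);
		close(fd);
		return 0;
	}

(Builds with -lelf. The real thing would obviously generate something usable
at link time rather than print offsets, but that's the gist of telling
hyp-internal references apart from kernel ones.)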

> 
> > Signed-off-by: David Brazdil <dbrazdil at google.com>
> > ---
> >  arch/arm64/include/asm/kvm_mmu.h | 29 +++++++++++++++++++----------
> >  arch/arm64/kvm/hyp/nvhe/host.S   | 29 +++++++++++++++--------------
> >  2 files changed, 34 insertions(+), 24 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> > index 8cb8974ec9cc..0676ff2105bb 100644
> > --- a/arch/arm64/include/asm/kvm_mmu.h
> > +++ b/arch/arm64/include/asm/kvm_mmu.h
> > @@ -72,9 +72,14 @@ alternative_cb kvm_update_va_mask
> >  alternative_cb_end
> >  .endm
> >
> > +.macro hyp_pa reg, tmp
> > +       ldr_l   \tmp, hyp_physvirt_offset
> > +       add     \reg, \reg, \tmp
> > +.endm
> > +
> >  /*
> > - * Convert a kernel image address to a PA
> > - * reg: kernel address to be converted in place
> > + * Convert a hypervisor VA to a kernel image address
> > + * reg: hypervisor address to be converted in place
> >   * tmp: temporary register
> >   *
> >   * The actual code generation takes place in kvm_get_kimage_voffset, and
> > @@ -82,18 +87,22 @@ alternative_cb_end
> >   * perform the register allocation (kvm_get_kimage_voffset uses the
> >   * specific registers encoded in the instructions).
> >   */
> > -.macro kimg_pa reg, tmp
> > +.macro hyp_kimg reg, tmp
> > +       /* Convert hyp VA -> PA. */
> > +       hyp_pa  \reg, \tmp
> > +
> > +       /* Load kimage_voffset. */
> >  alternative_cb kvm_get_kimage_voffset
> > -       movz    \tmp, #0
> > -       movk    \tmp, #0, lsl #16
> > -       movk    \tmp, #0, lsl #32
> > -       movk    \tmp, #0, lsl #48
> > +       movz    \tmp, #0
> > +       movk    \tmp, #0, lsl #16
> > +       movk    \tmp, #0, lsl #32
> > +       movk    \tmp, #0, lsl #48
> >  alternative_cb_end
> >
> > -       /* reg = __pa(reg) */
> > -       sub     \reg, \reg, \tmp
> > +       /* Convert PA -> kimg VA. */
> > +       add     \reg, \reg, \tmp
> >  .endm
> > -
> > +
> >  #else
> >
> >  #include <linux/pgtable.h>
> > diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
> > index 596dd5ae8e77..bcb80d525d8c 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/host.S
> > +++ b/arch/arm64/kvm/hyp/nvhe/host.S
> > @@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
> >   * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
> >   */
> >  SYM_FUNC_START(__hyp_do_panic)
> > -       /* Load the format arguments into x1-7 */
> > -       mov     x6, x3
> > -       get_vcpu_ptr x7, x3
> > -
> > -       mrs     x3, esr_el2
> > -       mrs     x4, far_el2
> > -       mrs     x5, hpfar_el2
> > -
> >         /* Prepare and exit to the host's panic funciton. */
> >         mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
> >                       PSR_MODE_EL1h)
> >         msr     spsr_el2, lr
> >         ldr     lr, =panic
> > +       hyp_kimg lr, x6
> >         msr     elr_el2, lr
> >
> > -       /*
> > -        * Set the panic format string and enter the host, conditionally
> > -        * restoring the host context.
> > -        */
> > +       /* Set the panic format string. Use the, now free, LR as scratch. */
> > +       ldr     lr, =__hyp_panic_string
> > +       hyp_kimg lr, x6
> > +
> > +       /* Load the format arguments into x1-7. */
> > +       mov     x6, x3
> > +       get_vcpu_ptr x7, x3
> > +       mrs     x3, esr_el2
> > +       mrs     x4, far_el2
> > +       mrs     x5, hpfar_el2
> > +
> > +       /* Enter the host, conditionally restoring the host context. */
> >         cmp     x0, xzr
> > -       ldr     x0, =__hyp_panic_string
> > +       mov     x0, lr
> >         b.eq    __host_enter_without_restoring
> >         b       __host_enter_for_panic
> >  SYM_FUNC_END(__hyp_do_panic)
> > @@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
> >          * Preserve x0-x4, which may contain stub parameters.
> >          */
> >         ldr     x5, =__kvm_handle_stub_hvc
> > -       kimg_pa x5, x6
> > +       hyp_pa  x5, x6
> >         br      x5
> >  .L__vect_end\@:
> >  .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
> > --
> > 2.29.2.299.gdc1121823c-goog
> >
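
And for completeness, since it's a bit hidden in the asm above - the two
conversions the reworked macros perform are just additions of per-boot
offsets. In purely illustrative C, with the helper names made up:

	/* Illustrative only: the arithmetic behind hyp_pa and hyp_kimg.
	 * hyp_physvirt_offset and kimage_voffset are the real kernel
	 * variables; the helper functions are made-up names. */
	#include <stdint.h>

	extern int64_t hyp_physvirt_offset;	/* PA - hyp VA, fixed at boot */
	extern uint64_t kimage_voffset;		/* kimage VA - PA, fixed at boot */

	static inline uint64_t hyp_va_to_pa(uint64_t hyp_va)
	{
		return hyp_va + (uint64_t)hyp_physvirt_offset;	/* hyp_pa */
	}

	static inline uint64_t hyp_va_to_kimg_va(uint64_t hyp_va)
	{
		return hyp_va_to_pa(hyp_va) + kimage_voffset;	/* hyp_kimg */
	}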


