[PATCH 08/12] KVM: arm64: nv: Add emulation of AT S12E{0,1}{R,W}
Marc Zyngier
maz at kernel.org
Sat Jul 20 02:49:29 PDT 2024
On Thu, 18 Jul 2024 16:10:20 +0100,
Alexandru Elisei <alexandru.elisei at arm.com> wrote:
>
> Hi,
>
> On Tue, Jun 25, 2024 at 02:35:07PM +0100, Marc Zyngier wrote:
> > On the face of it, AT S12E{0,1}{R,W} is pretty simple. It is the
> > combination of AT S1E{0,1}{R,W}, followed by an extra S2 walk.
> >
> > However, there is a great deal of complexity coming from combining
> > the S1 and S2 attributes to report something consistent in PAR_EL1.
> >
> > This is an absolute minefield, and I have a splitting headache.
> >
> > [..]
> > +static u8 compute_sh(u8 attr, u64 desc)
> > +{
> > +	/* Any form of device, as well as NC has SH[1:0]=0b10 */
> > +	if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC))
> > +		return 0b10;
> > +
> > +	return FIELD_GET(PTE_SHARED, desc) == 0b11 ? 0b11 : 0b10;
>
> If the shareability is 0b00 (non-shareable), the PAR_EL1.SH field will be
> reported as 0b10 (outer-shareable), which seems to contradict
> PAREncodeShareability().
Yup, well caught.
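For reference, my reading of PAREncodeShareability() is roughly the
following (a C sketch rather than the architecture pseudocode; the
par_encode_sh() name is only for illustration): Device and Normal
NC/NC memory is forced to SH=0b10, and everything else reports the
walk's shareability unchanged, so non-shareable must come out as 0b00
(with the reserved 0b01 descriptor encoding folded to non-shareable,
which is also what the fixup below does):

	/*
	 * Illustrative only: Device and fully non-cacheable Normal
	 * memory report SH=0b10; otherwise the shareability is
	 * reported as-is, treating the reserved 0b01 value as NSH.
	 */
	static u8 par_encode_sh(u8 attr, u8 sh)
	{
		if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC))
			return 0b10;

		return (sh == 0b01) ? 0b00 : sh;
	}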
> > +	par |= FIELD_PREP(SYS_PAR_EL1_SH,
> > +			  compute_sh(final_attr, tr->desc));
> > +
> > +	return par;
> >
>
> It seems that the code doesn't combine the stage 1 and stage 2 shareability
> attributes, as required by rule RGDTNP and by S2CombineS1MemAttrs() or
> S2ApplyFWBMemAttrs(), both of which end up calling S2CombineS1Shareability().
That as well. See below for what I'm stashing on top.
Thanks,
M.
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index e66c97fc1fd3..28c4344d1c34 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -459,13 +459,34 @@ static u8 combine_s1_s2_attr(u8 s1, u8 s2)
 	return final;
 }
 
+#define ATTR_NSH	0b00
+#define ATTR_RSV	0b01
+#define ATTR_OSH	0b10
+#define ATTR_ISH	0b11
+
 static u8 compute_sh(u8 attr, u64 desc)
 {
+	u8 sh;
+
 	/* Any form of device, as well as NC has SH[1:0]=0b10 */
 	if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC))
-		return 0b10;
+		return ATTR_OSH;
+
+	sh = FIELD_GET(PTE_SHARED, desc);
+	if (sh == ATTR_RSV)		/* Reserved, mapped to NSH */
+		sh = ATTR_NSH;
+
+	return sh;
+}
+
+static u8 combine_sh(u8 s1_sh, u8 s2_sh)
+{
+	if (s1_sh == ATTR_OSH || s2_sh == ATTR_OSH)
+		return ATTR_OSH;
+	if (s1_sh == ATTR_ISH || s2_sh == ATTR_ISH)
+		return ATTR_ISH;
 
-	return FIELD_GET(PTE_SHARED, desc) == 0b11 ? 0b11 : 0b10;
+	return ATTR_NSH;
 }
 
 static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
@@ -540,7 +561,8 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
 	par = FIELD_PREP(SYS_PAR_EL1_ATTR, final_attr);
 	par |= tr->output & GENMASK(47, 12);
 	par |= FIELD_PREP(SYS_PAR_EL1_SH,
-			  compute_sh(final_attr, tr->desc));
+			  combine_sh(FIELD_GET(SYS_PAR_EL1_SH, s1_par),
+				     compute_sh(final_attr, tr->desc)));
 
 	return par;
 }
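For completeness, combine_sh() is intended to follow the
S2CombineS1Shareability() precedence (OSH wins over ISH, which wins over
NSH), so for example a stage 1 inner-shareable mapping behind a stage 2
non-shareable one is reported as inner-shareable. A trivial userspace
rendition of that rule (the main() harness and the uint8_t spelling are
mine, purely for illustration):

	#include <assert.h>
	#include <stdint.h>

	#define ATTR_NSH	0b00
	#define ATTR_OSH	0b10
	#define ATTR_ISH	0b11

	/* Same precedence as combine_sh() above: OSH > ISH > NSH */
	static uint8_t combine_sh(uint8_t s1_sh, uint8_t s2_sh)
	{
		if (s1_sh == ATTR_OSH || s2_sh == ATTR_OSH)
			return ATTR_OSH;
		if (s1_sh == ATTR_ISH || s2_sh == ATTR_ISH)
			return ATTR_ISH;
		return ATTR_NSH;
	}

	int main(void)
	{
		assert(combine_sh(ATTR_ISH, ATTR_NSH) == ATTR_ISH); /* S1 ISH, S2 NSH */
		assert(combine_sh(ATTR_NSH, ATTR_OSH) == ATTR_OSH); /* any OSH wins */
		assert(combine_sh(ATTR_NSH, ATTR_NSH) == ATTR_NSH); /* both NSH */
		return 0;
	}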
--
Without deviation from the norm, progress is not possible.