[PATCH v8 08/12] iommu/arm-smmu-v3: Seize private ASID
Xiang Zheng
zhengxiang9 at huawei.com
Mon Jul 6 08:40:27 EDT 2020
Hi Jean,
On 2020/6/18 23:51, Jean-Philippe Brucker wrote:
> The SMMU has a single ASID space, the union of shared and private ASID
> sets. This means that the SMMU driver competes with the arch allocator
> for ASIDs. Shared ASIDs are those of Linux processes, allocated by the
> arch, and participate in broadcast TLB maintenance. Private ASIDs are
> allocated by the SMMU driver and used for "classic" map/unmap DMA. They
> require command-queue TLB invalidations.
>
> When we pin down an mm_context and get an ASID that is already in use by
> the SMMU, it belongs to a private context. We used to simply abort the
> bind, but this is unfair to users, who would be unable to bind a few
> seemingly random processes. Try to allocate a new private ASID for the
> context, and make the old ASID shared.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
> ---
> drivers/iommu/arm-smmu-v3.c | 101 +++++++++++++++++++++++++++++-------
> 1 file changed, 82 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
> index cabd942e4cbf3..5506add42c9c8 100644
> --- a/drivers/iommu/arm-smmu-v3.c
> +++ b/drivers/iommu/arm-smmu-v3.c
> @@ -733,6 +733,7 @@ struct arm_smmu_option_prop {
> };
>
> static DEFINE_XARRAY_ALLOC1(asid_xa);
> +static DEFINE_MUTEX(asid_lock);
> static DEFINE_MUTEX(sva_lock);
>
> static struct arm_smmu_option_prop arm_smmu_options[] = {
> @@ -1537,6 +1538,17 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
> }
>
> /* Context descriptor manipulation functions */
> +static void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
> +{
> + struct arm_smmu_cmdq_ent cmd = {
> + .opcode = CMDQ_OP_TLBI_NH_ASID,
> + .tlbi.asid = asid,
> + };
> +
> + arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> + arm_smmu_cmdq_issue_sync(smmu);
> +}
> +
> static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
> int ssid, bool leaf)
> {
> @@ -1795,9 +1807,18 @@ static bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
> return free;
> }
>
> +/*
> + * Try to reserve this ASID in the SMMU. If it is in use, try to steal it from
> + * the private entry. Careful here, we may be modifying the context tables of
> + * another SMMU!
> + */
> static struct arm_smmu_ctx_desc *arm_smmu_share_asid(u16 asid)
> {
> + int ret;
> + u32 new_asid;
> struct arm_smmu_ctx_desc *cd;
> + struct arm_smmu_device *smmu;
> + struct arm_smmu_domain *smmu_domain;
>
> cd = xa_load(&asid_xa, asid);
> if (!cd)
> @@ -1809,8 +1830,31 @@ static struct arm_smmu_ctx_desc *arm_smmu_share_asid(u16 asid)
> return cd;
> }
>
> - /* Ouch, ASID is already in use for a private cd. */
> - return ERR_PTR(-EBUSY);
> + smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
> + smmu = smmu_domain->smmu;
> +
> + ret = xa_alloc(&asid_xa, &new_asid, cd,
> + XA_LIMIT(1, 1 << smmu->asid_bits), GFP_KERNEL);
> + if (ret)
> + return ERR_PTR(-ENOSPC);
> + /*
> + * Race with unmap: TLB invalidations will start targeting the new ASID,
> + * which isn't assigned yet. We'll do an invalidate-all on the old ASID
> + * later, so it doesn't matter.
> + */
> + cd->asid = new_asid;
> +
> + /*
> + * Update ASID and invalidate CD in all associated masters. There will
> + * be some overlap between use of both ASIDs, until we invalidate the
> + * TLB.
> + */
> + arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
> +
> + /* Invalidate TLB entries previously associated with that context */
> + arm_smmu_tlb_inv_asid(smmu, asid);
> +
> + return NULL;
> }
>
> __maybe_unused
> @@ -1836,7 +1880,20 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
>
> arm_smmu_init_cd(cd);
>
> + /*
> + * Serialize against arm_smmu_domain_finalise_s1() and
> + * arm_smmu_domain_free() as we might need to replace the private ASID
> + * from an existing CD.
> + */
> + mutex_lock(&asid_lock);
> old_cd = arm_smmu_share_asid(asid);
> + if (!old_cd) {
> + ret = xa_insert(&asid_xa, asid, cd, GFP_KERNEL);
Should we use xa_store() here? If "asid" was already in use by a private
context, arm_smmu_share_asid() returns NULL after moving that private cd
to a new ASID, but the old entry indexed by "asid" is still left in
asid_xa, so this xa_insert() will fail with -EBUSY.
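For example, something along these lines (untested, just to sketch the
idea) would replace the stale entry instead of failing:

	if (!old_cd) {
		/*
		 * The stale private entry is still present at index "asid";
		 * xa_store() overwrites it rather than returning -EBUSY.
		 */
		ret = xa_err(xa_store(&asid_xa, asid, cd, GFP_KERNEL));
		if (ret)
			old_cd = ERR_PTR(ret);
	}

Alternatively, arm_smmu_share_asid() could xa_erase() the old index once
the private cd has been moved to its new ASID, so that xa_insert() keeps
working as before.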
> + if (ret)
> + old_cd = ERR_PTR(ret);
> + }
> + mutex_unlock(&asid_lock);
> +
> if (IS_ERR(old_cd)) {
> ret = PTR_ERR(old_cd);
> goto err_free_cd;
> @@ -1850,11 +1907,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
> return old_cd;
> }
>
> - /* Fails if a private ASID has been allocated since we last checked */
> - ret = xa_insert(&asid_xa, asid, cd, GFP_KERNEL);
> - if (ret)
> - goto err_free_cd;
> -
> tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - VA_BITS) |
> FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
> FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
> @@ -2398,15 +2450,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
> struct arm_smmu_device *smmu = smmu_domain->smmu;
> struct arm_smmu_cmdq_ent cmd;
>
> - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
> - cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
> - cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
> - cmd.tlbi.vmid = 0;
> - } else {
> - cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
> - cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
> - }
> -
> /*
> * NOTE: when io-pgtable is in non-strict mode, we may get here with
> * PTEs previously cleared by unmaps on the current CPU not yet visible
> @@ -2414,8 +2457,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
> * insertion to guarantee those are observed before the TLBI. Do be
> * careful, 007.
> */
> - arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> - arm_smmu_cmdq_issue_sync(smmu);
> + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
> + arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
> + } else {
> + cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
> + cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
> + arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> + arm_smmu_cmdq_issue_sync(smmu);
> + }
> arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
> }
>
> @@ -2599,9 +2648,15 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
> if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
> struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
>
> + /*
> + * Prevent arm_smmu_share_asid() from rewriting CD#0 while we're
> + * freeing it.
> + */
> + mutex_lock(&asid_lock);
> if (cfg->cdcfg.cdtab)
> arm_smmu_free_cd_tables(smmu_domain);
> arm_smmu_free_asid(&cfg->cd);
> + mutex_unlock(&asid_lock);
> } else {
> struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
> if (cfg->vmid)
> @@ -2623,10 +2678,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
>
> arm_smmu_init_cd(&cfg->cd);
>
> + /*
> + * Prevent arm_smmu_share_asid() from seizing the private ASID we're
> + * allocating here until it is written to the CD.
> + */
> + mutex_lock(&asid_lock);
> ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
> XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
> if (ret)
> - return ret;
> + goto out_unlock;
>
> cfg->s1cdmax = master->ssid_bits;
>
> @@ -2654,12 +2714,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
> if (ret)
> goto out_free_cd_tables;
>
> + mutex_unlock(&asid_lock);
> return 0;
>
> out_free_cd_tables:
> arm_smmu_free_cd_tables(smmu_domain);
> out_free_asid:
> arm_smmu_free_asid(&cfg->cd);
> +out_unlock:
> + mutex_unlock(&asid_lock);
> return ret;
> }
>
>
--
Thanks,
Xiang