[RFC PATCH v4 10/15] iommu/io-pgtable-arm: Implement arm_lpae_unmap_pages()

Will Deacon <will at kernel.org>
Thu Apr 8 15:32:11 BST 2021


On Wed, Apr 07, 2021 at 09:52:36PM -0700, Isaac J. Manjarres wrote:
> Implement the unmap_pages() callback for the ARM LPAE io-pgtable
> format.
> 
> Signed-off-by: Isaac J. Manjarres <isaacm at codeaurora.org>
> Suggested-by: Will Deacon <will at kernel.org>
> ---
>  drivers/iommu/io-pgtable-arm.c | 70 ++++++++++++++++++++++------------
>  1 file changed, 45 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index ea66b10c04c4..6700685f81d4 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -253,8 +253,8 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
>  
>  static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
>  			       struct iommu_iotlb_gather *gather,
> -			       unsigned long iova, size_t size, int lvl,
> -			       arm_lpae_iopte *ptep);
> +			       unsigned long iova, size_t size, size_t pgcount,
> +			       int lvl, arm_lpae_iopte *ptep);
>  
>  static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
>  				phys_addr_t paddr, arm_lpae_iopte prot,
> @@ -298,7 +298,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
>  			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
>  
>  			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
> -			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz,
> +			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
>  					     lvl, tblp) != sz) {
>  				WARN_ON(1);
>  				return -EINVAL;
> @@ -526,14 +526,14 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
>  				       struct iommu_iotlb_gather *gather,
>  				       unsigned long iova, size_t size,
>  				       arm_lpae_iopte blk_pte, int lvl,
> -				       arm_lpae_iopte *ptep)
> +				       arm_lpae_iopte *ptep, size_t pgcount)
>  {
>  	struct io_pgtable_cfg *cfg = &data->iop.cfg;
>  	arm_lpae_iopte pte, *tablep;
>  	phys_addr_t blk_paddr;
>  	size_t tablesz = ARM_LPAE_GRANULE(data);
>  	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
> -	int i, unmap_idx = -1;
> +	int i, unmap_idx_start = -1, num_entries = 0, max_entries;
>  
>  	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
>  		return 0;
> @@ -542,15 +542,18 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
>  	if (!tablep)
>  		return 0; /* Bytes unmapped */
>  
> -	if (size == split_sz)
> -		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
> +	if (size == split_sz) {
> +		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
> +		max_entries = (tablesz >> ilog2(sizeof(pte))) - unmap_idx_start;
> +		num_entries = min_t(int, pgcount, max_entries);
> +	}
>  
>  	blk_paddr = iopte_to_paddr(blk_pte, data);
>  	pte = iopte_prot(blk_pte);
>  
>  	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {

Given that we already have a 'tablesz / sizeof(pte)' expression here, I'd be
inclined to add either a local variable or a macro helper for the
ptes_per_table value, which you also need in order to compute max_entries.
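Something like this, maybe (ARM_LPAE_PTES_PER_TABLE is just a name I'm
suggesting here, not something that exists today):

	/* Number of PTEs in one table, derived from the granule size. */
	#define ARM_LPAE_PTES_PER_TABLE(d)	\
		(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

That way the max_entries calculation and the loop bound below can share the
same expression instead of open-coding the division twice.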

>  		/* Unmap! */
> -		if (i == unmap_idx)
> +		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
>  			continue;
>  
>  		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
> @@ -568,38 +571,45 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
>  			return 0;
>  
>  		tablep = iopte_deref(pte, data);
> -	} else if (unmap_idx >= 0) {
> -		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
> -		return size;
> +	} else if (unmap_idx_start >= 0) {
> +		for (i = 0; i < num_entries; i++)
> +			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

I suppose we could add a count parameter to the iotlb gather stuff in
future too, but for now this is fine, as this series is already pretty big.
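
Roughly this shape, I imagine (io_pgtable_tlb_add_pages() is purely
hypothetical, and a real version would presumably fold the whole range into
the gather rather than looping over the existing per-page helper):

	static inline void
	io_pgtable_tlb_add_pages(struct io_pgtable *iop,
				 struct iommu_iotlb_gather *gather,
				 unsigned long iova, size_t granule,
				 size_t pgcount)
	{
		size_t i;

		/* Naive sketch: record each page with the existing helper. */
		for (i = 0; i < pgcount; i++)
			io_pgtable_tlb_add_page(iop, gather,
						iova + i * granule, granule);
	}

That would let callers like the one above drop their own loops.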

Will


