[RFC PATCH] drivers: iommu: reset cached node if dma_mask is changed
Joerg Roedel
joro at 8bytes.org
Wed May 13 08:33:08 UTC 2020
Adding Robin.
On Tue, May 05, 2020 at 12:07:59AM +0530, Ajay Kumar wrote:
> The current IOVA allocation code keeps a cached pointer to the
> first allocated IOVA node and walks the tree downwards from it, so
> all subsequent allocations have no way to get past (that is, go
> higher than) the first allocated IOVA range.
>
> This causes an issue when the dma_mask of the master device is
> changed. Even though the DMA window is increased, the allocation
> code, unaware of the change, keeps allocating IOVA addresses lower
> than the first allocated IOVA address.
>
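For reference, the sequence that runs into this looks roughly like the
sketch below. The helper, buffers and mask widths are made up for
illustration; only the DMA API calls matter:

#include <linux/dma-mapping.h>

/* Illustrative sketch of the allocation pattern described above. */
static int illustrate_mask_change(struct device *dev, void *buf1, void *buf2,
				  size_t size)
{
	dma_addr_t first, second;

	/* Narrow DMA window: the first IOVA is allocated just below the
	 * mask limit and becomes the cached node. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(28)))
		return -EIO;
	first = dma_map_single(dev, buf1, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, first))
		return -ENOMEM;

	/* Window widened to 32 bits: the allocator still starts its
	 * backwards walk from the cached node, so this mapping (and all
	 * later ones) ends up below 'first' instead of using the newly
	 * available range above it. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;
	second = dma_map_single(dev, buf2, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, second))
		return -ENOMEM;

	return 0;
}
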
> This patch adds a check for a dma_mask change in the IOVA
> allocation function and resets the cached IOVA node to the anchor
> node every time a dma_mask change is observed.
>
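To connect "dma_mask change" with the limit_pfn check used below: in
the dma-iommu path the limit handed to alloc_iova_fast() is derived
from the device's DMA mask, roughly as in this simplified sketch (the
helper name is made up, not the exact upstream code), so a mask change
is visible to the allocator only as a different limit_pfn:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iova.h>

/* Simplified sketch: how a caller turns the device's DMA mask into the
 * limit_pfn that __get_cached_rbnode() compares against curr_limit_pfn. */
static unsigned long iova_limit_pfn(struct device *dev,
				    struct iova_domain *iovad)
{
	u64 dma_limit = dma_get_mask(dev);

	if (dev->bus_dma_limit)
		dma_limit = min(dma_limit, dev->bus_dma_limit);

	return (unsigned long)(dma_limit >> iova_shift(iovad));
}
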
> NOTE:
> This patch is needed to address the issue discussed in the thread below:
> https://www.spinics.net/lists/iommu/msg43586.html
>
> Signed-off-by: Ajay Kumar <ajaykumar.rs at samsung.com>
> Signed-off-by: Sathyam Panda <sathya.panda at samsung.com>
> ---
> drivers/iommu/iova.c | 17 ++++++++++++++++-
> include/linux/iova.h | 1 +
> 2 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index 41c605b0058f..0e99975036ae 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -44,6 +44,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
>  	iovad->granule = granule;
>  	iovad->start_pfn = start_pfn;
>  	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
> +	iovad->curr_limit_pfn = iovad->dma_32bit_pfn;
>  	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
>  	iovad->flush_cb = NULL;
>  	iovad->fq = NULL;
> @@ -116,9 +117,20 @@ EXPORT_SYMBOL_GPL(init_iova_flush_queue);
>  static struct rb_node *
>  __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
>  {
> -	if (limit_pfn <= iovad->dma_32bit_pfn)
> +	if (limit_pfn <= iovad->dma_32bit_pfn) {
> +		/* re-init cached node if DMA limit has changed */
> +		if (limit_pfn != iovad->curr_limit_pfn) {
> +			iovad->cached32_node = &iovad->anchor.node;
> +			iovad->curr_limit_pfn = limit_pfn;
> +		}
>  		return iovad->cached32_node;
> +	}
> 
> +	/* re-init cached node if DMA limit has changed */
> +	if (limit_pfn != iovad->curr_limit_pfn) {
> +		iovad->cached_node = &iovad->anchor.node;
> +		iovad->curr_limit_pfn = limit_pfn;
> +	}
>  	return iovad->cached_node;
>  }
>
> @@ -190,6 +202,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
>  	if (size_aligned)
>  		align_mask <<= fls_long(size - 1);
> 
> +	if (limit_pfn != iovad->curr_limit_pfn)
> +		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
> +
>  	/* Walk the tree backwards */
>  	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
>  	if (limit_pfn <= iovad->dma_32bit_pfn &&
> diff --git a/include/linux/iova.h b/include/linux/iova.h
> index a0637abffee8..be2220c096ef 100644
> --- a/include/linux/iova.h
> +++ b/include/linux/iova.h
> @@ -73,6 +73,7 @@ struct iova_domain {
>  	unsigned long	granule;	/* pfn granularity for this domain */
>  	unsigned long	start_pfn;	/* Lower limit for this domain */
>  	unsigned long	dma_32bit_pfn;
> +	unsigned long	curr_limit_pfn;	/* Current max limit for this domain */
>  	unsigned long	max32_alloc_size; /* Size of last failed allocation */
>  	struct iova_fq __percpu *fq;	/* Flush Queue */
> 
> --
> 2.17.1