Commit d54b5cb

KVM: arm64: Assume non-PFNMAP/MIXEDMAP VMAs can be mapped cacheable
JIRA: https://issues.redhat.com/browse/RHEL-73607
Upstream: https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git

commit 216887f
Author: Ankit Agrawal <ankita@nvidia.com>
Date:   Sat Jul 5 07:17:13 2025 +0000

    KVM: arm64: Assume non-PFNMAP/MIXEDMAP VMAs can be mapped cacheable

    Despite its name, kvm_is_device_pfn() is actually used to determine if a
    given PFN has a kernel mapping that can be used to perform cache
    maintenance, as it calls pfn_is_map_memory() internally.

    Expand the helper into its single callsite and further condition the
    check on the VMA having either VM_PFNMAP or VM_MIXEDMAP set. VMAs that
    set neither of these flags must always contain Normal, struct page
    backed memory with valid aliases in the kernel address space.

    Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
    Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Tested-by: Donald Dutile <ddutile@redhat.com>
    Signed-off-by: Ankit Agrawal <ankita@nvidia.com>
    Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
    Link: https://lore.kernel.org/r/20250705071717.5062-3-ankita@nvidia.com
    [ Oliver: fixed typos, refined changelog ]
    Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

Signed-off-by: Donald Dutile <ddutile@redhat.com>
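As a reading aid only (not part of the commit), here is a minimal C sketch of the predicate the change rewrites. It assumes the usual kernel context (vm_flags_t, VM_PFNMAP, VM_MIXEDMAP from <linux/mm.h>, and arm64's pfn_is_map_memory(), which reports whether a PFN has a valid kernel alias usable for cache maintenance); the helper names device_check_old()/device_check_new() are hypothetical and exist only for illustration.

/* Sketch, kernel context assumed; helper names are illustrative only. */

/* Before: any PFN lacking a kernel mapping was treated as device memory. */
static bool device_check_old(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);		/* what kvm_is_device_pfn() did */
}

/*
 * After: only VM_PFNMAP/VM_MIXEDMAP VMAs can carry such PFNs; a VMA with
 * neither flag is Normal, struct-page backed memory and may be mapped
 * cacheable at stage-2.
 */
static bool device_check_new(vm_flags_t vm_flags, unsigned long pfn)
{
	return (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) &&
	       !pfn_is_map_memory(pfn);
}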
1 parent e63474d commit d54b5cb

1 file changed, 4 insertions(+), 6 deletions(-)

arch/arm64/kvm/mmu.c

@@ -193,11 +193,6 @@ int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
 	return 0;
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_is_map_memory(pfn);
-}
-
 static void *stage2_memcache_zalloc_page(void *arg)
 {
 	struct kvm_mmu_memory_cache *mc = arg;
@@ -1488,6 +1483,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
 	struct page *page;
+	vm_flags_t vm_flags;
 	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
 
 	if (fault_is_perm)
@@ -1615,6 +1611,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
 
+	vm_flags = vma->vm_flags;
+
 	/* Don't use the VMA after the unlock -- it may have vanished */
 	vma = NULL;
 
@@ -1638,7 +1636,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
-	if (kvm_is_device_pfn(pfn)) {
+	if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
 		/*
 		 * If the page was identified as device early by looking at
 		 * the VMA flags, vma_pagesize is already representing the