
Commit ea3b31c

Baoquan He authored and committed
kexec: define functions to map and unmap segments
JIRA: https://issues.redhat.com/browse/RHEL-114162
Upstream Status: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

commit 0091d92
Author: Steven Chen <chenste@linux.microsoft.com>
Date:   Mon Apr 21 15:25:09 2025 -0700

    kexec: define functions to map and unmap segments

    Implement kimage_map_segment() to enable IMA to map the measurement log
    list to the kimage structure during the kexec 'load' stage. This function
    gathers the source pages within the specified address range, and maps them
    to a contiguous virtual address range.

    This is a preparation for later usage.

    Implement kimage_unmap_segment() for unmapping segments using vunmap().

    Cc: Eric Biederman <ebiederm@xmission.com>
    Cc: Baoquan He <bhe@redhat.com>
    Cc: Vivek Goyal <vgoyal@redhat.com>
    Cc: Dave Young <dyoung@redhat.com>
    Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
    Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
    Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
    Acked-by: Baoquan He <bhe@redhat.com>
    Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
    Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>

Signed-off-by: Baoquan He <bhe@redhat.com>
1 parent 8b66661 commit ea3b31c

File tree

2 files changed: +60 -0 lines changed

include/linux/kexec.h

Lines changed: 6 additions & 0 deletions
@@ -474,13 +474,19 @@ extern bool kexec_file_dbg_print;
 #define kexec_dprintk(fmt, arg...) \
         do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
+struct kimage;
 static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */

kernel/kexec_core.c

Lines changed: 54 additions & 0 deletions
@@ -877,6 +877,60 @@ int kimage_load_segment(struct kimage *image,
         return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+                         unsigned long addr, unsigned long size)
+{
+        unsigned long src_page_addr, dest_page_addr = 0;
+        unsigned long eaddr = addr + size;
+        kimage_entry_t *ptr, entry;
+        struct page **src_pages;
+        unsigned int npages;
+        void *vaddr = NULL;
+        int i;
+
+        /*
+         * Collect the source pages and map them in a contiguous VA range.
+         */
+        npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+        src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+        if (!src_pages) {
+                pr_err("Could not allocate ima pages array.\n");
+                return NULL;
+        }
+
+        i = 0;
+        for_each_kimage_entry(image, ptr, entry) {
+                if (entry & IND_DESTINATION) {
+                        dest_page_addr = entry & PAGE_MASK;
+                } else if (entry & IND_SOURCE) {
+                        if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+                                src_page_addr = entry & PAGE_MASK;
+                                src_pages[i++] =
+                                        virt_to_page(__va(src_page_addr));
+                                if (i == npages)
+                                        break;
+                                dest_page_addr += PAGE_SIZE;
+                        }
+                }
+        }
+
+        /* Sanity check. */
+        WARN_ON(i < npages);
+
+        vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+        kfree(src_pages);
+
+        if (!vaddr)
+                pr_err("Could not map ima buffer.\n");
+
+        return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+        vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
         /* Mutex protects the limit count. */
         struct mutex mutex;
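For context only, here is a minimal, hypothetical sketch of how a kexec 'load'-time caller, such as the IMA measurement-list code this commit prepares for, might pair the two new helpers. Only kimage_map_segment() and kimage_unmap_segment() come from this patch; the wrapper name example_copy_into_segment(), the segment-index parameter, and the use of image->segment[] to recover the destination address and size are illustrative assumptions, not part of the commit.

#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/string.h>

/* Hypothetical caller sketch -- not part of this commit. */
static int example_copy_into_segment(struct kimage *image, int seg_idx,
                                     const void *data, size_t len)
{
        struct kexec_segment *seg = &image->segment[seg_idx];
        void *vaddr;

        /* Map the segment's scattered source pages into one contiguous VA range. */
        vaddr = kimage_map_segment(image, seg->mem, seg->memsz);
        if (!vaddr)
                return -ENOMEM;

        /* Clamp to the segment size, then write through the mapping. */
        if (len > seg->memsz)
                len = seg->memsz;
        memcpy(vaddr, data, len);

        /* Drop only the temporary mapping; the underlying pages stay in place. */
        kimage_unmap_segment(vaddr);

        return 0;
}

The reason a dedicated helper is needed is visible in the hunk above: after a segment is loaded, its contents live in individually allocated source pages tracked by the kimage entry list (IND_DESTINATION/IND_SOURCE entries), not in one physically contiguous buffer, so vmap() is used to expose them through a single virtual window and vunmap() releases that window when the caller is done.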
