Files
linux-stable-mirror/security/integrity/ima/ima_kexec.c
Harshit Mogalapalli f11d7d088f ima: verify the previous kernel's IMA buffer lies in addressable RAM
[ Upstream commit 10d1c75ed4 ]

Patch series "Address page fault in ima_restore_measurement_list()", v3.

When the second-stage kernel is booted via kexec with a limiting command
line such as "mem=<size>", we observe a page fault.

    BUG: unable to handle page fault for address: ffff97793ff47000
    RIP: ima_restore_measurement_list+0xdc/0x45a
    #PF: error_code(0x0000)  not-present page

This happens on x86_64 only, as this is already fixed in aarch64 in
commit: cbf9c4b961 ("of: check previous kernel's ima-kexec-buffer
against memory bounds")

This patch (of 3):

When the second-stage kernel is booted with a limiting command line (e.g.
"mem=<size>"), the IMA measurement buffer handed over from the previous
kernel may fall outside the addressable RAM of the new kernel.  Accessing
such a buffer can fault during early restore.

Introduce a small generic helper, ima_validate_range(), which verifies
that a physical [start, end] range for the previous-kernel IMA buffer lies
within addressable memory:
	- On x86, use pfn_range_is_mapped().
	- On OF based architectures, use page_is_ram().

Link: https://lkml.kernel.org/r/20251231061609.907170-1-harshit.m.mogalapalli@oracle.com
Link: https://lkml.kernel.org/r/20251231061609.907170-2-harshit.m.mogalapalli@oracle.com
Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
Cc: Alexander Graf <graf@amazon.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: guoweikang <guoweikang.kernel@gmail.com>
Cc: Henry Willard <henry.willard@oracle.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Bohac <jbohac@suse.cz>
Cc: Joel Granados <joel.granados@kernel.org>
Cc: Jonathan McDowell <noodles@fb.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Paul Webb <paul.x.webb@oracle.com>
Cc: Sohil Mehta <sohil.mehta@intel.com>
Cc: Sourabh Jain <sourabhjain@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yifei Liu <yifei.l.liu@oracle.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2026-03-13 17:20:27 +01:00

276 lines
6.7 KiB
C

// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 IBM Corporation
*
* Authors:
* Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
* Mimi Zohar <zohar@linux.vnet.ibm.com>
*/
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/ima.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/reboot.h>
#include <asm/page.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
static bool ima_kexec_update_registered;
static struct seq_file ima_kexec_file;
static void *ima_kexec_buffer;
/*
 * Release the serialized measurement-list buffer and reset the seq_file
 * bookkeeping so the structure can be reused by a later kexec load.
 */
static void ima_free_kexec_file_buf(struct seq_file *sf)
{
	vfree(sf->buf);
	sf->buf = NULL;
	sf->count = 0;
	sf->read_pos = 0;
	sf->size = 0;
}
/*
 * Allocate the buffer that will hold the serialized measurement list for
 * the next kernel. Any buffer from a previous load is dropped first.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int ima_alloc_kexec_file_buf(size_t segment_size)
{
	void *buf;

	ima_free_kexec_file_buf(&ima_kexec_file);

	/* segment size can't change between kexec load and execute */
	buf = vmalloc(segment_size);
	if (!buf)
		return -ENOMEM;

	ima_kexec_file.buf = buf;
	ima_kexec_file.size = segment_size;
	ima_kexec_file.read_pos = 0;
	/* Reserve room at the front for the ima_kexec_hdr written later. */
	ima_kexec_file.count = sizeof(struct ima_kexec_hdr);

	return 0;
}
/*
 * Serialize the in-kernel measurement list into the preallocated
 * ima_kexec_file buffer, prefixed by an ima_kexec_hdr describing it.
 *
 * @buffer_size: out — number of bytes written (header + entries)
 * @buffer:      out — pointer to the serialized buffer
 * @segment_size: size of the kexec segment
 *
 * NOTE(review): segment_size is not referenced in this body; the size cached
 * in ima_kexec_file.size is used instead — confirm the parameter is
 * intentional.
 *
 * Returns 0 on success, -EINVAL if no buffer was allocated or the list
 * no longer fits in it.
 */
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
unsigned long segment_size)
{
struct ima_queue_entry *qe;
struct ima_kexec_hdr khdr;
int ret = 0;
/* segment size can't change between kexec load and execute */
if (!ima_kexec_file.buf) {
pr_err("Kexec file buf not allocated\n");
return -EINVAL;
}
memset(&khdr, 0, sizeof(khdr));
khdr.version = 1;
/* This is an append-only list, no need to hold the RCU read lock */
list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
/* Append entries only while they still fit in the buffer. */
if (ima_kexec_file.count < ima_kexec_file.size) {
khdr.count++;
ima_measurements_show(&ima_kexec_file, qe);
} else {
/* List grew beyond the segment reserved at load time. */
ret = -EINVAL;
break;
}
}
if (ret < 0)
goto out;
/*
* fill in reserved space with some buffer details
* (eg. version, buffer size, number of measurements)
*/
khdr.buffer_size = ima_kexec_file.count;
/* Optionally convert the header to little-endian canonical format. */
if (ima_canonical_fmt) {
khdr.version = cpu_to_le16(khdr.version);
khdr.count = cpu_to_le64(khdr.count);
khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
}
/* Header goes into the space reserved by ima_alloc_kexec_file_buf(). */
memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr));
/* Debug dump of at most the first 100 bytes of the serialized list. */
print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
ima_kexec_file.buf, ima_kexec_file.count < 100 ?
ima_kexec_file.count : 100,
true);
*buffer_size = ima_kexec_file.count;
*buffer = ima_kexec_file.buf;
out:
return ret;
}
/*
 * Called during kexec_file_load so that IMA can add a segment to the kexec
 * image for the measurement list for the next kernel.
 *
 * This function assumes that kexec_lock is held.
 */
void ima_add_kexec_buffer(struct kimage *image)
{
struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
.buf_min = 0, .buf_max = ULONG_MAX,
.top_down = true };
unsigned long binary_runtime_size;
/* use more understandable variable names than defined in kbuf */
void *kexec_buffer = NULL;
size_t kexec_buffer_size;
size_t kexec_segment_size;
int ret;
/*
* Reserve an extra half page of memory for additional measurements
* added during the kexec load.
*/
binary_runtime_size = ima_get_binary_runtime_size();
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
kexec_segment_size = ULONG_MAX;
else
kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
PAGE_SIZE / 2, PAGE_SIZE);
/* Refuse if the list overflowed or would consume over half of RAM. */
if ((kexec_segment_size == ULONG_MAX) ||
((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
pr_err("Binary measurement list too large.\n");
return;
}
ret = ima_alloc_kexec_file_buf(kexec_segment_size);
if (ret < 0) {
pr_err("Not enough memory for the kexec measurement buffer.\n");
return;
}
/*
* On failure kexec_buffer stays NULL; the vmalloc'd file buffer is
* kept and reclaimed by the next ima_alloc_kexec_file_buf() call.
*/
ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
kexec_segment_size);
if (!kexec_buffer) {
pr_err("Not enough memory for the kexec measurement buffer.\n");
return;
}
kbuf.buffer = kexec_buffer;
kbuf.bufsz = kexec_buffer_size;
kbuf.memsz = kexec_segment_size;
ret = kexec_add_buffer(&kbuf);
if (ret) {
pr_err("Error passing over kexec measurement buffer.\n");
vfree(kexec_buffer);
return;
}
/* Record segment location/size so post-load code can map and update it. */
image->ima_buffer_addr = kbuf.mem;
image->ima_buffer_size = kexec_segment_size;
image->ima_buffer = kexec_buffer;
kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
kbuf.mem);
}
/*
 * Called during kexec execute so that IMA can update the measurement list.
 *
 * NOTE(review): the body is currently a no-op that only acknowledges the
 * notification — presumably the actual buffer update is implemented
 * elsewhere or in a later change; confirm against the full series.
 */
static int ima_update_kexec_buffer(struct notifier_block *self,
unsigned long action, void *data)
{
return NOTIFY_OK;
}
/* Reboot notifier; INT_MIN priority makes it run after all other notifiers. */
static struct notifier_block update_buffer_nb = {
.notifier_call = ima_update_kexec_buffer,
.priority = INT_MIN
};
/*
 * Map the loaded image's IMA measurement segment so it can be updated
 * later (at reboot time), and register the reboot notifier once.
 * Any mapping left over from a previous load is torn down first.
 */
void ima_kexec_post_load(struct kimage *image)
{
	void *mapping;

	if (ima_kexec_buffer) {
		kimage_unmap_segment(ima_kexec_buffer);
		ima_kexec_buffer = NULL;
	}

	/* Nothing to map if no IMA segment was added to this image. */
	if (!image->ima_buffer_addr)
		return;

	mapping = kimage_map_segment(image, image->ima_buffer_addr,
				     image->ima_buffer_size);
	if (!mapping) {
		pr_err("Could not map measurements buffer.\n");
		return;
	}
	ima_kexec_buffer = mapping;

	if (!ima_kexec_update_registered) {
		register_reboot_notifier(&update_buffer_nb);
		ima_kexec_update_registered = true;
	}
}
#endif /* IMA_KEXEC */
/*
 * Restore the measurement list handed over from the previous kernel, if
 * one exists, then release the handover buffer. Unsupported or absent
 * buffers are only logged at debug level.
 */
void __init ima_load_kexec_buffer(void)
{
	size_t buf_size = 0;
	void *buf = NULL;
	int rc;

	rc = ima_get_kexec_buffer(&buf, &buf_size);
	if (!rc) {
		rc = ima_restore_measurement_list(buf_size, buf);
		if (rc)
			pr_err("Failed to restore the measurement list: %d\n",
			       rc);
		ima_free_kexec_buffer();
	} else if (rc == -ENOTSUPP) {
		pr_debug("Restoring the measurement list not supported\n");
	} else if (rc == -ENOENT) {
		pr_debug("No measurement list to restore\n");
	} else {
		pr_debug("Error restoring the measurement list: %d\n", rc);
	}
}
/*
 * ima_validate_range - verify a physical buffer lies in addressable RAM
 * @phys: physical start address of the previous kernel's IMA buffer
 * @size: size of the buffer in bytes
 *
 * When the second-stage kernel restricts memory (e.g. "mem=<size>"), the
 * handed-over buffer may fall outside addressable RAM; accessing it would
 * fault during early restore, so callers must validate before use.
 *
 * Return: 0 when the whole [phys, phys + size - 1] range is addressable
 * RAM, -EINVAL otherwise (callers skip restoring).
 */
int ima_validate_range(phys_addr_t phys, size_t size)
{
	unsigned long start_pfn, end_pfn;
	phys_addr_t end_phys;

	/* An empty buffer has no addressable bytes; reject it explicitly. */
	if (!size)
		return -EINVAL;

	/* Reject ranges whose last byte wraps past the address space. */
	if (check_add_overflow(phys, (phys_addr_t)(size - 1), &end_phys))
		return -EINVAL;

	start_pfn = PHYS_PFN(phys);
	end_pfn = PHYS_PFN(end_phys);

#ifdef CONFIG_X86
	/* pfn_range_is_mapped() validates the entire [start, end] range. */
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		goto out_of_range;
#else
	/*
	 * page_is_ram() is a per-page check: walk every pfn so a buffer
	 * spanning a hole in RAM is rejected, not just one whose two
	 * endpoint pages happen to be valid.
	 */
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
			if (!page_is_ram(pfn))
				goto out_of_range;
		}
	}
#endif
	return 0;

out_of_range:
	pr_warn("IMA: previous kernel measurement buffer %pa (size 0x%zx) lies outside available memory\n",
		&phys, size);
	return -EINVAL;
}