Files
linux-stable-mirror/tools/testing/selftests/mm/soft-dirty.c
T
Linus Torvalds 509d3f4584 Merge tag 'mm-nonmm-stable-2025-12-06-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:

 - "panic: sys_info: Refactor and fix a potential issue" (Andy Shevchenko)
   fixes a build issue and does some cleanup in lib/sys_info.c

 - "Implement mul_u64_u64_div_u64_roundup()" (David Laight)
   enhances the 64-bit math code on behalf of a PWM driver and beefs up
   the test module for these library functions

 - "scripts/gdb/symbols: make BPF debug info available to GDB" (Ilya Leoshkevich)
   makes BPF symbol names, sizes, and line numbers available to the GDB
   debugger

 - "Enable hung_task and lockup cases to dump system info on demand" (Feng Tang)
   adds a sysctl which can be used to cause additional info dumping when
   the hung-task and lockup detectors fire

 - "lib/base64: add generic encoder/decoder, migrate users" (Kuan-Wei Chiu)
   adds a general base64 encoder/decoder to lib/ and migrates several
   users away from their private implementations

 - "rbtree: inline rb_first() and rb_last()" (Eric Dumazet)
   makes TCP a little faster

 - "liveupdate: Rework KHO for in-kernel users" (Pasha Tatashin)
   reworks the KEXEC Handover interfaces in preparation for Live Update
   Orchestrator (LUO), and possibly for other future clients

 - "kho: simplify state machine and enable dynamic updates" (Pasha Tatashin)
   increases the flexibility of KEXEC Handover. Also preparation for LUO

 - "Live Update Orchestrator" (Pasha Tatashin)
   is a major new feature targeted at cloud environments. Quoting the
   cover letter:

      This series introduces the Live Update Orchestrator, a kernel
      subsystem designed to facilitate live kernel updates using a
      kexec-based reboot. This capability is critical for cloud
      environments, allowing hypervisors to be updated with minimal
      downtime for running virtual machines. LUO achieves this by
      preserving the state of selected resources, such as memory,
      devices and their dependencies, across the kernel transition.

      As a key feature, this series includes support for preserving
      memfd file descriptors, which allows critical in-memory data, such
      as guest RAM or any other large memory region, to be maintained in
      RAM across the kexec reboot.

   Mike Rapoport merits a mention here, for his extensive review and
   testing work.

 - "kexec: reorganize kexec and kdump sysfs" (Sourabh Jain)
   moves the kexec and kdump sysfs entries from /sys/kernel/ to
   /sys/kernel/kexec/ and adds back-compatibility symlinks which can
   hopefully be removed one day

 - "kho: fixes for vmalloc restoration" (Mike Rapoport)
   fixes a BUG which was being hit during KHO restoration of vmalloc()
   regions

* tag 'mm-nonmm-stable-2025-12-06-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (139 commits)
  calibrate: update header inclusion
  Reinstate "resource: avoid unnecessary lookups in find_next_iomem_res()"
  vmcoreinfo: track and log recoverable hardware errors
  kho: fix restoring of contiguous ranges of order-0 pages
  kho: kho_restore_vmalloc: fix initialization of pages array
  MAINTAINERS: TPM DEVICE DRIVER: update the W-tag
  init: replace simple_strtoul with kstrtoul to improve lpj_setup
  KHO: fix boot failure due to kmemleak access to non-PRESENT pages
  Documentation/ABI: new kexec and kdump sysfs interface
  Documentation/ABI: mark old kexec sysfs deprecated
  kexec: move sysfs entries to /sys/kernel/kexec
  test_kho: always print restore status
  kho: free chunks using free_page() instead of kfree()
  selftests/liveupdate: add kexec test for multiple and empty sessions
  selftests/liveupdate: add simple kexec-based selftest for LUO
  selftests/liveupdate: add userspace API selftests
  docs: add documentation for memfd preservation via LUO
  mm: memfd_luo: allow preserving memfd
  liveupdate: luo_file: add private argument to store runtime state
  mm: shmem: export some functions to internal.h
  ...
2025-12-06 14:01:20 -08:00

349 lines
8.9 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include "kselftest.h"
#include "vm_util.h"
#include "thp_settings.h"
#define PAGEMAP_FILE_PATH "/proc/self/pagemap"
#define TEST_ITERATIONS 10000
/*
 * Repeatedly toggle the soft-dirty bit on a single anonymous page:
 * after each clear_softdirty() the pagemap bit must read 0, and after
 * each write to the page it must read 1.
 */
static void test_simple(int pagemap_fd, int pagesize)
{
	int i;
	char *map;

	map = aligned_alloc(pagesize, pagesize);
	if (!map)
		ksft_exit_fail_msg("aligned_alloc failed\n");

	clear_softdirty();

	for (i = 0 ; i < TEST_ITERATIONS; i++) {
		if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
			ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
			break;
		}

		clear_softdirty();
		/* Write something to the page to get the dirty bit enabled on the page */
		map[0]++;

		if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
			ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
			break;
		}

		clear_softdirty();
	}
	free(map);

	/* Only a full run (no early break) counts as a pass. */
	ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__);
}
/*
 * Check that a freshly created VMA is reported soft-dirty, and that a
 * second mapping that happens to reuse the same address range is also
 * reported dirty (skipped if the kernel places it elsewhere).
 */
static void test_vma_reuse(int pagemap_fd, int pagesize)
{
	char *map, *map2;

	map = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	/* The kernel always marks new regions as soft dirty */
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
			 "Test %s dirty bit of allocated page\n", __func__);

	clear_softdirty();
	munmap(map, pagesize);

	map2 = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
	if (map2 == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	/* Dirty bit is set for new regions even if they are reused */
	if (map == map2)
		ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
				 "Test %s dirty bit of reused address page\n", __func__);
	else
		ksft_test_result_skip("Test %s dirty bit of reused address page\n", __func__);

	munmap(map2, pagesize);
}
/*
 * Verify soft-dirty tracking on a THP-backed region: allocate a
 * PMD-aligned, PMD-sized buffer, request a huge page via madvise(),
 * and run the same toggle loop as test_simple() on it.
 */
static void test_hugepage(int pagemap_fd, int pagesize)
{
	char *map;
	size_t hpage_len;
	int i, ret;

	if (!thp_is_enabled()) {
		ksft_test_result_skip("Transparent Hugepages not available\n");
		return;
	}

	hpage_len = read_pmd_pagesize();
	if (!hpage_len)
		ksft_exit_fail_msg("Reading PMD pagesize failed\n");

	/* C11 aligned_alloc() replaces the obsolete memalign(). */
	map = aligned_alloc(hpage_len, hpage_len);
	if (!map)
		ksft_exit_fail_msg("aligned_alloc failed\n");

	ret = madvise(map, hpage_len, MADV_HUGEPAGE);
	if (ret)
		ksft_exit_fail_msg("madvise failed %d\n", ret);

	/* Touch every byte so the whole range is populated. */
	for (size_t off = 0; off < hpage_len; off++)
		map[off] = (char)off;

	if (check_huge_anon(map, 1, hpage_len)) {
		ksft_test_result_pass("Test %s huge page allocation\n", __func__);

		clear_softdirty();
		for (i = 0 ; i < TEST_ITERATIONS ; i++) {
			if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
				ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
				break;
			}

			clear_softdirty();
			/* Write something to the page to get the dirty bit enabled on the page */
			map[0]++;

			if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
				ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
				break;
			}
			clear_softdirty();
		}

		ksft_test_result(i == TEST_ITERATIONS, "Test %s huge page dirty bit\n", __func__);
	} else {
		/* Huge page allocation failed; skip the dependent checks. */
		ksft_test_result_skip("Test %s huge page allocation\n", __func__);
		ksft_test_result_skip("Test %s huge page dirty bit\n", __func__);
	}

	free(map);
}
/*
 * Verify soft-dirty behaviour across mprotect() transitions for either
 * an anonymous private mapping (anon == true) or a shared file mapping:
 * the bit must be set by a write, cleared by clear_refs, stay clear
 * across RO and RW mprotect() calls, and be set again by a new write.
 *
 * Note: mmap() returns MAP_FAILED ((void *)-1) on error, never NULL,
 * so failures must be checked against MAP_FAILED.
 */
static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
{
	const char *type[] = {"file", "anon"};
	const char *fname = "./soft-dirty-test-file";
	int test_fd = 0;
	char *map;

	if (anon) {
		map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
			   MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
		if (map == MAP_FAILED)
			ksft_exit_fail_msg("anon mmap failed\n");
	} else {
		test_fd = open(fname, O_RDWR | O_CREAT, 0664);
		if (test_fd < 0) {
			ksft_test_result_skip("Test %s open() file failed\n", __func__);
			return;
		}
		/* Unlink immediately; the open fd keeps the file alive. */
		unlink(fname);
		if (ftruncate(test_fd, pagesize))
			ksft_exit_fail_msg("ftruncate failed\n");
		map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
			   MAP_SHARED, test_fd, 0);
		if (map == MAP_FAILED)
			ksft_exit_fail_msg("file mmap failed\n");
	}

	*map = 1;
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
			 "Test %s-%s dirty bit of new written page\n",
			 __func__, type[anon]);
	clear_softdirty();
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
			 "Test %s-%s soft-dirty clear after clear_refs\n",
			 __func__, type[anon]);
	mprotect(map, pagesize, PROT_READ);
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
			 "Test %s-%s soft-dirty clear after marking RO\n",
			 __func__, type[anon]);
	mprotect(map, pagesize, PROT_READ|PROT_WRITE);
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
			 "Test %s-%s soft-dirty clear after marking RW\n",
			 __func__, type[anon]);
	*map = 2;
	ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
			 "Test %s-%s soft-dirty after rewritten\n",
			 __func__, type[anon]);

	munmap(map, pagesize);

	if (!anon)
		close(test_fd);
}
/*
 * Check that VM_SOFTDIRTY is propagated when VMAs merge, for two merge
 * triggers: an mremap(MREMAP_FIXED) that places a dirty VMA adjacent to
 * a cleaned one, and an mprotect() that makes adjacent VMAs compatible.
 * The layout is built inside a 5-page PROT_NONE reservation so the
 * MAP_FIXED placements are deterministic and cannot merge with
 * unrelated neighbouring mappings.
 */
static void test_merge(int pagemap_fd, int pagesize)
{
char *reserved, *map, *map2;
/*
 * Reserve space for tests:
 *
 * ---padding to ---
 * | avoid adj. |
 * v merge v
 * |---|---|---|---|---|
 * | | 1 | 2 | 3 | |
 * |---|---|---|---|---|
 */
reserved = mmap(NULL, 5 * pagesize, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (reserved == MAP_FAILED)
ksft_exit_fail_msg("mmap failed\n");
/*
 * Release the first four pages; they are re-populated below with
 * MAP_FIXED. The final PROT_NONE page stays mapped as padding.
 */
munmap(reserved, 4 * pagesize);
/*
 * Establish initial VMA:
 *
 * S/D
 * |---|---|---|---|---|
 * | | 1 | | | |
 * |---|---|---|---|---|
 */
map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (map == MAP_FAILED)
ksft_exit_fail_msg("mmap failed\n");
/* This will clear VM_SOFTDIRTY too. */
clear_softdirty();
/*
 * Now place a new mapping which will be marked VM_SOFTDIRTY. Away from
 * map:
 *
 * - S/D
 * |---|---|---|---|---|
 * | | 1 | | 2 | |
 * |---|---|---|---|---|
 */
map2 = mmap(&reserved[3 * pagesize], pagesize, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (map2 == MAP_FAILED)
ksft_exit_fail_msg("mmap failed\n");
/*
 * Now remap it immediately adjacent to map, if the merge correctly
 * propagates VM_SOFTDIRTY, we should then observe the VMA as a whole
 * being marked soft-dirty:
 *
 * merge
 * S/D
 * |---|-------|---|---|
 * | | 1 | | |
 * |---|-------|---|---|
 */
map2 = mremap(map2, pagesize, pagesize, MREMAP_FIXED | MREMAP_MAYMOVE,
&reserved[2 * pagesize]);
if (map2 == MAP_FAILED)
ksft_exit_fail_msg("mremap failed\n");
/* Both pages of the merged VMA must now read as soft-dirty. */
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
"Test %s-anon soft-dirty after remap merge 1st pg\n",
__func__);
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
"Test %s-anon soft-dirty after remap merge 2nd pg\n",
__func__);
munmap(map, 2 * pagesize);
/*
 * Now establish another VMA:
 *
 * S/D
 * |---|---|---|---|---|
 * | | 1 | | | |
 * |---|---|---|---|---|
 */
map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (map == MAP_FAILED)
ksft_exit_fail_msg("mmap failed\n");
/* Clear VM_SOFTDIRTY... */
clear_softdirty();
/* ...and establish incompatible adjacent VMA:
 *
 * - S/D
 * |---|---|---|---|---|
 * | | 1 | 2 | | |
 * |---|---|---|---|---|
 */
map2 = mmap(&reserved[2 * pagesize], pagesize,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (map2 == MAP_FAILED)
ksft_exit_fail_msg("mmap failed\n");
/*
 * Now mprotect() VMA 1 so it's compatible with 2 and therefore merges:
 *
 * merge
 * S/D
 * |---|-------|---|---|
 * | | 1 | | |
 * |---|-------|---|---|
 */
if (mprotect(map, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC))
ksft_exit_fail_msg("mprotect failed\n");
/* Again, the merged VMA must be soft-dirty across both pages. */
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
"Test %s-anon soft-dirty after mprotect merge 1st pg\n",
__func__);
ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
"Test %s-anon soft-dirty after mprotect merge 2nd pg\n",
__func__);
munmap(map, 2 * pagesize);
}
/* Run the mprotect soft-dirty checks against an anonymous mapping. */
static void test_mprotect_anon(int pagemap_fd, int pagesize)
{
	test_mprotect(pagemap_fd, pagesize, /* anon = */ true);
}
/* Run the mprotect soft-dirty checks against a shared file mapping. */
static void test_mprotect_file(int pagemap_fd, int pagesize)
{
	test_mprotect(pagemap_fd, pagesize, /* anon = */ false);
}
/*
 * Entry point: skip the whole suite when soft-dirty tracking is
 * unavailable, then run all 19 planned checks against
 * /proc/self/pagemap.
 */
int main(int argc, char **argv)
{
	int pagemap_fd;
	int pagesize;

	ksft_print_header();

	if (!softdirty_supported())
		ksft_exit_skip("soft-dirty is not supported\n");

	ksft_set_plan(19);

	pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
	if (pagemap_fd < 0)
		ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);

	pagesize = getpagesize();

	test_simple(pagemap_fd, pagesize);
	test_vma_reuse(pagemap_fd, pagesize);
	test_hugepage(pagemap_fd, pagesize);
	test_mprotect_anon(pagemap_fd, pagesize);
	test_mprotect_file(pagemap_fd, pagesize);
	test_merge(pagemap_fd, pagesize);

	close(pagemap_fd);

	ksft_finished();
}