Commit d451b075 authored by Linus Torvalds

Merge tag 'linux_kselftest-next-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull kselftest update from Shuah Khan:

 - livepatch restructuring to move the test modules out of lib/ so that
   they are built as out-of-tree modules during the kselftest build.
   This makes it easier to change, debug, and rebuild the tests by
   running make in the selftests/livepatch directory (see the example
   after this list), which was not previously possible since the modules
   in lib/livepatch were built and installed using the main makefile
   modules target.

 - livepatch restructuring fixes for problems found by the kernel test
   robot. The change skips the tests if kernel-devel isn't installed
   (the default value of KDIR), or if the KDIR variable passed doesn't
   exist.

 - resctrl test restructuring and new non-contiguous CBMs CAT test

 - new ktap_helpers to print diagnostic messages, pass/fail tests based
   on exit code, abort the test, and finish the test.

 - a new test to verify power supply properties.

 - a new ftrace test to exercise the function tracer across cpu
   hotplug.

 - timeout increase for mqueue test to allow the test to run on i3.metal
   AWS instances.

 - minor spelling corrections in several tests.

 - missing gitignore files and changes to existing gitignore files.
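
As an illustration of the new livepatch flow from the first item above
(a sketch, assuming kernel-devel headers are installed so KDIR
resolves), the test modules and tests can now be built and run with:

    make -C tools/testing/selftests/livepatch
    make -C tools/testing/selftests TARGETS=livepatch run_tests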

* tag 'linux_kselftest-next-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (57 commits)
  kselftest: Add basic test for probing the rust sample modules
  selftests: lib.mk: Do not process TEST_GEN_MODS_DIR
  selftests: livepatch: Avoid running the tests if kernel-devel is missing
  selftests: livepatch: Add initial .gitignore
  selftests/resctrl: Add non-contiguous CBMs CAT test
  selftests/resctrl: Add resource_info_file_exists()
  selftests/resctrl: Split validate_resctrl_feature_request()
  selftests/resctrl: Add a helper for the non-contiguous test
  selftests/resctrl: Add test groups and name L3 CAT test L3_CAT
  selftests: sched: Fix spelling mistake "hiearchy" -> "hierarchy"
  selftests/mqueue: Set timeout to 180 seconds
  selftests/ftrace: Add test to exercize function tracer across cpu hotplug
  selftest: ftrace: fix minor typo in log
  selftests: thermal: intel: workload_hint: add missing gitignore
  selftests: thermal: intel: power_floor: add missing gitignore
  selftests: uevent: add missing gitignore
  selftests: Add test to verify power supply properties
  selftests: ktap_helpers: Add a helper to finish the test
  selftests: ktap_helpers: Add a helper to abort the test
  selftests: ktap_helpers: Add helper to pass/fail test based on exit code
  ...
parents e8f897f4 5d94da7f
......@@ -245,6 +245,10 @@ Contributing new tests (details)
TEST_PROGS, TEST_GEN_PROGS mean it is the executable tested by
default.
TEST_GEN_MODS_DIR should be used by tests that require modules to be built
before the test starts. The variable will contain the name of the directory
containing the modules.
TEST_CUSTOM_PROGS should be used by tests that require custom build
rules and prevent common build rule use.
......
......@@ -12517,7 +12517,6 @@ F: arch/powerpc/include/asm/livepatch.h
F: include/linux/livepatch.h
F: kernel/livepatch/
F: kernel/module/livepatch.c
F: lib/livepatch/
F: samples/livepatch/
F: tools/testing/selftests/livepatch/
......@@ -17550,6 +17549,7 @@ F: Documentation/devicetree/bindings/power/supply/
F: drivers/power/supply/
F: include/linux/power/
F: include/linux/power_supply.h
F: tools/testing/selftests/power_supply/
POWERNV OPERATOR PANEL LCD DISPLAY DRIVER
M: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
......@@ -19120,6 +19120,7 @@ F: Documentation/rust/
F: rust/
F: samples/rust/
F: scripts/*rust*
F: tools/testing/selftests/rust/
K: \b(?i:rust)\b
RXRPC SOCKETS (AF_RXRPC)
......
......@@ -880,4 +880,3 @@ CONFIG_ATOMIC64_SELFTEST=y
CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
......@@ -808,4 +808,3 @@ CONFIG_KPROBES_SANITY_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
......@@ -2858,28 +2858,6 @@ config TEST_MEMCAT_P
If unsure, say N.
config TEST_LIVEPATCH
tristate "Test livepatching"
default n
depends on DYNAMIC_DEBUG
depends on LIVEPATCH
depends on m
help
Test kernel livepatching features for correctness. The tests will
load test modules that will be livepatched in various scenarios.
To run all the livepatching tests:
make -C tools/testing/selftests TARGETS=livepatch run_tests
Alternatively, individual tests may be invoked:
tools/testing/selftests/livepatch/test-callbacks.sh
tools/testing/selftests/livepatch/test-livepatch.sh
tools/testing/selftests/livepatch/test-shadow-vars.sh
If unsure, say N.
config TEST_OBJAGG
tristate "Perform selftest on object aggreration manager"
default n
......
......@@ -134,8 +134,6 @@ endif
obj-$(CONFIG_TEST_FPU) += test_fpu.o
CFLAGS_test_fpu.o += $(FPU_CFLAGS)
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
# so we can't just use obj-$(CONFIG_KUNIT).
ifdef CONFIG_KUNIT
......
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for livepatch test code.
obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
test_klp_callbacks_demo.o \
test_klp_callbacks_demo2.o \
test_klp_callbacks_busy.o \
test_klp_callbacks_mod.o \
test_klp_livepatch.o \
test_klp_shadow_vars.o \
test_klp_state.o \
test_klp_state2.o \
test_klp_state3.o
......@@ -67,6 +67,7 @@ TARGETS += nsfs
TARGETS += perf_events
TARGETS += pidfd
TARGETS += pid_namespace
TARGETS += power_supply
TARGETS += powerpc
TARGETS += prctl
TARGETS += proc
......@@ -78,6 +79,7 @@ TARGETS += riscv
TARGETS += rlimits
TARGETS += rseq
TARGETS += rtc
TARGETS += rust
TARGETS += seccomp
TARGETS += sgx
TARGETS += sigaltstack
......@@ -236,6 +238,7 @@ ifdef INSTALL_PATH
install -m 744 kselftest/module.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/ktap_helpers.sh $(INSTALL_PATH)/kselftest/
install -m 744 run_kselftest.sh $(INSTALL_PATH)/
rm -f $(TEST_LIST)
@ret=1; \
......
......@@ -4,7 +4,7 @@ ifneq ($(PY3),)
TEST_PROGS := test_unprobed_devices.sh
TEST_GEN_FILES := compatible_list
TEST_FILES := compatible_ignore_list ktap_helpers.sh
TEST_FILES := compatible_ignore_list
include ../lib.mk
......
......@@ -15,16 +15,12 @@
DIR="$(dirname $(readlink -f "$0"))"
source "${DIR}"/ktap_helpers.sh
source "${DIR}"/../kselftest/ktap_helpers.sh
PDT=/proc/device-tree/
COMPAT_LIST="${DIR}"/compatible_list
IGNORE_LIST="${DIR}"/compatible_ignore_list
KSFT_PASS=0
KSFT_FAIL=1
KSFT_SKIP=4
ktap_print_header
if [[ ! -d "${PDT}" ]]; then
......
......@@ -504,7 +504,7 @@ prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
if [ "$KTAP" = "1" ]; then
echo -n "# Totals:"
echo -n " pass:"`echo $PASSED_CASES | wc -w`
echo -n " faii:"`echo $FAILED_CASES | wc -w`
echo -n " fail:"`echo $FAILED_CASES | wc -w`
echo -n " xfail:"`echo $XFAILED_CASES | wc -w`
echo -n " xpass:0"
echo -n " skip:"`echo $UNTESTED_CASES $UNSUPPORTED_CASES | wc -w`
......
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Test file and directory owership changes for eventfs
# description: Test file and directory ownership changes for eventfs
original_group=`stat -c "%g" .`
original_owner=`stat -c "%u" .`
......
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# description: ftrace - function trace across cpu hotplug
# requires: function:tracer
if ! which nproc ; then
nproc() {
ls -d /sys/devices/system/cpu/cpu[0-9]* | wc -l
}
fi
NP=`nproc`
if [ $NP -eq 1 ] ;then
echo "We cannot test cpu hotplug in UP environment"
exit_unresolved
fi
# Find online cpu
for i in /sys/devices/system/cpu/cpu[1-9]*; do
if [ -f $i/online ] && [ "$(cat $i/online)" = "1" ]; then
cpu=$i
break
fi
done
if [ -z "$cpu" ]; then
echo "We cannot test cpu hotplug with a single cpu online"
exit_unresolved
fi
echo 0 > tracing_on
echo > trace
: "Set $(basename $cpu) offline/online with function tracer enabled"
echo function > current_tracer
echo 1 > tracing_on
(echo 0 > $cpu/online)
(echo "forked"; sleep 1)
(echo 1 > $cpu/online)
echo 0 > tracing_on
echo nop > current_tracer
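
A sketch of running this check on its own through the ftracetest runner; the
.tc file name below is assumed, not taken from this diff:

    cd tools/testing/selftests/ftrace
    ./ftracetest test.d/ftrace/func_hotplug.tc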
......@@ -40,7 +40,7 @@ grep "id: \(unknown_\|sys_\)" events/raw_syscalls/sys_exit/hist > /dev/null || \
reset_trigger
echo "Test histgram with log2 modifier"
echo "Test histogram with log2 modifier"
echo 'hist:keys=bytes_req.log2' > events/kmem/kmalloc/trigger
for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
......
......@@ -17,6 +17,8 @@
*
*****************************************************************************/
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <pthread.h>
......@@ -358,6 +360,7 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
int main(int argc, char *argv[])
{
const char *test_name;
int c, ret;
while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
......@@ -397,6 +400,14 @@ int main(int argc, char *argv[])
"\tArguments: broadcast=%d locked=%d owner=%d timeout=%ldns\n",
broadcast, locked, owner, timeout_ns);
ret = asprintf(&test_name,
"%s broadcast=%d locked=%d owner=%d timeout=%ldns",
TEST_NAME, broadcast, locked, owner, timeout_ns);
if (ret < 0) {
ksft_print_msg("Failed to generate test name\n");
test_name = TEST_NAME;
}
/*
* FIXME: unit_test is obsolete now that we parse options and the
* various style of runs are done by run.sh - simplify the code and move
......@@ -404,6 +415,6 @@ int main(int argc, char *argv[])
*/
ret = unit_test(broadcast, locked, owner, timeout_ns);
print_result(TEST_NAME, ret);
print_result(test_name, ret);
return ret;
}
......@@ -9,14 +9,27 @@ KTAP_CNT_PASS=0
KTAP_CNT_FAIL=0
KTAP_CNT_SKIP=0
KSFT_PASS=0
KSFT_FAIL=1
KSFT_XFAIL=2
KSFT_XPASS=3
KSFT_SKIP=4
KSFT_NUM_TESTS=0
ktap_print_header() {
echo "TAP version 13"
}
ktap_print_msg()
{
echo "#" $@
}
ktap_set_plan() {
num_tests="$1"
KSFT_NUM_TESTS="$1"
echo "1..$num_tests"
echo "1..$KSFT_NUM_TESTS"
}
ktap_skip_all() {
......@@ -65,6 +78,34 @@ ktap_test_fail() {
KTAP_CNT_FAIL=$((KTAP_CNT_FAIL+1))
}
ktap_test_result() {
description="$1"
shift
if $@; then
ktap_test_pass "$description"
else
ktap_test_fail "$description"
fi
}
ktap_exit_fail_msg() {
echo "Bail out! " $@
ktap_print_totals
exit "$KSFT_FAIL"
}
ktap_finished() {
ktap_print_totals
if [ $(("$KTAP_CNT_PASS" + "$KTAP_CNT_SKIP")) -eq "$KSFT_NUM_TESTS" ]; then
exit "$KSFT_PASS"
else
exit "$KSFT_FAIL"
fi
}
ktap_print_totals() {
echo "# Totals: pass:$KTAP_CNT_PASS fail:$KTAP_CNT_FAIL xfail:0 xpass:0 skip:$KTAP_CNT_SKIP error:0"
}
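
A minimal sketch of a selftest script using these helpers (the checks below
are illustrative, not taken from the tree):

    #!/bin/sh
    # SPDX-License-Identifier: GPL-2.0
    . "$(dirname "$0")"/../kselftest/ktap_helpers.sh

    ktap_print_header
    ktap_set_plan 2

    # pass/fail is derived from the exit status of the supplied command
    ktap_test_result "true is true" true
    ktap_test_result "proc is mounted" [ -d /proc/self ]

    # print the totals line and exit with KSFT_PASS or KSFT_FAIL
    ktap_finished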
......@@ -58,7 +58,8 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) \
$(if $(TEST_GEN_MODS_DIR),gen_mods_dir)
define RUN_TESTS
BASE_DIR="$(selfdir)"; \
......@@ -71,8 +72,8 @@ endef
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)$(TEST_GEN_MODS_DIR)" != "X" ]; then \
rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(TEST_GEN_MODS_DIR) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
......@@ -84,11 +85,22 @@ else
@$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
endif
gen_mods_dir:
$(Q)$(MAKE) -C $(TEST_GEN_MODS_DIR)
clean_mods_dir:
$(Q)$(MAKE) -C $(TEST_GEN_MODS_DIR) clean
define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
$(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
endef
define INSTALL_MODS_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH)/$(INSTALL_LIST))
$(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST)/*.ko $(INSTALL_PATH)/$(INSTALL_LIST))
endef
define INSTALL_RULE
$(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
......@@ -97,6 +109,7 @@ define INSTALL_RULE
$(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(notdir $(TEST_GEN_MODS_DIR))) $(INSTALL_MODS_RULE)
$(eval INSTALL_LIST = $(wildcard config settings)) $(INSTALL_SINGLE_RULE)
endef
......@@ -122,7 +135,7 @@ define CLEAN
$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
endef
clean:
clean: $(if $(TEST_GEN_MODS_DIR),clean_mods_dir)
$(CLEAN)
# Enables to extend CFLAGS and LDFLAGS from command line, e.g.
......@@ -153,4 +166,4 @@ $(OUTPUT)/%:%.S
$(LINK.S) $^ $(LDLIBS) -o $@
endif
.PHONY: run_tests all clean install emit_tests
.PHONY: run_tests all clean install emit_tests gen_mods_dir clean_mods_dir
# SPDX-License-Identifier: GPL-2.0
TEST_GEN_FILES := test_klp-call_getpid
TEST_GEN_MODS_DIR := test_modules
TEST_PROGS_EXTENDED := functions.sh
TEST_PROGS := \
test-livepatch.sh \
......@@ -7,7 +9,8 @@ TEST_PROGS := \
test-shadow-vars.sh \
test-state.sh \
test-ftrace.sh \
test-sysfs.sh
test-sysfs.sh \
test-syscall.sh
TEST_FILES := settings
......
......@@ -13,23 +13,36 @@ the message buffer for only the duration of each individual test.)
Config
------
Set these config options and their prerequisites:
Set the CONFIG_LIVEPATCH=y option and its prerequisites.
CONFIG_LIVEPATCH=y
CONFIG_TEST_LIVEPATCH=m
Building the tests
------------------
To only build the tests without running them, run:
% make -C tools/testing/selftests/livepatch
The command above will compile all test modules and test programs, making them
ready to be packaged if so desired.
Running the tests
-----------------
Test kernel modules are built as part of lib/ (make modules) and need to
be installed (make modules_install) as the test scripts will modprobe
them.
Test kernel modules are built before running the livepatch selftests. The
modules are located under the test_modules directory and are built as
out-of-tree modules. This is especially useful since the same sources can be
built and tested on systems with different kABI, ensuring the tests are
backwards compatible. The modules will be loaded by the test scripts using
insmod.
To run the livepatch selftests, from the top of the kernel source tree:
% make -C tools/testing/selftests TARGETS=livepatch run_tests
or
% make kselftest TARGETS=livepatch
Adding tests
------------
......
CONFIG_LIVEPATCH=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_TEST_LIVEPATCH=m
......@@ -34,6 +34,18 @@ function is_root() {
fi
}
# Check if we can compile the modules before loading them
function has_kdir() {
if [ -z "$KDIR" ]; then
KDIR="/lib/modules/$(uname -r)/build"
fi
if [ ! -d "$KDIR" ]; then
echo "skip all tests: KDIR ($KDIR) not available to compile modules."
exit $ksft_skip
fi
}
# die(msg) - game over, man
# msg - dying words
function die() {
......@@ -96,6 +108,7 @@ function cleanup() {
# the ftrace_enabled sysctl.
function setup_config() {
is_root
has_kdir
push_config
set_dynamic_debug
set_ftrace_enabled 1
......@@ -115,16 +128,14 @@ function loop_until() {
done
}
function assert_mod() {
local mod="$1"
modprobe --dry-run "$mod" &>/dev/null
}
function is_livepatch_mod() {
local mod="$1"
if [[ $(modinfo "$mod" | awk '/^livepatch:/{print $NF}') == "Y" ]]; then
if [[ ! -f "test_modules/$mod.ko" ]]; then
die "Can't find \"test_modules/$mod.ko\", try \"make\""
fi
if [[ $(modinfo "test_modules/$mod.ko" | awk '/^livepatch:/{print $NF}') == "Y" ]]; then
return 0
fi
......@@ -134,9 +145,9 @@ function is_livepatch_mod() {
function __load_mod() {
local mod="$1"; shift
local msg="% modprobe $mod $*"
local msg="% insmod test_modules/$mod.ko $*"
log "${msg%% }"
ret=$(modprobe "$mod" "$@" 2>&1)
ret=$(insmod "test_modules/$mod.ko" "$@" 2>&1)
if [[ "$ret" != "" ]]; then
die "$ret"
fi
......@@ -149,13 +160,10 @@ function __load_mod() {
# load_mod(modname, params) - load a kernel module
# modname - module name to load
# params - module parameters to pass to modprobe
# params - module parameters to pass to insmod
function load_mod() {
local mod="$1"; shift
assert_mod "$mod" ||
skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
is_livepatch_mod "$mod" &&
die "use load_lp() to load the livepatch module $mod"
......@@ -165,13 +173,10 @@ function load_mod() {
# load_lp_nowait(modname, params) - load a kernel module with a livepatch
# but do not wait on until the transition finishes
# modname - module name to load
# params - module parameters to pass to modprobe
# params - module parameters to pass to insmod
function load_lp_nowait() {
local mod="$1"; shift
assert_mod "$mod" ||
skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
is_livepatch_mod "$mod" ||
die "module $mod is not a livepatch"
......@@ -184,7 +189,7 @@ function load_lp_nowait() {
# load_lp(modname, params) - load a kernel module with a livepatch
# modname - module name to load
# params - module parameters to pass to modprobe
# params - module parameters to pass to insmod
function load_lp() {
local mod="$1"; shift
......@@ -197,13 +202,13 @@ function load_lp() {
# load_failing_mod(modname, params) - load a kernel module, expect to fail
# modname - module name to load
# params - module parameters to pass to modprobe
# params - module parameters to pass to insmod
function load_failing_mod() {
local mod="$1"; shift
local msg="% modprobe $mod $*"
local msg="% insmod test_modules/$mod.ko $*"
log "${msg%% }"
ret=$(modprobe "$mod" "$@" 2>&1)
ret=$(insmod "test_modules/$mod.ko" "$@" 2>&1)
if [[ "$ret" == "" ]]; then
die "$mod unexpectedly loaded"
fi
......
......@@ -34,9 +34,9 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
unload_mod $MOD_TARGET
check_result "% modprobe $MOD_TARGET
check_result "% insmod test_modules/$MOD_TARGET.ko
$MOD_TARGET: ${MOD_TARGET}_init
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -81,7 +81,7 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
unload_mod $MOD_TARGET
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -89,7 +89,7 @@ livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_TARGET
% insmod test_modules/$MOD_TARGET.ko
livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
......@@ -129,9 +129,9 @@ unload_mod $MOD_TARGET
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_TARGET
check_result "% insmod test_modules/$MOD_TARGET.ko
$MOD_TARGET: ${MOD_TARGET}_init
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -177,7 +177,7 @@ unload_mod $MOD_TARGET
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -185,7 +185,7 @@ livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_TARGET
% insmod test_modules/$MOD_TARGET.ko
livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
......@@ -219,7 +219,7 @@ load_lp $MOD_LIVEPATCH
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -254,9 +254,9 @@ load_mod $MOD_TARGET
load_failing_mod $MOD_LIVEPATCH pre_patch_ret=-19
unload_mod $MOD_TARGET
check_result "% modprobe $MOD_TARGET
check_result "% insmod test_modules/$MOD_TARGET.ko
$MOD_TARGET: ${MOD_TARGET}_init
% modprobe $MOD_LIVEPATCH pre_patch_ret=-19
% insmod test_modules/$MOD_LIVEPATCH.ko pre_patch_ret=-19
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
test_klp_callbacks_demo: pre_patch_callback: vmlinux
......@@ -265,7 +265,7 @@ livepatch: failed to enable patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': canceling patching transition, going to unpatch
livepatch: '$MOD_LIVEPATCH': completing unpatching transition
livepatch: '$MOD_LIVEPATCH': unpatching complete
modprobe: ERROR: could not insert '$MOD_LIVEPATCH': No such device
insmod: ERROR: could not insert module test_modules/$MOD_LIVEPATCH.ko: No such device
% rmmod $MOD_TARGET
$MOD_TARGET: ${MOD_TARGET}_exit"
......@@ -295,7 +295,7 @@ load_failing_mod $MOD_TARGET
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -304,12 +304,12 @@ livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
livepatch: '$MOD_LIVEPATCH': patching complete
% echo -19 > /sys/module/$MOD_LIVEPATCH/parameters/pre_patch_ret
% modprobe $MOD_TARGET
% insmod test_modules/$MOD_TARGET.ko
livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
livepatch: pre-patch callback failed for object '$MOD_TARGET'
livepatch: patch '$MOD_LIVEPATCH' failed for module '$MOD_TARGET', refusing to load module '$MOD_TARGET'
modprobe: ERROR: could not insert '$MOD_TARGET': No such device
insmod: ERROR: could not insert module test_modules/$MOD_TARGET.ko: No such device
% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
......@@ -340,11 +340,11 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
unload_mod $MOD_TARGET_BUSY
check_result "% modprobe $MOD_TARGET_BUSY block_transition=N
check_result "% insmod test_modules/$MOD_TARGET_BUSY.ko block_transition=N
$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
$MOD_TARGET_BUSY: busymod_work_func enter
$MOD_TARGET_BUSY: busymod_work_func exit
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -354,7 +354,7 @@ livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_TARGET
% insmod test_modules/$MOD_TARGET.ko
livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
......@@ -421,16 +421,16 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
unload_mod $MOD_TARGET_BUSY
check_result "% modprobe $MOD_TARGET_BUSY block_transition=Y
check_result "% insmod test_modules/$MOD_TARGET_BUSY.ko block_transition=Y
$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
$MOD_TARGET_BUSY: busymod_work_func enter
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
livepatch: '$MOD_LIVEPATCH': starting patching transition
% modprobe $MOD_TARGET
% insmod test_modules/$MOD_TARGET.ko
livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
$MOD_TARGET: ${MOD_TARGET}_init
......@@ -467,7 +467,7 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -475,7 +475,7 @@ livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_LIVEPATCH2
% insmod test_modules/$MOD_LIVEPATCH2.ko
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......@@ -523,7 +523,7 @@ disable_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -531,7 +531,7 @@ livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_LIVEPATCH2 replace=1
% insmod test_modules/$MOD_LIVEPATCH2.ko replace=1
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......
......@@ -35,7 +35,7 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "livepatch: kernel.ftrace_enabled = 0
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: failed to register ftrace handler for function 'cmdline_proc_show' (-16)
......@@ -44,9 +44,9 @@ livepatch: failed to enable patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': canceling patching transition, going to unpatch
livepatch: '$MOD_LIVEPATCH': completing unpatching transition
livepatch: '$MOD_LIVEPATCH': unpatching complete
modprobe: ERROR: could not insert '$MOD_LIVEPATCH': Device or resource busy
insmod: ERROR: could not insert module test_modules/$MOD_LIVEPATCH.ko: Device or resource busy
livepatch: kernel.ftrace_enabled = 1
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: '$MOD_LIVEPATCH': starting patching transition
......
......@@ -31,7 +31,7 @@ if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH: this has been live patched" ]]
die "livepatch kselftest(s) failed"
fi
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: '$MOD_LIVEPATCH': starting patching transition
......@@ -75,14 +75,14 @@ unload_lp $MOD_LIVEPATCH
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
livepatch: '$MOD_LIVEPATCH': patching complete
$MOD_LIVEPATCH: this has been live patched
% modprobe $MOD_REPLACE replace=0
% insmod test_modules/$MOD_REPLACE.ko replace=0
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
livepatch: '$MOD_REPLACE': starting patching transition
......@@ -135,14 +135,14 @@ unload_lp $MOD_REPLACE
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: '$MOD_LIVEPATCH': starting patching transition
livepatch: '$MOD_LIVEPATCH': completing patching transition
livepatch: '$MOD_LIVEPATCH': patching complete
$MOD_LIVEPATCH: this has been live patched
% modprobe $MOD_REPLACE replace=1
% insmod test_modules/$MOD_REPLACE.ko replace=1
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
livepatch: '$MOD_REPLACE': starting patching transition
......
......@@ -16,7 +16,7 @@ start_test "basic shadow variable API"
load_mod $MOD_TEST
unload_mod $MOD_TEST
check_result "% modprobe $MOD_TEST
check_result "% insmod test_modules/$MOD_TEST.ko
$MOD_TEST: klp_shadow_get(obj=PTR1, id=0x1234) = PTR0
$MOD_TEST: got expected NULL result
$MOD_TEST: shadow_ctor: PTR3 -> PTR2
......
......@@ -19,7 +19,7 @@ load_lp $MOD_LIVEPATCH
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -51,7 +51,7 @@ unload_lp $MOD_LIVEPATCH
disable_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH2
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
$MOD_LIVEPATCH: pre_patch_callback: vmlinux
......@@ -61,7 +61,7 @@ livepatch: '$MOD_LIVEPATCH': completing patching transition
$MOD_LIVEPATCH: post_patch_callback: vmlinux
$MOD_LIVEPATCH: fix_console_loglevel: fixing console_loglevel
livepatch: '$MOD_LIVEPATCH': patching complete
% modprobe $MOD_LIVEPATCH2
% insmod test_modules/$MOD_LIVEPATCH2.ko
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......@@ -96,7 +96,7 @@ disable_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH3
check_result "% modprobe $MOD_LIVEPATCH2
check_result "% insmod test_modules/$MOD_LIVEPATCH2.ko
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......@@ -106,7 +106,7 @@ livepatch: '$MOD_LIVEPATCH2': completing patching transition
$MOD_LIVEPATCH2: post_patch_callback: vmlinux
$MOD_LIVEPATCH2: fix_console_loglevel: fixing console_loglevel
livepatch: '$MOD_LIVEPATCH2': patching complete
% modprobe $MOD_LIVEPATCH3
% insmod test_modules/$MOD_LIVEPATCH3.ko
livepatch: enabling patch '$MOD_LIVEPATCH3'
livepatch: '$MOD_LIVEPATCH3': initializing patching transition
$MOD_LIVEPATCH3: pre_patch_callback: vmlinux
......@@ -117,7 +117,7 @@ $MOD_LIVEPATCH3: post_patch_callback: vmlinux
$MOD_LIVEPATCH3: fix_console_loglevel: taking over the console_loglevel change
livepatch: '$MOD_LIVEPATCH3': patching complete
% rmmod $MOD_LIVEPATCH2
% modprobe $MOD_LIVEPATCH2
% insmod test_modules/$MOD_LIVEPATCH2.ko
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......@@ -149,7 +149,7 @@ load_failing_mod $MOD_LIVEPATCH
disable_lp $MOD_LIVEPATCH2
unload_lp $MOD_LIVEPATCH2
check_result "% modprobe $MOD_LIVEPATCH2
check_result "% insmod test_modules/$MOD_LIVEPATCH2.ko
livepatch: enabling patch '$MOD_LIVEPATCH2'
livepatch: '$MOD_LIVEPATCH2': initializing patching transition
$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
......@@ -159,9 +159,9 @@ livepatch: '$MOD_LIVEPATCH2': completing patching transition
$MOD_LIVEPATCH2: post_patch_callback: vmlinux
$MOD_LIVEPATCH2: fix_console_loglevel: fixing console_loglevel
livepatch: '$MOD_LIVEPATCH2': patching complete
% modprobe $MOD_LIVEPATCH
% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: Livepatch patch ($MOD_LIVEPATCH) is not compatible with the already installed livepatches.
modprobe: ERROR: could not insert '$MOD_LIVEPATCH': Invalid argument
insmod: ERROR: could not insert module test_modules/$MOD_LIVEPATCH.ko: Invalid parameters
% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
......
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2023 SUSE
# Author: Marcos Paulo de Souza <mpdesouza@suse.com>
. $(dirname $0)/functions.sh
MOD_SYSCALL=test_klp_syscall
setup_config
# - Start _NRPROC processes calling getpid and load a livepatch to patch the
# getpid syscall. Check if all the processes transitioned to the livepatched
# state.
start_test "patch getpid syscall while being heavily hammered"
for i in $(seq 1 $(getconf _NPROCESSORS_ONLN)); do
./test_klp-call_getpid &
pids[$i]="$!"
done
pid_list=$(echo ${pids[@]} | tr ' ' ',')
load_lp $MOD_SYSCALL klp_pids=$pid_list
# wait for all tasks to transition to patched state
loop_until 'grep -q '^0$' /sys/kernel/test_klp_syscall/npids'
pending_pids=$(cat /sys/kernel/test_klp_syscall/npids)
log "$MOD_SYSCALL: Remaining not livepatched processes: $pending_pids"
for pid in ${pids[@]}; do
kill $pid || true
done
disable_lp $MOD_SYSCALL
unload_lp $MOD_SYSCALL
check_result "% insmod test_modules/$MOD_SYSCALL.ko klp_pids=$pid_list
livepatch: enabling patch '$MOD_SYSCALL'
livepatch: '$MOD_SYSCALL': initializing patching transition
livepatch: '$MOD_SYSCALL': starting patching transition
livepatch: '$MOD_SYSCALL': completing patching transition
livepatch: '$MOD_SYSCALL': patching complete
$MOD_SYSCALL: Remaining not livepatched processes: 0
% echo 0 > /sys/kernel/livepatch/$MOD_SYSCALL/enabled
livepatch: '$MOD_SYSCALL': initializing unpatching transition
livepatch: '$MOD_SYSCALL': starting unpatching transition
livepatch: '$MOD_SYSCALL': completing unpatching transition
livepatch: '$MOD_SYSCALL': unpatching complete
% rmmod $MOD_SYSCALL"
exit 0
......@@ -27,7 +27,7 @@ disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe $MOD_LIVEPATCH
check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
livepatch: enabling patch '$MOD_LIVEPATCH'
livepatch: '$MOD_LIVEPATCH': initializing patching transition
livepatch: '$MOD_LIVEPATCH': starting patching transition
......@@ -56,7 +56,7 @@ check_sysfs_value "$MOD_LIVEPATCH" "$MOD_TARGET/patched" "0"
disable_lp $MOD_LIVEPATCH
unload_lp $MOD_LIVEPATCH
check_result "% modprobe test_klp_callbacks_demo
check_result "% insmod test_modules/test_klp_callbacks_demo.ko
livepatch: enabling patch 'test_klp_callbacks_demo'
livepatch: 'test_klp_callbacks_demo': initializing patching transition
test_klp_callbacks_demo: pre_patch_callback: vmlinux
......@@ -64,7 +64,7 @@ livepatch: 'test_klp_callbacks_demo': starting patching transition
livepatch: 'test_klp_callbacks_demo': completing patching transition
test_klp_callbacks_demo: post_patch_callback: vmlinux
livepatch: 'test_klp_callbacks_demo': patching complete
% modprobe test_klp_callbacks_mod
% insmod test_modules/test_klp_callbacks_mod.ko
livepatch: applying patch 'test_klp_callbacks_demo' to loading module 'test_klp_callbacks_mod'
test_klp_callbacks_demo: pre_patch_callback: test_klp_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
test_klp_callbacks_demo: post_patch_callback: test_klp_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 SUSE
* Authors: Libor Pechacek <lpechacek@suse.cz>
* Marcos Paulo de Souza <mpdesouza@suse.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <signal.h>
static int stop;
static int sig_int;
void hup_handler(int signum)
{
stop = 1;
}
void int_handler(int signum)
{
stop = 1;
sig_int = 1;
}
int main(int argc, char *argv[])
{
long count = 0;
signal(SIGHUP, &hup_handler);
signal(SIGINT, &int_handler);
while (!stop) {
(void)syscall(SYS_getpid);
count++;
}
if (sig_int)
printf("%ld iterations done\n", count);
return 0;
}
TESTMODS_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= /lib/modules/$(shell uname -r)/build
obj-m += test_klp_atomic_replace.o \
test_klp_callbacks_busy.o \
test_klp_callbacks_demo.o \
test_klp_callbacks_demo2.o \
test_klp_callbacks_mod.o \
test_klp_livepatch.o \
test_klp_state.o \
test_klp_state2.o \
test_klp_state3.o \
test_klp_shadow_vars.o \
test_klp_syscall.o
# Ensure that KDIR exists, otherwise skip the compilation
modules:
ifneq ("$(wildcard $(KDIR))", "")
$(Q)$(MAKE) -C $(KDIR) modules KBUILD_EXTMOD=$(TESTMODS_DIR)
endif
# Ensure that KDIR exists, otherwise skip the clean target
clean:
ifneq ("$(wildcard $(KDIR))", "")
$(Q)$(MAKE) -C $(KDIR) clean KBUILD_EXTMOD=$(TESTMODS_DIR)
endif
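
A usage sketch for these module builds (the alternate KDIR path below is only
an example; command-line variable assignments propagate to the sub-make):

    # build against the running kernel's build directory (default KDIR)
    make -C tools/testing/selftests/livepatch

    # or point KDIR at a specific kernel build tree
    make -C tools/testing/selftests/livepatch KDIR=/path/to/kernel/build

If KDIR does not exist, the modules target above is a no-op and the test
scripts' has_kdir() check skips the tests at run time.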
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2023 SUSE
* Authors: Libor Pechacek <lpechacek@suse.cz>
* Nicolai Stange <nstange@suse.de>
* Marcos Paulo de Souza <mpdesouza@suse.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/livepatch.h>
#if defined(__x86_64__)
#define FN_PREFIX __x64_
#elif defined(__s390x__)
#define FN_PREFIX __s390x_
#elif defined(__aarch64__)
#define FN_PREFIX __arm64_
#else
/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER */
#define FN_PREFIX
#endif
/* Protects klp_pids */
static DEFINE_MUTEX(kpid_mutex);
static unsigned int npids, npids_pending;
static int klp_pids[NR_CPUS];
module_param_array(klp_pids, int, &npids_pending, 0);
MODULE_PARM_DESC(klp_pids, "Array of pids to be transitioned to livepatched state.");
static ssize_t npids_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", npids_pending);
}
static struct kobj_attribute klp_attr = __ATTR_RO(npids);
static struct kobject *klp_kobj;
static asmlinkage long lp_sys_getpid(void)
{
int i;
mutex_lock(&kpid_mutex);
if (npids_pending > 0) {
for (i = 0; i < npids; i++) {
if (current->pid == klp_pids[i]) {
klp_pids[i] = 0;
npids_pending--;
break;
}
}
}
mutex_unlock(&kpid_mutex);
return task_tgid_vnr(current);
}
static struct klp_func vmlinux_funcs[] = {
{
.old_name = __stringify(FN_PREFIX) "sys_getpid",
.new_func = lp_sys_getpid,
}, {}
};
static struct klp_object objs[] = {
{
/* name being NULL means vmlinux */
.funcs = vmlinux_funcs,
}, {}
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int livepatch_init(void)
{
int ret;
klp_kobj = kobject_create_and_add("test_klp_syscall", kernel_kobj);
if (!klp_kobj)
return -ENOMEM;
ret = sysfs_create_file(klp_kobj, &klp_attr.attr);
if (ret) {
kobject_put(klp_kobj);
return ret;
}
/*
* Save the number of pids to transition to livepatched state before the
* number of pending pids is decremented.
*/
npids = npids_pending;
return klp_enable_patch(&patch);
}
static void livepatch_exit(void)
{
kobject_put(klp_kobj);
}
module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Libor Pechacek <lpechacek@suse.cz>");
MODULE_AUTHOR("Nicolai Stange <nstange@suse.de>");
MODULE_AUTHOR("Marcos Paulo de Souza <mpdesouza@suse.com>");
MODULE_DESCRIPTION("Livepatch test: syscall transition");
TEST_PROGS := test_power_supply_properties.sh
TEST_FILES := helpers.sh
include ../lib.mk
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2022, 2024 Collabora Ltd
SYSFS_SUPPLIES=/sys/class/power_supply
calc() {
awk "BEGIN { print $* }";
}
test_sysfs_prop() {
PROP="$1"
VALUE="$2" # optional
PROP_PATH="$SYSFS_SUPPLIES"/"$DEVNAME"/"$PROP"
TEST_NAME="$DEVNAME".sysfs."$PROP"
if [ -z "$VALUE" ]; then
ktap_test_result "$TEST_NAME" [ -f "$PROP_PATH" ]
else
ktap_test_result "$TEST_NAME" grep -q "$VALUE" "$PROP_PATH"
fi
}
to_human_readable_unit() {
VALUE="$1"
UNIT="$2"
case "$VALUE" in
*[!0-9]* ) return ;; # Not a number
esac
if [ "$UNIT" = "uA" ]; then
new_unit="mA"
div=1000
elif [ "$UNIT" = "uV" ]; then
new_unit="V"
div=1000000
elif [ "$UNIT" = "uAh" ]; then
new_unit="Ah"
div=1000000
elif [ "$UNIT" = "uW" ]; then
new_unit="mW"
div=1000
elif [ "$UNIT" = "uWh" ]; then
new_unit="Wh"
div=1000000
else
return
fi
value_converted=$(calc "$VALUE"/"$div")
echo "$value_converted" "$new_unit"
}
_check_sysfs_prop_available() {
PROP=$1
PROP_PATH="$SYSFS_SUPPLIES"/"$DEVNAME"/"$PROP"
TEST_NAME="$DEVNAME".sysfs."$PROP"
if [ ! -e "$PROP_PATH" ] ; then
ktap_test_skip "$TEST_NAME"
return 1
fi
if ! cat "$PROP_PATH" >/dev/null; then
ktap_print_msg "Failed to read"
ktap_test_fail "$TEST_NAME"
return 1
fi
return 0
}
test_sysfs_prop_optional() {
PROP=$1
UNIT=$2 # optional
TEST_NAME="$DEVNAME".sysfs."$PROP"
_check_sysfs_prop_available "$PROP" || return
DATA=$(cat "$SYSFS_SUPPLIES"/"$DEVNAME"/"$PROP")
ktap_print_msg "Reported: '$DATA' $UNIT ($(to_human_readable_unit "$DATA" "$UNIT"))"
ktap_test_pass "$TEST_NAME"
}
test_sysfs_prop_optional_range() {
PROP=$1
MIN=$2
MAX=$3
UNIT=$4 # optional
TEST_NAME="$DEVNAME".sysfs."$PROP"
_check_sysfs_prop_available "$PROP" || return
DATA=$(cat "$SYSFS_SUPPLIES"/"$DEVNAME"/"$PROP")
if [ "$DATA" -lt "$MIN" ] || [ "$DATA" -gt "$MAX" ]; then
ktap_print_msg "'$DATA' is out of range (min=$MIN, max=$MAX)"
ktap_test_fail "$TEST_NAME"
else
ktap_print_msg "Reported: '$DATA' $UNIT ($(to_human_readable_unit "$DATA" "$UNIT"))"
ktap_test_pass "$TEST_NAME"
fi
}
test_sysfs_prop_optional_list() {
PROP=$1
LIST=$2
TEST_NAME="$DEVNAME".sysfs."$PROP"
_check_sysfs_prop_available "$PROP" || return
DATA=$(cat "$SYSFS_SUPPLIES"/"$DEVNAME"/"$PROP")
valid=0
OLDIFS=$IFS
IFS=","
for item in $LIST; do
if [ "$DATA" = "$item" ]; then
valid=1
break
fi
done
if [ "$valid" -eq 1 ]; then
ktap_print_msg "Reported: '$DATA'"
ktap_test_pass "$TEST_NAME"
else
ktap_print_msg "'$DATA' is not a valid value for this property"
ktap_test_fail "$TEST_NAME"
fi
IFS=$OLDIFS
}
dump_file() {
FILE="$1"
while read -r line; do
ktap_print_msg "$line"
done < "$FILE"
}
__test_uevent_prop() {
PROP="$1"
OPTIONAL="$2"
VALUE="$3" # optional
UEVENT_PATH="$SYSFS_SUPPLIES"/"$DEVNAME"/uevent
TEST_NAME="$DEVNAME".uevent."$PROP"
if ! grep -q "POWER_SUPPLY_$PROP=" "$UEVENT_PATH"; then
if [ "$OPTIONAL" -eq 1 ]; then
ktap_test_skip "$TEST_NAME"
else
ktap_print_msg "Missing property"
ktap_test_fail "$TEST_NAME"
fi
return
fi
if ! grep -q "POWER_SUPPLY_$PROP=$VALUE" "$UEVENT_PATH"; then
ktap_print_msg "Invalid value for uevent property, dumping..."
dump_file "$UEVENT_PATH"
ktap_test_fail "$TEST_NAME"
else
ktap_test_pass "$TEST_NAME"
fi
}
test_uevent_prop() {
__test_uevent_prop "$1" 0 "$2"
}
test_uevent_prop_optional() {
__test_uevent_prop "$1" 1 "$2"
}
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2022, 2024 Collabora Ltd
#
# This test validates the power supply uAPI: namely, the files in sysfs and
# lines in uevent that expose the power supply properties.
#
# By default all power supplies available are tested. Optionally the name of a
# power supply can be passed as a parameter to test only that one instead.
DIR="$(dirname "$(readlink -f "$0")")"
. "${DIR}"/../kselftest/ktap_helpers.sh
. "${DIR}"/helpers.sh
count_tests() {
SUPPLIES=$1
# This needs to be updated every time a new test is added.
NUM_TESTS=33
total_tests=0
for i in $SUPPLIES; do
total_tests=$(("$total_tests" + "$NUM_TESTS"))
done
echo "$total_tests"
}
ktap_print_header
SYSFS_SUPPLIES=/sys/class/power_supply/
if [ $# -eq 0 ]; then
supplies=$(ls "$SYSFS_SUPPLIES")
else
supplies=$1
fi
ktap_set_plan "$(count_tests "$supplies")"
for DEVNAME in $supplies; do
ktap_print_msg Testing device "$DEVNAME"
if [ ! -d "$SYSFS_SUPPLIES"/"$DEVNAME" ]; then
ktap_test_fail "$DEVNAME".exists
ktap_exit_fail_msg Device does not exist
fi
ktap_test_pass "$DEVNAME".exists
test_uevent_prop NAME "$DEVNAME"
test_sysfs_prop type
SUPPLY_TYPE=$(cat "$SYSFS_SUPPLIES"/"$DEVNAME"/type)
# This fails on kernels < 5.8 (needs 2ad3d74e3c69f)
test_uevent_prop TYPE "$SUPPLY_TYPE"
test_sysfs_prop_optional usb_type
test_sysfs_prop_optional_range online 0 2
test_sysfs_prop_optional_range present 0 1
test_sysfs_prop_optional_list status "Unknown","Charging","Discharging","Not charging","Full"
# Capacity is reported as a percentage, thus any value less than 0 or
# greater than 100 is not allowed.
test_sysfs_prop_optional_range capacity 0 100 "%"
test_sysfs_prop_optional_list capacity_level "Unknown","Critical","Low","Normal","High","Full"
test_sysfs_prop_optional model_name
test_sysfs_prop_optional manufacturer
test_sysfs_prop_optional serial_number
test_sysfs_prop_optional_list technology "Unknown","NiMH","Li-ion","Li-poly","LiFe","NiCd","LiMn"
test_sysfs_prop_optional cycle_count
test_sysfs_prop_optional_list scope "Unknown","System","Device"
test_sysfs_prop_optional input_current_limit "uA"
test_sysfs_prop_optional input_voltage_limit "uV"
# Technically the power-supply class does not limit reported values.
# E.g. one could expose an RTC backup-battery, which goes below 1.5V or
# an electric vehicle battery with over 300V. But most devices do not
# have a step-up capable regulator behind the battery and operate with
# voltages considered safe to touch, so we limit the allowed range to
# 1.8V-60V to catch drivers reporting incorrectly scaled values. E.g. a
# common mistake is reporting data in mV instead of µV.
test_sysfs_prop_optional_range voltage_now 1800000 60000000 "uV"
test_sysfs_prop_optional_range voltage_min 1800000 60000000 "uV"
test_sysfs_prop_optional_range voltage_max 1800000 60000000 "uV"
test_sysfs_prop_optional_range voltage_min_design 1800000 60000000 "uV"
test_sysfs_prop_optional_range voltage_max_design 1800000 60000000 "uV"
# current based systems
test_sysfs_prop_optional current_now "uA"
test_sysfs_prop_optional current_max "uA"
test_sysfs_prop_optional charge_now "uAh"
test_sysfs_prop_optional charge_full "uAh"
test_sysfs_prop_optional charge_full_design "uAh"
# power based systems
test_sysfs_prop_optional power_now "uW"
test_sysfs_prop_optional energy_now "uWh"
test_sysfs_prop_optional energy_full "uWh"
test_sysfs_prop_optional energy_full_design "uWh"
test_sysfs_prop_optional energy_full_design "uWh"
done
ktap_finished
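
A sketch of invoking the test (BAT0 is only an example supply name):

    # test every supply under /sys/class/power_supply
    ./test_power_supply_properties.sh

    # or test a single named supply
    ./test_power_supply_properties.sh BAT0

    # or run it through the kselftest framework
    make -C tools/testing/selftests TARGETS=power_supply run_tests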
......@@ -3,106 +3,59 @@
#include <stdint.h>
#include "resctrl.h"
struct read_format {
__u64 nr; /* The number of events */
struct {
__u64 value; /* The value of the event */
} values[2];
};
static struct perf_event_attr pea_llc_miss;
static struct read_format rf_cqm;
static int fd_lm;
char llc_occup_path[1024];
static void initialize_perf_event_attr(void)
void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config)
{
pea_llc_miss.type = PERF_TYPE_HARDWARE;
pea_llc_miss.size = sizeof(struct perf_event_attr);
pea_llc_miss.read_format = PERF_FORMAT_GROUP;
pea_llc_miss.exclude_kernel = 1;
pea_llc_miss.exclude_hv = 1;
pea_llc_miss.exclude_idle = 1;
pea_llc_miss.exclude_callchain_kernel = 1;
pea_llc_miss.inherit = 1;
pea_llc_miss.exclude_guest = 1;
pea_llc_miss.disabled = 1;
memset(pea, 0, sizeof(*pea));
pea->type = PERF_TYPE_HARDWARE;
pea->size = sizeof(*pea);
pea->read_format = PERF_FORMAT_GROUP;
pea->exclude_kernel = 1;
pea->exclude_hv = 1;
pea->exclude_idle = 1;
pea->exclude_callchain_kernel = 1;
pea->inherit = 1;
pea->exclude_guest = 1;
pea->disabled = 1;
pea->config = config;
}
static void ioctl_perf_event_ioc_reset_enable(void)
/* Start counters to log values */
int perf_event_reset_enable(int pe_fd)
{
ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0);
ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0);
}
static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
{
fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
PERF_FLAG_FD_CLOEXEC);
if (fd_lm == -1) {
perror("Error opening leader");
ctrlc_handler(0, NULL, NULL);
return -1;
}
return 0;
}
static void initialize_llc_perf(void)
{
memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr));
memset(&rf_cqm, 0, sizeof(struct read_format));
/* Initialize perf_event_attr structures for HW_CACHE_MISSES */
initialize_perf_event_attr();
pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES;
rf_cqm.nr = 1;
}
static int reset_enable_llc_perf(pid_t pid, int cpu_no)
{
int ret = 0;
int ret;
ret = perf_event_open_llc_miss(pid, cpu_no);
ret = ioctl(pe_fd, PERF_EVENT_IOC_RESET, 0);
if (ret < 0)
return ret;
/* Start counters to log values */
ioctl_perf_event_ioc_reset_enable();
ret = ioctl(pe_fd, PERF_EVENT_IOC_ENABLE, 0);
if (ret < 0)
return ret;
return 0;
}
/*
* get_llc_perf: llc cache miss through perf events
* @llc_perf_miss: LLC miss counter that is filled on success
*
* Perf events like HW_CACHE_MISSES could be used to validate number of
* cache lines allocated.
*
* Return: =0 on success. <0 on failure.
*/
static int get_llc_perf(unsigned long *llc_perf_miss)
void perf_event_initialize_read_format(struct perf_event_read *pe_read)
{
__u64 total_misses;
int ret;
/* Stop counters after one span to get miss rate */
memset(pe_read, 0, sizeof(*pe_read));
pe_read->nr = 1;
}
ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no)
{
int pe_fd;
ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
if (ret == -1) {
perror("Could not get llc misses through perf");
pe_fd = perf_event_open(pea, pid, cpu_no, -1, PERF_FLAG_FD_CLOEXEC);
if (pe_fd == -1) {
ksft_perror("Error opening leader");
return -1;
}
total_misses = rf_cqm.values[0].value;
*llc_perf_miss = total_misses;
perf_event_reset_enable(pe_fd);
return 0;
return pe_fd;
}
/*
......@@ -124,12 +77,12 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
fp = fopen(llc_occup_path, "r");
if (!fp) {
perror("Failed to open results file");
ksft_perror("Failed to open results file");
return errno;
return -1;
}
if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
perror("Could not get llc occupancy");
ksft_perror("Could not get llc occupancy");
fclose(fp);
return -1;
......@@ -146,163 +99,91 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
* @llc_value: perf miss value /
* llc occupancy value reported by resctrl FS
*
* Return: 0 on success. non-zero on failure.
* Return: 0 on success, < 0 on error.
*/
static int print_results_cache(char *filename, int bm_pid,
unsigned long llc_value)
static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value)
{
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
printf("Pid: %d \t LLC_value: %lu\n", bm_pid,
llc_value);
printf("Pid: %d \t LLC_value: %llu\n", bm_pid, llc_value);
} else {
fp = fopen(filename, "a");
if (!fp) {
perror("Cannot open results file");
ksft_perror("Cannot open results file");
return errno;
return -1;
}
fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value);
fprintf(fp, "Pid: %d \t llc_value: %llu\n", bm_pid, llc_value);
fclose(fp);
}
return 0;
}
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
/*
* perf_event_measure - Measure perf events
* @filename: Filename for writing the results
* @bm_pid: PID that runs the benchmark
*
* Measures perf events (e.g., cache misses) and writes the results into
* @filename. @bm_pid is written to the results file along with the measured
* value.
*
* Return: =0 on success. <0 on failure.
*/
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
const char *filename, int bm_pid)
{
unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0;
int ret;
/*
* Measure cache miss from perf.
*/
if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = get_llc_perf(&llc_perf_miss);
/* Stop counters after one span to get miss rate */
ret = ioctl(pe_fd, PERF_EVENT_IOC_DISABLE, 0);
if (ret < 0)
return ret;
llc_value = llc_perf_miss;
}
/*
* Measure llc occupancy from resctrl.
*/
if (!strncmp(param->resctrl_val, CMT_STR, sizeof(CMT_STR))) {
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
return ret;
llc_value = llc_occu_resc;
ret = read(pe_fd, pe_read, sizeof(*pe_read));
if (ret == -1) {
ksft_perror("Could not get perf value");
return -1;
}
ret = print_results_cache(param->filename, bm_pid, llc_value);
if (ret)
return ret;
return 0;
return print_results_cache(filename, bm_pid, pe_read->values[0].value);
}
/*
* cache_val: execute benchmark and measure LLC occupancy resctrl
* and perf cache miss for the benchmark
* @param: parameters passed to cache_val()
* @span: buffer size for the benchmark
* measure_llc_resctrl - Measure resctrl LLC value from resctrl
* @filename: Filename for writing the results
* @bm_pid: PID that runs the benchmark
*
* Measures LLC occupancy from resctrl and writes the results into @filename.
* @bm_pid is written to the results file along with the measured value.
*
* Return: 0 on success. non-zero on failure.
* Return: =0 on success. <0 on failure.
*/
int cat_val(struct resctrl_val_param *param, size_t span)
int measure_llc_resctrl(const char *filename, int bm_pid)
{
int memflush = 1, operation = 0, ret = 0;
char *resctrl_val = param->resctrl_val;
pid_t bm_pid;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
bm_pid = getpid();
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, param->cpu_no);
if (ret)
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
resctrl_val);
if (ret)
return ret;
initialize_llc_perf();
unsigned long llc_occu_resc = 0;
int ret;
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
}
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
break;
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
if (run_fill_buf(span, memflush, operation, true)) {
fprintf(stderr, "Error-running fill buffer\n");
ret = -1;
goto pe_close;
}
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
goto pe_close;
}
return ret;
pe_close:
close(fd_lm);
return ret;
return print_results_cache(filename, bm_pid, llc_occu_resc);
}
/*
* show_cache_info: show cache test result information
* @sum_llc_val: sum of LLC cache result data
* @no_of_bits: number of bits
* @cache_span: cache span in bytes for CMT or in lines for CAT
* @max_diff: max difference
* @max_diff_percent: max difference percentage
* @num_of_runs: number of runs
* @platform: show test information on this platform
* @cmt: CMT test or CAT test
*
* Return: 0 on success. non-zero on failure.
* show_cache_info - Show generic cache test information
* @no_of_bits: Number of bits
* @avg_llc_val: Average of LLC cache result data
* @cache_span: Cache span
* @lines: @cache_span in lines or bytes
*/
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt)
void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines)
{
unsigned long avg_llc_val = 0;
float diff_percent;
long avg_diff = 0;
int ret;
avg_llc_val = sum_llc_val / num_of_runs;
avg_diff = (long)abs(cache_span - avg_llc_val);
diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
ret = platform && abs((int)diff_percent) > max_diff_percent &&
(cmt ? (abs(avg_diff) > max_diff) : true);
ksft_print_msg("%s Check cache miss rate within %lu%%\n",
ret ? "Fail:" : "Pass:", max_diff_percent);
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
ksft_print_msg("Number of bits: %d\n", no_of_bits);
ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines",
ksft_print_msg("Average LLC val: %llu\n", avg_llc_val);
ksft_print_msg("Cache span (%s): %zu\n", lines ? "lines" : "bytes",
cache_span);
return ret;
}
......@@ -11,108 +11,254 @@
#include "resctrl.h"
#include <unistd.h>
#define RESULT_FILE_NAME1 "result_cat1"
#define RESULT_FILE_NAME2 "result_cat2"
#define RESULT_FILE_NAME "result_cat"
#define NUM_OF_RUNS 5
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
/*
* Change schemata. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
* Run 5 times in order to get average values.
 * The minimum difference in LLC misses between a test with n+1 bits in the
 * CBM and the test with n bits is MIN_DIFF_PERCENT_PER_BIT * (n - 1). With
 * e.g. 5 vs 4 bits in the CBM mask, the minimum difference must be at least
 * MIN_DIFF_PERCENT_PER_BIT * (4 - 1) = 3 percent.
*
 * The relationship between the number of used CBM bits and the difference in
 * LLC misses is not expected to be linear. With a small number of bits, the
 * margin is smaller than with a larger number of bits. For selftest purposes,
 * however, a linear approach is enough because ultimately only a pass/fail
 * decision has to be made and the distinction between a strong and a stronger
 * signal is irrelevant.
*/
static int cat_setup(struct resctrl_val_param *p)
#define MIN_DIFF_PERCENT_PER_BIT 1UL
static int show_results_info(__u64 sum_llc_val, int no_of_bits,
unsigned long cache_span,
unsigned long min_diff_percent,
unsigned long num_of_runs, bool platform,
__s64 *prev_avg_llc_val)
{
char schemata[64];
__u64 avg_llc_val = 0;
float avg_diff;
int ret = 0;
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
avg_llc_val = sum_llc_val / num_of_runs;
if (*prev_avg_llc_val) {
float delta = (__s64)(avg_llc_val - *prev_avg_llc_val);
if (p->num_of_runs == 0) {
sprintf(schemata, "%lx", p->mask);
ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no,
p->resctrl_val);
avg_diff = delta / *prev_avg_llc_val;
ret = platform && (avg_diff * 100) < (float)min_diff_percent;
ksft_print_msg("%s Check cache miss rate changed more than %.1f%%\n",
ret ? "Fail:" : "Pass:", (float)min_diff_percent);
ksft_print_msg("Percent diff=%.1f\n", avg_diff * 100);
}
p->num_of_runs++;
*prev_avg_llc_val = avg_llc_val;
show_cache_info(no_of_bits, avg_llc_val, cache_span, true);
return ret;
}
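A worked example of the check above, using made-up numbers (illustrative only, assuming platform == true, i.e. Intel):
/*
 * Illustrative numbers only:
 *   prev_avg_llc_val (5-bit CBM run) = 100000 misses
 *   avg_llc_val      (4-bit CBM run) = 110000 misses
 *   delta = 10000, avg_diff = 0.10, avg_diff * 100 = 10.0
 *   min_diff_percent = MIN_DIFF_PERCENT_PER_BIT * (4 - 1) = 3
 *   10.0 < 3 is false -> ret = 0, printed as
 *   "Pass: Check cache miss rate changed more than 3.0%"
 */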
static int check_results(struct resctrl_val_param *param, size_t span)
/* Remove the highest bit from CBM */
static unsigned long next_mask(unsigned long current_mask)
{
return current_mask & (current_mask >> 1);
}
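For a contiguous CBM, the helper above peels off the top bit on each call; an illustrative progression:
/*
 * next_mask(0x3f) == 0x1f
 * next_mask(0x1f) == 0x0f
 * next_mask(0x01) == 0x00   (ends the loop in cat_test())
 */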
static int check_results(struct resctrl_val_param *param, const char *cache_type,
unsigned long cache_total_size, unsigned long full_cache_mask,
unsigned long current_mask)
{
char *token_array[8], temp[512];
unsigned long sum_llc_perf_miss = 0;
int runs = 0, no_of_bits = 0;
__u64 sum_llc_perf_miss = 0;
__s64 prev_avg_llc_val = 0;
unsigned long alloc_size;
int runs = 0;
int fail = 0;
int ret;
FILE *fp;
ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Cannot open file");
ksft_perror("Cannot open file");
return errno;
return -1;
}
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
int bits;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/*
 * Discard the first value, which is inaccurate due to the monitoring
* setup transition phase.
*/
if (runs > 0)
sum_llc_perf_miss += strtoul(token_array[3], NULL, 0);
sum_llc_perf_miss += strtoull(token_array[3], NULL, 0);
runs++;
if (runs < NUM_OF_RUNS)
continue;
if (!current_mask) {
ksft_print_msg("Unexpected empty cache mask\n");
break;
}
alloc_size = cache_portion_size(cache_total_size, current_mask, full_cache_mask);
bits = count_bits(current_mask);
ret = show_results_info(sum_llc_perf_miss, bits,
alloc_size / 64,
MIN_DIFF_PERCENT_PER_BIT * (bits - 1),
runs, get_vendor() == ARCH_INTEL,
&prev_avg_llc_val);
if (ret)
fail = 1;
runs = 0;
sum_llc_perf_miss = 0;
current_mask = next_mask(current_mask);
}
fclose(fp);
no_of_bits = count_bits(param->mask);
return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
get_vendor() == ARCH_INTEL, false);
return fail;
}
void cat_test_cleanup(void)
{
remove(RESULT_FILE_NAME1);
remove(RESULT_FILE_NAME2);
remove(RESULT_FILE_NAME);
}
/*
* cat_test - Execute CAT benchmark and measure cache misses
* @test: Test information structure
* @uparams: User supplied parameters
* @param: Parameters passed to cat_test()
* @span: Buffer size for the benchmark
 * @current_mask: Start mask for the first iteration
*
* Run CAT selftest by varying the allocated cache portion and comparing the
* impact on cache misses (the result analysis is done in check_results()
* and show_results_info(), not in this function).
*
* One bit is removed from the CAT allocation bit mask (in current_mask) for
 * each subsequent test, which keeps reducing the size of the allocated cache
* portion. A single test flushes the buffer, reads it to warm up the cache,
* and reads the buffer again. The cache misses are measured during the last
* read pass.
*
* Return: 0 when the test was run, < 0 on error.
*/
static int cat_test(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *param,
size_t span, unsigned long current_mask)
{
char *resctrl_val = param->resctrl_val;
struct perf_event_read pe_read;
struct perf_event_attr pea;
cpu_set_t old_affinity;
unsigned char *buf;
char schemata[64];
int ret, i, pe_fd;
pid_t bm_pid;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
bm_pid = getpid();
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, uparams->cpu, &old_affinity);
if (ret)
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
resctrl_val);
if (ret)
goto reset_affinity;
perf_event_attr_initialize(&pea, PERF_COUNT_HW_CACHE_MISSES);
perf_event_initialize_read_format(&pe_read);
pe_fd = perf_open(&pea, bm_pid, uparams->cpu);
if (pe_fd < 0) {
ret = -1;
goto reset_affinity;
}
buf = alloc_buffer(span, 1);
if (!buf) {
ret = -1;
goto pe_close;
}
while (current_mask) {
snprintf(schemata, sizeof(schemata), "%lx", param->mask & ~current_mask);
ret = write_schemata("", schemata, uparams->cpu, test->resource);
if (ret)
goto free_buf;
snprintf(schemata, sizeof(schemata), "%lx", current_mask);
ret = write_schemata(param->ctrlgrp, schemata, uparams->cpu, test->resource);
if (ret)
goto free_buf;
for (i = 0; i < NUM_OF_RUNS; i++) {
mem_flush(buf, span);
fill_cache_read(buf, span, true);
ret = perf_event_reset_enable(pe_fd);
if (ret)
goto free_buf;
fill_cache_read(buf, span, true);
ret = perf_event_measure(pe_fd, &pe_read, param->filename, bm_pid);
if (ret)
goto free_buf;
}
current_mask = next_mask(current_mask);
}
free_buf:
free(buf);
pe_close:
close(pe_fd);
reset_affinity:
taskset_restore(bm_pid, &old_affinity);
return ret;
}
int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
static int cat_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
unsigned long l_mask, l_mask_1;
int ret, pipefd[2], sibling_cpu_no;
unsigned long cache_size = 0;
unsigned long long_mask;
char cbm_mask[256];
unsigned long long_mask, start_mask, full_cache_mask;
unsigned long cache_total_size = 0;
int n = uparams->bits;
unsigned int start;
int count_of_bits;
char pipe_message;
size_t span;
int ret;
/* Get default cbm mask for L3/L2 cache */
ret = get_cbm_mask(cache_type, cbm_mask);
ret = get_full_cbm(test->resource, &full_cache_mask);
if (ret)
return ret;
/* Get the largest contiguous exclusive portion of the cache */
ret = get_mask_no_shareable(test->resource, &long_mask);
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
/* Get L3/L2 cache size */
ret = get_cache_size(cpu_no, cache_type, &cache_size);
ret = get_cache_size(uparams->cpu, test->resource, &cache_total_size);
if (ret)
return ret;
ksft_print_msg("Cache size :%lu\n", cache_size);
ksft_print_msg("Cache size :%lu\n", cache_total_size);
/* Get max number of bits from default CBM mask */
count_of_bits = count_bits(long_mask);
count_of_bits = count_contiguous_bits(long_mask, &start);
if (!n)
n = count_of_bits / 2;
......@@ -123,89 +269,124 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
count_of_bits - 1);
return -1;
}
/* Get core id from same socket for running another thread */
sibling_cpu_no = get_core_sibling(cpu_no);
if (sibling_cpu_no < 0)
return -1;
start_mask = create_bit_mask(start, n);
struct resctrl_val_param param = {
.resctrl_val = CAT_STR,
.cpu_no = cpu_no,
.setup = cat_setup,
.ctrlgrp = "c1",
.filename = RESULT_FILE_NAME,
.num_of_runs = 0,
};
param.mask = long_mask;
span = cache_portion_size(cache_total_size, start_mask, full_cache_mask);
l_mask = long_mask >> n;
l_mask_1 = ~l_mask & long_mask;
remove(param.filename);
/* Set param values for parent thread which will be allocated bitmask
* with (max_bits - n) bits
*/
span = cache_size * (count_of_bits - n) / count_of_bits;
strcpy(param.ctrlgrp, "c2");
strcpy(param.mongrp, "m2");
strcpy(param.filename, RESULT_FILE_NAME2);
param.mask = l_mask;
param.num_of_runs = 0;
if (pipe(pipefd)) {
perror("# Unable to create pipe");
return errno;
}
ret = cat_test(test, uparams, &param, span, start_mask);
if (ret)
goto out;
fflush(stdout);
bm_pid = fork();
ret = check_results(&param, test->resource,
cache_total_size, full_cache_mask, start_mask);
out:
cat_test_cleanup();
/* Set param values for child thread which will be allocated bitmask
* with n bits
*/
if (bm_pid == 0) {
param.mask = l_mask_1;
strcpy(param.ctrlgrp, "c1");
strcpy(param.mongrp, "m1");
span = cache_size * n / count_of_bits;
strcpy(param.filename, RESULT_FILE_NAME1);
param.num_of_runs = 0;
param.cpu_no = sibling_cpu_no;
}
return ret;
}
remove(param.filename);
static int noncont_cat_run_test(const struct resctrl_test *test,
const struct user_params *uparams)
{
unsigned long full_cache_mask, cont_mask, noncont_mask;
unsigned int eax, ebx, ecx, edx, sparse_masks;
int bit_center, ret;
char schemata[64];
ret = cat_val(&param, span);
if (ret == 0)
ret = check_results(&param, span);
/* Check to compare sparse_masks content to CPUID output. */
ret = resource_info_unsigned_get(test->resource, "sparse_masks", &sparse_masks);
if (ret)
return ret;
if (!strcmp(test->resource, "L3"))
__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
else if (!strcmp(test->resource, "L2"))
__cpuid_count(0x10, 2, eax, ebx, ecx, edx);
else
return -EINVAL;
if (sparse_masks != ((ecx >> 3) & 1)) {
ksft_print_msg("CPUID output doesn't match 'sparse_masks' file content!\n");
return 1;
}
/* Write checks initialization. */
ret = get_full_cbm(test->resource, &full_cache_mask);
if (ret < 0)
return ret;
bit_center = count_bits(full_cache_mask) / 2;
if (bm_pid == 0) {
/* Tell parent that child is ready */
close(pipefd[0]);
pipe_message = 1;
if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message))
/*
* Just print the error message.
* Let while(1) run and wait for itself to be killed.
* The bit_center needs to be at least 3 to properly calculate the CBM
 * hole in the noncont_mask. If it's smaller, return an error since the
* cache mask is too short and that shouldn't happen.
*/
perror("# failed signaling parent process");
close(pipefd[1]);
while (1)
;
} else {
/* Parent waits for child to be ready. */
close(pipefd[1]);
pipe_message = 0;
while (pipe_message != 1) {
if (read(pipefd[0], &pipe_message,
sizeof(pipe_message)) < sizeof(pipe_message)) {
perror("# failed reading from child process");
break;
}
}
close(pipefd[0]);
kill(bm_pid, SIGKILL);
if (bit_center < 3)
return -EINVAL;
cont_mask = full_cache_mask >> bit_center;
/* Contiguous mask write check. */
snprintf(schemata, sizeof(schemata), "%lx", cont_mask);
ret = write_schemata("", schemata, uparams->cpu, test->resource);
if (ret) {
ksft_print_msg("Write of contiguous CBM failed\n");
return 1;
}
cat_test_cleanup();
/*
* Non-contiguous mask write check. CBM has a 0xf hole approximately in the middle.
* Output is compared with support information to catch any edge case errors.
*/
noncont_mask = ~(0xfUL << (bit_center - 2)) & full_cache_mask;
snprintf(schemata, sizeof(schemata), "%lx", noncont_mask);
ret = write_schemata("", schemata, uparams->cpu, test->resource);
if (ret && sparse_masks)
ksft_print_msg("Non-contiguous CBMs supported but write of non-contiguous CBM failed\n");
else if (ret && !sparse_masks)
ksft_print_msg("Non-contiguous CBMs not supported and write of non-contiguous CBM failed as expected\n");
else if (!ret && !sparse_masks)
ksft_print_msg("Non-contiguous CBMs not supported but write of non-contiguous CBM succeeded\n");
return !ret == !sparse_masks;
}
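A worked example of the two schemata written above, assuming a hypothetical 12-bit full CBM (values illustrative only):
/*
 * full_cache_mask = 0xfff, bit_center = count_bits(0xfff) / 2 = 6
 * cont_mask    = 0xfff >> 6          = 0x03f  (contiguous)
 * noncont_mask = ~(0xf << 4) & 0xfff = 0xf0f  (4-bit hole in the middle)
 */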
return ret;
static bool noncont_cat_feature_check(const struct resctrl_test *test)
{
if (!resctrl_resource_exists(test->resource))
return false;
return resource_info_file_exists(test->resource, "sparse_masks");
}
struct resctrl_test l3_cat_test = {
.name = "L3_CAT",
.group = "CAT",
.resource = "L3",
.feature_check = test_resource_feature_check,
.run_test = cat_run_test,
};
struct resctrl_test l3_noncont_cat_test = {
.name = "L3_NONCONT_CAT",
.group = "CAT",
.resource = "L3",
.feature_check = noncont_cat_feature_check,
.run_test = noncont_cat_run_test,
};
struct resctrl_test l2_noncont_cat_test = {
.name = "L2_NONCONT_CAT",
.group = "CAT",
.resource = "L2",
.feature_check = noncont_cat_feature_check,
.run_test = noncont_cat_run_test,
};
......@@ -16,7 +16,9 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
static int cmt_setup(struct resctrl_val_param *p)
static int cmt_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
{
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
......@@ -27,6 +29,33 @@ static int cmt_setup(struct resctrl_val_param *p)
return 0;
}
static int show_results_info(unsigned long sum_llc_val, int no_of_bits,
unsigned long cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform)
{
unsigned long avg_llc_val = 0;
float diff_percent;
long avg_diff = 0;
int ret;
avg_llc_val = sum_llc_val / num_of_runs;
avg_diff = (long)abs(cache_span - avg_llc_val);
diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
ret = platform && abs((int)diff_percent) > max_diff_percent &&
abs(avg_diff) > max_diff;
ksft_print_msg("%s Check cache miss rate within %lu%%\n",
ret ? "Fail:" : "Pass:", max_diff_percent);
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
show_cache_info(no_of_bits, avg_llc_val, cache_span, false);
return ret;
}
static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits)
{
char *token_array[8], temp[512];
......@@ -37,9 +66,9 @@ static int check_results(struct resctrl_val_param *param, size_t span, int no_of
ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Error in opening file\n");
ksft_perror("Error in opening file");
return errno;
return -1;
}
while (fgets(temp, sizeof(temp), fp)) {
......@@ -58,9 +87,8 @@ static int check_results(struct resctrl_val_param *param, size_t span, int no_of
}
fclose(fp);
return show_cache_info(sum_llc_occu_resc, no_of_bits, span,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
true, true);
return show_results_info(sum_llc_occu_resc, no_of_bits, span,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, true);
}
void cmt_test_cleanup(void)
......@@ -68,28 +96,26 @@ void cmt_test_cleanup(void)
remove(RESULT_FILE_NAME);
}
int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
static int cmt_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
const char * const *cmd = benchmark_cmd;
const char * const *cmd = uparams->benchmark_cmd;
const char *new_cmd[BENCHMARK_ARGS];
unsigned long cache_size = 0;
unsigned long cache_total_size = 0;
int n = uparams->bits ? : 5;
unsigned long long_mask;
char *span_str = NULL;
char cbm_mask[256];
int count_of_bits;
size_t span;
int ret, i;
ret = get_cbm_mask("L3", cbm_mask);
ret = get_full_cbm("L3", &long_mask);
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
ret = get_cache_size(cpu_no, "L3", &cache_size);
ret = get_cache_size(uparams->cpu, "L3", &cache_total_size);
if (ret)
return ret;
ksft_print_msg("Cache size :%lu\n", cache_size);
ksft_print_msg("Cache size :%lu\n", cache_total_size);
count_of_bits = count_bits(long_mask);
......@@ -103,19 +129,18 @@ int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
.resctrl_val = CMT_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.num_of_runs = 0,
.setup = cmt_setup,
};
span = cache_size * n / count_of_bits;
span = cache_portion_size(cache_total_size, param.mask, long_mask);
if (strcmp(cmd[0], "fill_buf") == 0) {
/* Duplicate the command to be able to replace span in it */
for (i = 0; benchmark_cmd[i]; i++)
new_cmd[i] = benchmark_cmd[i];
for (i = 0; uparams->benchmark_cmd[i]; i++)
new_cmd[i] = uparams->benchmark_cmd[i];
new_cmd[i] = NULL;
ret = asprintf(&span_str, "%zu", span);
......@@ -127,11 +152,13 @@ int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
remove(RESULT_FILE_NAME);
ret = resctrl_val(cmd, &param);
ret = resctrl_val(test, uparams, cmd, &param);
if (ret)
goto out;
ret = check_results(&param, span, n);
if (ret && (get_vendor() == ARCH_INTEL))
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
out:
cmt_test_cleanup();
......@@ -139,3 +166,16 @@ int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
return ret;
}
static bool cmt_feature_check(const struct resctrl_test *test)
{
return test_resource_feature_check(test) &&
resctrl_mon_feature_exists("L3_MON", "llc_occupancy");
}
struct resctrl_test cmt_test = {
.name = "CMT",
.resource = "L3",
.feature_check = cmt_feature_check,
.run_test = cmt_run_test,
};
......@@ -38,7 +38,7 @@ static void cl_flush(void *p)
#endif
}
static void mem_flush(unsigned char *buf, size_t buf_size)
void mem_flush(unsigned char *buf, size_t buf_size)
{
unsigned char *cp = buf;
size_t i = 0;
......@@ -51,39 +51,38 @@ static void mem_flush(unsigned char *buf, size_t buf_size)
sb();
}
static void *malloc_and_init_memory(size_t buf_size)
{
void *p = NULL;
uint64_t *p64;
size_t s64;
int ret;
ret = posix_memalign(&p, PAGE_SIZE, buf_size);
if (ret < 0)
return NULL;
p64 = (uint64_t *)p;
s64 = buf_size / sizeof(uint64_t);
while (s64 > 0) {
*p64 = (uint64_t)rand();
p64 += (CL_SIZE / sizeof(uint64_t));
s64 -= (CL_SIZE / sizeof(uint64_t));
}
return p;
}
/*
 * Buffer index step advance to work around HW prefetching interfering with
* the measurements.
*
* Must be a prime to step through all indexes of the buffer.
*
* Some primes work better than others on some architectures (from MBA/MBM
* result stability point of view).
*/
#define FILL_IDX_MULT 23
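As a small illustration of the stride this define produces (hypothetical buffer of 8 half-cachelines):
/*
 * i:    0  1  2  3  4  5  6  7
 * idx:  0  7  6  5  4  3  2  1      (idx = i * 23 % 8)
 *
 * Every index is visited exactly once because gcd(23, 8) == 1, but the
 * order is non-sequential, which defeats simple HW stride prefetching.
 */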
static int fill_one_span_read(unsigned char *buf, size_t buf_size)
{
unsigned char *end_ptr = buf + buf_size;
unsigned char sum, *p;
unsigned int size = buf_size / (CL_SIZE / 2);
unsigned int i, idx = 0;
unsigned char sum = 0;
sum = 0;
p = buf;
while (p < end_ptr) {
sum += *p;
p += (CL_SIZE / 2);
/*
* Read the buffer in an order that is unexpected by HW prefetching
 * optimizations to prevent them from interfering with the caching pattern.
*
* The read order is (in terms of halves of cachelines):
* i * FILL_IDX_MULT % size
 * The formula is open-coded below to avoid modulo inside the loop
* as it improves MBA/MBM result stability on some architectures.
*/
for (i = 0; i < size; i++) {
sum += buf[idx * (CL_SIZE / 2)];
idx += FILL_IDX_MULT;
while (idx >= size)
idx -= size;
}
return sum;
......@@ -101,10 +100,9 @@ static void fill_one_span_write(unsigned char *buf, size_t buf_size)
}
}
static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
void fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
{
int ret = 0;
FILE *fp;
while (1) {
ret = fill_one_span_read(buf, buf_size);
......@@ -113,67 +111,59 @@ static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
}
/* Consume read result so that reading memory is not optimized out. */
fp = fopen("/dev/null", "w");
if (!fp) {
perror("Unable to write to /dev/null");
return -1;
}
fprintf(fp, "Sum: %d ", ret);
fclose(fp);
return 0;
*value_sink = ret;
}
static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
static void fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
{
while (1) {
fill_one_span_write(buf, buf_size);
if (once)
break;
}
return 0;
}
static int fill_cache(size_t buf_size, int memflush, int op, bool once)
unsigned char *alloc_buffer(size_t buf_size, int memflush)
{
unsigned char *buf;
void *buf = NULL;
uint64_t *p64;
size_t s64;
int ret;
buf = malloc_and_init_memory(buf_size);
if (!buf)
return -1;
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
mem_flush(buf, buf_size);
if (op == 0)
ret = fill_cache_read(buf, buf_size, once);
else
ret = fill_cache_write(buf, buf_size, once);
ret = posix_memalign(&buf, PAGE_SIZE, buf_size);
if (ret < 0)
return NULL;
free(buf);
/* Initialize the buffer */
p64 = buf;
s64 = buf_size / sizeof(uint64_t);
if (ret) {
printf("\n Error in fill cache read/write...\n");
return -1;
while (s64 > 0) {
*p64 = (uint64_t)rand();
p64 += (CL_SIZE / sizeof(uint64_t));
s64 -= (CL_SIZE / sizeof(uint64_t));
}
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
mem_flush(buf, buf_size);
return 0;
return buf;
}
int run_fill_buf(size_t span, int memflush, int op, bool once)
int run_fill_buf(size_t buf_size, int memflush, int op, bool once)
{
size_t cache_size = span;
int ret;
unsigned char *buf;
ret = fill_cache(cache_size, memflush, op, once);
if (ret) {
printf("\n Error in fill cache\n");
buf = alloc_buffer(buf_size, memflush);
if (!buf)
return -1;
}
if (op == 0)
fill_cache_read(buf, buf_size, once);
else
fill_cache_write(buf, buf_size, once);
free(buf);
return 0;
}
......@@ -22,7 +22,9 @@
* con_mon grp, mon_grp in resctrl FS.
* For each allocation, run 5 times in order to get average values.
*/
static int mba_setup(struct resctrl_val_param *p)
static int mba_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
{
static int runs_per_allocation, allocation = 100;
char allocation_str[64];
......@@ -40,8 +42,7 @@ static int mba_setup(struct resctrl_val_param *p)
sprintf(allocation_str, "%d", allocation);
ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
p->resctrl_val);
ret = write_schemata(p->ctrlgrp, allocation_str, uparams->cpu, test->resource);
if (ret < 0)
return ret;
......@@ -109,9 +110,9 @@ static int check_results(void)
fp = fopen(output, "r");
if (!fp) {
perror(output);
ksft_perror(output);
return errno;
return -1;
}
runs = 0;
......@@ -141,13 +142,12 @@ void mba_test_cleanup(void)
remove(RESULT_FILE_NAME);
}
int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
static int mba_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
.resctrl_val = MBA_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.bw_report = "reads",
.setup = mba_setup
......@@ -156,7 +156,7 @@ int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, &param);
ret = resctrl_val(test, uparams, uparams->benchmark_cmd, &param);
if (ret)
goto out;
......@@ -167,3 +167,17 @@ int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
return ret;
}
static bool mba_feature_check(const struct resctrl_test *test)
{
return test_resource_feature_check(test) &&
resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes");
}
struct resctrl_test mba_test = {
.name = "MBA",
.resource = "MB",
.vendor_specific = ARCH_INTEL,
.feature_check = mba_feature_check,
.run_test = mba_run_test,
};
......@@ -59,9 +59,9 @@ static int check_results(size_t span)
fp = fopen(output, "r");
if (!fp) {
perror(output);
ksft_perror(output);
return errno;
return -1;
}
runs = 0;
......@@ -86,7 +86,9 @@ static int check_results(size_t span)
return ret;
}
static int mbm_setup(struct resctrl_val_param *p)
static int mbm_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
{
int ret = 0;
......@@ -95,9 +97,8 @@ static int mbm_setup(struct resctrl_val_param *p)
return END_OF_TESTS;
/* Set up schemata with 100% allocation on the first run. */
if (p->num_of_runs == 0 && validate_resctrl_feature_request("MB", NULL))
ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
p->resctrl_val);
if (p->num_of_runs == 0 && resctrl_resource_exists("MB"))
ret = write_schemata(p->ctrlgrp, "100", uparams->cpu, test->resource);
p->num_of_runs++;
......@@ -109,13 +110,12 @@ void mbm_test_cleanup(void)
remove(RESULT_FILE_NAME);
}
int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
static int mbm_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
.resctrl_val = MBM_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.filename = RESULT_FILE_NAME,
.bw_report = "reads",
.setup = mbm_setup
......@@ -124,14 +124,30 @@ int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, &param);
ret = resctrl_val(test, uparams, uparams->benchmark_cmd, &param);
if (ret)
goto out;
ret = check_results(DEFAULT_SPAN);
if (ret && (get_vendor() == ARCH_INTEL))
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
out:
mbm_test_cleanup();
return ret;
}
static bool mbm_feature_check(const struct resctrl_test *test)
{
return resctrl_mon_feature_exists("L3_MON", "mbm_total_bytes") &&
resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes");
}
struct resctrl_test mbm_test = {
.name = "MBM",
.resource = "MB",
.vendor_specific = ARCH_INTEL,
.feature_check = mbm_feature_check,
.run_test = mbm_run_test,
};
......@@ -28,6 +28,12 @@
#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
#define INFO_PATH "/sys/fs/resctrl/info"
/*
* CPU vendor IDs
*
* Define as bits because they're used for vendor_specific bitmask in
* the struct resctrl_test.
*/
#define ARCH_INTEL 1
#define ARCH_AMD 2
......@@ -37,20 +43,52 @@
#define DEFAULT_SPAN (250 * MB)
#define PARENT_EXIT(err_msg) \
#define PARENT_EXIT() \
do { \
perror(err_msg); \
kill(ppid, SIGKILL); \
umount_resctrlfs(); \
exit(EXIT_FAILURE); \
} while (0)
/*
* user_params: User supplied parameters
 * @cpu: CPU number to which the benchmark will be bound
* @bits: Number of bits used for cache allocation size
* @benchmark_cmd: Benchmark command to run during (some of the) tests
*/
struct user_params {
int cpu;
int bits;
const char *benchmark_cmd[BENCHMARK_ARGS];
};
/*
* resctrl_test: resctrl test definition
* @name: Test name
* @group: Test group - a common name for tests that share some characteristic
* (e.g., L3 CAT test belongs to the CAT group). Can be NULL
* @resource: Resource to test (e.g., MB, L3, L2, etc.)
* @vendor_specific: Bitmask for vendor-specific tests (can be 0 for universal tests)
* @disabled: Test is disabled
* @feature_check: Callback to check required resctrl features
* @run_test: Callback to run the test
*/
struct resctrl_test {
const char *name;
const char *group;
const char *resource;
unsigned int vendor_specific;
bool disabled;
bool (*feature_check)(const struct resctrl_test *test);
int (*run_test)(const struct resctrl_test *test,
const struct user_params *uparams);
};
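A minimal sketch of how the callbacks above are wired up; the names my_feature_check/my_run_test/my_test are hypothetical (real instances such as l3_cat_test are declared at the end of this header), and a real test would also be added to the resctrl_tests[] array in resctrl_tests.c:
/* Hypothetical example only - mirrors how the real tests are defined. */
static bool my_feature_check(const struct resctrl_test *test)
{
	return resctrl_resource_exists(test->resource);
}

static int my_run_test(const struct resctrl_test *test,
		       const struct user_params *uparams)
{
	return 0;	/* 0 = pass, non-zero = fail */
}

struct resctrl_test my_test = {
	.name = "MY_TEST",
	.resource = "L3",
	.feature_check = my_feature_check,
	.run_test = my_run_test,
};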
/*
* resctrl_val_param: resctrl test parameters
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
* @cpu_no: CPU number to which the benchmark would be binded
* @filename: Name of file to which the o/p should be written
* @bw_report: Bandwidth report type (reads vs writes)
* @setup: Call back function to setup test environment
......@@ -59,12 +97,20 @@ struct resctrl_val_param {
char *resctrl_val;
char ctrlgrp[64];
char mongrp[64];
int cpu_no;
char filename[64];
char *bw_report;
unsigned long mask;
int num_of_runs;
int (*setup)(struct resctrl_val_param *param);
int (*setup)(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *param);
};
struct perf_event_read {
__u64 nr; /* The number of events */
struct {
__u64 value; /* The value of the event */
} values[2];
};
#define MBM_STR "mbm"
......@@ -72,6 +118,13 @@ struct resctrl_val_param {
#define CMT_STR "cmt"
#define CAT_STR "cat"
/*
 * Memory location that consumes values the compiler must not optimize away.
 * Volatile ensures writes to this location cannot be optimized away by the
 * compiler.
*/
extern volatile int *value_sink;
extern pid_t bm_pid, ppid;
extern char llc_occup_path[1024];
......@@ -79,42 +132,84 @@ extern char llc_occup_path[1024];
int get_vendor(void);
bool check_resctrlfs_support(void);
int filter_dmesg(void);
int get_resource_id(int cpu_no, int *resource_id);
int get_domain_id(const char *resource, int cpu_no, int *domain_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
bool validate_resctrl_feature_request(const char *resource, const char *feature);
bool resctrl_resource_exists(const char *resource);
bool resctrl_mon_feature_exists(const char *resource, const char *feature);
bool resource_info_file_exists(const char *resource, const char *file);
bool test_resource_feature_check(const struct resctrl_test *test);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no);
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
char *resctrl_val);
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource);
int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
int run_fill_buf(size_t span, int memflush, int op, bool once);
int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param);
int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd);
unsigned char *alloc_buffer(size_t buf_size, int memflush);
void mem_flush(unsigned char *buf, size_t buf_size);
void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
int run_fill_buf(size_t buf_size, int memflush, int op, bool once);
int resctrl_val(const struct resctrl_test *test,
const struct user_params *uparams,
const char * const *benchmark_cmd,
struct resctrl_val_param *param);
void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd);
void mba_test_cleanup(void);
int get_cbm_mask(char *cache_type, char *cbm_mask);
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
unsigned long create_bit_mask(unsigned int start, unsigned int len);
unsigned int count_contiguous_bits(unsigned long val, unsigned int *start);
int get_full_cbm(const char *cache_type, unsigned long *mask);
int get_mask_no_shareable(const char *cache_type, unsigned long *mask);
int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size);
int resource_info_unsigned_get(const char *resource, const char *filename, unsigned int *val);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int signal_handler_register(void);
void signal_handler_unregister(void);
int cat_val(struct resctrl_val_param *param, size_t span);
void cat_test_cleanup(void);
int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd);
unsigned int count_bits(unsigned long n);
void cmt_test_cleanup(void);
int get_core_sibling(int cpu_no);
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt);
void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config);
void perf_event_initialize_read_format(struct perf_event_read *pe_read);
int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
int perf_event_reset_enable(int pe_fd);
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
const char *filename, int bm_pid);
int measure_llc_resctrl(const char *filename, int bm_pid);
void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
/*
* cache_portion_size - Calculate the size of a cache portion
* @cache_size: Total cache size in bytes
* @portion_mask: Cache portion mask
* @full_cache_mask: Full Cache Bit Mask (CBM) for the cache
*
* Return: The size of the cache portion in bytes.
*/
static inline unsigned long cache_portion_size(unsigned long cache_size,
unsigned long portion_mask,
unsigned long full_cache_mask)
{
unsigned int bits = count_bits(full_cache_mask);
/*
 * If the full CBM has no bits set, assume the cache cannot be split into
 * smaller portions. To avoid a divide by zero, return cache_size.
*/
if (!bits)
return cache_size;
return cache_size * count_bits(portion_mask) / bits;
}
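A quick usage sketch with made-up numbers:
/*
 * Illustrative numbers only: a 12 MiB cache with a 12-bit full CBM and a
 * 4-bit portion mask:
 *   cache_portion_size(12 * MB, 0x00f, 0xfff) == 12 MiB * 4 / 12 == 4 MiB
 */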
extern struct resctrl_test mbm_test;
extern struct resctrl_test mba_test;
extern struct resctrl_test cmt_test;
extern struct resctrl_test l3_cat_test;
extern struct resctrl_test l3_noncont_cat_test;
extern struct resctrl_test l2_noncont_cat_test;
#endif /* RESCTRL_H */
......@@ -10,6 +10,19 @@
*/
#include "resctrl.h"
/* Volatile memory sink to prevent compiler optimizations */
static volatile int sink_target;
volatile int *value_sink = &sink_target;
static struct resctrl_test *resctrl_tests[] = {
&mbm_test,
&mba_test,
&cmt_test,
&l3_cat_test,
&l3_noncont_cat_test,
&l2_noncont_cat_test,
};
static int detect_vendor(void)
{
FILE *inf = fopen("/proc/cpuinfo", "r");
......@@ -49,11 +62,20 @@ int get_vendor(void)
static void cmd_help(void)
{
int i;
printf("usage: resctrl_tests [-h] [-t test list] [-n no_of_bits] [-b benchmark_cmd [option]...]\n");
printf("\t-b benchmark_cmd [option]...: run specified benchmark for MBM, MBA and CMT\n");
printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
printf("\t-t test list: run tests/groups specified by the list, ");
printf("e.g. -t mbm,mba,cmt,cat\n");
printf("\t\tSupported tests (group):\n");
for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) {
if (resctrl_tests[i]->group)
printf("\t\t\t%s (%s)\n", resctrl_tests[i]->name, resctrl_tests[i]->group);
else
printf("\t\t\t%s\n", resctrl_tests[i]->name);
}
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
......@@ -92,116 +114,63 @@ static void test_cleanup(void)
signal_handler_unregister();
}
static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no)
static bool test_vendor_specific_check(const struct resctrl_test *test)
{
int res;
if (!test->vendor_specific)
return true;
ksft_print_msg("Starting MBM BW change ...\n");
if (test_prepare()) {
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
return;
}
if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") ||
!validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
(get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
goto cleanup;
}
res = mbm_bw_change(cpu_no, benchmark_cmd);
ksft_test_result(!res, "MBM: bw change\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
cleanup:
test_cleanup();
return get_vendor() & test->vendor_specific;
}
static void run_mba_test(const char * const *benchmark_cmd, int cpu_no)
static void run_single_test(const struct resctrl_test *test, const struct user_params *uparams)
{
int res;
ksft_print_msg("Starting MBA Schemata change ...\n");
int ret;
if (test_prepare()) {
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
if (test->disabled)
return;
}
if (!validate_resctrl_feature_request("MB", NULL) ||
!validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
(get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
goto cleanup;
if (!test_vendor_specific_check(test)) {
ksft_test_result_skip("Hardware does not support %s\n", test->name);
return;
}
res = mba_schemata_change(cpu_no, benchmark_cmd);
ksft_test_result(!res, "MBA: schemata change\n");
cleanup:
test_cleanup();
}
static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no)
{
int res;
ksft_print_msg("Starting CMT test ...\n");
ksft_print_msg("Starting %s test ...\n", test->name);
if (test_prepare()) {
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
return;
}
if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") ||
!validate_resctrl_feature_request("L3", NULL)) {
ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
if (!test->feature_check(test)) {
ksft_test_result_skip("Hardware does not support %s or %s is disabled\n",
test->name, test->name);
goto cleanup;
}
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
ksft_test_result(!res, "CMT: test\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
ret = test->run_test(test, uparams);
ksft_test_result(!ret, "%s: test\n", test->name);
cleanup:
test_cleanup();
}
static void run_cat_test(int cpu_no, int no_of_bits)
static void init_user_params(struct user_params *uparams)
{
int res;
ksft_print_msg("Starting CAT test ...\n");
memset(uparams, 0, sizeof(*uparams));
if (test_prepare()) {
ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
return;
}
if (!validate_resctrl_feature_request("L3", NULL)) {
ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
goto cleanup;
}
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
ksft_test_result(!res, "CAT: test\n");
cleanup:
test_cleanup();
uparams->cpu = 1;
uparams->bits = 0;
}
int main(int argc, char **argv)
{
bool mbm_test = true, mba_test = true, cmt_test = true;
const char *benchmark_cmd[BENCHMARK_ARGS] = {};
int c, cpu_no = 1, i, no_of_bits = 0;
int tests = ARRAY_SIZE(resctrl_tests);
bool test_param_seen = false;
struct user_params uparams;
char *span_str = NULL;
bool cat_test = true;
int tests = 0;
int ret;
int ret, c, i;
init_user_params(&uparams);
while ((c = getopt(argc, argv, "ht:b:n:p:")) != -1) {
char *token;
......@@ -219,32 +188,35 @@ int main(int argc, char **argv)
/* Extract benchmark command from command line. */
for (i = 0; i < argc - optind; i++)
benchmark_cmd[i] = argv[i + optind];
benchmark_cmd[i] = NULL;
uparams.benchmark_cmd[i] = argv[i + optind];
uparams.benchmark_cmd[i] = NULL;
goto last_arg;
case 't':
token = strtok(optarg, ",");
mbm_test = false;
mba_test = false;
cmt_test = false;
cat_test = false;
if (!test_param_seen) {
for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++)
resctrl_tests[i]->disabled = true;
tests = 0;
test_param_seen = true;
}
while (token) {
if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
mbm_test = true;
tests++;
} else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
mba_test = true;
tests++;
} else if (!strncmp(token, CMT_STR, sizeof(CMT_STR))) {
cmt_test = true;
tests++;
} else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
cat_test = true;
bool found = false;
for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) {
if (!strcasecmp(token, resctrl_tests[i]->name) ||
(resctrl_tests[i]->group &&
!strcasecmp(token, resctrl_tests[i]->group))) {
if (resctrl_tests[i]->disabled)
tests++;
} else {
printf("invalid argument\n");
resctrl_tests[i]->disabled = false;
found = true;
}
}
if (!found) {
printf("invalid test: %s\n", token);
return -1;
}
......@@ -252,11 +224,11 @@ int main(int argc, char **argv)
}
break;
case 'p':
cpu_no = atoi(optarg);
uparams.cpu = atoi(optarg);
break;
case 'n':
no_of_bits = atoi(optarg);
if (no_of_bits <= 0) {
uparams.bits = atoi(optarg);
if (uparams.bits <= 0) {
printf("Bail out! invalid argument for no_of_bits\n");
return -1;
}
......@@ -291,32 +263,23 @@ int main(int argc, char **argv)
filter_dmesg();
if (!benchmark_cmd[0]) {
if (!uparams.benchmark_cmd[0]) {
/* If no benchmark is given by "-b" argument, use fill_buf. */
benchmark_cmd[0] = "fill_buf";
uparams.benchmark_cmd[0] = "fill_buf";
ret = asprintf(&span_str, "%u", DEFAULT_SPAN);
if (ret < 0)
ksft_exit_fail_msg("Out of memory!\n");
benchmark_cmd[1] = span_str;
benchmark_cmd[2] = "1";
benchmark_cmd[3] = "0";
benchmark_cmd[4] = "false";
benchmark_cmd[5] = NULL;
uparams.benchmark_cmd[1] = span_str;
uparams.benchmark_cmd[2] = "1";
uparams.benchmark_cmd[3] = "0";
uparams.benchmark_cmd[4] = "false";
uparams.benchmark_cmd[5] = NULL;
}
ksft_set_plan(tests ? : 4);
if (mbm_test)
run_mbm_test(benchmark_cmd, cpu_no);
if (mba_test)
run_mba_test(benchmark_cmd, cpu_no);
if (cmt_test)
run_cmt_test(benchmark_cmd, cpu_no);
ksft_set_plan(tests);
if (cat_test)
run_cat_test(cpu_no, no_of_bits);
for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++)
run_single_test(resctrl_tests[i], &uparams);
free(span_str);
ksft_finished();
......
......@@ -156,12 +156,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
sprintf(imc_counter_type, "%s%s", imc_dir, "type");
fp = fopen(imc_counter_type, "r");
if (!fp) {
perror("Failed to open imc counter type file");
ksft_perror("Failed to open iMC counter type file");
return -1;
}
if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
perror("Could not get imc type");
ksft_perror("Could not get iMC type");
fclose(fp);
return -1;
......@@ -175,12 +175,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
fp = fopen(imc_counter_cfg, "r");
if (!fp) {
perror("Failed to open imc config file");
ksft_perror("Failed to open iMC config file");
return -1;
}
if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
perror("Could not get imc cas count read");
ksft_perror("Could not get iMC cas count read");
fclose(fp);
return -1;
......@@ -193,12 +193,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
fp = fopen(imc_counter_cfg, "r");
if (!fp) {
perror("Failed to open imc config file");
ksft_perror("Failed to open iMC config file");
return -1;
}
if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
perror("Could not get imc cas count write");
ksft_perror("Could not get iMC cas count write");
fclose(fp);
return -1;
......@@ -262,12 +262,12 @@ static int num_of_imcs(void)
}
closedir(dp);
if (count == 0) {
perror("Unable find iMC counters!\n");
ksft_print_msg("Unable to find iMC counters\n");
return -1;
}
} else {
perror("Unable to open PMU directory!\n");
ksft_perror("Unable to open PMU directory");
return -1;
}
......@@ -339,14 +339,14 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
if (read(r->fd, &r->return_value,
sizeof(struct membw_read_format)) == -1) {
perror("Couldn't get read b/w through iMC");
ksft_perror("Couldn't get read b/w through iMC");
return -1;
}
if (read(w->fd, &w->return_value,
sizeof(struct membw_read_format)) == -1) {
perror("Couldn't get write bw through iMC");
ksft_perror("Couldn't get write bw through iMC");
return -1;
}
......@@ -387,20 +387,20 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
return 0;
}
void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id)
{
if (ctrlgrp && mongrp)
sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, ctrlgrp, mongrp, resource_id);
RESCTRL_PATH, ctrlgrp, mongrp, domain_id);
else if (!ctrlgrp && mongrp)
sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
mongrp, resource_id);
mongrp, domain_id);
else if (ctrlgrp && !mongrp)
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
ctrlgrp, resource_id);
ctrlgrp, domain_id);
else if (!ctrlgrp && !mongrp)
sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
resource_id);
domain_id);
}
/*
......@@ -413,23 +413,23 @@ void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
int cpu_no, char *resctrl_val)
{
int resource_id;
int domain_id;
if (get_resource_id(cpu_no, &resource_id) < 0) {
perror("Could not get resource_id");
if (get_domain_id("MB", cpu_no, &domain_id) < 0) {
ksft_print_msg("Could not get domain ID\n");
return;
}
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
set_mbm_path(ctrlgrp, mongrp, resource_id);
set_mbm_path(ctrlgrp, mongrp, domain_id);
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
if (ctrlgrp)
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, ctrlgrp, resource_id);
RESCTRL_PATH, ctrlgrp, domain_id);
else
sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, resource_id);
RESCTRL_PATH, domain_id);
}
}
......@@ -449,12 +449,12 @@ static int get_mem_bw_resctrl(unsigned long *mbm_total)
fp = fopen(mbm_total_path, "r");
if (!fp) {
perror("Failed to open total bw file");
ksft_perror("Failed to open total bw file");
return -1;
}
if (fscanf(fp, "%lu", mbm_total) <= 0) {
perror("Could not get mbm local bytes");
ksft_perror("Could not get mbm local bytes");
fclose(fp);
return -1;
......@@ -495,7 +495,7 @@ int signal_handler_register(void)
if (sigaction(SIGINT, &sigact, NULL) ||
sigaction(SIGTERM, &sigact, NULL) ||
sigaction(SIGHUP, &sigact, NULL)) {
perror("# sigaction");
ksft_perror("sigaction");
ret = -1;
}
return ret;
......@@ -515,7 +515,7 @@ void signal_handler_unregister(void)
if (sigaction(SIGINT, &sigact, NULL) ||
sigaction(SIGTERM, &sigact, NULL) ||
sigaction(SIGHUP, &sigact, NULL)) {
perror("# sigaction");
ksft_perror("sigaction");
}
}
......@@ -526,7 +526,7 @@ void signal_handler_unregister(void)
* @bw_imc: perf imc counter value
* @bw_resc: memory bandwidth value
*
* Return: 0 on success. non-zero on failure.
* Return: 0 on success, < 0 on error.
*/
static int print_results_bw(char *filename, int bm_pid, float bw_imc,
unsigned long bw_resc)
......@@ -540,16 +540,16 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
} else {
fp = fopen(filename, "a");
if (!fp) {
perror("Cannot open results file");
ksft_perror("Cannot open results file");
return errno;
return -1;
}
if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
bm_pid, bw_imc, bw_resc, diff) <= 0) {
ksft_print_msg("Could not log results\n");
fclose(fp);
perror("Could not log results.");
return errno;
return -1;
}
fclose(fp);
}
......@@ -582,19 +582,20 @@ static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
int cpu_no, char *resctrl_val)
{
int resource_id;
int domain_id;
if (get_resource_id(cpu_no, &resource_id) < 0) {
perror("# Unable to resource_id");
if (get_domain_id("L3", cpu_no, &domain_id) < 0) {
ksft_print_msg("Could not get domain ID\n");
return;
}
if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
set_cmt_path(ctrlgrp, mongrp, resource_id);
set_cmt_path(ctrlgrp, mongrp, domain_id);
}
static int
measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
static int measure_vals(const struct user_params *uparams,
struct resctrl_val_param *param,
unsigned long *bw_resc_start)
{
unsigned long bw_resc, bw_resc_end;
float bw_imc;
......@@ -607,7 +608,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
* Compare the two values to validate resctrl value.
* It takes 1sec to measure the data.
*/
ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
ret = get_mem_bw_imc(uparams->cpu, param->bw_report, &bw_imc);
if (ret < 0)
return ret;
......@@ -647,20 +648,24 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
* stdio (console)
*/
fp = freopen("/dev/null", "w", stdout);
if (!fp)
PARENT_EXIT("Unable to direct benchmark status to /dev/null");
if (!fp) {
ksft_perror("Unable to direct benchmark status to /dev/null");
PARENT_EXIT();
}
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
/* Execute default fill_buf benchmark */
span = strtoul(benchmark_cmd[1], NULL, 10);
memflush = atoi(benchmark_cmd[2]);
operation = atoi(benchmark_cmd[3]);
if (!strcmp(benchmark_cmd[4], "true"))
if (!strcmp(benchmark_cmd[4], "true")) {
once = true;
else if (!strcmp(benchmark_cmd[4], "false"))
} else if (!strcmp(benchmark_cmd[4], "false")) {
once = false;
else
PARENT_EXIT("Invalid once parameter");
} else {
ksft_print_msg("Invalid once parameter\n");
PARENT_EXIT();
}
if (run_fill_buf(span, memflush, operation, once))
fprintf(stderr, "Error in running fill buffer\n");
......@@ -668,22 +673,28 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
/* Execute specified benchmark */
ret = execvp(benchmark_cmd[0], benchmark_cmd);
if (ret)
perror("wrong\n");
ksft_perror("execvp");
}
fclose(stdout);
PARENT_EXIT("Unable to run specified benchmark");
ksft_print_msg("Unable to run specified benchmark\n");
PARENT_EXIT();
}
/*
* resctrl_val: execute benchmark and measure memory bandwidth on
* the benchmark
* @test: test information structure
* @uparams: user supplied parameters
* @benchmark_cmd: benchmark command and its arguments
* @param: parameters passed to resctrl_val()
*
* Return: 0 on success. non-zero on failure.
* Return: 0 when the test was run, < 0 on error.
*/
int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
int resctrl_val(const struct resctrl_test *test,
const struct user_params *uparams,
const char * const *benchmark_cmd,
struct resctrl_val_param *param)
{
char *resctrl_val = param->resctrl_val;
unsigned long bw_resc_start = 0;
......@@ -709,7 +720,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
ppid = getpid();
if (pipe(pipefd)) {
perror("# Unable to create pipe");
ksft_perror("Unable to create pipe");
return -1;
}
......@@ -721,7 +732,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
fflush(stdout);
bm_pid = fork();
if (bm_pid == -1) {
perror("# Unable to fork");
ksft_perror("Unable to fork");
return -1;
}
......@@ -738,15 +749,17 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
sigact.sa_flags = SA_SIGINFO;
/* Register for "SIGUSR1" signal from parent */
if (sigaction(SIGUSR1, &sigact, NULL))
PARENT_EXIT("Can't register child for signal");
if (sigaction(SIGUSR1, &sigact, NULL)) {
ksft_perror("Can't register child for signal");
PARENT_EXIT();
}
/* Tell parent that child is ready */
close(pipefd[0]);
pipe_message = 1;
if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message)) {
perror("# failed signaling parent process");
ksft_perror("Failed signaling parent process");
close(pipefd[1]);
return -1;
}
......@@ -755,7 +768,8 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
/* Suspend child until delivery of "SIGUSR1" from parent */
sigsuspend(&sigact.sa_mask);
PARENT_EXIT("Child is done");
ksft_perror("Child is done");
PARENT_EXIT();
}
ksft_print_msg("Benchmark PID: %d\n", bm_pid);
......@@ -769,7 +783,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
value.sival_ptr = (void *)benchmark_cmd;
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, param->cpu_no);
ret = taskset_benchmark(bm_pid, uparams->cpu, NULL);
if (ret)
goto out;
......@@ -786,17 +800,17 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
goto out;
initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
uparams->cpu, resctrl_val);
} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
uparams->cpu, resctrl_val);
/* Parent waits for child to be ready. */
close(pipefd[1]);
while (pipe_message != 1) {
if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message)) {
perror("# failed reading message from child process");
ksft_perror("Failed reading message from child process");
close(pipefd[0]);
goto out;
}
......@@ -805,8 +819,8 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
/* Signal child to start benchmark */
if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
perror("# sigqueue SIGUSR1 to child");
ret = errno;
ksft_perror("sigqueue SIGUSR1 to child");
ret = -1;
goto out;
}
......@@ -815,7 +829,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
ret = param->setup(param);
ret = param->setup(test, uparams, param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
......@@ -825,12 +839,12 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
ret = measure_vals(param, &bw_resc_start);
ret = measure_vals(uparams, param, &bw_resc_start);
if (ret)
break;
} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
sleep(1);
ret = measure_cache_vals(param, bm_pid);
ret = measure_llc_resctrl(param->filename, bm_pid);
if (ret)
break;
}
......
......@@ -20,7 +20,7 @@ static int find_resctrl_mount(char *buffer)
mounts = fopen("/proc/mounts", "r");
if (!mounts) {
perror("/proc/mounts");
ksft_perror("/proc/mounts");
return -ENXIO;
}
while (!feof(mounts)) {
......@@ -56,7 +56,7 @@ static int find_resctrl_mount(char *buffer)
* Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
* pre-existing settings interfering with the test results.
*
* Return: 0 on success, non-zero on failure
* Return: 0 on success, < 0 on error.
*/
int mount_resctrlfs(void)
{
......@@ -69,7 +69,7 @@ int mount_resctrlfs(void)
ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
if (ret)
perror("# mount");
ksft_perror("mount");
return ret;
}
......@@ -86,41 +86,67 @@ int umount_resctrlfs(void)
return ret;
if (umount(mountpoint)) {
perror("# Unable to umount resctrl");
ksft_perror("Unable to umount resctrl");
return errno;
return -1;
}
return 0;
}
/*
* get_resource_id - Get socket number/l3 id for a specified CPU
* get_cache_level - Convert cache level from string to integer
* @cache_type: Cache level as string
*
* Return: cache level as integer or -1 if @cache_type is invalid.
*/
static int get_cache_level(const char *cache_type)
{
if (!strcmp(cache_type, "L3"))
return 3;
if (!strcmp(cache_type, "L2"))
return 2;
ksft_print_msg("Invalid cache level\n");
return -1;
}
static int get_resource_cache_level(const char *resource)
{
/* "MB" use L3 (LLC) as resource */
if (!strcmp(resource, "MB"))
return 3;
return get_cache_level(resource);
}
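For illustration, the level returned here feeds the sysfs path built in get_domain_id() below (hypothetical CPU number):
/*
 * get_domain_id("MB", 3, &id) maps "MB" to cache level 3 and reads
 * /sys/devices/system/cpu/cpu3/cache/index3/id
 */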
/*
* get_domain_id - Get resctrl domain ID for a specified CPU
* @resource: resource name
* @cpu_no: CPU number
* @resource_id: Socket number or l3_id
* @domain_id: domain ID (cache ID; for MB, L3 cache ID)
*
* Return: >= 0 on success, < 0 on failure.
*/
int get_resource_id(int cpu_no, int *resource_id)
int get_domain_id(const char *resource, int cpu_no, int *domain_id)
{
char phys_pkg_path[1024];
int cache_num;
FILE *fp;
if (get_vendor() == ARCH_AMD)
sprintf(phys_pkg_path, "%s%d/cache/index3/id",
PHYS_ID_PATH, cpu_no);
else
sprintf(phys_pkg_path, "%s%d/topology/physical_package_id",
PHYS_ID_PATH, cpu_no);
cache_num = get_resource_cache_level(resource);
if (cache_num < 0)
return cache_num;
sprintf(phys_pkg_path, "%s%d/cache/index%d/id", PHYS_ID_PATH, cpu_no, cache_num);
fp = fopen(phys_pkg_path, "r");
if (!fp) {
perror("Failed to open physical_package_id");
ksft_perror("Failed to open cache id file");
return -1;
}
if (fscanf(fp, "%d", resource_id) <= 0) {
perror("Could not get socket number or l3 id");
if (fscanf(fp, "%d", domain_id) <= 0) {
ksft_perror("Could not get domain ID");
fclose(fp);
return -1;
......@@ -138,31 +164,26 @@ int get_resource_id(int cpu_no, int *resource_id)
*
* Return: = 0 on success, < 0 on failure.
*/
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size)
{
char cache_path[1024], cache_str[64];
int length, i, cache_num;
FILE *fp;
if (!strcmp(cache_type, "L3")) {
cache_num = 3;
} else if (!strcmp(cache_type, "L2")) {
cache_num = 2;
} else {
perror("Invalid cache level");
return -1;
}
cache_num = get_cache_level(cache_type);
if (cache_num < 0)
return cache_num;
sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
cpu_no, cache_num);
fp = fopen(cache_path, "r");
if (!fp) {
perror("Failed to open cache size");
ksft_perror("Failed to open cache size");
return -1;
}
if (fscanf(fp, "%s", cache_str) <= 0) {
perror("Could not get cache_size");
ksft_perror("Could not get cache_size");
fclose(fp);
return -1;
......@@ -196,30 +217,29 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
#define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu"
/*
* get_cbm_mask - Get cbm mask for given cache
* @cache_type: Cache level L2/L3
* @cbm_mask: cbm_mask returned as a string
* get_bit_mask - Get bit mask from given file
* @filename: File containing the mask
* @mask: The bit mask returned as unsigned long
*
* Return: = 0 on success, < 0 on failure.
*/
int get_cbm_mask(char *cache_type, char *cbm_mask)
static int get_bit_mask(const char *filename, unsigned long *mask)
{
char cbm_mask_path[1024];
FILE *fp;
if (!cbm_mask)
if (!filename || !mask)
return -1;
sprintf(cbm_mask_path, "%s/%s/cbm_mask", INFO_PATH, cache_type);
fp = fopen(cbm_mask_path, "r");
fp = fopen(filename, "r");
if (!fp) {
perror("Failed to open cache level");
ksft_print_msg("Failed to open bit mask file '%s': %s\n",
filename, strerror(errno));
return -1;
}
if (fscanf(fp, "%s", cbm_mask) <= 0) {
perror("Could not get max cbm_mask");
if (fscanf(fp, "%lx", mask) <= 0) {
ksft_print_msg("Could not read bit mask file '%s': %s\n",
filename, strerror(errno));
fclose(fp);
return -1;
......@@ -230,63 +250,182 @@ int get_cbm_mask(char *cache_type, char *cbm_mask)
}
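get_bit_mask() relies on resctrl exposing masks as bare hexadecimal strings (for example, info/L3/cbm_mask typically reads something like "7fff" with no "0x" prefix), which is why a single fscanf("%lx") is enough. A minimal stand-alone sketch of the same parsing, assuming resctrl is mounted at /sys/fs/resctrl:

#include <stdio.h>

int main(void)
{
	unsigned long mask;
	/* Path assumes a mounted resctrl filesystem with L3 CAT support. */
	FILE *fp = fopen("/sys/fs/resctrl/info/L3/cbm_mask", "r");

	if (!fp)
		return 1;
	if (fscanf(fp, "%lx", &mask) <= 0) {
		fclose(fp);
		return 1;
	}
	fclose(fp);
	printf("full L3 CBM: 0x%lx\n", mask);
	return 0;
}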
/*
* get_core_sibling - Get sibling core id from the same socket for given CPU
* @cpu_no: CPU number
* resource_info_unsigned_get - Read an unsigned value from
* /sys/fs/resctrl/info/@resource/@filename
* @resource: Resource name that matches directory name in
* /sys/fs/resctrl/info
* @filename: File in /sys/fs/resctrl/info/@resource
* @val: Contains read value on success.
*
* Return: > 0 on success, < 0 on failure.
* Return: = 0 on success, < 0 on failure. On success the read
* value is saved into @val.
*/
int get_core_sibling(int cpu_no)
int resource_info_unsigned_get(const char *resource, const char *filename,
unsigned int *val)
{
char core_siblings_path[1024], cpu_list_str[64];
int sibling_cpu_no = -1;
char file_path[PATH_MAX];
FILE *fp;
sprintf(core_siblings_path, "%s%d/topology/core_siblings_list",
CORE_SIBLINGS_PATH, cpu_no);
snprintf(file_path, sizeof(file_path), "%s/%s/%s", INFO_PATH, resource,
filename);
fp = fopen(core_siblings_path, "r");
fp = fopen(file_path, "r");
if (!fp) {
perror("Failed to open core siblings path");
ksft_print_msg("Error opening %s: %m\n", file_path);
return -1;
}
if (fscanf(fp, "%s", cpu_list_str) <= 0) {
perror("Could not get core_siblings list");
fclose(fp);
if (fscanf(fp, "%u", val) <= 0) {
ksft_print_msg("Could not get contents of %s: %m\n", file_path);
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
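One user of this helper added later in the series is the non-contiguous CBMs CAT test, which (as far as the series shows) reads a flag file such as info/L3/sparse_masks. A hedged usage sketch, assuming the declarations from the selftest's resctrl.h and kselftest.h:

	unsigned int sparse_masks;

	/* "sparse_masks" advertises non-contiguous CBM support; treat the
	 * exact file name as an assumption outside of this series.
	 */
	if (resource_info_unsigned_get("L3", "sparse_masks", &sparse_masks))
		return -1;

	ksft_print_msg("Non-contiguous CBMs %ssupported by the kernel\n",
		       sparse_masks ? "" : "not ");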
char *token = strtok(cpu_list_str, "-,");
/*
* create_bit_mask - Create bit mask from start, len pair
* @start: LSB of the mask
* @len: Number of bits in the mask
*/
unsigned long create_bit_mask(unsigned int start, unsigned int len)
{
return ((1UL << len) - 1UL) << start;
}
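create_bit_mask() is the usual "len ones shifted up to start" construction, valid for the CBM widths used here (well below the width of unsigned long). A tiny self-contained check, for illustration only:

#include <assert.h>

static unsigned long bit_mask(unsigned int start, unsigned int len)
{
	return ((1UL << len) - 1UL) << start;	/* same expression as above */
}

int main(void)
{
	assert(bit_mask(0, 4) == 0xfUL);	/* bits 0-3 */
	assert(bit_mask(2, 4) == 0x3cUL);	/* bits 2-5 */
	assert(bit_mask(6, 6) == 0xfc0UL);	/* bits 6-11 */
	return 0;
}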
while (token) {
sibling_cpu_no = atoi(token);
/* Skipping core 0 as we don't want to run test on core 0 */
if (sibling_cpu_no != 0 && sibling_cpu_no != cpu_no)
break;
token = strtok(NULL, "-,");
/*
* count_contiguous_bits - Find the length of the longest train of contiguous set bits in a bit mask
* @val: A bit mask
* @start: The location of the least-significant bit of the longest train
*
* Return: The length (in bits) of the longest contiguous train of set bits
*/
unsigned int count_contiguous_bits(unsigned long val, unsigned int *start)
{
unsigned long last_val;
unsigned int count = 0;
while (val) {
last_val = val;
val &= (val >> 1);
count++;
}
return sibling_cpu_no;
if (start) {
if (count)
*start = ffsl(last_val) - 1;
else
*start = 0;
}
return count;
}
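A worked trace may help: each pass of val &= (val >> 1) shortens every run of ones by one bit, so the number of passes until val reaches zero is the length of the longest run, and the bit left standing in the last non-zero value marks where that run starts. For val = 0xe6 (binary 11100110, runs of length 3 and 2):

	pass 1: 0xe6 & 0x73 = 0x62
	pass 2: 0x62 & 0x31 = 0x20
	pass 3: 0x20 & 0x10 = 0x00    -> count = 3, last_val = 0x20
	ffsl(0x20) - 1 = 5            -> longest run is 3 bits starting at bit 5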
/*
* get_full_cbm - Get full Cache Bit Mask (CBM)
* @cache_type: Cache type as "L2" or "L3"
* @mask: Full cache bit mask representing the maximal portion of cache
* available for allocation, returned as unsigned long.
*
* Return: = 0 on success, < 0 on failure.
*/
int get_full_cbm(const char *cache_type, unsigned long *mask)
{
char cbm_path[PATH_MAX];
int ret;
if (!cache_type)
return -1;
snprintf(cbm_path, sizeof(cbm_path), "%s/%s/cbm_mask",
INFO_PATH, cache_type);
ret = get_bit_mask(cbm_path, mask);
if (ret || !*mask)
return -1;
return 0;
}
/*
* get_shareable_mask - Get shareable mask from shareable_bits
* @cache_type: Cache type as "L2" or "L3"
* @shareable_mask: Shareable mask returned as unsigned long
*
* Return: = 0 on success, < 0 on failure.
*/
static int get_shareable_mask(const char *cache_type, unsigned long *shareable_mask)
{
char mask_path[PATH_MAX];
if (!cache_type)
return -1;
snprintf(mask_path, sizeof(mask_path), "%s/%s/shareable_bits",
INFO_PATH, cache_type);
return get_bit_mask(mask_path, shareable_mask);
}
/*
* get_mask_no_shareable - Get Cache Bit Mask (CBM) without shareable bits
* @cache_type: Cache type as "L2" or "L3"
* @mask: The largest exclusive portion of the cache out of the
* full CBM, returned as unsigned long
*
* Parts of a cache may be shared with other devices such as a GPU. This function
* calculates the largest exclusive portion of the cache where no devices other
* than the CPU have access to that portion of the cache.
*
* Return: = 0 on success, < 0 on failure.
*/
int get_mask_no_shareable(const char *cache_type, unsigned long *mask)
{
unsigned long full_mask, shareable_mask;
unsigned int start, len;
if (get_full_cbm(cache_type, &full_mask) < 0)
return -1;
if (get_shareable_mask(cache_type, &shareable_mask) < 0)
return -1;
len = count_contiguous_bits(full_mask & ~shareable_mask, &start);
if (!len)
return -1;
*mask = create_bit_mask(start, len);
return 0;
}
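For example (numbers invented for illustration): with a full CBM of 0xfff and shareable_bits of 0x03c, full_mask & ~shareable_mask is 0xfc3, whose runs are 2 bits at bit 0 and 6 bits at bit 6; count_contiguous_bits() picks the 6-bit run, and create_bit_mask(6, 6) turns it back into the exclusive mask 0xfc0. Only the single longest contiguous run is kept, since CAT CBMs are traditionally required to be contiguous (the non-contiguous case is exactly what the new sparse_masks-based test probes separately).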
/*
* taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
* @bm_pid: PID that should be bound
* @cpu_no: CPU number to which the PID should be bound
* @old_affinity: When not NULL, set to old CPU affinity
*
* Return: 0 on success, non-zero on failure
* Return: 0 on success, < 0 on error.
*/
int taskset_benchmark(pid_t bm_pid, int cpu_no)
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity)
{
cpu_set_t my_set;
if (old_affinity) {
CPU_ZERO(old_affinity);
if (sched_getaffinity(bm_pid, sizeof(*old_affinity),
old_affinity)) {
ksft_perror("Unable to read CPU affinity");
return -1;
}
}
CPU_ZERO(&my_set);
CPU_SET(cpu_no, &my_set);
if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
perror("Unable to taskset benchmark");
ksft_perror("Unable to taskset benchmark");
return -1;
}
......@@ -294,13 +433,30 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
return 0;
}
/*
* taskset_restore - Taskset PID to the earlier CPU affinity
* @bm_pid: PID that should be reset
* @old_affinity: The old CPU affinity to restore
*
* Return: 0 on success, < 0 on error.
*/
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
{
if (sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) {
ksft_perror("Unable to restore CPU affinity");
return -1;
}
return 0;
}
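Together the pair gives callers a save/restore pattern around a pinned measurement. A rough usage sketch (assumes the selftest's resctrl.h declarations; bm_pid and cpu_no are whatever the caller already has):

	cpu_set_t old_affinity;

	/* Pin the benchmark to the CPU under test, remembering its old mask. */
	if (taskset_benchmark(bm_pid, cpu_no, &old_affinity) < 0)
		return -1;

	/* ... perform the measurement while the benchmark stays pinned ... */

	/* Put the benchmark's affinity back the way it was. */
	if (taskset_restore(bm_pid, &old_affinity) < 0)
		return -1;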
/*
* create_grp - Create a group only if one doesn't exist
* @grp_name: Name of the group
* @grp: Full path and name of the group
* @parent_grp: Full path and name of the parent group
*
* Return: 0 on success, non-zero on failure
* Return: 0 on success, < 0 on error.
*/
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
{
......@@ -325,7 +481,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
}
closedir(dp);
} else {
perror("Unable to open resctrl for group");
ksft_perror("Unable to open resctrl for group");
return -1;
}
......@@ -333,7 +489,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
/* Requested grp doesn't exist, hence create it */
if (found_grp == 0) {
if (mkdir(grp, 0) == -1) {
perror("Unable to create group");
ksft_perror("Unable to create group");
return -1;
}
......@@ -348,12 +504,12 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
fp = fopen(tasks, "w");
if (!fp) {
perror("Failed to open tasks file");
ksft_perror("Failed to open tasks file");
return -1;
}
if (fprintf(fp, "%d\n", pid) < 0) {
perror("Failed to wr pid to tasks file");
ksft_print_msg("Failed to write pid to tasks file\n");
fclose(fp);
return -1;
......@@ -376,7 +532,7 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
* pid is not written, this means that pid is in con_mon grp and hence
* should consult con_mon grp's mon_data directory for results.
*
* Return: 0 on success, non-zero on failure
* Return: 0 on success, < 0 on error.
*/
int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val)
......@@ -420,7 +576,7 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
out:
ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
if (ret)
perror("# writing to resctrlfs");
ksft_print_msg("Failed writing to resctrlfs\n");
return ret;
}
......@@ -430,23 +586,17 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
* @ctrlgrp: Name of the con_mon grp
* @schemata: Schemata that should be updated to
* @cpu_no: CPU number that the benchmark PID is bound to
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
* @resource: Resctrl resource (Eg: MB, L3, L2, etc.)
*
* Update schemata of a con_mon grp *only* if requested resctrl feature is
* Update schemata of a con_mon grp *only* if requested resctrl resource is
* allocation type
*
* Return: 0 on success, non-zero on failure
* Return: 0 on success, < 0 on error.
*/
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource)
{
char controlgroup[1024], reason[128], schema[1024] = {};
int resource_id, fd, schema_len = -1, ret = 0;
if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) &&
strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
return -ENOENT;
int domain_id, fd, schema_len, ret = 0;
if (!schemata) {
ksft_print_msg("Skipping empty schemata update\n");
......@@ -454,8 +604,8 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
return -1;
}
if (get_resource_id(cpu_no, &resource_id) < 0) {
sprintf(reason, "Failed to get resource id");
if (get_domain_id(resource, cpu_no, &domain_id) < 0) {
sprintf(reason, "Failed to get domain ID");
ret = -1;
goto out;
......@@ -466,14 +616,8 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n",
"L3:", resource_id, '=', schemata);
if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n",
"MB:", resource_id, '=', schemata);
schema_len = snprintf(schema, sizeof(schema), "%s:%d=%s\n",
resource, domain_id, schemata);
if (schema_len < 0 || schema_len >= sizeof(schema)) {
snprintf(reason, sizeof(reason),
"snprintf() failed with return value : %d", schema_len);
......@@ -564,20 +708,16 @@ char *fgrep(FILE *inf, const char *str)
}
/*
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.)
* @feature: Required monitor feature (in mon_features file). Can only be
* set for L3_MON. Must be NULL for all other resources.
* resctrl_resource_exists - Check if a resource is supported.
* @resource: Resctrl resource (e.g., MB, L3, L2, L3_MON, etc.)
*
* Return: True if the resource/feature is supported, else false. False is
* Return: True if the resource is supported, else false. False is
* also returned if resctrl FS is not mounted.
*/
bool validate_resctrl_feature_request(const char *resource, const char *feature)
bool resctrl_resource_exists(const char *resource)
{
char res_path[PATH_MAX];
struct stat statbuf;
char *res;
FILE *inf;
int ret;
if (!resource)
......@@ -592,8 +732,25 @@ bool validate_resctrl_feature_request(const char *resource, const char *feature)
if (stat(res_path, &statbuf))
return false;
if (!feature)
return true;
}
/*
* resctrl_mon_feature_exists - Check if requested monitoring feature is valid.
* @resource: Resource that uses the mon_features file. Currently only L3_MON
* is valid.
* @feature: Required monitor feature (in mon_features file).
*
* Return: True if the feature is supported, else false.
*/
bool resctrl_mon_feature_exists(const char *resource, const char *feature)
{
char res_path[PATH_MAX];
char *res;
FILE *inf;
if (!feature || !resource)
return false;
snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
inf = fopen(res_path, "r");
......@@ -607,6 +764,36 @@ bool validate_resctrl_feature_request(const char *resource, const char *feature)
return !!res;
}
/*
* resource_info_file_exists - Check if a file is present inside
* /sys/fs/resctrl/info/@resource.
* @resource: Required resource (Eg: MB, L3, L2, etc.)
* @file: Required file.
*
* Return: True if the /sys/fs/resctrl/info/@resource/@file exists, else false.
*/
bool resource_info_file_exists(const char *resource, const char *file)
{
char res_path[PATH_MAX];
struct stat statbuf;
if (!file || !resource)
return false;
snprintf(res_path, sizeof(res_path), "%s/%s/%s", INFO_PATH, resource,
file);
if (stat(res_path, &statbuf))
return false;
return true;
}
bool test_resource_feature_check(const struct resctrl_test *test)
{
return resctrl_resource_exists(test->resource);
}
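After the split, allocation-style tests and monitoring-style tests ask different questions of the filesystem; roughly (a sketch assuming the helpers above, kselftest.h, and a mounted resctrl FS):

	/* CAT/MBA-style tests only need the resource directory to exist: */
	if (!resctrl_resource_exists("L3"))
		ksft_exit_skip("L3 CAT not supported\n");

	/* CMT/MBM-style tests also need the specific monitoring event: */
	if (!resctrl_mon_feature_exists("L3_MON", "llc_occupancy"))
		ksft_exit_skip("llc_occupancy monitoring not supported\n");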
int filter_dmesg(void)
{
char line[1024];
......@@ -617,7 +804,7 @@ int filter_dmesg(void)
ret = pipe(pipefds);
if (ret) {
perror("pipe");
ksft_perror("pipe");
return ret;
}
fflush(stdout);
......@@ -626,13 +813,13 @@ int filter_dmesg(void)
close(pipefds[0]);
dup2(pipefds[1], STDOUT_FILENO);
execlp("dmesg", "dmesg", NULL);
perror("executing dmesg");
ksft_perror("Executing dmesg");
exit(1);
}
close(pipefds[1]);
fp = fdopen(pipefds[0], "r");
if (!fp) {
perror("fdopen(pipe)");
ksft_perror("fdopen(pipe)");
kill(pid, SIGTERM);
return -1;
......
# SPDX-License-Identifier: GPL-2.0
TEST_PROGS += test_probe_samples.sh
include ../lib.mk
CONFIG_RUST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLES_RUST=y
CONFIG_SAMPLE_RUST_MINIMAL=m
CONFIG_SAMPLE_RUST_PRINT=m
\ No newline at end of file
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2023 Collabora Ltd
#
# This script tests whether the rust sample modules can
# be added and removed correctly.
#
DIR="$(dirname "$(readlink -f "$0")")"
KTAP_HELPERS="${DIR}/../kselftest/ktap_helpers.sh"
if [ -e "$KTAP_HELPERS" ]; then
source "$KTAP_HELPERS"
else
echo "$KTAP_HELPERS file not found [SKIP]"
exit 4
fi
rust_sample_modules=("rust_minimal" "rust_print")
ktap_print_header
for sample in "${rust_sample_modules[@]}"; do
if ! /sbin/modprobe -n -q "$sample"; then
ktap_skip_all "module $sample is not found in /lib/modules/$(uname -r)"
exit "$KSFT_SKIP"
fi
done
ktap_set_plan "${#rust_sample_modules[@]}"
for sample in "${rust_sample_modules[@]}"; do
if /sbin/modprobe -q "$sample"; then
/sbin/modprobe -q -r "$sample"
ktap_test_pass "$sample"
else
ktap_test_fail "$sample"
fi
done
ktap_finished
......@@ -276,7 +276,7 @@ int main(int argc, char *argv[])
if (setpgid(0, 0) != 0)
handle_error("process group");
printf("\n## Create a thread/process/process group hiearchy\n");
printf("\n## Create a thread/process/process group hierarchy\n");
create_processes(num_processes, num_threads, procs);
need_cleanup = 1;
disp_processes(num_processes, procs);
......