Commit 25f3cdf8 authored by Linus Torvalds

Merge tag 'ktest-v3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest

Pull ktest updates from Steven Rostedt.

* tag 'ktest-v3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest:
  ktest: Add README to explain what is in the examples directory
  ktest: Add the snowball.conf example config
  ktest: Add an example config that does cross compiling of several archs
  ktest: Add kvm.conf example config
  ktest: Add useful example configs
  ktest: Add USE_OUTPUT_MIN_CONFIG to avoid prompt on make_min_config
  ktest: Add MIN_CONFIG_TYPE to allow making a minum .config that has network
  ktest: Fix kernelrevision with POST_BUILD
parents 72c04af9 24d0c030
This directory contains example configs for using ktest for various tasks.
The configs still need to be customized for your environment, but the
directory is broken up by task, which makes it easier to understand how
to set up ktest.

The configs are based on real working configs, but they have been modified
and commented to show more generic use cases that are more helpful for
developers.
crosstests.conf - This config shows an example of testing a git repo against
many different architectures. It only does build tests, but it makes
it easy to compile-test different archs. You can download the arch
cross compilers from:
http://kernel.org/pub/tools/crosstool/files/bin/x86_64/
test.conf - A generic example of a config. This is based on an actual config
used to perform real testing.
kvm.conf - An example of a config that is used to test a virtual guest running
on a host.
snowball.conf - An example config that was used to demo ktest.pl against
a snowball ARM board.
include/ - The include directory holds default configs that can be
included into other configs. This is a real-world example that shows how
to reuse configs for various machines or setups. The files here
are included by other config files, where the including config files define
options and variables that make the included config work for the
given environment.
#
# Example config for cross compiling
#
# In this config, it is expected that the tool chains from:
#
# http://kernel.org/pub/tools/crosstool/files/bin/x86_64/
#
# running on an x86_64 system have been downloaded and installed into:
#
# /usr/local/
#
# such that the compiler binaries are something like:
#
# /usr/local/gcc-4.5.2-nolibc/mips-linux/bin/mips-linux-gcc
#
# Some of the archs use gcc-4.5.1 instead of gcc-4.5.2;
# this config uses variables to differentiate them.
#
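# For example (path shown only as an illustration, assuming the same
# install layout as above), an arch that uses gcc-4.5.1, such as frv,
# would have its compiler at:
#
# /usr/local/gcc-4.5.1-nolibc/frv-linux/bin/frv-linux-gcc
#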
# Comments describe some of the options, but full descriptions of
# the options can be found in the samples.conf file.
# ${PWD} is defined by ktest.pl to be the directory that the user
# was in when they executed ktest.pl. It may be better to hardcode the
# path name here. THIS_DIR is the variable used throughout the config file
# in case you want to change it.
THIS_DIR := ${PWD}
# Update the BUILD_DIR option to the location of the git repo you want to test.
BUILD_DIR = ${THIS_DIR}/linux.git
# The build will go into this directory. It will be created when you run the test.
OUTPUT_DIR = ${THIS_DIR}/cross-compile
# The build will be compiled with -j8
BUILD_OPTIONS = -j8
# The test will not stop when it hits a failure.
DIE_ON_FAILURE = 0
# If you want to have ktest.pl store the failure somewhere, uncomment this option
# and change the directory where ktest should store the failures.
#STORE_FAILURES = ${THIS_DIR}/failures
# The log file, called cross.log, is stored in the OUTPUT_DIR.
# If you enable this, you need to create the OUTPUT_DIR yourself; it won't be created for you.
LOG_FILE = ${OUTPUT_DIR}/cross.log
# The log file will be cleared each time you run ktest.
CLEAR_LOG = 1
# As some archs do not build with the defconfig, they have been marked
# to be ignored. If you want to test them anyway, change DO_FAILED to 1.
# If a test that has been marked as DO_FAILED passes, then you should change
# that test to use DO_DEFAULT instead.
DO_FAILED := 0
DO_DEFAULT := 1
# By setting both DO_FAILED and DO_DEFAULT to zero, you can pick a single
# arch that you want to test. (Uncomment RUN and choose your arch.)
#RUN := m32r
# At the bottom of the config file exists a bisect test. You can update that
# test and set DO_FAILED and DO_DEFAULT to zero, and uncomment this variable
# to run the bisect on the arch.
#RUN := bisect
# By default all tests will use gcc 4.5.2. Some tests use 4.5.1
# and they select that in the test.
# Note: GCC_VER is declared as an option and not a variable ('=' instead of ':=').
# This is important. A variable is used only in the config file and, once set,
# it stays that way for the rest of the config file until it is changed again.
# Here we want GCC_VER to be evaluated for each test, as it is used in
# the MAKE_CMD and individual tests may override it. Using '=' instead of ':=' achieves that.
GCC_VER = 4.5.2
MAKE_CMD = PATH=/usr/local/gcc-${GCC_VER}-nolibc/${CROSS}/bin:$PATH CROSS_COMPILE=${CROSS}- make ARCH=${ARCH}
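# A small sketch of the difference (hypothetical option name, commented out
# so it does not affect this config): a variable defined with ':=' is
# expanded while the config file is parsed and keeps that value for the
# rest of the file, whereas an option defined with '=' is evaluated when
# each test runs, so a TEST_START section can override it per test.
#
# variable: value is fixed once the parser reaches this line
#MY_VER := 4.5.2
# option: re-evaluated for each test; a later TEST_START section may override it
#MY_VER = 4.5.1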
# All tests only do builds.
TEST_TYPE = build
# If you want to add configs on top of the defconfig, you can add those configs into
# the add-config file and uncomment this option. This is useful if you want to test
# all cross compiles with PREEMPT set, or TRACING on, etc.
#ADD_CONFIG = ${THIS_DIR}/add-config
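# For example (illustrative content only), the add-config file itself would
# just contain kernel config lines, such as:
#
# CONFIG_PREEMPT=y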
# All tests are using defconfig
BUILD_TYPE = defconfig
# The test names will have the arch and cross compiler used. This will be shown in
# the results.
TEST_NAME = ${ARCH} ${CROSS}
# alpha
TEST_START IF ${RUN} == alpha || ${DO_DEFAULT}
# Notice that CROSS and ARCH are also options and not variables (again '=' instead
# of ':='). This is because TEST_NAME and MAKE_CMD will use them for each test.
# Only options are available during test runs; variables exist only while the
# config file is being parsed.
CROSS = alpha-linux
ARCH = alpha
# arm
TEST_START IF ${RUN} == arm || ${DO_DEFAULT}
CROSS = arm-unknown-linux-gnueabi
ARCH = arm
# blackfin
TEST_START IF ${RUN} == bfin || ${DO_DEFAULT}
CROSS = bfin-uclinux
ARCH = blackfin
BUILD_OPTIONS = -j8 vmlinux
# cris - FAILS?
TEST_START IF ${RUN} == cris || ${RUN} == cris64 || ${DO_FAILED}
CROSS = cris-linux
ARCH = cris
# cris32 - not right arch?
TEST_START IF ${RUN} == cris || ${RUN} == cris32 || ${DO_FAILED}
CROSS = crisv32-linux
ARCH = cris
# ia64
TEST_START IF ${RUN} == ia64 || ${DO_DEFAULT}
CROSS = ia64-linux
ARCH = ia64
# frv
TEST_START IF ${RUN} == frv || ${DO_FAILED}
CROSS = frv-linux
ARCH = frv
GCC_VER = 4.5.1
# h8300 - failed make defconfig??
TEST_START IF ${RUN} == h8300 || ${DO_FAILED}
CROSS = h8300-elf
ARCH = h8300
GCC_VER = 4.5.1
# m68k fails with error?
TEST_START IF ${RUN} == m68k || ${DO_DEFAULT}
CROSS = m68k-linux
ARCH = m68k
# mips64
TEST_START IF ${RUN} == mips || ${RUN} == mips64 || ${DO_DEFAULT}
CROSS = mips64-linux
ARCH = mips
# mips32
TEST_START IF ${RUN} == mips || ${RUN} == mips32 || ${DO_DEFAULT}
CROSS = mips-linux
ARCH = mips
# m32r
TEST_START IF ${RUN} == m32r || ${DO_FAILED}
CROSS = m32r-linux
ARCH = m32r
GCC_VER = 4.5.1
BUILD_OPTIONS = -j8 vmlinux
# parisc64 failed?
TEST_START IF ${RUN} == hppa || ${RUN} == hppa64 || ${DO_FAILED}
CROSS = hppa64-linux
ARCH = parisc
# parisc
TEST_START IF ${RUN} == hppa || ${RUN} == hppa32 || ${DO_FAILED}
CROSS = hppa-linux
ARCH = parisc
# ppc
TEST_START IF ${RUN} == ppc || ${RUN} == ppc32 || ${DO_DEFAULT}
CROSS = powerpc-linux
ARCH = powerpc
# ppc64
TEST_START IF ${RUN} == ppc || ${RUN} == ppc64 || ${DO_DEFAULT}
CROSS = powerpc64-linux
ARCH = powerpc
# s390
TEST_START IF ${RUN} == s390 || ${DO_DEFAULT}
CROSS = s390x-linux
ARCH = s390
# sh
TEST_START IF ${RUN} == sh || ${DO_DEFAULT}
CROSS = sh4-linux
ARCH = sh
# sparc64
TEST_START IF ${RUN} == sparc || ${RUN} == sparc64 || ${DO_DEFAULT}
CROSS = sparc64-linux
ARCH = sparc64
# sparc
TEST_START IF ${RUN} == sparc || ${RUN} == sparc32 || ${DO_DEFAULT}
CROSS = sparc-linux
ARCH = sparc
# xtensa failed
TEST_START IF ${RUN} == xtensa || ${DO_FAILED}
CROSS = xtensa-linux
ARCH = xtensa
# UML
TEST_START IF ${RUN} == uml || ${DO_DEFAULT}
MAKE_CMD = make ARCH=um SUBARCH=x86_64
ARCH = uml
CROSS =
TEST_START IF ${RUN} == x86 || ${RUN} == i386 || ${DO_DEFAULT}
MAKE_CMD = make ARCH=i386
ARCH = i386
CROSS =
TEST_START IF ${RUN} == x86 || ${RUN} == x86_64 || ${DO_DEFAULT}
MAKE_CMD = make ARCH=x86_64
ARCH = x86_64
CROSS =
#################################
# This is a bisect if needed. You need to give it a MIN_CONFIG that
# will be the config file it uses. Basically, just copy the created defconfig
# for the arch someplace and point MIN_CONFIG to it.
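# For example (illustrative shell commands, using the paths defined in this
# config), after the s390 defconfig test above has run once, the .config it
# produced could be copied into place with something like:
#
#   cp ${OUTPUT_DIR}/.config ${THIS_DIR}/min-config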
TEST_START IF ${RUN} == bisect
MIN_CONFIG = ${THIS_DIR}/min-config
CROSS = s390x-linux
ARCH = s390
TEST_TYPE = bisect
BISECT_TYPE = build
BISECT_GOOD = v3.1
BISECT_BAD = v3.2
CHECKOUT = v3.2
#################################
# These defaults are needed to keep ktest.pl from complaining. They are
# ignored because the test does not go past the build. No installing or
# booting of the target images is done.
DEFAULTS
MACHINE = crosstest
SSH_USER = root
BUILD_TARGET = cross
TARGET_IMAGE = image
POWER_CYCLE = cycle
CONSOLE = console
LOCALVERSION = version
GRUB_MENU = grub
REBOOT_ON_ERROR = 0
POWEROFF_ON_ERROR = 0
POWEROFF_ON_SUCCESS = 0
REBOOT_ON_SUCCESS = 0
#
# This example shows the bisect tests (git bisect and config bisect)
#
# The config that includes this file may define a RUN_TEST
# variable that will tell this config what test to run.
# (what to set the TEST option to).
#
DEFAULTS IF NOT DEFINED RUN_TEST
# Requires that hackbench is in the PATH
RUN_TEST := ${SSH} hackbench 50
# Set TEST to 'bisect' to do a normal git bisect. You need
# to modify the options below to make it bisect the exact
# commits you are interested in.
#
TEST_START IF ${TEST} == bisect
TEST_TYPE = bisect
# You must set the commit that was considered good (git bisect good)
BISECT_GOOD = v3.3
# You must set the commit that was considered bad (git bisect bad)
BISECT_BAD = HEAD
# It's best to specify the branch to checkout before starting the bisect.
CHECKOUT = origin/master
# This can be build, boot, or test. Here we are doing a bisect
# that requires running a test to know if the commit was good or bad.
# The test should exit with 0 for good and non-zero for bad, but see
# the BISECT_RET_* options in samples.conf to override this.
BISECT_TYPE = test
TEST = ${RUN_TEST}
# It is usually a good idea to confirm that the GOOD and the BAD
# commits are truly good and bad respectively. Having BISECT_CHECK
# set to 1 will check both that the good commit works and the bad
# commit fails. If you only want to check one or the other,
# set BISECT_CHECK to 'good' or to 'bad'.
BISECT_CHECK = 1
#BISECT_CHECK = good
#BISECT_CHECK = bad
# Usually it's a good idea to specify the exact config you
# want to use throughout the entire bisect. Here we placed
# it in the directory we called ktest.pl from and named it
# 'config-bisect'.
MIN_CONFIG = ${THIS_DIR}/config-bisect
# By default, if we are doing a BISECT_TYPE = test run but the
# build or boot fails, ktest.pl will do a 'git bisect skip'.
# Uncomment the below option to make ktest stop testing on such
# an error.
#BISECT_SKIP = 0
# Now if you had BISECT_SKIP = 0 and the test fails, you can
# examine what happened and then do 'git bisect log > /tmp/replay'
# Set BISECT_REPLAY to /tmp/replay and ktest.pl will run the
# 'git bisect replay /tmp/replay' before continuing the bisect test.
#BISECT_REPLAY = /tmp/replay
# If you used BISECT_REPLAY after the bisect test failed, you may
# not want to continue the bisect on that commit that failed.
# By setting BISECT_START to a new commit, ktest.pl will check out
# that commit after it has performed the 'git bisect replay' but
# before it continues running the bisect test.
#BISECT_START = 2545eb6198e7e1ec50daa0cfc64a4cdfecf24ec9
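# Putting the replay options together (illustrative values only, reusing the
# paths and commit shown above), a resumed bisect might look like:
#
#BISECT_REPLAY = /tmp/replay
#BISECT_START = 2545eb6198e7e1ec50daa0cfc64a4cdfecf24ec9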
# Now if you don't trust ktest.pl to make the decisions for you, then
# set BISECT_MANUAL to 1. This will cause ktest.pl not to decide
# if the commit was good or bad. Instead, it will ask you to tell
# it if the current commit was good. In the meantime, you could
# take the result and load it on any machine you want, run several tests,
# or do whatever you feel like. Then, when you are happy, you can tell
# ktest if you think it was good or not and ktest.pl will continue
# the git bisect. You can even change what commit it is currently at.
#BISECT_MANUAL = 1
# One of the unique tests that ktest does is the config bisect.
# Currently (which hopefully will be fixed soon), the bad config
# must be a superset of the good config. This is because it only
# searches for a config that causes the target to fail. If the
# good config is not a subset of the bad config, or if the target
# fails because a config is missing, then it will not find
# the config for you.
TEST_START IF ${TEST} == config-bisect
TEST_TYPE = config_bisect
# set to build, boot, test
CONFIG_BISECT_TYPE = boot
# Set the config that is considered bad.
CONFIG_BISECT = ${THIS_DIR}/config-bad
# This config is optional. By default it uses the
# MIN_CONFIG as the good config.
CONFIG_BISECT_GOOD = ${THIS_DIR}/config-good
# This file holds defaults for most of the tests. It defines the options that
# are common to most tests and are likely to be shared.
#
# Note, after including this file, a config file may override any option
# with a DEFAULTS OVERRIDE section.
#
# This handles cases where the same machine is used to boot both a 64-bit
# and a 32-bit version. MACHINE is the DNS name used to get to the
# box (usually different for the 64-bit and 32-bit installs), while
# BOX is defined as a variable holding the name of the box
# itself. It is useful for calling scripts that power cycle
# the box, as only one script needs to be created to power cycle it
# even though the box itself has multiple operating systems on it.
# By default, BOX and MACHINE are the same.
DEFAULTS IF NOT DEFINED BOX
BOX := ${MACHINE}
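# For example (hypothetical name), a machine config that includes this file
# could define the box name itself before the INCLUDE line:
#
#BOX := testbox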
# Consider each box to be a 64-bit box, unless the config including this file
# has defined BITS := 32
DEFAULTS IF NOT DEFINED BITS
BITS := 64
DEFAULTS
# THIS_DIR is used throughout the configs and defaults to ${PWD}, which
# is the directory that ktest.pl was called from.
THIS_DIR := ${PWD}
# To organize your configs, it is useful to have each machine save its configs
# into a separate directory.
CONFIG_DIR := ${THIS_DIR}/configs/${MACHINE}
# Reset the log before running each test.
CLEAR_LOG = 1
# As installing kernels usually requires root privilege, the default
# user on the target is root. It is also required that the target
# allows ssh to root from the host without asking for a password.
SSH_USER = root
# For accessing the machine, we will ssh to root@machine.
SSH := ssh ${SSH_USER}@${MACHINE}
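# One common way to set that up (done outside of ktest, shown only as an
# illustration) is to install the host's ssh key on the target, e.g.:
#
#   ssh-copy-id ${SSH_USER}@${MACHINE}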
# Update this. The default here is that ktest will ssh to the target box
# and run a script called 'run-test' located on that box.
TEST = ${SSH} run-test
# Point build dir to the git repo you use
BUILD_DIR = ${THIS_DIR}/linux.git
# Each machine will have its own output build directory.
OUTPUT_DIR = ${THIS_DIR}/build/${MACHINE}
# Yes this config is focused on x86 (but ktest works for other archs too)
BUILD_TARGET = arch/x86/boot/bzImage
TARGET_IMAGE = /boot/vmlinuz-test
# Have a directory for the scripts that reboot and power cycle the boxes.
SCRIPTS_DIR := ${THIS_DIR}/scripts
# You can have each box/machine have a script to power cycle it.
# Name your script <box>-cycle.
POWER_CYCLE = ${SCRIPTS_DIR}/${BOX}-cycle
# This script is used to power off the box.
POWER_OFF = ${SCRIPTS_DIR}/${BOX}-poweroff
# Keep your test kernels separate from your other kernels.
LOCALVERSION = -test
# The /boot/grub/menu.lst is searched for the line:
# title Test Kernel
# and ktest will reboot into that kernel.
# For grub2 or other boot loaders, you need to set REBOOT_TYPE
# to 'script' and define other ways to load the kernel.
# See the snowball.conf example.
#
GRUB_MENU = Test Kernel
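# For reference, a matching menu.lst entry might look like the following
# (the root device and kernel arguments are only an illustration; the
# 'title' line is what ktest searches for, and the kernel name matches the
# TARGET_IMAGE and LOCALVERSION set in this file):
#
#   title Test Kernel
#       root (hd0,0)
#       kernel /boot/vmlinuz-test ro root=/dev/sda1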
# The kernel build will use this option.
BUILD_OPTIONS = -j8
# Keeping the log file with the output dir is convenient.
LOG_FILE = ${OUTPUT_DIR}/${MACHINE}.log
# Each box should have its own minimum configuration.
# See min-config.conf.
MIN_CONFIG = ${CONFIG_DIR}/config-min
# For things like randconfigs, there may be configs you find that
# are already broken, or there may be some configs that you always
# want set. Uncomment ADD_CONFIG and point it to the make config files
# that set the configs you want to keep on (or off) in your build.
# ADD_CONFIG is usually used to add configs to all machines,
# whereas MIN_CONFIG is specific to each machine.
#ADD_CONFIG = ${THIS_DIR}/config-broken ${THIS_DIR}/config-general
# To speed up reboots for bisects and patchcheck, instead of
# waiting 60 seconds for the console to be idle, if this line is
# seen in the console output, ktest will know the good kernel has
# finished rebooting and it will be able to continue the tests.
REBOOT_SUCCESS_LINE = ${MACHINE} login:
# The following are different ways to end the test.
# By setting the variable REBOOT to none, error, fail, or
# something else, ktest will power cycle or reboot the target box
# at the end of the tests.
#
# REBOOT := none
# Don't do anything at the end of the test.
#
# REBOOT := error
# Reboot the box if ktest detects an error
#
# REBOOT := fail
# Do not stop on failure, and after all tests are complete
# power off the box (for both success and error)
# This is good for a run over a weekend when you don't want to waste
# electricity.
#
DEFAULTS IF ${REBOOT} == none
REBOOT_ON_SUCCESS = 0
REBOOT_ON_ERROR = 0
POWEROFF_ON_ERROR = 0
POWEROFF_ON_SUCCESS = 0
DEFAULTS ELSE IF ${REBOOT} == error
REBOOT_ON_SUCCESS = 0
REBOOT_ON_ERROR = 1
POWEROFF_ON_ERROR = 0
POWEROFF_ON_SUCCESS = 0
DEFAULTS ELSE IF ${REBOOT} == fail
REBOOT_ON_SUCCESS = 0
POWEROFF_ON_ERROR = 1
POWEROFF_ON_SUCCESS = 1
POWEROFF_AFTER_HALT = 120
DIE_ON_FAILURE = 0
# Store the failure information into this directory
# such as the .config, dmesg, and build log.
STORE_FAILURES = ${THIS_DIR}/failures
DEFAULTS ELSE
REBOOT_ON_SUCCESS = 1
REBOOT_ON_ERROR = 1
POWEROFF_ON_ERROR = 0
POWEROFF_ON_SUCCESS = 0
#
# This file has some examples for creating a MIN_CONFIG.
# (A .config file that is the minimum for a machine to boot, or
# to boot and make a network connection.)
#
# A MIN_CONFIG is very useful as it is the minimum configuration
# needed to boot a given machine. You can debug someone else's
# .config by only setting the configs in your MIN_CONFIG. The closer
# your MIN_CONFIG is to the true minimum set of configs needed to
# boot your machine, the closer the config you test with will be
# to the user's config that had the failure.
#
# The make_min_config test allows you to create a MIN_CONFIG that
# is truly the minimum set of configs needed to boot a box.
#
# In this example, the final config will reside in
# ${CONFIG_DIR}/config-new-min and ${CONFIG_DIR}/config-new-min-net.
# Just move one to the location you have set for MIN_CONFIG.
#
# The first test creates a MIN_CONFIG that will be the minimum
# configuration to boot ${MACHINE} and be able to ssh to it.
#
# The second test creates a MIN_CONFIG that will only boot
# the target and most likely will not let you ssh to it. (Notice
# how the second test uses the first test's result as its starting point.
# This is because the second test config is a subset of the first).
#
# The ${CONFIG_DIR}/config-skip (and -net) will hold the configs
# that ktest.pl found would not boot the target without them set.
# The config-new-min holds configs that ktest.pl could not test
# directly because another config that was needed to boot the box
# selected them. Sometimes it is possible that this file will hold
# the true minimum configuration. You can test whether this is
# the case by running the boot test with BOOT_TYPE = allnoconfig and
# setting the MIN_CONFIG to ${CONFIG_DIR}/config-skip. If the
# machine still boots, then you can use the config-skip as your MIN_CONFIG.
# (A commented sketch of such a verification test appears after the two
# min-config tests below.)
#
# These tests can run for several hours (and perhaps days).
# It's OK to kill the test with Ctrl-C. If you restart without
# modifying this config, ktest.pl will notice that the config-new-min(-net)
# exists and will use that instead as the starting point.
# USE_OUTPUT_MIN_CONFIG is set to 1 to keep ktest.pl from asking
# you if you want to use the OUTPUT_MIN_CONFIG as the starting point.
# Using the OUTPUT_MIN_CONFIG as the starting point allows ktest.pl to
# start almost where it left off.
#
TEST_START IF ${TEST} == min-config
TEST_TYPE = make_min_config
OUTPUT_MIN_CONFIG = ${CONFIG_DIR}/config-new-min-net
IGNORE_CONFIG = ${CONFIG_DIR}/config-skip-net
MIN_CONFIG_TYPE = test
TEST = ${SSH} echo hi
USE_OUTPUT_MIN_CONFIG = 1
TEST_START IF ${TEST} == min-config && ${MULTI}
TEST_TYPE = make_min_config
OUTPUT_MIN_CONFIG = ${CONFIG_DIR}/config-new-min
IGNORE_CONFIG = ${CONFIG_DIR}/config-skip
MIN_CONFIG = ${CONFIG_DIR}/config-new-min-net
USE_OUTPUT_MIN_CONFIG = 1
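# A commented sketch of the verification test mentioned above (the test
# name 'check-skip' is made up for this example):
#
#TEST_START IF ${TEST} == check-skip
#TEST_TYPE = boot
#BUILD_TYPE = allnoconfig
#MIN_CONFIG = ${CONFIG_DIR}/config-skip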
# patchcheck.conf
#
# This contains a test that takes two git commits and will test each
# commit between the two. The build test will look at what files the
# commit has touched, and if any of those files produce a warning, then
# the build will fail.
# PATCH_START is the commit to begin with and PATCH_END is the commit
# to end with (inclusive). This is similar to doing a git rebase -i PATCH_START~1
# and then testing each commit and doing a git rebase --continue.
# You can use a SHA1, a git tag, or anything that git will accept for a checkout.
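# Roughly the manual workflow that this test automates (illustrative git
# commands only):
#
#   git rebase -i ${PATCH_START}~1   # mark each commit as 'edit'
#   # build, check for warnings in the files the commit touched, install, test
#   git rebase --continue            # repeat until ${PATCH_END} is reached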
PATCH_START := HEAD~3
PATCH_END := HEAD
# Change PATCH_CHECKOUT to be the branch you want to test. The test will
# do a git checkout of this branch before starting. Obviously both
# PATCH_START and PATCH_END must be in this branch (and PATCH_START must
# be contained by PATCH_END).
PATCH_CHECKOUT := test/branch
# Usually it's a good idea to have a set config to use for testing individual
# patches.
PATCH_CONFIG := ${CONFIG_DIR}/config-patchcheck
# Change PATCH_TEST to run some test for each patch. For each commit that is
# tested, after it is built and installed on the test machine, this command
# will be executed. Usually this means sshing to the target box and
# running some test scripts. If you just want to boot test your patches,
# comment PATCH_TEST out.
PATCH_TEST := ${SSH} "/usr/local/bin/ktest-test-script"
DEFAULTS IF DEFINED PATCH_TEST
PATCH_TEST_TYPE := test
DEFAULTS ELSE
PATCH_TEST_TYPE := boot
# If for some reason a file that one of your patches touches produces a
# warning that you do not care about, set IGNORE_WARNINGS to those commits
# (space delimited).
#IGNORE_WARNINGS = 39eaf7ef884dcc44f7ff1bac803ca2a1dcf43544 6edb2a8a385f0cdef51dae37ff23e74d76d8a6ce
# If you are running a multi test and the first test failed on, say, the
# fifth patch, you can restart from that patch by setting PATCH_START1.
# This will make the first test start from that commit instead of the
# PATCH_START commit.
# Note, do not change this option here. Just define PATCH_START1 in the
# top config (the one you pass to ktest.pl) and this file will use it;
# otherwise PATCH_START is used when PATCH_START1 is not defined.
DEFAULTS IF NOT DEFINED PATCH_START1
PATCH_START1 := ${PATCH_START}
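# For example (hypothetical commit id), the top config passed to ktest.pl
# could simply contain:
#
#PATCH_START1 := 1234abcd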
TEST_START IF ${TEST} == patchcheck
TEST_TYPE = patchcheck
MIN_CONFIG = ${PATCH_CONFIG}
TEST = ${PATCH_TEST}
PATCHCHECK_TYPE = ${PATCH_TEST_TYPE}
PATCHCHECK_START = ${PATCH_START1}
PATCHCHECK_END = ${PATCH_END}
CHECKOUT = ${PATCH_CHECKOUT}
TEST_START IF ${TEST} == patchcheck && ${MULTI}
TEST_TYPE = patchcheck
MIN_CONFIG = ${PATCH_CONFIG}
TEST = ${PATCH_TEST}
PATCHCHECK_TYPE = ${PATCH_TEST_TYPE}
PATCHCHECK_START = ${PATCH_START}
PATCHCHECK_END = ${PATCH_END}
CHECKOUT = ${PATCH_CHECKOUT}
# Use multi to test different compilers?
MAKE_CMD = CC=gcc-4.5.1 make
#
# This is an example of various tests that you can run
#
# The variable TEST can be one of boot, build, randconfig, or test.
#
# Note that TEST is a variable created with ':=' and only exists
# while the config is processed (not during the tests themselves).
#
# The TEST option (defined with '=') is used to tell ktest.pl
# what test to run after a successful boot. The TEST option
# persists into the test runs.
#
# The config that includes this file may define a BOOT_TYPE
# variable that tells this config what type of boot test to run.
# If it's not defined, the below DEFAULTS will set the default
# to 'oldconfig'.
#
DEFAULTS IF NOT DEFINED BOOT_TYPE
BOOT_TYPE := oldconfig
# The config that includes this file may define a RUN_TEST
# variable that will tell this config what test to run.
# (what to set the TEST option to).
#
DEFAULTS IF NOT DEFINED RUN_TEST
# Requires that hackbench is in the PATH
RUN_TEST := ${SSH} hackbench 50
# If TEST is set to 'boot' then just build a kernel and boot
# the target.
TEST_START IF ${TEST} == boot
TEST_TYPE = boot
# Notice how we set the BUILD_TYPE option to the BOOT_TYPE variable.
BUILD_TYPE = ${BOOT_TYPE}
# Do not do a make mrproper.
BUILD_NOCLEAN = 1
# If you only want to build the kernel, and perhaps install
# and test it yourself, then just set TEST to build.
TEST_START IF ${TEST} == build
TEST_TYPE = build
BUILD_TYPE = ${BOOT_TYPE}
BUILD_NOCLEAN = 1
# Build, install, boot and test with a randconfig 10 times.
# It is important that you have set MIN_CONFIG in the config
# that includes this file; otherwise it is likely that the
# randconfig will not have the necessary configs needed to
# boot your box. This version of the test requires a min
# config that has enough set to make sure the target has working
# networking.
TEST_START ITERATE 10 IF ${TEST} == randconfig
MIN_CONFIG = ${CONFIG_DIR}/config-min-net
TEST_TYPE = test
BUILD_TYPE = randconfig
TEST = ${RUN_TEST}
# This is the same as above, but only tests to a boot prompt.
# The MIN_CONFIG used here does not need to have networking
# working.
TEST_START ITERATE 10 IF ${TEST} == randconfig && ${MULTI}
TEST_TYPE = boot
BUILD_TYPE = randconfig
MIN_CONFIG = ${CONFIG_DIR}/config-min
MAKE_CMD = make
# This builds, installs, boots and tests the target.
TEST_START IF ${TEST} == test
TEST_TYPE = test
BUILD_TYPE = ${BOOT_TYPE}
TEST = ${RUN_TEST}
BUILD_NOCLEAN = 1
#
# This config is an example of using ktest.pl with a kvm guest
#
# The guest is called 'Guest' and this would be something that
# could be run on the host to test a virtual machine target.
MACHINE = Guest
# Use virsh to read the serial console of the guest
CONSOLE = virsh console ${MACHINE}
#*************************************#
# This part is the same as test.conf #
#*************************************#
# The include files will set up the type of test to run. Just set TEST to
# the test you want to run.
#
# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
#
# See the include/*.conf files that define these tests
#
TEST := patchcheck
# Some tests may have more than one test to run. Define MULTI := 1 to run
# the extra tests.
MULTI := 0
# In case you want to differentiate which type of system you are testing
BITS := 64
# REBOOT = none, error, fail, empty
# See include/defaults.conf
REBOOT := empty
# The defaults file will set up various settings that can be used by all
# machine configs.
INCLUDE include/defaults.conf
#*************************************#
# Now we are different from test.conf #
#*************************************#
# The example here assumes that Guest is running a Fedora release
# that uses dracut for its initramfs. The POST_INSTALL will be executed
# after the install of the kernel and modules is complete.
#
POST_INSTALL = ${SSH} /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
# Guests sometimes get stuck on reboot. We wait 3 seconds after running
# the reboot command and then do a full power-cycle of the guest.
# This forces the guest to restart.
#
POWERCYCLE_AFTER_REBOOT = 3
# We do the same after the halt command, but this time we wait 20 seconds.
POWEROFF_AFTER_HALT = 20
# As the defaults.conf file already defines a POWER_CYCLE option,
# and options cannot be defined more than once in the same section
# (all DEFAULTS sections are considered the same), we use
# DEFAULTS OVERRIDE to tell ktest.pl to ignore the previously defined
# value for the options set in the OVERRIDE section.
#
DEFAULTS OVERRIDE
# Instead of using the default POWER_CYCLE option defined in
# defaults.conf, we use virsh to cycle it. To do so, we destroy
# the guest, wait 5 seconds, and then start it up again.
# Crude, but effective.
#
POWER_CYCLE = virsh destroy ${MACHINE}; sleep 5; virsh start ${MACHINE}
DEFAULTS
# The following files each handle a different test case.
# Having them included allows you to set up more than one machine and share
# the same tests.
INCLUDE include/patchcheck.conf
INCLUDE include/tests.conf
INCLUDE include/bisect.conf
INCLUDE include/min-config.conf
# This example was used to boot the snowball ARM board.
# See http://people.redhat.com/srostedt/ktest-embedded-2012/
# PWD is a ktest.pl variable that resolves to the working
# directory that ktest.pl was executed in.
# THIS_DIR is automatically assigned the PWD of the path that generated
# the config file. It is best to use this variable when assigning other
# directory paths within this directory. This allows you to easily
# move the test cases to other locations or to other machines.
#
THIS_DIR := /home/rostedt/work/demo/ktest-embed
LOG_FILE = ${OUTPUT_DIR}/snowball.log
CLEAR_LOG = 1
MAKE_CMD = PATH=/usr/local/gcc-4.5.2-nolibc/arm-unknown-linux-gnueabi/bin:$PATH CROSS_COMPILE=arm-unknown-linux-gnueabi- make ARCH=arm
ADD_CONFIG = ${THIS_DIR}/addconfig
SCP_TO_TARGET = echo "don't do scp"
TFTPBOOT := /var/lib/tftpboot
TFTPDEF := ${TFTPBOOT}/snowball-default
TFTPTEST := ${OUTPUT_DIR}/${BUILD_TARGET}
SWITCH_TO_GOOD = cp ${TFTPDEF} ${TARGET_IMAGE}
SWITCH_TO_TEST = cp ${TFTPTEST} ${TARGET_IMAGE}
# Define each test with TEST_START
# The config options below it will override the defaults
TEST_START SKIP
TEST_TYPE = boot
BUILD_TYPE = u8500_defconfig
BUILD_NOCLEAN = 1
TEST_START
TEST_TYPE = make_min_config
OUTPUT_MIN_CONFIG = ${THIS_DIR}/config.newmin
START_MIN_CONFIG = ${THIS_DIR}/config.orig
IGNORE_CONFIG = ${THIS_DIR}/config.ignore
BUILD_NOCLEAN = 1
DEFAULTS
LOCALVERSION = -test
POWER_CYCLE = echo use the thumb luke; read a
CONSOLE = cat ${THIS_DIR}/snowball-cat
REBOOT_TYPE = script
SSH_USER = root
BUILD_OPTIONS = -j8 uImage
BUILD_DIR = ${THIS_DIR}/linux.git
OUTPUT_DIR = ${THIS_DIR}/snowball-build
MACHINE = snowball
TARGET_IMAGE = /var/lib/tftpboot/snowball-image
BUILD_TARGET = arch/arm/boot/uImage
#
# Generic config for a machine
#
# Name your machine (the DNS name, what you ssh to)
MACHINE = foo
# BOX can be different from foo if the machine has
# multiple partitions with different systems installed. For example,
# you may have an i386 and an x86_64 installation on a test box.
# If this is the case, MACHINE defines the way to connect to the
# machine, which may differ depending on which system the machine
# is booted into. BOX is used for the scripts that reboot and power cycle
# the machine, where it does not matter which system the machine boots into.
#
#BOX := bar
# Define a way to read the console
CONSOLE = stty -F /dev/ttyS0 115200 parodd; cat /dev/ttyS0
# The include files will set up the type of test to run. Just set TEST to
# the test you want to run.
#
# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
#
# See the include/*.conf files that define these tests
#
TEST := patchcheck
# Some tests may have more than one test to run. Define MULTI := 1 to run
# the extra tests.
MULTI := 0
# In case you want to differentiate which type of system you are testing
BITS := 64
# REBOOT = none, error, fail, empty
# See include/defaults.conf
REBOOT := empty
# The defaults file will set up various settings that can be used by all
# machine configs.
INCLUDE include/defaults.conf
# In case you need to add a patch for a bisect or something
#PRE_BUILD = patch -p1 < ${THIS_DIR}/fix.patch
# Reset the repo after the build and remove all 'test' modules from the target
# Notice that DO_POST_BUILD is a variable (defined by ':=') and POST_BUILD
# is the option (defined by '=')
DO_POST_BUILD := git reset --hard
POST_BUILD = ${SSH} 'rm -rf /lib/modules/*-test*'; ${DO_POST_BUILD}
# The following files each handle a different test case.
# Having them included allows you to set up more than one machine and share
# the same tests.
INCLUDE include/patchcheck.conf
INCLUDE include/tests.conf
INCLUDE include/bisect.conf
INCLUDE include/min-config.conf
@@ -39,6 +39,7 @@ my %default = (
    "CLEAR_LOG" => 0,
    "BISECT_MANUAL" => 0,
    "BISECT_SKIP" => 1,
    "MIN_CONFIG_TYPE" => "boot",
    "SUCCESS_LINE" => "login:",
    "DETECT_TRIPLE_FAULT" => 1,
    "NO_INSTALL" => 0,
@@ -66,6 +67,7 @@ my %default = (
my $ktest_config;
my $version;
my $have_version = 0;
my $machine;
my $ssh_user;
my $tmpdir;
@@ -106,6 +108,8 @@ my $minconfig;
my $start_minconfig;
my $start_minconfig_defined;
my $output_minconfig;
my $minconfig_type;
my $use_output_minconfig;
my $ignore_config;
my $ignore_errors;
my $addconfig;
@@ -205,6 +209,8 @@ my %option_map = (
    "MIN_CONFIG" => \$minconfig,
    "OUTPUT_MIN_CONFIG" => \$output_minconfig,
    "START_MIN_CONFIG" => \$start_minconfig,
    "MIN_CONFIG_TYPE" => \$minconfig_type,
    "USE_OUTPUT_MIN_CONFIG" => \$use_output_minconfig,
    "IGNORE_CONFIG" => \$ignore_config,
    "TEST" => \$run_test,
    "ADD_CONFIG" => \$addconfig,
@@ -1702,10 +1708,12 @@ sub install {
sub get_version {
    # get the release name
    return if ($have_version);
    doprint "$make kernelrelease ... ";
    $version = `$make kernelrelease | tail -1`;
    chomp($version);
    doprint "$version\n";
    $have_version = 1;
}
sub start_monitor_and_boot {
@@ -1828,6 +1836,9 @@ sub build {
    my $save_no_reboot = $no_reboot;
    $no_reboot = 1;
    # Calculate a new version from here.
    $have_version = 0;
    if (defined($pre_build)) {
        my $ret = run_command $pre_build;
        if (!$ret && defined($pre_build_die) &&
@@ -1887,6 +1898,9 @@ sub build {
    undef $redirect;
    if (defined($post_build)) {
        # Because a post build may change the kernel version
        # do it now.
        get_version;
        my $ret = run_command $post_build;
        if (!$ret && defined($post_build_die) &&
            $post_build_die) {
@@ -3119,6 +3133,12 @@ sub test_this_config {
sub make_min_config {
    my ($i) = @_;
    my $type = $minconfig_type;
    if ($type ne "boot" && $type ne "test") {
        fail "Invalid MIN_CONFIG_TYPE '$minconfig_type'\n" .
            " make_min_config works only with 'boot' and 'test'\n" and return;
    }
    if (!defined($output_minconfig)) {
        fail "OUTPUT_MIN_CONFIG not defined" and return;
    }
@@ -3128,9 +3148,16 @@ sub make_min_config {
    # that instead.
    if (-f $output_minconfig && !$start_minconfig_defined) {
        print "$output_minconfig exists\n";
        if (!defined($use_output_minconfig)) {
            if (read_yn " Use it as minconfig?") {
                $start_minconfig = $output_minconfig;
            }
        } elsif ($use_output_minconfig > 0) {
            doprint "Using $output_minconfig as MIN_CONFIG\n";
            $start_minconfig = $output_minconfig;
        } else {
            doprint "Set to still use MIN_CONFIG as starting point\n";
        }
    }
    if (!defined($start_minconfig)) {
@@ -3278,6 +3305,11 @@ sub make_min_config {
        build "oldconfig" or $failed = 1;
        if (!$failed) {
            start_monitor_and_boot or $failed = 1;
            if ($type eq "test" && !$failed) {
                do_run_test or $failed = 1;
            }
            end_monitor;
        }
@@ -3474,6 +3506,8 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
    $no_reboot = 1;
    $reboot_success = 0;
    $have_version = 0;
    $iteration = $i;
    my $makecmd = set_test_option("MAKE_CMD", $i);
@@ -1105,10 +1105,26 @@
# and will not be tested again in later runs.
# (optional)
#
# MIN_CONFIG_TYPE can be either 'boot' or 'test'. With 'boot' it will
# test if the created config can just boot the machine. If this is
# set to 'test', then the TEST option must be defined and the created
# config will not only boot the target, but also make sure that the
# config lets the test succeed. This is useful to make sure the final
# config that is generated allows network activity (ssh).
# (optional)
#
# USE_OUTPUT_MIN_CONFIG: set this to 1 if you do not want to be prompted
# about using the OUTPUT_MIN_CONFIG as the MIN_CONFIG starting
# point. Set it to 0 if you want to always just use the given MIN_CONFIG.
# If it is not defined, it will prompt you to pick which config
# to start with (MIN_CONFIG or OUTPUT_MIN_CONFIG).
#
# Example:
#
# TEST_TYPE = make_min_config
# OUTPUT_MIN_CONFIG = /path/to/config-new-min
# START_MIN_CONFIG = /path/to/config-min
# IGNORE_CONFIG = /path/to/config-tested
# MIN_CONFIG_TYPE = test
# TEST = ssh ${USER}@${MACHINE} echo hi
#