Commit 937d6eef authored by Linus Torvalds

Merge tag 'docs-5.5a' of git://git.lwn.net/linux

Pull Documentation updates from Jonathan Corbet:
 "Here are the main documentation changes for 5.5:

   - Various kerneldoc script enhancements.

   - More RST conversions; those are slowing down as we run out of
     things to convert, but we're a ways from done still.

   - Dan's "maintainer profile entry" work landed at last. Now we just
     need to get maintainers to fill in the profiles...

   - A reworking of the parallel build setup to work better with a
     variety of systems (and to not take over huge systems entirely in
     particular).

   - The MAINTAINERS file is now converted to RST during the build.
     Hopefully nobody ever tries to print this thing, or they will need
     to load a lot of paper.

   - A script and documentation making it easy for maintainers to add
     Link: tags at commit time.

  Also included is the removal of a bunch of spurious CR characters"

* tag 'docs-5.5a' of git://git.lwn.net/linux: (91 commits)
  docs: remove a bunch of stray CRs
  docs: fix up the maintainer profile document
  libnvdimm, MAINTAINERS: Maintainer Entry Profile
  Maintainer Handbook: Maintainer Entry Profile
  MAINTAINERS: Reclaim the P: tag for Maintainer Entry Profile
  docs, parallelism: Rearrange how jobserver reservations are made
  docs, parallelism: Do not leak blocking mode to other readers
  docs, parallelism: Fix failure path and add comment
  Documentation: Remove bootmem_debug from kernel-parameters.txt
  Documentation: security: core.rst: fix warnings
  Documentation/process/howto/kokr: Update for 4.x -> 5.x versioning
  Documentation/translation: Use Korean for Korean translation title
  docs/memory-barriers.txt: Remove remaining references to mmiowb()
  docs/memory-barriers.txt/kokr: Update I/O section to be clearer about CPU vs thread
  docs/memory-barriers.txt/kokr: Fix style, spacing and grammar in I/O section
  Documentation/kokr: Kill all references to mmiowb()
  docs/memory-barriers.txt/kokr: Rewrite "KERNEL I/O BARRIER EFFECTS" section
  docs: Add initial documentation for devfreq
  Documentation: Document how to get links with git am
  docs: Add request_irq() documentation
  ...
parents 2c97b5ae 36bb9778
@@ -156,6 +156,7 @@ Mark Brown <broonie@sirena.org.uk>
 Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
......
@@ -1875,8 +1875,9 @@ S: The Netherlands
 N: Martin Kepplinger
 E: martink@posteo.de
-E: martin.kepplinger@ginzinger.com
+E: martin.kepplinger@puri.sm
 W: http://www.martinkepplinger.com
+P: 4096R/5AB387D3 F208 2B88 0F9E 4239 3468 6E3F 5003 98DF 5AB3 87D3
 D: mma8452 accelerators iio driver
 D: pegasus_notetaker input driver
 D: Kernel fixes and cleanups
......
@@ -13,7 +13,7 @@ endif
 SPHINXBUILD = sphinx-build
 SPHINXOPTS =
 SPHINXDIRS = .
-_SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(srctree)/Documentation/*/conf.py))
+_SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/index.rst,%,$(wildcard $(srctree)/Documentation/*/index.rst))
 SPHINX_CONF = conf.py
 PAPER =
 BUILDDIR = $(obj)/output
@@ -33,8 +33,6 @@ ifeq ($(HAVE_SPHINX),0)
 else # HAVE_SPHINX
-export SPHINXOPTS = $(shell perl -e 'open IN,"sphinx-build --version 2>&1 |"; while (<IN>) { if (m/([\d\.]+)/) { print "-jauto" if ($$1 >= "1.7") } ;} close IN')
 # User-friendly check for pdflatex and latexmk
 HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
 HAVE_LATEXMK := $(shell if which latexmk >/dev/null 2>&1; then echo 1; else echo 0; fi)
@@ -67,6 +65,8 @@ quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
 cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media $2 && \
 PYTHONDONTWRITEBYTECODE=1 \
 BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+$(PYTHON) $(srctree)/scripts/jobserver-exec \
+$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
 $(SPHINXBUILD) \
 -b $2 \
 -c $(abspath $(srctree)/$(src)) \
......
@@ -56,7 +56,7 @@ setid capabilities from the application completely and refactor the process
 spawning semantics in the application (e.g. by using a privileged helper program
 to do process spawning and UID/GID transitions). Unfortunately, there are a
 number of semantics around process spawning that would be affected by this, such
-as fork() calls where the program doesn???t immediately call exec() after the
+as fork() calls where the program doesn't immediately call exec() after the
 fork(), parent processes specifying custom environment variables or command line
 args for spawned child processes, or inheritance of file handles across a
 fork()/exec(). Because of this, as solution that uses a privileged helper in
@@ -72,7 +72,7 @@ own user namespace, and only approved UIDs/GIDs could be mapped back to the
 initial system user namespace, affectively preventing privilege escalation.
 Unfortunately, it is not generally feasible to use user namespaces in isolation,
 without pairing them with other namespace types, which is not always an option.
-Linux checks for capabilities based off of the user namespace that ???owns??? some
+Linux checks for capabilities based off of the user namespace that "owns" some
 entity. For example, Linux has the notion that network namespaces are owned by
 the user namespace in which they were created. A consequence of this is that
 capability checks for access to a given network namespace are done by checking
......
@@ -1120,8 +1120,9 @@ PAGE_SIZE multiple when read back.
 Best-effort memory protection. If the memory usage of a
 cgroup is within its effective low boundary, the cgroup's
-memory won't be reclaimed unless memory can be reclaimed
-from unprotected cgroups. Above the effective low boundary (or
+memory won't be reclaimed unless there is no reclaimable
+memory available in unprotected cgroups.
+Above the effective low boundary (or
 effective min boundary if it is higher), pages are reclaimed
 proportionally to the overage, reducing reclaim pressure for
 smaller overages.
@@ -1925,7 +1926,7 @@ Cpuset Interface Files
 It accepts only the following input values when written to.
-"root" - a paritition root
+"root" - a partition root
 "member" - a non-root member of a partition
 When set to be a partition root, the current cgroup is the
......
-==============================================================
-Usage of the new open sourced rbu (Remote BIOS Update) driver
-==============================================================
+=========================================
+Dell Remote BIOS Update driver (dell_rbu)
+=========================================
 Purpose
 =======
-Document demonstrating the use of the Dell Remote BIOS Update driver.
+Document demonstrating the use of the Dell Remote BIOS Update driver
 for updating BIOS images on Dell servers and desktops.
 Scope
@@ -37,7 +37,7 @@ maintains a link list of packets for reading them back.
 If the dell_rbu driver is unloaded all the allocated memory is freed.
-The rbu driver needs to have an application (as mentioned above)which will
+The rbu driver needs to have an application (as mentioned above) which will
 inform the BIOS to enable the update in the next system reboot.
 The user should not unload the rbu driver after downloading the BIOS image
@@ -71,7 +71,7 @@ be downloaded. It is done as below::
 echo XXXX > /sys/devices/platform/dell_rbu/packet_size
 In the packet update mechanism, the user needs to create a new file having
-packets of data arranged back to back. It can be done as follows
+packets of data arranged back to back. It can be done as follows:
 The user creates packets header, gets the chunk of the BIOS image and
 places it next to the packetheader; now, the packetheader + BIOS image chunk
 added together should match the specified packet_size. This makes one
@@ -114,7 +114,7 @@ The entries can be recreated by doing the following::
 echo init > /sys/devices/platform/dell_rbu/image_type
-.. note:: echoing init in image_type does not change it original value.
+.. note:: echoing init in image_type does not change its original value.
 Also the driver provides /sys/devices/platform/dell_rbu/data readonly file to
 read back the image downloaded.
......
@@ -9,6 +9,7 @@ Device Mapper
 cache
 delay
 dm-crypt
+dm-dust
 dm-flakey
 dm-init
 dm-integrity
......
@@ -57,60 +57,61 @@ configure specific aspects of kernel behavior to your liking.
 .. toctree::
 :maxdepth: 1
-initrd
-cgroup-v2
-cgroup-v1/index
-serial-console
-braille-console
-parport
-md
-module-signing
-rapidio
-sysrq
-unicode
-vga-softcursor
-binfmt-misc
-mono
-java
-ras
-bcache
-blockdev/index
-ext4
-binderfs
-cifs/index
-xfs
-jfs
-ufs
-pm/index
-thunderbolt
-LSM/index
-mm/index
-namespaces/index
-perf-security
 acpi/index
 aoe/index
+auxdisplay/index
+bcache
+binderfs
+binfmt-misc
+blockdev/index
+braille-console
 btmrvl
+cgroup-v1/index
+cgroup-v2
+cifs/index
 clearing-warn-once
 cpu-load
 cputopology
+dell_rbu
 device-mapper/index
 efi-stub
+ext4
 gpio/index
 highuid
 hw_random
+initrd
 iostats
+java
+jfs
 kernel-per-CPU-kthreads
 laptops/index
-auxdisplay/index
 lcd-panel-cgram
 ldm
 lockup-watchdogs
+LSM/index
+md
+mm/index
+module-signing
+mono
+namespaces/index
 numastat
+parport
+perf-security
+pm/index
 pnp
+rapidio
+ras
 rtc
+serial-console
 svga
-wimax/index
+sysrq
+thunderbolt
+ufs
+unicode
+vga-softcursor
 video-output
+wimax/index
+xfs
 .. only:: subproject and html
......
@@ -46,78 +46,79 @@ each snapshot of your disk statistics.
 In 2.4, the statistics fields are those after the device name. In
 the above example, the first field of statistics would be 446216.
 By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll
-find just the eleven fields, beginning with 446216. If you look at
-``/proc/diskstats``, the eleven fields will be preceded by the major and
+find just the 15 fields, beginning with 446216. If you look at
+``/proc/diskstats``, the 15 fields will be preceded by the major and
 minor device numbers, and device name. Each of these formats provides
-eleven fields of statistics, each meaning exactly the same things.
+15 fields of statistics, each meaning exactly the same things.
 All fields except field 9 are cumulative since boot. Field 9 should
 go to zero as I/Os complete; all others only increase (unless they
-overflow and wrap). Yes, these are (32-bit or 64-bit) unsigned long
-(native word size) numbers, and on a very busy or long-lived system they
-may wrap. Applications should be prepared to deal with that; unless
-your observations are measured in large numbers of minutes or hours,
-they should not wrap twice before you notice them.
+overflow and wrap). Wrapping might eventually occur on a very busy
+or long-lived system; so applications should be prepared to deal with
+it. Regarding wrapping, the types of the fields are either unsigned
+int (32 bit) or unsigned long (32-bit or 64-bit, depending on your
+machine) as noted per-field below. Unless your observations are very
+spread in time, these fields should not wrap twice before you notice it.
 Each set of stats only applies to the indicated device; if you want
 system-wide stats you'll have to find all the devices and sum them all up.
-Field 1 -- # of reads completed
+Field 1 -- # of reads completed (unsigned long)
 This is the total number of reads completed successfully.
-Field 2 -- # of reads merged, field 6 -- # of writes merged
+Field 2 -- # of reads merged, field 6 -- # of writes merged (unsigned long)
 Reads and writes which are adjacent to each other may be merged for
 efficiency. Thus two 4K reads may become one 8K read before it is
 ultimately handed to the disk, and so it will be counted (and queued)
 as only one I/O. This field lets you know how often this was done.
-Field 3 -- # of sectors read
+Field 3 -- # of sectors read (unsigned long)
 This is the total number of sectors read successfully.
-Field 4 -- # of milliseconds spent reading
+Field 4 -- # of milliseconds spent reading (unsigned int)
 This is the total number of milliseconds spent by all reads (as
 measured from __make_request() to end_that_request_last()).
-Field 5 -- # of writes completed
+Field 5 -- # of writes completed (unsigned long)
 This is the total number of writes completed successfully.
-Field 6 -- # of writes merged
+Field 6 -- # of writes merged (unsigned long)
 See the description of field 2.
-Field 7 -- # of sectors written
+Field 7 -- # of sectors written (unsigned long)
 This is the total number of sectors written successfully.
-Field 8 -- # of milliseconds spent writing
+Field 8 -- # of milliseconds spent writing (unsigned int)
 This is the total number of milliseconds spent by all writes (as
 measured from __make_request() to end_that_request_last()).
-Field 9 -- # of I/Os currently in progress
+Field 9 -- # of I/Os currently in progress (unsigned int)
 The only field that should go to zero. Incremented as requests are
 given to appropriate struct request_queue and decremented as they finish.
-Field 10 -- # of milliseconds spent doing I/Os
+Field 10 -- # of milliseconds spent doing I/Os (unsigned int)
 This field increases so long as field 9 is nonzero.
 Since 5.0 this field counts jiffies when at least one request was
 started or completed. If request runs more than 2 jiffies then some
 I/O time will not be accounted unless there are other requests.
-Field 11 -- weighted # of milliseconds spent doing I/Os
+Field 11 -- weighted # of milliseconds spent doing I/Os (unsigned int)
 This field is incremented at each I/O start, I/O completion, I/O
 merge, or read of these stats by the number of I/Os in progress
 (field 9) times the number of milliseconds spent doing I/O since the
 last update of this field. This can provide an easy measure of both
 I/O completion time and the backlog that may be accumulating.
-Field 12 -- # of discards completed
+Field 12 -- # of discards completed (unsigned long)
 This is the total number of discards completed successfully.
-Field 13 -- # of discards merged
+Field 13 -- # of discards merged (unsigned long)
 See the description of field 2
-Field 14 -- # of sectors discarded
+Field 14 -- # of sectors discarded (unsigned long)
 This is the total number of sectors discarded successfully.
-Field 15 -- # of milliseconds spent discarding
+Field 15 -- # of milliseconds spent discarding (unsigned int)
 This is the total number of milliseconds spent by all discards (as
 measured from __make_request() to end_that_request_last()).
......
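As an illustrative aside (not part of the diff above), a minimal userspace sketch of reading the first four of these counters; the device name ``sda`` and the printed labels are assumptions:

.. code-block:: c

   /* Hedged sketch: parse the first four fields of /sys/block/sda/stat. */
   #include <stdio.h>

   int main(void)
   {
           unsigned long reads, read_merges, sectors_read;
           unsigned int read_ms;
           FILE *f = fopen("/sys/block/sda/stat", "r"); /* device name is an assumption */

           if (!f)
                   return 1;
           /* Fields 1-4: reads completed, reads merged, sectors read, ms reading. */
           if (fscanf(f, "%lu %lu %lu %u",
                      &reads, &read_merges, &sectors_read, &read_ms) == 4)
                   printf("reads=%lu merged=%lu sectors=%lu read_ms=%u\n",
                          reads, read_merges, sectors_read, read_ms);
           fclose(f);
           return 0;
   }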
@@ -437,8 +437,6 @@
 no delay (0).
 Format: integer
-bootmem_debug [KNL] Enable bootmem allocator debug messages.
 bert_disable [ACPI]
 Disable BERT OS support on buggy BIOSes.
@@ -983,12 +981,10 @@
 earlycon= [KNL] Output early console device and options.
-[ARM64] The early console is determined by the
-stdout-path property in device tree's chosen node,
-or determined by the ACPI SPCR table.
-[X86] When used with no options the early console is
-determined by the ACPI SPCR table.
+When used with no options, the early console is
+determined by stdout-path property in device tree's
+chosen node or the ACPI SPCR table if supported by
+the platform.
 cdns,<addr>[,options]
 Start an early, polled-mode console on a Cadence
......
@@ -19,7 +19,9 @@ devices/imx8_ddr0/format/. The "events" directory describes the events types
 hardware supported that can be used with perf tool, see /sys/bus/event_source/
 devices/imx8_ddr0/events/. The "caps" directory describes filter features implemented
 in DDR PMU, see /sys/bus/events_source/devices/imx8_ddr0/caps/.
-e.g.::
+.. code-block:: bash
 perf stat -a -e imx8_ddr0/cycles/ cmd
 perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd
@@ -35,8 +37,9 @@ value 1 for supported.
 Filter is defined with two configuration parts:
 --AXI_ID defines AxID matching value.
 --AXI_MASKING defines which bits of AxID are meaningful for the matching.
-0:corresponding bit is masked.
-1: corresponding bit is not masked, i.e. used to do the matching.
+- 0: corresponding bit is masked.
+- 1: corresponding bit is not masked, i.e. used to do the matching.
 AXI_ID and AXI_MASKING are mapped on DPCR1 register in performance counter.
 When non-masked bits are matching corresponding AXI_ID bits then counter is
@@ -45,14 +48,20 @@ value 1 for supported.
 This filter doesn't support filter different AXI ID for axid-read and axid-write
 event at the same time as this filter is shared between counters.
-e.g.::
+.. code-block:: bash
 perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
 perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
-NOTE: axi_mask is inverted in userspace(i.e. set bits are bits to mask), and
+.. note::
+axi_mask is inverted in userspace(i.e. set bits are bits to mask), and
 it will be reverted in driver automatically. so that the user can just specify
 axi_id to monitor a specific id, rather than having to specify axi_mask.
-e.g.::
+.. code-block:: bash
 perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12
 * With DDR_CAP_AXI_ID_FILTER_ENHANCED quirk(filter: 1, enhanced_filter: 1).
......
@@ -8,6 +8,7 @@ Performance monitor support
 :maxdepth: 1
 hisi-pmu
+imx-ddr
 qcom_l2_pmu
 qcom_l3_pmu
 arm-ccn
......
@@ -831,8 +831,8 @@ printk_ratelimit:
 =================
 Some warning messages are rate limited. printk_ratelimit specifies
-the minimum length of time between these messages (in jiffies), by
-default we allow one every 5 seconds.
+the minimum length of time between these messages (in seconds).
+The default value is 5 seconds.
 A value of 0 will disable rate limiting.
@@ -845,6 +845,8 @@ seconds, we do allow a burst of messages to pass through.
 printk_ratelimit_burst specifies the number of messages we can
 send before ratelimiting kicks in.
+The default value is 10 messages.
 printk_devkmsg:
 ===============
@@ -1101,7 +1103,7 @@ During initialization the kernel sets this value such that even if the
 maximum number of threads is created, the thread structures occupy only
 a part (1/8th) of the available RAM pages.
-The minimum value that can be written to threads-max is 20.
+The minimum value that can be written to threads-max is 1.
 The maximum value that can be written to threads-max is given by the
 constant FUTEX_TID_MASK (0x3fffffff).
@@ -1109,10 +1111,6 @@ constant FUTEX_TID_MASK (0x3fffffff).
 If a value outside of this range is written to threads-max an error
 EINVAL occurs.
-The value written is checked against the available RAM pages. If the
-thread structures would occupy too much (more than 1/8th) of the
-available RAM pages threads-max is reduced accordingly.
 unknown_nmi_panic:
 ==================
......
@@ -37,7 +37,8 @@ needs_sphinx = '1.3'
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain',
-'kfigure', 'sphinx.ext.ifconfig', 'automarkup']
+'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
+'maintainers_include']
 # The name of the math extension changed on Sphinx 1.4
 if (major == 1 and minor > 3) or (major > 1):
......
@@ -23,7 +23,7 @@ begins with the creation of a pool using one of:
 .. kernel-doc:: lib/genalloc.c
 :functions: devm_gen_pool_create
-A call to :c:func:`gen_pool_create` will create a pool. The granularity of
+A call to gen_pool_create() will create a pool. The granularity of
 allocations is set with min_alloc_order; it is a log-base-2 number like
 those used by the page allocator, but it refers to bytes rather than pages.
 So, if min_alloc_order is passed as 3, then all allocations will be a
@@ -32,7 +32,7 @@ required to track the memory in the pool. The nid parameter specifies
 which NUMA node should be used for the allocation of the housekeeping
 structures; it can be -1 if the caller doesn't care.
-The "managed" interface :c:func:`devm_gen_pool_create` ties the pool to a
+The "managed" interface devm_gen_pool_create() ties the pool to a
 specific device. Among other things, it will automatically clean up the
 pool when the given device is destroyed.
@@ -53,32 +53,32 @@ to the pool. That can be done with one of:
 :functions: gen_pool_add
 .. kernel-doc:: lib/genalloc.c
-:functions: gen_pool_add_virt
+:functions: gen_pool_add_owner
-A call to :c:func:`gen_pool_add` will place the size bytes of memory
+A call to gen_pool_add() will place the size bytes of memory
 starting at addr (in the kernel's virtual address space) into the given
 pool, once again using nid as the node ID for ancillary memory allocations.
-The :c:func:`gen_pool_add_virt` variant associates an explicit physical
+The gen_pool_add_virt() variant associates an explicit physical
 address with the memory; this is only necessary if the pool will be used
 for DMA allocations.
 The functions for allocating memory from the pool (and putting it back)
 are:
-.. kernel-doc:: lib/genalloc.c
+.. kernel-doc:: include/linux/genalloc.h
 :functions: gen_pool_alloc
 .. kernel-doc:: lib/genalloc.c
 :functions: gen_pool_dma_alloc
 .. kernel-doc:: lib/genalloc.c
-:functions: gen_pool_free
+:functions: gen_pool_free_owner
-As one would expect, :c:func:`gen_pool_alloc` will allocate size< bytes
+As one would expect, gen_pool_alloc() will allocate size< bytes
-from the given pool. The :c:func:`gen_pool_dma_alloc` variant allocates
+from the given pool. The gen_pool_dma_alloc() variant allocates
 memory for use with DMA operations, returning the associated physical
 address in the space pointed to by dma. This will only work if the memory
-was added with :c:func:`gen_pool_add_virt`. Note that this function
+was added with gen_pool_add_virt(). Note that this function
 departs from the usual genpool pattern of using unsigned long values to
 represent kernel addresses; it returns a void * instead.
@@ -89,14 +89,14 @@ return. If that sort of control is needed, the following functions will be
 of interest:
 .. kernel-doc:: lib/genalloc.c
-:functions: gen_pool_alloc_algo
+:functions: gen_pool_alloc_algo_owner
 .. kernel-doc:: lib/genalloc.c
 :functions: gen_pool_set_algo
-Allocations with :c:func:`gen_pool_alloc_algo` specify an algorithm to be
+Allocations with gen_pool_alloc_algo() specify an algorithm to be
 used to choose the memory to be allocated; the default algorithm can be set
-with :c:func:`gen_pool_set_algo`. The data value is passed to the
+with gen_pool_set_algo(). The data value is passed to the
 algorithm; most ignore it, but it is occasionally needed. One can,
 naturally, write a special-purpose algorithm, but there is a fair set
 already available:
......
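As an illustrative aside (not part of the diff above), a hedged sketch of the genalloc calls just discussed; the region address, region size and allocation size are invented for the example:

.. code-block:: c

   #include <linux/genalloc.h>
   #include <linux/errno.h>

   /* Hedged sketch: an 8-byte-granularity pool over a caller-provided region. */
   static int example_pool(unsigned long region, size_t region_size)
   {
           struct gen_pool *pool = gen_pool_create(3, -1); /* min_alloc_order = 3 */
           unsigned long chunk;

           if (!pool)
                   return -ENOMEM;
           if (gen_pool_add(pool, region, region_size, -1)) {
                   gen_pool_destroy(pool);
                   return -ENOMEM;
           }

           chunk = gen_pool_alloc(pool, 64);       /* take 64 bytes from the pool */
           if (chunk)
                   gen_pool_free(pool, chunk, 64); /* and return them */

           gen_pool_destroy(pool);
           return 0;
   }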
@@ -26,7 +26,7 @@ Rationale
 =========
 The original implementation of interrupt handling in Linux uses the
-:c:func:`__do_IRQ` super-handler, which is able to deal with every type of
+__do_IRQ() super-handler, which is able to deal with every type of
 interrupt logic.
 Originally, Russell King identified different types of handlers to build
@@ -43,7 +43,7 @@ During the implementation we identified another type:
 - Fast EOI type
-In the SMP world of the :c:func:`__do_IRQ` super-handler another type was
+In the SMP world of the __do_IRQ() super-handler another type was
 identified:
 - Per CPU type
@@ -83,7 +83,7 @@ IRQ-flow implementation for 'level type' interrupts and add a
 (sub)architecture specific 'edge type' implementation.
 To make the transition to the new model easier and prevent the breakage
-of existing implementations, the :c:func:`__do_IRQ` super-handler is still
+of existing implementations, the __do_IRQ() super-handler is still
 available. This leads to a kind of duality for the time being. Over time
 the new model should be used in more and more architectures, as it
 enables smaller and cleaner IRQ subsystems. It's deprecated for three
@@ -116,7 +116,7 @@ status information and pointers to the interrupt flow method and the
 interrupt chip structure which are assigned to this interrupt.
 Whenever an interrupt triggers, the low-level architecture code calls
-into the generic interrupt code by calling :c:func:`desc->handle_irq`. This
+into the generic interrupt code by calling desc->handle_irq(). This
 high-level IRQ handling function only uses desc->irq_data.chip
 primitives referenced by the assigned chip descriptor structure.
@@ -125,27 +125,29 @@ High-level Driver API
 The high-level Driver API consists of following functions:
-- :c:func:`request_irq`
-- :c:func:`free_irq`
-- :c:func:`disable_irq`
-- :c:func:`enable_irq`
-- :c:func:`disable_irq_nosync` (SMP only)
-- :c:func:`synchronize_irq` (SMP only)
-- :c:func:`irq_set_irq_type`
-- :c:func:`irq_set_irq_wake`
-- :c:func:`irq_set_handler_data`
-- :c:func:`irq_set_chip`
-- :c:func:`irq_set_chip_data`
+- request_irq()
+- request_threaded_irq()
+- free_irq()
+- disable_irq()
+- enable_irq()
+- disable_irq_nosync() (SMP only)
+- synchronize_irq() (SMP only)
+- irq_set_irq_type()
+- irq_set_irq_wake()
+- irq_set_handler_data()
+- irq_set_chip()
+- irq_set_chip_data()
 See the autogenerated function documentation for details.
@@ -154,19 +156,19 @@ High-level IRQ flow handlers
 The generic layer provides a set of pre-defined irq-flow methods:
-- :c:func:`handle_level_irq`
-- :c:func:`handle_edge_irq`
-- :c:func:`handle_fasteoi_irq`
-- :c:func:`handle_simple_irq`
-- :c:func:`handle_percpu_irq`
-- :c:func:`handle_edge_eoi_irq`
-- :c:func:`handle_bad_irq`
+- handle_level_irq()
+- handle_edge_irq()
+- handle_fasteoi_irq()
+- handle_simple_irq()
+- handle_percpu_irq()
+- handle_edge_eoi_irq()
+- handle_bad_irq()
 The interrupt flow handlers (either pre-defined or architecture
 specific) are assigned to specific interrupts by the architecture either
@@ -325,14 +327,14 @@ Delayed interrupt disable
 This per interrupt selectable feature, which was introduced by Russell
 King in the ARM interrupt implementation, does not mask an interrupt at
-the hardware level when :c:func:`disable_irq` is called. The interrupt is kept
+the hardware level when disable_irq() is called. The interrupt is kept
 enabled and is masked in the flow handler when an interrupt event
 happens. This prevents losing edge interrupts on hardware which does not
 store an edge interrupt event while the interrupt is disabled at the
 hardware level. When an interrupt arrives while the IRQ_DISABLED flag
 is set, then the interrupt is masked at the hardware level and the
 IRQ_PENDING bit is set. When the interrupt is re-enabled by
-:c:func:`enable_irq` the pending bit is checked and if it is set, the interrupt
+enable_irq() the pending bit is checked and if it is set, the interrupt
 is resent either via hardware or by a software resend mechanism. (It's
 necessary to enable CONFIG_HARDIRQS_SW_RESEND when you want to use
 the delayed interrupt disable feature and your hardware is not capable
@@ -369,7 +371,7 @@ handler(s) to use these basic units of low-level functionality.
 __do_IRQ entry point
 ====================
-The original implementation :c:func:`__do_IRQ` was an alternative entry point
+The original implementation __do_IRQ() was an alternative entry point
 for all types of interrupts. It no longer exists.
 This handler turned out to be not suitable for all interrupt hardware
......
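As a hedged aside (not part of the diff above), the request_irq()/free_irq() pair from the list might be used roughly like this; the device structure, IRQ number and name are placeholders:

.. code-block:: c

   #include <linux/interrupt.h>

   struct demo_dev {                   /* placeholder driver state */
           int irq;
   };

   static irqreturn_t demo_handler(int irq, void *dev_id)
   {
           /* A real handler would check and acknowledge the hardware here. */
           return IRQ_HANDLED;
   }

   static int demo_setup(struct demo_dev *dev)
   {
           /* "demo" is the name that shows up in /proc/interrupts. */
           return request_irq(dev->irq, demo_handler, 0, "demo", dev);
   }

   static void demo_teardown(struct demo_dev *dev)
   {
           free_irq(dev->irq, dev);
   }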
@@ -88,10 +88,11 @@ Selecting memory allocator
 ==========================
 The most straightforward way to allocate memory is to use a function
-from the :c:func:`kmalloc` family. And, to be on the safe size it's
-best to use routines that set memory to zero, like
-:c:func:`kzalloc`. If you need to allocate memory for an array, there
-are :c:func:`kmalloc_array` and :c:func:`kcalloc` helpers.
+from the kmalloc() family. And, to be on the safe side it's best to use
+routines that set memory to zero, like kzalloc(). If you need to
+allocate memory for an array, there are kmalloc_array() and kcalloc()
+helpers. The helpers struct_size(), array_size() and array3_size() can
+be used to safely calculate object sizes without overflowing.
 The maximal size of a chunk that can be allocated with `kmalloc` is
 limited. The actual limit depends on the hardware and the kernel
@@ -102,29 +103,26 @@ The address of a chunk allocated with `kmalloc` is aligned to at least
 ARCH_KMALLOC_MINALIGN bytes. For sizes which are a power of two, the
 alignment is also guaranteed to be at least the respective size.
-For large allocations you can use :c:func:`vmalloc` and
-:c:func:`vzalloc`, or directly request pages from the page
-allocator. The memory allocated by `vmalloc` and related functions is
-not physically contiguous.
+For large allocations you can use vmalloc() and vzalloc(), or directly
+request pages from the page allocator. The memory allocated by `vmalloc`
+and related functions is not physically contiguous.
 If you are not sure whether the allocation size is too large for
-`kmalloc`, it is possible to use :c:func:`kvmalloc` and its
-derivatives. It will try to allocate memory with `kmalloc` and if the
-allocation fails it will be retried with `vmalloc`. There are
-restrictions on which GFP flags can be used with `kvmalloc`; please
-see :c:func:`kvmalloc_node` reference documentation. Note that
-`kvmalloc` may return memory that is not physically contiguous.
+`kmalloc`, it is possible to use kvmalloc() and its derivatives. It will
+try to allocate memory with `kmalloc` and if the allocation fails it
+will be retried with `vmalloc`. There are restrictions on which GFP
+flags can be used with `kvmalloc`; please see kvmalloc_node() reference
+documentation. Note that `kvmalloc` may return memory that is not
+physically contiguous.
 If you need to allocate many identical objects you can use the slab
-cache allocator. The cache should be set up with
-:c:func:`kmem_cache_create` or :c:func:`kmem_cache_create_usercopy`
-before it can be used. The second function should be used if a part of
-the cache might be copied to the userspace. After the cache is
-created :c:func:`kmem_cache_alloc` and its convenience wrappers can
-allocate memory from that cache.
+cache allocator. The cache should be set up with kmem_cache_create() or
+kmem_cache_create_usercopy() before it can be used. The second function
+should be used if a part of the cache might be copied to the userspace.
+After the cache is created kmem_cache_alloc() and its convenience
+wrappers can allocate memory from that cache.
-When the allocated memory is no longer needed it must be freed. You
-can use :c:func:`kvfree` for the memory allocated with `kmalloc`,
-`vmalloc` and `kvmalloc`. The slab caches should be freed with
-:c:func:`kmem_cache_free`. And don't forget to destroy the cache with
-:c:func:`kmem_cache_destroy`.
+When the allocated memory is no longer needed it must be freed. You can
+use kvfree() for the memory allocated with `kmalloc`, `vmalloc` and
+`kvmalloc`. The slab caches should be freed with kmem_cache_free(). And
+don't forget to destroy the cache with kmem_cache_destroy().
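A brief, hedged sketch of the allocator choices named above (not part of the diff); the object type and counts are invented for illustration:

.. code-block:: c

   #include <linux/slab.h>
   #include <linux/mm.h>
   #include <linux/overflow.h>

   struct item {                       /* invented example object */
           u64 key;
           u64 value;
   };

   static int example_allocations(size_t n)
   {
           /* kcalloc() allocates an array of n items and zeroes it. */
           struct item *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
           /* kvmalloc() falls back to vmalloc() for large sizes;
            * array_size() guards the multiplication against overflow. */
           struct item *big = kvmalloc(array_size(n, sizeof(*big)), GFP_KERNEL);

           if (!arr || !big) {
                   kfree(arr);
                   kvfree(big);
                   return -ENOMEM;
           }

           /* ... use the buffers ... */

           kvfree(big);                /* safe for kmalloc'ed or vmalloc'ed memory */
           kfree(arr);
           return 0;
   }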
@@ -11,7 +11,7 @@ User Space Memory Access
 .. kernel-doc:: arch/x86/lib/usercopy_32.c
 :export:
-.. kernel-doc:: mm/util.c
+.. kernel-doc:: mm/gup.c
 :functions: get_user_pages_fast
 .. _mm-api-gfp-flags:
......
@@ -137,6 +137,20 @@ equivalent to %lx (or %lu). %px is preferred because it is more uniquely
 grep'able. If in the future we need to modify the way the kernel handles
 printing pointers we will be better equipped to find the call sites.
+Pointer Differences
+-------------------
+::
+	%td	2560
+	%tx	a00
+For printing the pointer differences, use the %t modifier for ptrdiff_t.
+Example::
+	printk("test: difference between pointers: %td\n", ptr2 - ptr1);
 Struct Resources
 ----------------
......
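A hedged illustration of the new specifier in use (not part of the diff); the buffer and offset are invented:

.. code-block:: c

   #include <linux/printk.h>

   static void show_span(void)
   {
           char buf[64];
           char *start = buf;
           char *end = buf + 40;   /* arbitrary offset for illustration */

           /* %td prints a ptrdiff_t in decimal, %tx in hex. */
           printk(KERN_DEBUG "span: %td bytes (0x%tx)\n", end - start, end - start);
   }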
@@ -35,7 +35,7 @@ atomics & refcounters only provide atomicity and
 program order (po) relation (on the same CPU). It guarantees that
 each ``atomic_*()`` and ``refcount_*()`` operation is atomic and instructions
 are executed in program order on a single CPU.
-This is implemented using :c:func:`READ_ONCE`/:c:func:`WRITE_ONCE` and
+This is implemented using READ_ONCE()/WRITE_ONCE() and
 compare-and-swap primitives.
 A strong (full) memory ordering guarantees that all prior loads and
@@ -44,7 +44,7 @@ before any po-later instruction is executed on the same CPU.
 It also guarantees that all po-earlier stores on the same CPU
 and all propagated stores from other CPUs must propagate to all
 other CPUs before any po-later instruction is executed on the original
-CPU (A-cumulative property). This is implemented using :c:func:`smp_mb`.
+CPU (A-cumulative property). This is implemented using smp_mb().
 A RELEASE memory ordering guarantees that all prior loads and
 stores (all po-earlier instructions) on the same CPU are completed
@@ -52,14 +52,14 @@ before the operation. It also guarantees that all po-earlier
 stores on the same CPU and all propagated stores from other CPUs
 must propagate to all other CPUs before the release operation
 (A-cumulative property). This is implemented using
-:c:func:`smp_store_release`.
+smp_store_release().
 An ACQUIRE memory ordering guarantees that all post loads and
 stores (all po-later instructions) on the same CPU are
 completed after the acquire operation. It also guarantees that all
 po-later stores on the same CPU must propagate to all other CPUs
 after the acquire operation executes. This is implemented using
-:c:func:`smp_acquire__after_ctrl_dep`.
+smp_acquire__after_ctrl_dep().
 A control dependency (on success) for refcounters guarantees that
 if a reference for an object was successfully obtained (reference
@@ -78,8 +78,8 @@ case 1) - non-"Read/Modify/Write" (RMW) ops
 Function changes:
-* :c:func:`atomic_set` --> :c:func:`refcount_set`
-* :c:func:`atomic_read` --> :c:func:`refcount_read`
+* atomic_set() --> refcount_set()
+* atomic_read() --> refcount_read()
 Memory ordering guarantee changes:
@@ -91,8 +91,8 @@ case 2) - increment-based ops that return no value
 Function changes:
-* :c:func:`atomic_inc` --> :c:func:`refcount_inc`
-* :c:func:`atomic_add` --> :c:func:`refcount_add`
+* atomic_inc() --> refcount_inc()
+* atomic_add() --> refcount_add()
 Memory ordering guarantee changes:
@@ -103,7 +103,7 @@ case 3) - decrement-based RMW ops that return no value
 Function changes:
-* :c:func:`atomic_dec` --> :c:func:`refcount_dec`
+* atomic_dec() --> refcount_dec()
 Memory ordering guarantee changes:
@@ -115,8 +115,8 @@ case 4) - increment-based RMW ops that return a value
 Function changes:
-* :c:func:`atomic_inc_not_zero` --> :c:func:`refcount_inc_not_zero`
-* no atomic counterpart --> :c:func:`refcount_add_not_zero`
+* atomic_inc_not_zero() --> refcount_inc_not_zero()
+* no atomic counterpart --> refcount_add_not_zero()
 Memory ordering guarantees changes:
@@ -131,8 +131,8 @@ case 5) - generic dec/sub decrement-based RMW ops that return a value
 Function changes:
-* :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
-* :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+* atomic_dec_and_test() --> refcount_dec_and_test()
+* atomic_sub_and_test() --> refcount_sub_and_test()
 Memory ordering guarantees changes:
@@ -144,14 +144,14 @@ case 6) other decrement-based RMW ops that return a value
 Function changes:
-* no atomic counterpart --> :c:func:`refcount_dec_if_one`
+* no atomic counterpart --> refcount_dec_if_one()
 * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
 Memory ordering guarantees changes:
 * fully ordered --> RELEASE ordering + control dependency
-.. note:: :c:func:`atomic_add_unless` only provides full order on success.
+.. note:: atomic_add_unless() only provides full order on success.
 case 7) - lock-based RMW
@@ -159,10 +159,10 @@ case 7) - lock-based RMW
 Function changes:
-* :c:func:`atomic_dec_and_lock` --> :c:func:`refcount_dec_and_lock`
-* :c:func:`atomic_dec_and_mutex_lock` --> :c:func:`refcount_dec_and_mutex_lock`
+* atomic_dec_and_lock() --> refcount_dec_and_lock()
+* atomic_dec_and_mutex_lock() --> refcount_dec_and_mutex_lock()
 Memory ordering guarantees changes:
 * fully ordered --> RELEASE ordering + control dependency + hold
-:c:func:`spin_lock` on success
+spin_lock() on success
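A hedged before/after sketch of the conversions tabulated above (not part of the diff), using an invented object:

.. code-block:: c

   #include <linux/refcount.h>
   #include <linux/slab.h>

   struct blob {                        /* invented example object */
           refcount_t refs;             /* was: atomic_t refs; */
           char data[32];
   };

   static struct blob *blob_alloc(void)
   {
           struct blob *b = kzalloc(sizeof(*b), GFP_KERNEL);

           if (b)
                   refcount_set(&b->refs, 1);   /* was: atomic_set() */
           return b;
   }

   static void blob_get(struct blob *b)
   {
           refcount_inc(&b->refs);              /* was: atomic_inc() */
   }

   static void blob_put(struct blob *b)
   {
           /* was: if (atomic_dec_and_test(&b->refs)) kfree(b); */
           if (refcount_dec_and_test(&b->refs))
                   kfree(b);
   }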
@@ -69,7 +69,7 @@ the kernel command line.
 Memory may be allocated or freed before kmemleak is initialised and
 these actions are stored in an early log buffer. The size of this buffer
-is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
+is configured via the CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE option.
 If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF are enabled, the kmemleak is
 disabled by default. Passing ``kmemleak=on`` on the kernel command
......
@@ -549,5 +549,5 @@ Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system)
 [2] Devicetree NUMA binding description
 Documentation/devicetree/bindings/numa.txt
 [3] RISC-V Linux kernel documentation
-Documentation/devicetree/bindings/riscv/cpus.txt
+Documentation/devicetree/bindings/riscv/cpus.yaml
 [4] https://www.devicetree.org/specifications/
@@ -2,7 +2,7 @@ Ingenic JZ47xx SoCs Timer/Counter Unit devicetree bindings
 ==========================================================
 For a description of the TCU hardware and drivers, have a look at
-Documentation/mips/ingenic-tcu.txt.
+Documentation/mips/ingenic-tcu.rst.
 Required properties:
......
@@ -476,6 +476,22 @@ internal: *[source-pattern ...]*
 .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
 :internal:
+identifiers: *[ function/type ...]*
+Include documentation for each *function* and *type* in *source*.
+If no *function* is specified, the documentation for all functions
+and types in the *source* will be included.
+Examples::
+.. kernel-doc:: lib/bitmap.c
+:identifiers: bitmap_parselist bitmap_parselist_user
+.. kernel-doc:: lib/idr.c
+:identifiers:
+functions: *[ function/type ...]*
+This is an alias of the 'identifiers' directive and deprecated.
 doc: *title*
 Include documentation for the ``DOC:`` paragraph identified by *title* in
 *source*. Spaces are allowed in *title*; do not quote the *title*. The *title*
@@ -488,19 +504,6 @@ doc: *title*
 .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
 :doc: High Definition Audio over HDMI and Display Port
-functions: *[ function ...]*
-Include documentation for each *function* in *source*.
-If no *function* is specified, the documentation for all functions
-and types in the *source* will be included.
-Examples::
-.. kernel-doc:: lib/bitmap.c
-:functions: bitmap_parselist bitmap_parselist_user
-.. kernel-doc:: lib/idr.c
-:functions:
 Without options, the kernel-doc directive includes all documentation comments
 from the source file.
......
.. SPDX-License-Identifier: GPL-2.0
========================
Device Frequency Scaling
========================
Introduction
------------
This framework provides a standard kernel interface for Dynamic Voltage and
Frequency Switching on arbitrary devices.
It exposes controls for adjusting frequency through sysfs files, in a way
similar to the cpufreq subsystem.
Devices for which current usage can be measured can have their frequency
automatically adjusted by governors.
API
---
Device drivers need to initialize a :c:type:`devfreq_dev_profile` and call the
:c:func:`devfreq_add_device` function to create a :c:type:`devfreq` instance.
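As a rough sketch only (the ``foo`` driver, its load numbers and clock
handling are made up; the structure and callback names follow
include/linux/devfreq.h), registration from a driver's probe() might look
like this:

.. code-block:: c

   #include <linux/devfreq.h>
   #include <linux/device.h>
   #include <linux/err.h>

   struct foo_drvdata {                    /* hypothetical driver state */
           unsigned long cur_freq;
   };

   static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
   {
           struct foo_drvdata *d = dev_get_drvdata(dev);

           /* Program the device clock to (roughly) the requested rate. */
           d->cur_freq = *freq;
           return 0;
   }

   static int foo_get_dev_status(struct device *dev,
                                 struct devfreq_dev_status *stat)
   {
           struct foo_drvdata *d = dev_get_drvdata(dev);

           stat->current_frequency = d->cur_freq;
           stat->busy_time = 50;           /* made-up utilisation figures */
           stat->total_time = 100;
           return 0;
   }

   static struct devfreq_dev_profile foo_profile = {
           .polling_ms     = 100,
           .target         = foo_target,
           .get_dev_status = foo_get_dev_status,
   };

   static int foo_register_devfreq(struct device *dev)
   {
           struct devfreq *df;

           df = devm_devfreq_add_device(dev, &foo_profile,
                                        "simple_ondemand", NULL);
           return PTR_ERR_OR_ZERO(df);
   }

The chosen governor then periodically calls get_dev_status() and, based on
the reported load, asks target() to change the frequency.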
.. kernel-doc:: include/linux/devfreq.h
.. kernel-doc:: include/linux/devfreq-event.h
.. kernel-doc:: drivers/devfreq/devfreq.c
:export:
.. kernel-doc:: drivers/devfreq/devfreq-event.c
:export:
...@@ -500,7 +500,7 @@ available but we try to move away from this: ...@@ -500,7 +500,7 @@ available but we try to move away from this:
gpiochip. It will pass the struct gpio_chip* for the chip to all IRQ gpiochip. It will pass the struct gpio_chip* for the chip to all IRQ
callbacks, so the callbacks need to embed the gpio_chip in its state callbacks, so the callbacks need to embed the gpio_chip in its state
container and obtain a pointer to the container using container_of(). container and obtain a pointer to the container using container_of().
(See Documentation/driver-model/design-patterns.txt) (See Documentation/driver-api/driver-model/design-patterns.rst)
- gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip, - gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip,
as discussed above regarding different types of cascaded irqchips. The as discussed above regarding different types of cascaded irqchips. The
......
...@@ -40,6 +40,7 @@ available subsections can be seen below. ...@@ -40,6 +40,7 @@ available subsections can be seen below.
ipmb ipmb
i3c/index i3c/index
interconnect interconnect
devfreq
hsi hsi
edac edac
scsi scsi
...@@ -73,7 +74,6 @@ available subsections can be seen below. ...@@ -73,7 +74,6 @@ available subsections can be seen below.
connector connector
console console
dcdbas dcdbas
dell_rbu
edid edid
eisa eisa
ipmb ipmb
...@@ -93,7 +93,6 @@ available subsections can be seen below. ...@@ -93,7 +93,6 @@ available subsections can be seen below.
pwm pwm
rfkill rfkill
serial/index serial/index
sgi-ioc4
sm501 sm501
smsc_ece1099 smsc_ece1099
switchtec switchtec
......
...@@ -49,9 +49,6 @@ Device Drivers Base ...@@ -49,9 +49,6 @@ Device Drivers Base
Device Drivers DMA Management Device Drivers DMA Management
----------------------------- -----------------------------
.. kernel-doc:: kernel/dma/coherent.c
:export:
.. kernel-doc:: kernel/dma/mapping.c .. kernel-doc:: kernel/dma/mapping.c
:export: :export:
......
.. SPDX-License-Identifier: GPL-2.0 .. SPDX-License-Identifier: GPL-2.0
===================================== =====================================
GENERIC SYSTEM INTERCONNECT SUBSYSTEM Generic System Interconnect Subsystem
===================================== =====================================
Introduction Introduction
......
...@@ -49,7 +49,9 @@ but is not just blindly executing as 'root'. Keep in mind ...@@ -49,7 +49,9 @@ but is not just blindly executing as 'root'. Keep in mind
the use of ioctl(,TIOCSETD,) is not specific to the n_tracerouter the use of ioctl(,TIOCSETD,) is not specific to the n_tracerouter
and n_tracesink line discpline drivers but is a generic and n_tracesink line discpline drivers but is a generic
operation for a program to use a line discpline driver operation for a program to use a line discpline driver
on a tty port other than the default n_tty:: on a tty port other than the default n_tty:
.. code-block:: c
/////////// To hook up n_tracerouter and n_tracesink ///////// /////////// To hook up n_tracerouter and n_tracesink /////////
......
...@@ -46,4 +46,5 @@ Documentation for filesystem implementations. ...@@ -46,4 +46,5 @@ Documentation for filesystem implementations.
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
autofs
virtiofs virtiofs
...@@ -105,7 +105,7 @@ getattr: no ...@@ -105,7 +105,7 @@ getattr: no
listxattr: no listxattr: no
fiemap: no fiemap: no
update_time: no update_time: no
atomic_open: exclusive atomic_open: shared (exclusive if O_CREAT is set in open flags)
tmpfile: no tmpfile: no
============ ============================================= ============ =============================================
......
...@@ -17,7 +17,7 @@ Usage Notes ...@@ -17,7 +17,7 @@ Usage Notes
----------- -----------
This driver does not auto-detect devices. You will have to instantiate the This driver does not auto-detect devices. You will have to instantiate the
devices explicitly. Please see Documentation/i2c/instantiating-devices for devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
details. details.
Sysfs entries Sysfs entries
......
. SPDX-License-Identifier: GPL-2.0 .. SPDX-License-Identifier: GPL-2.0
=============== ===============
I2C Bus Drivers I2C Bus Drivers
......
. SPDX-License-Identifier: GPL-2.0 .. SPDX-License-Identifier: GPL-2.0
=================== ===================
I2C/SMBus Subsystem I2C/SMBus Subsystem
......
...@@ -57,7 +57,6 @@ the kernel interface as seen by application developers. ...@@ -57,7 +57,6 @@ the kernel interface as seen by application developers.
:maxdepth: 2 :maxdepth: 2
userspace-api/index userspace-api/index
ioctl/index
Introduction to kernel development Introduction to kernel development
......
...@@ -32,3 +32,33 @@ You may also like to tell ``gpg`` which ``tty`` to use (add to your shell rc fil ...@@ -32,3 +32,33 @@ You may also like to tell ``gpg`` which ``tty`` to use (add to your shell rc fil
:: ::
export GPG_TTY=$(tty) export GPG_TTY=$(tty)
Creating commit links to lore.kernel.org
----------------------------------------
The web site http://lore.kernel.org is meant as a grand archive of all mailing
list traffic concerning or influencing kernel development. Storing archives
of patches here is a recommended practice, and when a maintainer applies a
patch to a subsystem tree, it is a good idea to provide a Link: tag with a
reference back to the lore archive so that people who browse the commit
history can find related discussions and the rationale behind a certain change.
The link tag will look like this:
Link: https://lore.kernel.org/r/<message-id>
This can be configured to happen automatically any time you issue ``git am``
by adding the following hook into your git:
.. code-block:: none
$ git config am.messageid true
$ cat >.git/hooks/applypatch-msg <<'EOF'
#!/bin/sh
. git-sh-setup
perl -pi -e 's|^Message-Id:\s*<?([^>]+)>?$|Link: https://lore.kernel.org/r/$1|g;' "$1"
test -x "$GIT_DIR/hooks/commit-msg" &&
exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"}
:
EOF
$ chmod a+x .git/hooks/applypatch-msg
...@@ -12,4 +12,5 @@ additions to this manual. ...@@ -12,4 +12,5 @@ additions to this manual.
configure-git configure-git
rebasing-and-merging rebasing-and-merging
pull-requests pull-requests
maintainer-entry-profile
.. _maintainerentryprofile:
Maintainer Entry Profile
========================
The Maintainer Entry Profile supplements the top-level process documents
(submitting-patches, submitting drivers...) with
subsystem/device-driver-local customs as well as details about the patch
submission life-cycle. A contributor uses this document to level set
their expectations and avoid common mistakes; maintainers may use these
profiles to look across subsystems for opportunities to converge on
common practices.
Overview
--------
Provide an introduction to how the subsystem operates. While MAINTAINERS
tells the contributor where to send patches for which files, it does not
convey other subsystem-local infrastructure and mechanisms that aid
development.
Example questions to consider:
- Are there notifications when patches are applied to the local tree, or
merged upstream?
- Does the subsystem have a patchwork instance? Are patchwork state
changes notified?
- Any bots or CI infrastructure that watches the list, or automated
testing feedback that the subsystem uses to gate acceptance?
- Git branches that are pulled into -next?
- What branch should contributors submit against?
- Links to any other Maintainer Entry Profiles? For example, a
device-driver may point to an entry for its parent subsystem. This makes
the contributor aware of obligations a maintainer may have for
other maintainers in the submission chain.
Submit Checklist Addendum
-------------------------
List mandatory and advisory criteria, beyond the common "submit-checklist",
for a patch to be considered healthy enough for maintainer attention.
For example: "pass checkpatch.pl with no errors, or warning. Pass the
unit test detailed at $URI".
The Submit Checklist Addendum can also include details about the status
of related hardware specifications. For example, does the subsystem
require published specifications at a certain revision before patches
will be considered?
Key Cycle Dates
---------------
One of the common misunderstandings of submitters is that patches can be
sent at any time before the merge window closes and can still be
considered for the next -rc1. The reality is that most patches need to
be settled in, soaking in linux-next, in advance of the merge window
opening. Clarify for the submitter the key dates (in terms of -rc release
week) at which patches might be considered for merging and when patches need to
wait for the next -rc. At a minimum:
- Last -rc for new feature submissions:
New feature submissions targeting the next merge window should have
their first posting for consideration before this point. Patches that
are submitted after this point should make clear that they are targeting
the NEXT+1 merge window, or should come with sufficient justification
why they should be considered on an expedited schedule. A general
guideline is to set the expectation with contributors that new feature
submissions should appear before -rc5.
- Last -rc to merge features: Deadline for merge decisions
Indicate to contributors the point at which an as-yet unapplied patch
set will need to wait for the NEXT+1 merge window. Of course there is no
obligation to ever accept any given patchset, but if the review has not
concluded by this point the expectation is that the contributor should
wait and resubmit for the following merge window.
Optional:
- First -rc at which the development baseline branch, listed in the
overview section, should be considered ready for new submissions.
Review Cadence
--------------
One of the largest sources of contributor angst is how soon to ping
after a patchset has been posted without receiving any feedback. In
addition to specifying how long to wait before a resubmission this
section can also indicate a preferred style of update, like resending the
full series or privately sending a reminder email. This section might also
list how review works for this code area and methods to get feedback
that are not directly from the maintainer.
Existing profiles
-----------------
For now, existing maintainer profiles are listed here; we will likely want
to do something different in the near future.
.. toctree::
:maxdepth: 1
../nvdimm/maintainer-entry-profile
...@@ -63,7 +63,6 @@ CONTENTS ...@@ -63,7 +63,6 @@ CONTENTS
- Compiler barrier. - Compiler barrier.
- CPU memory barriers. - CPU memory barriers.
- MMIO write barrier.
(*) Implicit kernel memory barriers. (*) Implicit kernel memory barriers.
...@@ -75,7 +74,6 @@ CONTENTS ...@@ -75,7 +74,6 @@ CONTENTS
(*) Inter-CPU acquiring barrier effects. (*) Inter-CPU acquiring barrier effects.
- Acquires vs memory accesses. - Acquires vs memory accesses.
- Acquires vs I/O accesses.
(*) Where are memory barriers needed? (*) Where are memory barriers needed?
...@@ -492,10 +490,9 @@ And a couple of implicit varieties: ...@@ -492,10 +490,9 @@ And a couple of implicit varieties:
happen before it completes. happen before it completes.
The use of ACQUIRE and RELEASE operations generally precludes the need The use of ACQUIRE and RELEASE operations generally precludes the need
for other sorts of memory barrier (but note the exceptions mentioned in for other sorts of memory barrier. In addition, a RELEASE+ACQUIRE pair is
the subsection "MMIO write barrier"). In addition, a RELEASE+ACQUIRE -not- guaranteed to act as a full memory barrier. However, after an
pair is -not- guaranteed to act as a full memory barrier. However, after ACQUIRE on a given variable, all memory accesses preceding any prior
an ACQUIRE on a given variable, all memory accesses preceding any prior
RELEASE on that same variable are guaranteed to be visible. In other RELEASE on that same variable are guaranteed to be visible. In other
words, within a given variable's critical section, all accesses of all words, within a given variable's critical section, all accesses of all
previous critical sections for that variable are guaranteed to have previous critical sections for that variable are guaranteed to have
...@@ -1512,8 +1509,6 @@ levels: ...@@ -1512,8 +1509,6 @@ levels:
(*) CPU memory barriers. (*) CPU memory barriers.
(*) MMIO write barrier.
COMPILER BARRIER COMPILER BARRIER
---------------- ----------------
......
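The retained RELEASE+ACQUIRE guarantee quoted above can be illustrated with
a lock, where spin_lock() acts as an ACQUIRE and spin_unlock() as a RELEASE
(an illustrative sketch, not part of the patch):

.. code-block:: c

   #include <linux/spinlock.h>
   #include <linux/printk.h>

   static DEFINE_SPINLOCK(lock);
   static int data;

   /* Runs on one CPU. */
   static void writer(void)
   {
           spin_lock(&lock);       /* ACQUIRE */
           data = 42;
           spin_unlock(&lock);     /* RELEASE */
   }

   /* Runs on another CPU. */
   static void reader(void)
   {
           spin_lock(&lock);       /* ACQUIRE */
           /*
            * If this ACQUIRE follows the writer's RELEASE on the same
            * lock, the store to 'data' in that earlier critical section
            * is guaranteed to be visible here; no extra barrier needed.
            * A RELEASE+ACQUIRE pair is still not a full barrier for
            * unrelated accesses outside the critical sections.
            */
           pr_info("data = %d\n", data);
           spin_unlock(&lock);     /* RELEASE */
   }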
...@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these ...@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these
drivers access their registers through the same regmap. drivers access their registers through the same regmap.
For more information regarding the devicetree bindings of the TCU drivers, For more information regarding the devicetree bindings of the TCU drivers,
have a look at Documentation/devicetree/bindings/mfd/ingenic,tcu.txt. have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
...@@ -279,7 +279,7 @@ mlx5 tracepoints ...@@ -279,7 +279,7 @@ mlx5 tracepoints
================ ================
mlx5 driver provides internal trace points for tracking and debugging using mlx5 driver provides internal trace points for tracking and debugging using
kernel tracepoints interfaces (refer to Documentation/trace/ftrase.rst). kernel tracepoints interfaces (refer to Documentation/trace/ftrace.rst).
For the list of support mlx5 events check /sys/kernel/debug/tracing/events/mlx5/ For the list of support mlx5 events check /sys/kernel/debug/tracing/events/mlx5/
......
...@@ -233,7 +233,7 @@ help debug packet drops caused by these exceptions. The following list includes ...@@ -233,7 +233,7 @@ help debug packet drops caused by these exceptions. The following list includes
links to the description of driver-specific traps registered by various device links to the description of driver-specific traps registered by various device
drivers: drivers:
* :doc:`/devlink-trap-netdevsim` * :doc:`devlink-trap-netdevsim`
Generic Packet Trap Groups Generic Packet Trap Groups
========================== ==========================
......
...@@ -73,7 +73,7 @@ The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin ...@@ -73,7 +73,7 @@ The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
electrical signal interface using a synchronous 125Mhz clock signal and several electrical signal interface using a synchronous 125Mhz clock signal and several
data lines. Due to this design decision, a 1.5ns to 2ns delay must be added data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
between the clock line (RXC or TXC) and the data lines to let the PHY (clock between the clock line (RXC or TXC) and the data lines to let the PHY (clock
sink) have enough setup and hold times to sample the data lines correctly. The sink) have a large enough setup and hold time to sample the data lines correctly. The
PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
the PHY driver and optionally the MAC driver, implement the required delay. The the PHY driver and optionally the MAC driver, implement the required delay. The
values of phy_interface_t must be understood from the perspective of the PHY values of phy_interface_t must be understood from the perspective of the PHY
......
LIBNVDIMM Maintainer Entry Profile
==================================
Overview
--------
The libnvdimm subsystem manages persistent memory across multiple
architectures. The mailing list is tracked by patchwork here:
https://patchwork.kernel.org/project/linux-nvdimm/list/
...and that instance is configured to give feedback to submitters on
patch acceptance and upstream merge. Patches are merged to either the
'libnvdimm-fixes' or 'libnvdimm-for-next' branch. Those branches are
available here:
https://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git/
In general, patches can be submitted against the latest -rc; however, if
the incoming code change is dependent on other pending changes then the
patch should be based on the libnvdimm-for-next branch. However, since
persistent memory sits at the intersection of storage and memory, there
are cases where patches are more suitable to be merged through a
filesystem tree or the memory management tree. When in doubt, copy the nvdimm
list and the maintainers will help route.
Submissions will be exposed to the kbuild robot for compile regression
testing. It helps to get a success notification from that infrastructure
before submitting, but it is not required.
Submit Checklist Addendum
-------------------------
There are unit tests for the subsystem via the ndctl utility:
https://github.com/pmem/ndctl
Those tests need to be passed before the patches go upstream, but not
necessarily before initial posting. Contact the list if you need help
getting the test environment set up.
ACPI Device Specific Methods (_DSM)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Before patches enabling a new _DSM family will be considered, it must
be assigned a format-interface-code from the NVDIMM Sub-team of the ACPI
Specification Working Group. In general, the stance of the subsystem is
to push back on the proliferation of NVDIMM command sets, so do strongly
consider implementing support for an existing command set. See
drivers/acpi/nfit/nfit.h for the set of supported command sets.
Key Cycle Dates
---------------
New submissions can be sent at any time, but if they intend to hit the
next merge window they should be sent before -rc4, and ideally
stabilized in the libnvdimm-for-next branch by -rc6. Of course if a
patch set requires more than 2 weeks of review -rc4 is already too late
and some patches may require multiple development cycles to review.
Review Cadence
--------------
In general, please wait up to one week before pinging for feedback. A
private mail reminder is preferred. Alternatively, ask other
developers that have Reviewed-by tags for libnvdimm changes to take a
look and offer their opinion.
...@@ -46,7 +46,7 @@ will need to add a 32-bit compat layer: ...@@ -46,7 +46,7 @@ will need to add a 32-bit compat layer:
conversion or worse, fiddle the raw __u64 through your code since that conversion or worse, fiddle the raw __u64 through your code since that
diminishes the checking tools like sparse can provide. The macro diminishes the checking tools like sparse can provide. The macro
u64_to_user_ptr can be used in the kernel to avoid warnings about integers u64_to_user_ptr can be used in the kernel to avoid warnings about integers
and pointres of different sizes. and pointers of different sizes.
Basics Basics
......
...@@ -240,7 +240,7 @@ an involved disclosed party. The current ambassadors list: ...@@ -240,7 +240,7 @@ an involved disclosed party. The current ambassadors list:
============= ======================================================== ============= ========================================================
ARM ARM
AMD AMD Tom Lendacky <tom.lendacky@amd.com>
IBM IBM
Intel Tony Luck <tony.luck@intel.com> Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org> Qualcomm Trilok Soni <tsoni@codeaurora.org>
......
...@@ -46,6 +46,7 @@ Other guides to the community that are of interest to most developers are: ...@@ -46,6 +46,7 @@ Other guides to the community that are of interest to most developers are:
kernel-docs kernel-docs
deprecated deprecated
embargoed-hardware-issues embargoed-hardware-issues
maintainers
These are some overall technical guides that have been put here for now for These are some overall technical guides that have been put here for now for
lack of a better place. lack of a better place.
...@@ -57,6 +58,7 @@ lack of a better place. ...@@ -57,6 +58,7 @@ lack of a better place.
adding-syscalls adding-syscalls
magic-number magic-number
volatile-considered-harmful volatile-considered-harmful
botching-up-ioctls
clang-format clang-format
.. only:: subproject and html .. only:: subproject and html
......
...@@ -782,7 +782,58 @@ helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in ...@@ -782,7 +782,58 @@ helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in
the cover email text) to link to an earlier version of the patch series. the cover email text) to link to an earlier version of the patch series.
16) Sending ``git pull`` requests 16) Providing base tree information
-----------------------------------
When other developers receive your patches and start the review process,
it is often useful for them to know where in the tree history they
should place your work. This is particularly useful for automated CI
processes that attempt to run a series of tests in order to establish
the quality of your submission before the maintainer starts the review.
If you are using ``git format-patch`` to generate your patches, you can
automatically include the base tree information in your submission by
using the ``--base`` flag. The easiest and most convenient way to use
this option is with topical branches::
$ git checkout -t -b my-topical-branch master
Branch 'my-topical-branch' set up to track local branch 'master'.
Switched to a new branch 'my-topical-branch'
[perform your edits and commits]
$ git format-patch --base=auto --cover-letter -o outgoing/ master
outgoing/0000-cover-letter.patch
outgoing/0001-First-Commit.patch
outgoing/...
When you open ``outgoing/0000-cover-letter.patch`` for editing, you will
notice that it will have the ``base-commit:`` trailer at the very
bottom, which provides the reviewer and the CI tools enough information
to properly perform ``git am`` without worrying about conflicts::
$ git checkout -b patch-review [base-commit-id]
Switched to a new branch 'patch-review'
$ git am patches.mbox
Applying: First Commit
Applying: ...
Please see ``man git-format-patch`` for more information about this
option.
.. note::
The ``--base`` feature was introduced in git version 2.9.0.
If you are not using git to format your patches, you can still include
the same ``base-commit`` trailer to indicate the commit hash of the tree
on which your work is based. You should add it either in the cover
letter or in the first patch of the series and it should be placed
either below the ``---`` line or at the very bottom of all other
content, right before your email signature.
17) Sending ``git pull`` requests
--------------------------------- ---------------------------------
If you have a series of patches, it may be most convenient to have the If you have a series of patches, it may be most convenient to have the
......
...@@ -21,7 +21,7 @@ The following 64-byte header is present in decompressed Linux kernel image:: ...@@ -21,7 +21,7 @@ The following 64-byte header is present in decompressed Linux kernel image::
u32 res1 = 0; /* Reserved */ u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */ u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */ u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
u32 magic2 = 0x56534905; /* Magic number 2, little endian, "RSC\x05" */ u32 magic2 = 0x05435352; /* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */ u32 res4; /* Reserved for PE COFF offset */
This header format is compliant with PE/COFF header and largely inspired from This header format is compliant with PE/COFF header and largely inspired from
......
...@@ -28,7 +28,7 @@ of these will need to start with a baseline observation and then calculate ...@@ -28,7 +28,7 @@ of these will need to start with a baseline observation and then calculate
the change in the counters at each subsequent observation. A perl script the change in the counters at each subsequent observation. A perl script
which does this for many of the fields is available at which does this for many of the fields is available at
http://eaglet.rain.com/rick/linux/schedstat/ http://eaglet.pdxhosts.com/rick/linux/schedstat/
Note that any such script will necessarily be version-specific, as the main Note that any such script will necessarily be version-specific, as the main
reason to change versions is changes in the output format. For those wishing reason to change versions is changes in the output format. For those wishing
...@@ -164,4 +164,4 @@ report on how well a particular process or set of processes is faring ...@@ -164,4 +164,4 @@ report on how well a particular process or set of processes is faring
under the scheduler's policies. A simple version of such a program is under the scheduler's policies. A simple version of such a program is
available at available at
http://eaglet.rain.com/rick/linux/schedstat/v12/latency.c http://eaglet.pdxhosts.com/rick/linux/schedstat/v12/latency.c
...@@ -1102,7 +1102,7 @@ payload contents" for more information. ...@@ -1102,7 +1102,7 @@ payload contents" for more information.
See also Documentation/security/keys/request-key.rst. See also Documentation/security/keys/request-key.rst.
* To search for a key in a specific domain, call: * To search for a key in a specific domain, call::
struct key *request_key_tag(const struct key_type *type, struct key *request_key_tag(const struct key_type *type,
const char *description, const char *description,
......
...@@ -56,7 +56,7 @@ the infrastructure to support security modules. The LSM kernel patch ...@@ -56,7 +56,7 @@ the infrastructure to support security modules. The LSM kernel patch
also moves most of the capabilities logic into an optional security also moves most of the capabilities logic into an optional security
module, with the system defaulting to the traditional superuser logic. module, with the system defaulting to the traditional superuser logic.
This capabilities module is discussed further in This capabilities module is discussed further in
`LSM Capabilities Module <#cap>`__. `LSM Capabilities Module`_.
The LSM kernel patch adds security fields to kernel data structures and The LSM kernel patch adds security fields to kernel data structures and
inserts calls to hook functions at critical points in the kernel code to inserts calls to hook functions at critical points in the kernel code to
......
...@@ -53,6 +53,16 @@ div[class^="highlight"] pre { ...@@ -53,6 +53,16 @@ div[class^="highlight"] pre {
line-height: normal; line-height: normal;
} }
/* Keep fields from being strangely far apart due to inheirited table CSS. */
.rst-content table.field-list th.field-name {
padding-top: 1px;
padding-bottom: 1px;
}
.rst-content table.field-list td.field-body {
padding-top: 1px;
padding-bottom: 1px;
}
@media screen { @media screen {
/* content column /* content column
......
...@@ -59,9 +59,10 @@ class KernelDocDirective(Directive): ...@@ -59,9 +59,10 @@ class KernelDocDirective(Directive):
optional_arguments = 4 optional_arguments = 4
option_spec = { option_spec = {
'doc': directives.unchanged_required, 'doc': directives.unchanged_required,
'functions': directives.unchanged,
'export': directives.unchanged, 'export': directives.unchanged,
'internal': directives.unchanged, 'internal': directives.unchanged,
'identifiers': directives.unchanged,
'functions': directives.unchanged,
} }
has_content = False has_content = False
...@@ -77,6 +78,10 @@ class KernelDocDirective(Directive): ...@@ -77,6 +78,10 @@ class KernelDocDirective(Directive):
tab_width = self.options.get('tab-width', self.state.document.settings.tab_width) tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
# 'functions' is an alias of 'identifiers'
if 'functions' in self.options:
self.options['identifiers'] = self.options.get('functions')
# FIXME: make this nicer and more robust against errors # FIXME: make this nicer and more robust against errors
if 'export' in self.options: if 'export' in self.options:
cmd += ['-export'] cmd += ['-export']
...@@ -86,11 +91,11 @@ class KernelDocDirective(Directive): ...@@ -86,11 +91,11 @@ class KernelDocDirective(Directive):
export_file_patterns = str(self.options.get('internal')).split() export_file_patterns = str(self.options.get('internal')).split()
elif 'doc' in self.options: elif 'doc' in self.options:
cmd += ['-function', str(self.options.get('doc'))] cmd += ['-function', str(self.options.get('doc'))]
elif 'functions' in self.options: elif 'identifiers' in self.options:
functions = self.options.get('functions').split() identifiers = self.options.get('identifiers').split()
if functions: if identifiers:
for f in functions: for i in identifiers:
cmd += ['-function', f] cmd += ['-function', i]
else: else:
cmd += ['-no-doc-sections'] cmd += ['-no-doc-sections']
......
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
Implementation of the ``maintainers-include`` reST-directive.
:copyright: Copyright (C) 2019 Kees Cook <keescook@chromium.org>
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``maintainers-include`` reST-directive performs extensive parsing
specific to the Linux kernel's standard "MAINTAINERS" file, in an
effort to avoid needing to heavily mark up the original plain text.
"""
import sys
import re
import os.path
from docutils import statemachine
from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
def setup(app):
app.add_directive("maintainers-include", MaintainersInclude)
return dict(
version = __version__,
parallel_read_safe = True,
parallel_write_safe = True
)
class MaintainersInclude(Include):
u"""MaintainersInclude (``maintainers-include``) directive"""
required_arguments = 0
def parse_maintainers(self, path):
"""Parse all the MAINTAINERS lines into ReST for human-readability"""
result = list()
result.append(".. _maintainers:")
result.append("")
# Poor man's state machine.
descriptions = False
maintainers = False
subsystems = False
# Field letter to field name mapping.
field_letter = None
fields = dict()
prev = None
field_prev = ""
field_content = ""
for line in open(path):
if sys.version_info.major == 2:
line = unicode(line, 'utf-8')
# Have we reached the end of the preformatted Descriptions text?
if descriptions and line.startswith('Maintainers'):
descriptions = False
# Ensure a blank line following the last "|"-prefixed line.
result.append("")
# Start subsystem processing? This is to skip processing the text
# between the Maintainers heading and the first subsystem name.
if maintainers and not subsystems:
if re.search('^[A-Z0-9]', line):
subsystems = True
# Drop needless input whitespace.
line = line.rstrip()
# Linkify all non-wildcard refs to ReST files in Documentation/.
pat = '(Documentation/([^\s\?\*]*)\.rst)'
m = re.search(pat, line)
if m:
# maintainers.rst is in a subdirectory, so include "../".
line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)
# Check state machine for output rendering behavior.
output = None
if descriptions:
# Escape the escapes in preformatted text.
output = "| %s" % (line.replace("\\", "\\\\"))
# Look for and record field letter to field name mappings:
# R: Designated *reviewer*: FullName <address@domain>
m = re.search("\s(\S):\s", line)
if m:
field_letter = m.group(1)
if field_letter and not field_letter in fields:
m = re.search("\*([^\*]+)\*", line)
if m:
fields[field_letter] = m.group(1)
elif subsystems:
# Skip empty lines: subsystem parser adds them as needed.
if len(line) == 0:
continue
# Subsystem fields are batched into "field_content"
if line[1] != ':':
# Render a subsystem entry as:
# SUBSYSTEM NAME
# ~~~~~~~~~~~~~~
# Flush pending field content.
output = field_content + "\n\n"
field_content = ""
# Collapse whitespace in subsystem name.
heading = re.sub("\s+", " ", line)
output = output + "%s\n%s" % (heading, "~" * len(heading))
field_prev = ""
else:
# Render a subsystem field as:
# :Field: entry
# entry...
field, details = line.split(':', 1)
details = details.strip()
# Mark paths (and regexes) as literal text for improved
# readability and to escape any escapes.
if field in ['F', 'N', 'X', 'K']:
# But only if not already marked :)
if not ':doc:' in details:
details = '``%s``' % (details)
# Comma separate email field continuations.
if field == field_prev and field_prev in ['M', 'R', 'L']:
field_content = field_content + ","
# Do not repeat field names, so that field entries
# will be collapsed together.
if field != field_prev:
output = field_content + "\n"
field_content = ":%s:" % (fields.get(field, field))
field_content = field_content + "\n\t%s" % (details)
field_prev = field
else:
output = line
# Re-split on any added newlines in any above parsing.
if output != None:
for separated in output.split('\n'):
result.append(separated)
# Update the state machine when we find heading separators.
if line.startswith('----------'):
if prev.startswith('Descriptions'):
descriptions = True
if prev.startswith('Maintainers'):
maintainers = True
# Retain previous line for state machine transitions.
prev = line
# Flush pending field contents.
if field_content != "":
for separated in field_content.split('\n'):
result.append(separated)
output = "\n".join(result)
# For debugging the pre-rendered results...
#print(output, file=open("/tmp/MAINTAINERS.rst", "w"))
self.state_machine.insert_input(
statemachine.string2lines(output), path)
def run(self):
"""Include the MAINTAINERS file as part of this reST file."""
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
# Walk up source path directories to find Documentation/../
path = self.state_machine.document.attributes['source']
path = os.path.realpath(path)
tail = path
while tail != "Documentation" and tail != "":
(path, tail) = os.path.split(path)
# Append "MAINTAINERS"
path = os.path.join(path, "MAINTAINERS")
try:
self.state.document.settings.record_dependencies.add(path)
lines = self.parse_maintainers(path)
except IOError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
return []
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Figure out if we should follow a specific parallelism from the make
# environment (as exported by scripts/jobserver-exec), or fall back to
# the "auto" parallelism when "-jN" is not specified at the top-level
# "make" invocation.
sphinx="$1"
shift || true
parallel="$PARALLELISM"
if [ -z "$parallel" ] ; then
# If no parallelism is specified at the top-level make, then
# fall back to the expected "-jauto" mode that the "htmldocs"
# target has had.
auto=$(perl -e 'open IN,"'"$sphinx"' --version 2>&1 |";
while (<IN>) {
if (m/([\d\.]+)/) {
print "auto" if ($1 >= "1.7")
}
}
close IN')
if [ -n "$auto" ] ; then
parallel="$auto"
fi
fi
# Only if some parallelism has been determined do we add the -jN option.
if [ -n "$parallel" ] ; then
parallel="-j$parallel"
fi
exec "$sphinx" "$parallel" "$@"
...@@ -489,7 +489,7 @@ interface provided for that purpose by the generic STM API:: ...@@ -489,7 +489,7 @@ interface provided for that purpose by the generic STM API::
crw------- 1 root root 10, 61 Jan 3 18:11 /dev/stm0 crw------- 1 root root 10, 61 Jan 3 18:11 /dev/stm0
root@genericarmv8:~# root@genericarmv8:~#
Details on how to use the generic STM API can be found here [#second]_. Details on how to use the generic STM API can be found here:- :doc:`../stm` [#second]_.
.. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm .. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
......
==============================
CoreSight - ARM Hardware Trace
==============================
.. toctree::
:maxdepth: 2
:glob:
*
...@@ -23,5 +23,4 @@ Linux Tracing Technologies ...@@ -23,5 +23,4 @@ Linux Tracing Technologies
intel_th intel_th
stm stm
sys-t sys-t
coresight coresight/index
coresight-cpu-debug
...@@ -455,7 +455,7 @@ soluzioni disponibili: ...@@ -455,7 +455,7 @@ soluzioni disponibili:
`GnuK`_ della FSIJ. Questo è uno dei pochi dispositivi a supportare le chiavi `GnuK`_ della FSIJ. Questo è uno dei pochi dispositivi a supportare le chiavi
ECC ED25519, ma offre meno funzionalità di sicurezza (come la resistenza ECC ED25519, ma offre meno funzionalità di sicurezza (come la resistenza
alla manomissione o alcuni attacchi ad un canale laterale). alla manomissione o alcuni attacchi ad un canale laterale).
- `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla - `Nitrokey Pro 2`_: è simile alla Nitrokey Start, ma è più resistente alla
manomissione e offre più funzionalità di sicurezza. La Pro 2 supporta la manomissione e offre più funzionalità di sicurezza. La Pro 2 supporta la
crittografia ECC (NISTP). crittografia ECC (NISTP).
- `Yubikey 5`_: l'hardware e il software sono proprietari, ma è più economica - `Yubikey 5`_: l'hardware e il software sono proprietari, ma è più economica
......
...@@ -240,21 +240,21 @@ ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된 ...@@ -240,21 +240,21 @@ ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된
서브시스템에 특화된 커널 브랜치들로 구성된다. 몇몇 다른 메인 서브시스템에 특화된 커널 브랜치들로 구성된다. 몇몇 다른 메인
브랜치들은 다음과 같다. 브랜치들은 다음과 같다.
- main 4.x 커널 트리 - 리누스의 메인라인 트리
- 4.x.y - 안정된 커널 트리 - 여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들
- 서브시스템을 위한 커널 트리들과 패치들 - 서브시스템을 위한 커널 트리들
- 4.x - 통합 테스트를 위한 next 커널 트리 - 통합 테스트를 위한 linux-next 커널 트리
4.x 커널 트리 메인라인 트리
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
4.x 커널들은 Linus Torvalds가 관리하며 https://kernel.org 의 메인라인 트리는 Linus Torvalds가 관리하며 https://kernel.org 또는 소스
pub/linux/kernel/v4.x/ 디렉토리에서 참조될 수 있다.개발 프로세스는 다음과 같다. 저장소에서 참조될 수 있다.개발 프로세스는 다음과 같다.
- 새로운 커널이 배포되자마자 2주의 시간이 주어진다. 이 기간동은 - 새로운 커널이 배포되자마자 2주의 시간이 주어진다. 이 기간동은
메인테이너들은 큰 diff들을 Linus에게 제출할 수 있다. 대개 이 패치들은 메인테이너들은 큰 diff들을 Linus에게 제출할 수 있다. 대개 이 패치들은
몇 주 동안 -next 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는 데 몇 주 동안 linux-next 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는
선호되는 방법은 git(커널의 소스 관리 툴, 더 많은 정보들은 선호되는 방법은 git(커널의 소스 관리 툴, 더 많은 정보들은
https://git-scm.com/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한 https://git-scm.com/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한
패치파일의 형식으로 보내는 것도 무관하다. 패치파일의 형식으로 보내는 것도 무관하다.
- 2주 후에 -rc1 커널이 릴리즈되며 여기서부터의 주안점은 새로운 커널을 - 2주 후에 -rc1 커널이 릴리즈되며 여기서부터의 주안점은 새로운 커널을
...@@ -281,28 +281,25 @@ Andrew Morton의 글이 있다. ...@@ -281,28 +281,25 @@ Andrew Morton의 글이 있다.
버그의 상황에 따라 배포되는 것이지 미리정해 놓은 시간에 따라 버그의 상황에 따라 배포되는 것이지 미리정해 놓은 시간에 따라
배포되는 것은 아니기 때문이다."* 배포되는 것은 아니기 때문이다."*
4.x.y - 안정 커널 트리 여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 자리 숫자로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 4.x 3 자리 숫자로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 해당 메이저
커널에서 발견된 큰 회귀들이나 보안 문제들 중 비교적 작고 중요한 수정들을 메인라인 릴리즈에서 발견된 큰 회귀들이나 보안 문제들 중 비교적 작고 중요한
포함한다. 수정들을 포함하며, 앞의 두 버전 넘버는 같은 기반 버전을 의미한다.
이것은 가장 최근의 안정적인 커널을 원하는 사용자에게 추천되는 브랜치이며, 이것은 가장 최근의 안정적인 커널을 원하는 사용자에게 추천되는 브랜치이며,
개발/실험적 버젼을 테스트하는 것을 돕고자 하는 사용자들과는 별로 관련이 없다. 개발/실험적 버젼을 테스트하는 것을 돕고자 하는 사용자들과는 별로 관련이 없다.
어떤 4.x.y 커널도 사용할 수 없다면 그때는 가장 높은 숫자의 4.x -stable 트리들은 "stable" 팀<stable@vger.kernel.org>에 의해 관리되며 거의 매번
커널이 현재의 안정 커널이다. 격주로 배포된다.
4.x.y는 "stable" 팀<stable@vger.kernel.org>에 의해 관리되며 거의 매번 격주로
배포된다.
커널 트리 문서들 내의 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>` 커널 트리 문서들 내의 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
파일은 어떤 종류의 변경들이 -stable 트리로 들어왔는지와 파일은 어떤 종류의 변경들이 -stable 트리로 들어왔는지와
배포 프로세스가 어떻게 진행되는지를 설명한다. 배포 프로세스가 어떻게 진행되는지를 설명한다.
서브시스템 커널 트리들과 패치들 서브시스템 커널 트리들
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~
다양한 커널 서브시스템의 메인테이너들 --- 그리고 많은 커널 서브시스템 개발자들 다양한 커널 서브시스템의 메인테이너들 --- 그리고 많은 커널 서브시스템 개발자들
--- 은 그들의 현재 개발 상태를 소스 저장소로 노출한다. 이를 통해 다른 사람들도 --- 은 그들의 현재 개발 상태를 소스 저장소로 노출한다. 이를 통해 다른 사람들도
...@@ -324,17 +321,18 @@ Andrew Morton의 글이 있다. ...@@ -324,17 +321,18 @@ Andrew Morton의 글이 있다.
대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 또는 대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 또는
http://patchwork.ozlabs.org/ 에 나열되어 있다. http://patchwork.ozlabs.org/ 에 나열되어 있다.
4.x - 통합 테스트를 위한 next 커널 트리 통합 테스트를 위한 linux-next 커널 트리
--------------------------------------- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
서브시스템 트리들의 변경사항들은 mainline 4.x 트리로 들어오기 전에 통합
테스트를 거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 서브시스템 트리들의 변경사항들은 mainline 트리로 들어오기 전에 통합 테스트를
매일 받아가는 특수한 테스트 저장소가 존재한다: 거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 매일
받아가는 특수한 테스트 저장소가 존재한다:
https://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git https://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
이런 식으로, -next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤 변경이 이런 식으로, linux-next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤
가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 -next 커널에서 테스트를 변경이 가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 linux-next
수행하는 것도 좋을 것이다. 커널에서 테스트를 수행하는 것도 좋을 것이다.
버그 보고 버그 보고
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
\renewcommand\thesection* \renewcommand\thesection*
\renewcommand\thesubsection* \renewcommand\thesubsection*
Korean translations 한국어 번역
=================== ===========
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
......
...@@ -21,6 +21,7 @@ place where this information is gathered. ...@@ -21,6 +21,7 @@ place where this information is gathered.
unshare unshare
spec_ctrl spec_ctrl
accelerators/ocxl accelerators/ocxl
ioctl/index
.. only:: subproject and html .. only:: subproject and html
......
...@@ -9,7 +9,6 @@ IOCTLs ...@@ -9,7 +9,6 @@ IOCTLs
ioctl-number ioctl-number
botching-up-ioctls
ioctl-decoding ioctl-decoding
cdrom cdrom
......
. SPDX-License-Identifier: GPL-2.0 .. SPDX-License-Identifier: GPL-2.0
================ ================
1-Wire Subsystem 1-Wire Subsystem
......
...@@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev) ...@@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
} }
/* Indicate that we support PAUSE frames (see comment in /* Indicate that we support PAUSE frames (see comment in
* Documentation/networking/phy.txt) * Documentation/networking/phy.rst)
*/ */
phy_support_asym_pause(phy); phy_support_asym_pause(phy);
......
...@@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode { ...@@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode {
* the @encap is set, the device will * the @encap is set, the device will
* offload the outer header checksums using * offload the outer header checksums using
* LCO (local checksum offload) (see * LCO (local checksum offload) (see
* Documentation/networking/checksum- * Documentation/networking/checksum-offloads.rst
* offloads.txt for more info). * for more info).
* *
* IONIC_TXQ_DESC_OPCODE_CSUM_HW: * IONIC_TXQ_DESC_OPCODE_CSUM_HW:
* *
......
...@@ -258,7 +258,7 @@ config DELL_RBU ...@@ -258,7 +258,7 @@ config DELL_RBU
DELL system. Note you need a Dell OpenManage or Dell Update package (DUP) DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
supporting application to communicate with the BIOS regarding the new supporting application to communicate with the BIOS regarding the new
image for the image update to take effect. image for the image update to take effect.
See <file:Documentation/driver-api/dell_rbu.rst> for more details on the driver. See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
config FUJITSU_LAPTOP config FUJITSU_LAPTOP
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
* on every time the packet data is written. This driver requires an * on every time the packet data is written. This driver requires an
* application to break the BIOS image in to fixed sized packet chunks. * application to break the BIOS image in to fixed sized packet chunks.
* *
* See Documentation/driver-api/dell_rbu.rst for more info. * See Documentation/admin-guide/dell_rbu.rst for more info.
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
......
...@@ -1551,7 +1551,7 @@ init_cifs(void) ...@@ -1551,7 +1551,7 @@ init_cifs(void)
/* /*
* Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3) * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
* so that we don't launch too many worker threads but * so that we don't launch too many worker threads but
* Documentation/workqueue.txt recommends setting it to 0 * Documentation/core-api/workqueue.rst recommends setting it to 0
*/ */
/* WQ_UNBOUND allows decrypt tasks to run on any CPU */ /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
......
...@@ -140,6 +140,19 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler, ...@@ -140,6 +140,19 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn, irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev); unsigned long flags, const char *name, void *dev);
/**
* request_irq - Add a handler for an interrupt line
* @irq: The interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts
* If NULL, the default primary handler is installed
* @flags: Handling flags
* @name: Name of the device generating this interrupt
* @dev: A cookie passed to the handler function
*
* This call allocates an interrupt and establishes a handler; see
* the documentation for request_threaded_irq() for details.
*/
static inline int __must_check static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev) const char *name, void *dev)
......
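The request_irq() kerneldoc added above is typically exercised as in this
sketch (hypothetical ``foo`` driver, shown only to illustrate the call and
the matching free_irq()):

.. code-block:: c

   #include <linux/interrupt.h>

   struct foo_dev {                        /* hypothetical device state */
           void __iomem *regs;
   };

   static irqreturn_t foo_interrupt(int irq, void *dev_id)
   {
           struct foo_dev *foo = dev_id;   /* the cookie passed below */

           /* Acknowledge the hardware and do the minimal work here. */
           (void)foo;
           return IRQ_HANDLED;
   }

   static int foo_setup_irq(struct foo_dev *foo, unsigned int irq)
   {
           /* IRQF_SHARED lets the line be shared with other devices. */
           return request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", foo);
   }

   static void foo_teardown_irq(struct foo_dev *foo, unsigned int irq)
   {
           free_irq(irq, foo);             /* must pass the same cookie */
   }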
...@@ -472,7 +472,7 @@ void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, ...@@ -472,7 +472,7 @@ void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
EXPORT_SYMBOL(gen_pool_dma_zalloc_align); EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/** /**
* gen_pool_free - free allocated special memory back to the pool * gen_pool_free_owner - free allocated special memory back to the pool
* @pool: pool to free to * @pool: pool to free to
* @addr: starting address of memory to free back to pool * @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free * @size: size in bytes of memory to free
......
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0+
#
# This determines how many parallel tasks "make" is expecting, as it is
# not exposed via any special variables, reserves them all, runs a subprocess
# with the PARALLELISM environment variable set, and releases the jobs back again.
#
# https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
from __future__ import print_function
import os, sys, errno
import subprocess
# Extract and prepare jobserver file descriptors from the environment.
claim = 0
jobs = b""
try:
# Fetch the make environment options.
flags = os.environ['MAKEFLAGS']
# Look for "--jobserver=R,W"
# Note that GNU Make has used --jobserver-fds and --jobserver-auth
# so this handles all of them.
opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
# Parse out R,W file descriptor numbers and set them nonblocking.
fds = opts[0].split("=", 1)[1]
reader, writer = [int(x) for x in fds.split(",", 1)]
# Open a private copy of reader to avoid setting nonblocking
# on an unsuspecting process with the same reader fd.
reader = os.open("/proc/self/fd/%d" % (reader),
os.O_RDONLY | os.O_NONBLOCK)
# Read out as many jobserver slots as possible.
while True:
try:
slot = os.read(reader, 8)
jobs += slot
except (OSError, IOError) as e:
if e.errno == errno.EWOULDBLOCK:
# Stop at the end of the jobserver queue.
break
# If something went wrong, give back the jobs.
if len(jobs):
os.write(writer, jobs)
raise e
# Add a bump for our caller's reservation, since we're just going
# to sit here blocked on our child.
claim = len(jobs) + 1
except (KeyError, IndexError, ValueError, OSError, IOError) as e:
# Any missing environment strings or bad fds should result in just
# not being parallel.
pass
# We can only claim parallelism if there was a jobserver (i.e. a top-level
# "-jN" argument) and there were no other failures. Otherwise leave out the
# environment variable and let the child figure out what is best.
if claim > 0:
os.environ['PARALLELISM'] = '%d' % (claim)
rc = subprocess.call(sys.argv[1:])
# Return all the reserved slots.
if len(jobs):
os.write(writer, jobs)
sys.exit(rc)
...@@ -1062,7 +1062,7 @@ sub dump_struct($$) { ...@@ -1062,7 +1062,7 @@ sub dump_struct($$) {
my $x = shift; my $x = shift;
my $file = shift; my $file = shift;
if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) { if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|____cacheline_aligned_in_smp|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) {
my $decl_type = $1; my $decl_type = $1;
$declaration_name = $2; $declaration_name = $2;
my $members = $3; my $members = $3;
...@@ -1073,10 +1073,11 @@ sub dump_struct($$) { ...@@ -1073,10 +1073,11 @@ sub dump_struct($$) {
# strip comments: # strip comments:
$members =~ s/\/\*.*?\*\///gos; $members =~ s/\/\*.*?\*\///gos;
# strip attributes # strip attributes
$members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)//gi; $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi;
$members =~ s/\s*__aligned\s*\([^;]*\)//gos; $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos;
$members =~ s/\s*__packed\s*//gos; $members =~ s/\s*__packed\s*/ /gos;
$members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos; $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos;
$members =~ s/\s*____cacheline_aligned_in_smp/ /gos;
# replace DECLARE_BITMAP # replace DECLARE_BITMAP
$members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos; $members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
# replace DECLARE_HASHTABLE # replace DECLARE_HASHTABLE
...@@ -1449,6 +1450,10 @@ sub push_parameter($$$$) { ...@@ -1449,6 +1450,10 @@ sub push_parameter($$$$) {
# handles unnamed variable parameters # handles unnamed variable parameters
$param = "..."; $param = "...";
} }
elsif ($param =~ /\w\.\.\.$/) {
# for named variable parameters of the form `x...`, remove the dots
$param =~ s/\.\.\.$//;
}
if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") { if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") {
$parameterdescs{$param} = "variable arguments"; $parameterdescs{$param} = "variable arguments";
} }
...@@ -1936,6 +1941,18 @@ sub process_name($$) { ...@@ -1936,6 +1941,18 @@ sub process_name($$) {
sub process_body($$) { sub process_body($$) {
my $file = shift; my $file = shift;
# Until all named variable macro parameters are
# documented using the bare name (`x`) rather than with
# dots (`x...`), strip the dots:
if ($section =~ /\w\.\.\.$/) {
$section =~ s/\.\.\.$//;
if ($verbose) {
print STDERR "${file}:$.: warning: Variable macro arguments should be documented without dots\n";
++$warnings;
}
}
if (/$doc_sect/i) { # case insensitive for supported section names if (/$doc_sect/i) { # case insensitive for supported section names
$newsection = $1; $newsection = $1;
$newcontents = $2; $newcontents = $2;
......
...@@ -124,11 +124,13 @@ sub add_package($$) ...@@ -124,11 +124,13 @@ sub add_package($$)
sub check_missing_file($$$) sub check_missing_file($$$)
{ {
my $file = shift; my $files = shift;
my $package = shift; my $package = shift;
my $is_optional = shift; my $is_optional = shift;
return if(-e $file); for (@$files) {
return if(-e $_);
}
add_package($package, $is_optional); add_package($package, $is_optional);
} }
...@@ -343,10 +345,11 @@ sub give_debian_hints() ...@@ -343,10 +345,11 @@ sub give_debian_hints()
); );
if ($pdf) { if ($pdf) {
check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", check_missing_file(["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"],
"fonts-dejavu", 2); "fonts-dejavu", 2);
check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc", check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
"/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc"],
"fonts-noto-cjk", 2); "fonts-noto-cjk", 2);
} }
...@@ -413,7 +416,7 @@ sub give_redhat_hints() ...@@ -413,7 +416,7 @@ sub give_redhat_hints()
} }
if ($pdf) { if ($pdf) {
check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc", check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc"],
"google-noto-sans-cjk-ttc-fonts", 2); "google-noto-sans-cjk-ttc-fonts", 2);
} }
...@@ -498,7 +501,7 @@ sub give_mageia_hints() ...@@ -498,7 +501,7 @@ sub give_mageia_hints()
$map{"latexmk"} = "texlive-collection-basic"; $map{"latexmk"} = "texlive-collection-basic";
if ($pdf) { if ($pdf) {
check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc", check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc"],
"google-noto-sans-cjk-ttc-fonts", 2); "google-noto-sans-cjk-ttc-fonts", 2);
} }
...@@ -517,6 +520,7 @@ sub give_arch_linux_hints() ...@@ -517,6 +520,7 @@ sub give_arch_linux_hints()
"dot" => "graphviz", "dot" => "graphviz",
"convert" => "imagemagick", "convert" => "imagemagick",
"xelatex" => "texlive-bin", "xelatex" => "texlive-bin",
"latexmk" => "texlive-core",
"rsvg-convert" => "extra/librsvg", "rsvg-convert" => "extra/librsvg",
); );
...@@ -528,7 +532,7 @@ sub give_arch_linux_hints() ...@@ -528,7 +532,7 @@ sub give_arch_linux_hints()
check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf); check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
if ($pdf) { if ($pdf) {
check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc", check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
"noto-fonts-cjk", 2); "noto-fonts-cjk", 2);
} }
...@@ -549,11 +553,11 @@ sub give_gentoo_hints() ...@@ -549,11 +553,11 @@ sub give_gentoo_hints()
"rsvg-convert" => "gnome-base/librsvg", "rsvg-convert" => "gnome-base/librsvg",
); );
check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf", check_missing_file(["/usr/share/fonts/dejavu/DejaVuSans.ttf"],
"media-fonts/dejavu", 2) if ($pdf); "media-fonts/dejavu", 2) if ($pdf);
if ($pdf) { if ($pdf) {
check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf", check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf"],
"media-fonts/noto-cjk", 2); "media-fonts/noto-cjk", 2);
} }
...@@ -645,6 +649,12 @@ sub check_distros() ...@@ -645,6 +649,12 @@ sub check_distros()
# Common dependencies # Common dependencies
# #
sub deactivate_help()
{
printf "\tIf you want to exit the virtualenv, you can use:\n";
printf "\tdeactivate\n";
}
sub check_needs() sub check_needs()
{ {
# Check for needed programs/tools # Check for needed programs/tools
...@@ -686,6 +696,7 @@ sub check_needs() ...@@ -686,6 +696,7 @@ sub check_needs()
if ($need_sphinx && scalar @activates > 0 && $activates[0] ge $min_activate) { if ($need_sphinx && scalar @activates > 0 && $activates[0] ge $min_activate) {
printf "\nNeed to activate a compatible Sphinx version on virtualenv with:\n"; printf "\nNeed to activate a compatible Sphinx version on virtualenv with:\n";
printf "\t. $activates[0]\n"; printf "\t. $activates[0]\n";
deactivate_help();
exit (1); exit (1);
} else { } else {
my $rec_activate = "$virtenv_dir/bin/activate"; my $rec_activate = "$virtenv_dir/bin/activate";
...@@ -697,6 +708,7 @@ sub check_needs() ...@@ -697,6 +708,7 @@ sub check_needs()
printf "\t$virtualenv $virtenv_dir\n"; printf "\t$virtualenv $virtenv_dir\n";
printf "\t. $rec_activate\n"; printf "\t. $rec_activate\n";
printf "\tpip install -r $requirement_file\n"; printf "\tpip install -r $requirement_file\n";
deactivate_help();
$need++ if (!$rec_sphinx_upgrade); $need++ if (!$rec_sphinx_upgrade);
} }
......