Commit 3f803abf authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge misc fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: page_alloc: exempt GFP_THISNODE allocations from zone fairness
  mm: numa: bugfix for LAST_CPUPID_NOT_IN_PAGE_FLAGS
  MAINTAINERS: add and correct types of some "T:" entries
  MAINTAINERS: use tab for separator
  rapidio/tsi721: fix tasklet termination in dma channel release
  hfsplus: fix remount issue
  zram: avoid null access when fail to alloc meta
  sh: prefix sh-specific "CCR" and "CCR2" by "SH_"
  ocfs2: fix quota file corruption
  drivers/rtc/rtc-s3c.c: fix incorrect way of save/restore of S3C2410_TICNT for TYPE_S3C64XX
  kallsyms: fix absolute addresses for kASLR
  scripts/gen_initramfs_list.sh: fix flags for initramfs LZ4 compression
  mm: include VM_MIXEDMAP flag in the VM_SPECIAL list to avoid m(un)locking
  memcg: reparent charges of children before processing parent
  memcg: fix endless loop in __mem_cgroup_iter_next()
  lib/radix-tree.c: swapoff tmpfs radix_tree: remember to rcu_read_unlock
  dma debug: account for cachelines and read-only mappings in overlap tracking
  mm: close PageTail race
  MAINTAINERS: EDAC: add Mauro and Borislav as interim patch collectors
parents 0c0bd34a 27329369
@@ -73,7 +73,8 @@ Descriptions of section entries:
 L: Mailing list that is relevant to this area
 W: Web-page with status/info
 Q: Patchwork web based patch tracking system site
-T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+T: SCM tree type and location.
+   Type is one of: git, hg, quilt, stgit, topgit
 S: Status, one of the following:
    Supported: Someone is actually paid to look after this.
    Maintained: Someone actually looks after it.
@@ -2159,7 +2160,7 @@ F: Documentation/zh_CN/
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
 M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
 L: linux-usb@vger.kernel.org
 S: Maintained
 F: drivers/usb/chipidea/
@@ -2382,7 +2383,7 @@ M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 S: Maintained
 F: drivers/cpuidle/cpuidle-big_little.c
@@ -2391,7 +2392,7 @@ M: Rafael J. Wysocki <rjw@rjwysocki.net>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F: drivers/cpuidle/*
 F: include/linux/cpuidle.h
@@ -3095,6 +3096,8 @@ F: fs/ecryptfs/
 EDAC-CORE
 M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Supported
@@ -4914,7 +4917,7 @@ F: drivers/staging/ktap/
 KCONFIG
 M: "Yann E. MORIN" <yann.morin.1998@free.fr>
 L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
 S: Maintained
 F: Documentation/kbuild/kconfig-language.txt
 F: scripts/kconfig/
@@ -5733,7 +5736,7 @@ L: linux-rdma@vger.kernel.org
 W: http://www.mellanox.com
 Q: http://patchwork.ozlabs.org/project/netdev/list/
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
 S: Supported
 F: drivers/net/ethernet/mellanox/mlx5/core/
 F: include/linux/mlx5/
@@ -5743,7 +5746,7 @@ M: Eli Cohen <eli@mellanox.com>
 L: linux-rdma@vger.kernel.org
 W: http://www.mellanox.com
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
 S: Supported
 F: include/linux/mlx5/
 F: drivers/infiniband/hw/mlx5/
@@ -9812,7 +9815,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
 L: mjpeg-users@lists.sourceforge.net
 L: linux-media@vger.kernel.org
 W: http://mjpeg.sourceforge.net/driver-zoran/
-T: Mercurial http://linuxtv.org/hg/v4l-dvb
+T: hg http://linuxtv.org/hg/v4l-dvb
 S: Odd Fixes
 F: drivers/media/pci/zoran/
...
@@ -18,7 +18,7 @@
 #define SH_CACHE_ASSOC 8
 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR 0xffffffec
+#define SH_CCR 0xffffffec
 #define CCR_CACHE_CE 0x01 /* Cache enable */
 #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
...
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8
-#define CCR 0xfffc1000 /* CCR1 */
-#define CCR2 0xfffc1004
+#define SH_CCR 0xfffc1000 /* CCR1 */
+#define SH_CCR2 0xfffc1004
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
...
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8
-#define CCR 0xffffffec /* Address of Cache Control Register */
+#define SH_CCR 0xffffffec /* Address of Cache Control Register */
 #define CCR_CACHE_CE 0x01 /* Cache Enable */
 #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
...
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED 4
 #define SH_CACHE_ASSOC 8
-#define CCR 0xff00001c /* Address of Cache Control Register */
+#define SH_CCR 0xff00001c /* Address of Cache Control Register */
 #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
 #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
...
@@ -112,7 +112,7 @@ static void cache_init(void)
        unsigned long ccr, flags;
        jump_to_uncached();
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        /*
         * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)
        l2_cache_init();
-       __raw_writel(flags, CCR);
+       __raw_writel(flags, SH_CCR);
        back_to_cached();
}
#else
...
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
         */
        jump_to_uncached();
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        if ((ccr & CCR_CACHE_ENABLE) == 0) {
                back_to_cached();
...
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
        local_irq_save(flags);
        jump_to_uncached();
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        ccr |= CCR_CACHE_INVALIDATE;
-       __raw_writel(ccr, CCR);
+       __raw_writel(ccr, SH_CCR);
        back_to_cached();
        local_irq_restore(flags);
...
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
        /* If there are too many pages then just blow the cache */
        if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-               __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+               __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+                            SH_CCR);
        } else {
                for (v = begin; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
        /* I-Cache invalidate */
        /* If there are too many pages then just blow the cache */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-               __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+               __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+                            SH_CCR);
        } else {
                for (v = start; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
...
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
        jump_to_uncached();
        /* Flush I-cache */
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        ccr |= CCR_CACHE_ICI;
-       __raw_writel(ccr, CCR);
+       __raw_writel(ccr, SH_CCR);
        /*
         * back_to_cached() will take care of the barrier for us, don't add
...
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
{
        unsigned int ccr;
-       ccr = __raw_readl(CCR);
+       ccr = __raw_readl(SH_CCR);
        /*
         * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
        ccr |= CCR_CACHE_IBE;
#endif
-       writel_uncached(ccr, CCR);
+       writel_uncached(ccr, SH_CCR);
}
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;
-#ifdef CCR
-       cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+       cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif
        compute_alias(&boot_cpu_data.icache);
...
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
                /* Non-zero page count for non-head members of
                 * compound pages is no longer allowed by the kernel.
                 */
-               page = compound_trans_head(bv.bv_page);
+               page = compound_head(bv.bv_page);
                atomic_inc(&page->_count);
        }
}
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
        struct bvec_iter iter;
        bio_for_each_segment(bv, bio, iter) {
-               page = compound_trans_head(bv.bv_page);
+               page = compound_head(bv.bv_page);
                atomic_dec(&page->_count);
        }
}
...
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
+       if (!meta)
+               return -ENOMEM;
        down_write(&zram->init_lock);
        if (zram->init_done) {
                up_write(&zram->init_lock);
...
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
        struct list_head free_list;
        dma_cookie_t completed_cookie;
        struct tasklet_struct tasklet;
+       bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
...
@@ -206,7 +206,7 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-       tasklet_schedule(&bdma_chan->tasklet);
+       if (bdma_chan->active)
+               tasklet_schedule(&bdma_chan->tasklet);
}
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
        }
#endif /* CONFIG_PCI_MSI */
-       tasklet_enable(&bdma_chan->tasklet);
+       bdma_chan->active = true;
        tsi721_bdma_interrupt_enable(bdma_chan, 1);
        return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
        LIST_HEAD(list);
        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
        BUG_ON(!list_empty(&bdma_chan->active_list));
        BUG_ON(!list_empty(&bdma_chan->queue));
-       tasklet_disable(&bdma_chan->tasklet);
+       tsi721_bdma_interrupt_enable(bdma_chan, 0);
+       bdma_chan->active = false;
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+                                          bdma_chan->id].vector);
+               synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                          bdma_chan->id].vector);
+       } else
+#endif
+       synchronize_irq(priv->pdev->irq);
+       tasklet_kill(&bdma_chan->tasklet);
        spin_lock_bh(&bdma_chan->lock);
        list_splice_init(&bdma_chan->free_list, &list);
        spin_unlock_bh(&bdma_chan->lock);
-       tsi721_bdma_interrupt_enable(bdma_chan, 0);
#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
                bdma_chan->dchan.cookie = 1;
                bdma_chan->dchan.chan_id = i;
                bdma_chan->id = i;
+               bdma_chan->active = false;
                spin_lock_init(&bdma_chan->lock);
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
                tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                             (unsigned long)bdma_chan);
-               tasklet_disable(&bdma_chan->tasklet);
                list_add_tail(&bdma_chan->dchan.device_node,
                              &mport->dma.channels);
        }
...
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev)
        clk_enable(rtc_clk);
        /* save TICNT for anyone using periodic interrupts */
-       ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
                ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+               ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+       } else {
+               ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        }
        s3c_rtc_enable(pdev, 0);
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev)
        clk_enable(rtc_clk);
        s3c_rtc_enable(pdev, 1);
-       writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
-       if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
-               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
-               writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+       if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+               writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+               if (ticnt_en_save) {
+                       tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+                       writew(tmp | ticnt_en_save,
+                              s3c_rtc_base + S3C2410_RTCCON);
+               }
+       } else {
+               writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
        }
        if (device_may_wakeup(dev) && wake_en) {
...
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
        if (pfn_valid(pfn)) {
                bool reserved;
                struct page *tail = pfn_to_page(pfn);
-               struct page *head = compound_trans_head(tail);
+               struct page *head = compound_head(tail);
                reserved = !!(PageReserved(head));
                if (head != tail) {
                        /*
                         * "head" is not a dangling pointer
-                        * (compound_trans_head takes care of that)
+                        * (compound_head takes care of that)
                         * but the hugepage may have been split
                         * from under us (and we may not hold a
                         * reference count on the head page so it can
...
@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force)
        int token;
        if (!input)
-               return 0;
+               return 1;
        while ((p = strsep(&input, ",")) != NULL) {
                if (!*p)
...
@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
         */
        if (status < 0)
                mlog_errno(status);
+       /*
+        * Clear dq_off so that we search for the structure in quota file next
+        * time we acquire it. The structure might be deleted and reallocated
+        * elsewhere by another node while our dquot structure is on freelist.
+        */
+       dquot->dq_off = 0;
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
        ocfs2_commit_trans(osb, handle);
@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
-       if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
        status = ocfs2_qinfo_lock(info, 0);
        if (status < 0)
                goto out_dq;
+       /*
+        * We always want to read dquot structure from disk because we don't
+        * know what happened with it while it was on freelist.
+        */
        status = qtree_read_dquot(&info->dqi_gi, dquot);
        ocfs2_qinfo_unlock(info, 0);
        if (status < 0)
                goto out_dq;
-       }
-       set_bit(DQ_READ_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
...
@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
        ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
out:
-       /* Clear the read bit so that next time someone uses this
-        * dquot he reads fresh info from disk and allocates local
-        * dquot structure */
-       clear_bit(DQ_READ_B, &dquot->dq_flags);
        return status;
}
...
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page)
         * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
         * to make sure a given page is a thp, not a non-huge compound page.
         */
-       else if (PageTransCompound(page) &&
-                (PageLRU(compound_trans_head(page)) ||
-                 PageAnon(compound_trans_head(page))))
+       else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+                                            PageAnon(compound_head(page))))
                u |= 1 << KPF_THP;
        /*
...
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page)
                return HPAGE_PMD_NR;
        return 1;
}
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
-       if (PageTail(page)) {
-               struct page *head;
-               head = page->first_page;
-               smp_rmb();
-               /*
-                * head may be a dangling pointer.
-                * __split_huge_page_refcount clears PageTail before
-                * overwriting first_page, so if PageTail is still
-                * there it means the head pointer isn't dangling.
-                */
-               if (PageTail(page))
-                       return head;
-       }
-       return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page)
        do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
        do { } while (0)
-#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
...
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp);
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
/*
 * mapping from the currently active vm_flags protection bits (the
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
static inline struct page *compound_head(struct page *page)
{
-       if (unlikely(PageTail(page)))
-               return page->first_page;
+       if (unlikely(PageTail(page))) {
+               struct page *head = page->first_page;
+               /*
+                * page->first_page may be a dangling pointer to an old
+                * compound page, so recheck that it is still a tail
+                * page before returning.
+                */
+               smp_rmb();
+               if (likely(PageTail(page)))
+                       return head;
+       }
        return page;
}
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
-       return xchg(&page->_last_cpupid, cpupid);
+       return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
static inline int page_cpupid_last(struct page *page)
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page)
}
static inline void page_cpupid_reset_last(struct page *page)
{
-       page->_last_cpupid = -1;
+       page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
...
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev)
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key. At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event. dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
 */
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+               (entry->offset >> L1_CACHE_SHIFT);
+}
+static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;
        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;
-       if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;
        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
-                       radix_tree_tag_set(&dma_active_pfn, pfn, i);
+                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
-                       radix_tree_tag_clear(&dma_active_pfn, pfn, i);
+                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
        return overlap;
}
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
{
-       int overlap = active_pfn_read_overlap(pfn);
-       overlap = active_pfn_set_overlap(pfn, ++overlap);
+       int overlap = active_cacheline_read_overlap(cln);
+       overlap = active_cacheline_set_overlap(cln, ++overlap);
        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings. Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
-        * debug_dma_assert_idle() as the pfn may be marked idle
+        * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
-       WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
-                 "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
-                 ACTIVE_PFN_MAX_OVERLAP, pfn);
+       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
{
-       int overlap = active_pfn_read_overlap(pfn);
-       return active_pfn_set_overlap(pfn, --overlap);
+       int overlap = active_cacheline_read_overlap(cln);
+       return active_cacheline_set_overlap(cln, --overlap);
}
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
{
+       phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;
+       /* If the device is not writing memory then we don't have any
+        * concerns about the cpu consuming stale data. This mitigates
+        * legitimate usages of overlapping mappings.
+        */
+       if (entry->direction == DMA_TO_DEVICE)
+               return 0;
        spin_lock_irqsave(&radix_lock, flags);
-       rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
-               active_pfn_inc_overlap(entry->pfn);
+               active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);
        return rc;
}
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
{
+       phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
+       /* ...mirror the insert case */
+       if (entry->direction == DMA_TO_DEVICE)
+               return;
        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
-        * entry->pfn will occur when the overlap count is 0.
-        * active_pfn_dec_overlap() returns -1 in that case
+        * cacheline will occur when the overlap count is 0.
+        * active_cacheline_dec_overlap() returns -1 in that case
         */
-       if (active_pfn_dec_overlap(entry->pfn) < 0)
-               radix_tree_delete(&dma_active_pfn, entry->pfn);
+       if (active_cacheline_dec_overlap(cln) < 0)
+               radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}
/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
 */
void debug_dma_assert_idle(struct page *page)
{
+       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+       struct dma_debug_entry *entry = NULL;
+       void **results = (void **) &ents;
+       unsigned int nents, i;
        unsigned long flags;
-       struct dma_debug_entry *entry;
+       phys_addr_t cln;
        if (!page)
                return;
+       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
-       entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+                                      CACHELINES_PER_PAGE);
+       for (i = 0; i < nents; i++) {
+               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+               if (ent_cln == cln) {
+                       entry = ents[i];
+                       break;
+               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+                       break;
+       }
        spin_unlock_irqrestore(&radix_lock, flags);
        if (!entry)
                return;
+       cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
-                  "DMA-API: cpu touching an active dma mapped page "
-                  "[pfn=0x%lx]\n", entry->pfn);
+                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+                  &cln);
}
/*
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
-       rc = active_pfn_insert(entry);
+       rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
-               pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;
-       active_pfn_remove(entry);
+       active_cacheline_remove(entry);
        /*
         * add to beginning of the list - this way the entries are
...
@@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
                node = indirect_to_ptr(node);
                max_index = radix_tree_maxindex(node->height);
-               if (cur_index > max_index)
+               if (cur_index > max_index) {
+                       rcu_read_unlock();
                        break;
+               }
                cur_index = __locate(node, item, cur_index, &found_index);
                rcu_read_unlock();
...
@@ -1961,7 +1961,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        return ret;
}
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
...
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
static struct page *page_trans_compound_anon(struct page *page)
{
        if (PageTransCompound(page)) {
-               struct page *head = compound_trans_head(page);
+               struct page *head = compound_head(page);
                /*
                 * head may actually be splitted and freed from under
                 * us but it's ok here.
...
@@ -1127,8 +1127,8 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
                 * skipping css reference should be safe.
                 */
                if (next_css) {
-                       if ((next_css->flags & CSS_ONLINE) &&
-                           (next_css == &root->css || css_tryget(next_css)))
+                       if ((next_css == &root->css) ||
+                           ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
                                return mem_cgroup_from_css(next_css);
                        prev_css = next_css;
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_event *event, *tmp;
+       struct cgroup_subsys_state *iter;
        /*
         * Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        kmem_cgroup_css_offline(memcg);
        mem_cgroup_invalidate_reclaim_iterators(memcg);
-       mem_cgroup_reparent_charges(memcg);
+       /*
+        * This requires that offlining is serialized. Right now that is
+        * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+        */
+       css_for_each_descendant_post(iter, css)
+               mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
}
...
@@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
{
        int ret;
        unsigned long pfn = page_to_pfn(page);
-       struct page *hpage = compound_trans_head(page);
+       struct page *hpage = compound_head(page);
        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
...
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-               __SetPageTail(p);
                set_page_count(p, 0);
                p->first_page = page;
+               /* Make sure p->first_page is always valid for PageTail() */
+               smp_wmb();
+               __SetPageTail(p);
        }
}
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
        }
        local_irq_restore(flags);
}
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+       return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+       return false;
+}
#endif
/*
@@ -1572,7 +1583,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                          get_pageblock_migratetype(page));
        }
-       __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+       /*
+        * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+        * aging protocol, so they can't be fair.
+        */
+       if (!gfp_thisnode_allocation(gfp_flags))
+               __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
@@ -1944,8 +1961,12 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                 * ultimately fall back to remote zones that do not
                 * partake in the fairness round-robin cycle of this
                 * zonelist.
+                *
+                * NOTE: GFP_THISNODE allocations do not partake in
+                * the kswapd aging protocol, so they can't be fair.
                 */
-               if (alloc_flags & ALLOC_WMARK_LOW) {
+               if ((alloc_flags & ALLOC_WMARK_LOW) &&
+                   !gfp_thisnode_allocation(gfp_mask)) {
                        if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
                                continue;
                        if (!zone_local(preferred_zone, zone))
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * allowed per node queues are empty and that nodes are
         * over allocated.
         */
-       if (IS_ENABLED(CONFIG_NUMA) &&
-           (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+       if (gfp_thisnode_allocation(gfp_mask))
                goto nopage;
restart:
...
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
        }
        /* __split_huge_page_refcount can run under us */
-       page_head = compound_trans_head(page);
+       page_head = compound_head(page);
        /*
         * THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
         */
        unsigned long flags;
        bool got;
-       struct page *page_head = compound_trans_head(page);
+       struct page *page_head = compound_head(page);
        /* Ref to put_compound_page() comment. */
        if (!__compound_tail_refcounted(page_head)) {
...
@@ -257,7 +257,7 @@ case "$arg" in
                        && compr="lzop -9 -f"
                echo "$output_file" | grep -q "\.lz4$" \
                        && [ -x "`which lz4 2> /dev/null`" ] \
-                       && compr="lz4 -9 -f"
+                       && compr="lz4 -l -9 -f"
                echo "$output_file" | grep -q "\.cpio$" && compr="cat"
                shift
                ;;
...
@@ -330,8 +330,7 @@ static void write_src(void)
                        printf("\tPTR\t_text + %#llx\n",
                               table[i].addr - _text);
                else
-                       printf("\tPTR\t_text - %#llx\n",
-                              _text - table[i].addr);
+                       printf("\tPTR\t%#llx\n", table[i].addr);
        } else {
                printf("\tPTR\t%#llx\n", table[i].addr);
        }
...