Commit 2756d373 authored by Linus Torvalds

Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup update from Tejun Heo:
 "cpuset got simplified a bit.  cgroup core got a fix on the unified
  hierarchy and grew some effective-css-related interfaces which will be
  used for the blkio support for writeback IO traffic that is currently
  being worked on"

* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: implement cgroup_get_e_css()
  cgroup: add cgroup_subsys->css_e_css_changed()
  cgroup: add cgroup_subsys->css_released()
  cgroup: fix the async css offline wait logic in cgroup_subtree_control_write()
  cgroup: restructure child_subsys_mask handling in cgroup_subtree_control_write()
  cgroup: separate out cgroup_calc_child_subsys_mask() from cgroup_refresh_child_subsys_mask()
  cpuset: lock vs unlock typo
  cpuset: simplify cpuset_node_allowed API
  cpuset: convert callback_mutex to a spinlock
parents 4e8790f7 eeecbd19
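
The diff below adds two optional cgroup_subsys callbacks, ->css_released() and ->css_e_css_changed(), plus the cgroup_get_e_css() lookup helper. As a rough sketch of how a controller might hook into them (not part of this commit: the controller name "foo", struct foo_css, foo_cgrp_subsys and foo_get_e_css are invented, and a real controller would also need a SUBSYS() entry in include/linux/cgroup_subsys.h):

/* Illustrative sketch only -- not from this commit. */
#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct foo_css {
        struct cgroup_subsys_state css;
        unsigned long weight;           /* per-cgroup state of the made-up controller */
};

static struct cgroup_subsys_state *
foo_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct foo_css *fc = kzalloc(sizeof(*fc), GFP_KERNEL);

        return fc ? &fc->css : ERR_PTR(-ENOMEM);
}

static void foo_css_free(struct cgroup_subsys_state *css)
{
        kfree(container_of(css, struct foo_css, css));
}

/* called from the css release path, after the css's last reference is dropped */
static void foo_css_released(struct cgroup_subsys_state *css)
{
        /* e.g. unlink @css from controller-internal lookup structures */
}

/* called when an effective css visible from @css's cgroup may have changed */
static void foo_css_e_css_changed(struct cgroup_subsys_state *css)
{
        /* e.g. invalidate cached effective-css pointers */
}

struct cgroup_subsys foo_cgrp_subsys = {
        .css_alloc              = foo_css_alloc,
        .css_free               = foo_css_free,
        .css_released           = foo_css_released,
        .css_e_css_changed      = foo_css_e_css_changed,
};

/* pin the effective foo css for @cgrp; the caller must css_put() it */
static struct cgroup_subsys_state *foo_get_e_css(struct cgroup *cgrp)
{
        return cgroup_get_e_css(cgrp, &foo_cgrp_subsys);
}

Per the hunks below, css_released() is invoked from css_release_work_fn() once the css's reference count has dropped to zero, and css_e_css_changed() is invoked from cgroup_subtree_control_write() on descendants whose effective csses may have changed.
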
include/linux/cgroup.h

@@ -638,8 +638,10 @@ struct cgroup_subsys {
 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
 int (*css_online)(struct cgroup_subsys_state *css);
 void (*css_offline)(struct cgroup_subsys_state *css);
+void (*css_released)(struct cgroup_subsys_state *css);
 void (*css_free)(struct cgroup_subsys_state *css);
 void (*css_reset)(struct cgroup_subsys_state *css);
+void (*css_e_css_changed)(struct cgroup_subsys_state *css);
 int (*can_attach)(struct cgroup_subsys_state *css,
 struct cgroup_taskset *tset);
@@ -934,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+struct cgroup_subsys *ss);
 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 struct cgroup_subsys *ss);
...
include/linux/cpuset.h

@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 void cpuset_init_current_mems_allowed(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
-extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
-return nr_cpusets() <= 1 ||
-__cpuset_node_allowed_softwall(node, gfp_mask);
+return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
 }
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 {
-return nr_cpusets() <= 1 ||
-__cpuset_node_allowed_hardwall(node, gfp_mask);
-}
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
-{
-return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
-}
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
-{
-return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
+return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
 }
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 return 1;
 }
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
-{
-return 1;
-}
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
-{
-return 1;
-}
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
 return 1;
 }
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 {
 return 1;
 }
...
kernel/cgroup.c

@@ -277,6 +277,10 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
 if (!(cgrp->root->subsys_mask & (1 << ss->id)))
 return NULL;
+/*
+ * This function is used while updating css associations and thus
+ * can't test the csses directly. Use ->child_subsys_mask.
+ */
 while (cgroup_parent(cgrp) &&
 !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
 cgrp = cgroup_parent(cgrp);
@@ -284,6 +288,39 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
 return cgroup_css(cgrp, ss);
 }
+/**
+ * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ * The returned css must be put using css_put().
+ */
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
+struct cgroup_subsys *ss)
+{
+struct cgroup_subsys_state *css;
+rcu_read_lock();
+do {
+css = cgroup_css(cgrp, ss);
+if (css && css_tryget_online(css))
+goto out_unlock;
+cgrp = cgroup_parent(cgrp);
+} while (cgrp);
+css = init_css_set.subsys[ss->id];
+css_get(css);
+out_unlock:
+rcu_read_unlock();
+return css;
+}
 /* convenient tests for these bits */
 static inline bool cgroup_is_dead(const struct cgroup *cgrp)
 {
@@ -1019,31 +1056,30 @@ static void cgroup_put(struct cgroup *cgrp)
 }
 /**
- * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
  * @cgrp: the target cgroup
+ * @subtree_control: the new subtree_control mask to consider
  *
  * On the default hierarchy, a subsystem may request other subsystems to be
  * enabled together through its ->depends_on mask. In such cases, more
  * subsystems than specified in "cgroup.subtree_control" may be enabled.
  *
- * This function determines which subsystems need to be enabled given the
- * current @cgrp->subtree_control and records it in
- * @cgrp->child_subsys_mask. The resulting mask is always a superset of
- * @cgrp->subtree_control and follows the usual hierarchy rules.
+ * This function calculates which subsystems need to be enabled if
+ * @subtree_control is to be applied to @cgrp. The returned mask is always
+ * a superset of @subtree_control and follows the usual hierarchy rules.
  */
-static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+static unsigned int cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
+unsigned int subtree_control)
 {
 struct cgroup *parent = cgroup_parent(cgrp);
-unsigned int cur_ss_mask = cgrp->subtree_control;
+unsigned int cur_ss_mask = subtree_control;
 struct cgroup_subsys *ss;
 int ssid;
 lockdep_assert_held(&cgroup_mutex);
-if (!cgroup_on_dfl(cgrp)) {
-cgrp->child_subsys_mask = cur_ss_mask;
-return;
-}
+if (!cgroup_on_dfl(cgrp))
+return cur_ss_mask;
 while (true) {
 unsigned int new_ss_mask = cur_ss_mask;
@@ -1067,7 +1103,20 @@ static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
 cur_ss_mask = new_ss_mask;
 }
-cgrp->child_subsys_mask = cur_ss_mask;
+return cur_ss_mask;
+}
+/**
+ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * @cgrp: the target cgroup
+ *
+ * Update @cgrp->child_subsys_mask according to the current
+ * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
+ */
+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+cgrp->child_subsys_mask =
+cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
 }
 /**
@@ -2641,7 +2690,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 loff_t off)
 {
 unsigned int enable = 0, disable = 0;
-unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
+unsigned int css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
 struct cgroup *cgrp, *child;
 struct cgroup_subsys *ss;
 char *tok;
@@ -2693,36 +2742,6 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 ret = -ENOENT;
 goto out_unlock;
 }
-/*
- * @ss is already enabled through dependency and
- * we'll just make it visible. Skip draining.
- */
-if (cgrp->child_subsys_mask & (1 << ssid))
-continue;
-/*
- * Because css offlining is asynchronous, userland
- * might try to re-enable the same controller while
- * the previous instance is still around. In such
- * cases, wait till it's gone using offline_waitq.
- */
-cgroup_for_each_live_child(child, cgrp) {
-DEFINE_WAIT(wait);
-if (!cgroup_css(child, ss))
-continue;
-cgroup_get(child);
-prepare_to_wait(&child->offline_waitq, &wait,
-TASK_UNINTERRUPTIBLE);
-cgroup_kn_unlock(of->kn);
-schedule();
-finish_wait(&child->offline_waitq, &wait);
-cgroup_put(child);
-return restart_syscall();
-}
 } else if (disable & (1 << ssid)) {
 if (!(cgrp->subtree_control & (1 << ssid))) {
 disable &= ~(1 << ssid);
@@ -2758,18 +2777,47 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 * subsystems than specified may need to be enabled or disabled
 * depending on subsystem dependencies.
 */
-cgrp->subtree_control |= enable;
-cgrp->subtree_control &= ~disable;
+old_sc = cgrp->subtree_control;
+old_ss = cgrp->child_subsys_mask;
+new_sc = (old_sc | enable) & ~disable;
+new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
-old_ctrl = cgrp->child_subsys_mask;
-cgroup_refresh_child_subsys_mask(cgrp);
-new_ctrl = cgrp->child_subsys_mask;
-css_enable = ~old_ctrl & new_ctrl;
-css_disable = old_ctrl & ~new_ctrl;
+css_enable = ~old_ss & new_ss;
+css_disable = old_ss & ~new_ss;
 enable |= css_enable;
 disable |= css_disable;
+/*
+ * Because css offlining is asynchronous, userland might try to
+ * re-enable the same controller while the previous instance is
+ * still around. In such cases, wait till it's gone using
+ * offline_waitq.
+ */
+for_each_subsys(ss, ssid) {
+if (!(css_enable & (1 << ssid)))
+continue;
+cgroup_for_each_live_child(child, cgrp) {
+DEFINE_WAIT(wait);
+if (!cgroup_css(child, ss))
+continue;
+cgroup_get(child);
+prepare_to_wait(&child->offline_waitq, &wait,
+TASK_UNINTERRUPTIBLE);
+cgroup_kn_unlock(of->kn);
+schedule();
+finish_wait(&child->offline_waitq, &wait);
+cgroup_put(child);
+return restart_syscall();
+}
+}
+cgrp->subtree_control = new_sc;
+cgrp->child_subsys_mask = new_ss;
 /*
 * Create new csses or make the existing ones visible. A css is
 * created invisible if it's being implicitly enabled through
@@ -2825,6 +2873,24 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 }
 }
+/*
+ * The effective csses of all the descendants (excluding @cgrp) may
+ * have changed. Subsystems can optionally subscribe to this event
+ * by implementing ->css_e_css_changed() which is invoked if any of
+ * the effective csses seen from the css's cgroup may have changed.
+ */
+for_each_subsys(ss, ssid) {
+struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
+struct cgroup_subsys_state *css;
+if (!ss->css_e_css_changed || !this_css)
+continue;
+css_for_each_descendant_pre(css, this_css)
+if (css != this_css)
+ss->css_e_css_changed(css);
+}
 kernfs_activate(cgrp->kn);
 ret = 0;
 out_unlock:
@@ -2832,9 +2898,8 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 return ret ?: nbytes;
 err_undo_css:
-cgrp->subtree_control &= ~enable;
-cgrp->subtree_control |= disable;
-cgroup_refresh_child_subsys_mask(cgrp);
+cgrp->subtree_control = old_sc;
+cgrp->child_subsys_mask = old_ss;
 for_each_subsys(ss, ssid) {
 if (!(enable & (1 << ssid)))
@@ -4370,6 +4435,8 @@ static void css_release_work_fn(struct work_struct *work)
 if (ss) {
 /* css release path */
 cgroup_idr_remove(&ss->css_idr, css->id);
+if (ss->css_released)
+ss->css_released(css);
 } else {
 /* cgroup release path */
 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
...
(The large diff for kernel/cpuset.c is collapsed and not shown here.)
mm/hugetlb.c

@@ -582,7 +582,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 for_each_zone_zonelist_nodemask(zone, z, zonelist,
 MAX_NR_ZONES - 1, nodemask) {
-if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
+if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
 page = dequeue_huge_page_node(h, zone_to_nid(zone));
 if (page) {
 if (avoid_reserve)
...
mm/oom_kill.c

@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 /* Check this allocation failure is caused by cpuset's wall function */
 for_each_zone_zonelist_nodemask(zone, z, zonelist,
 high_zoneidx, nodemask)
-if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+if (!cpuset_zone_allowed(zone, gfp_mask))
 cpuset_limited = true;
 if (cpuset_limited) {
...
mm/page_alloc.c

@@ -1990,7 +1990,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 /*
 * Scan zonelist, looking for a zone with enough free.
- * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 */
 for_each_zone_zonelist_nodemask(zone, z, zonelist,
 high_zoneidx, nodemask) {
@@ -2001,7 +2001,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 continue;
 if (cpusets_enabled() &&
 (alloc_flags & ALLOC_CPUSET) &&
-!cpuset_zone_allowed_softwall(zone, gfp_mask))
+!cpuset_zone_allowed(zone, gfp_mask))
 continue;
 /*
 * Distribute pages in proportion to the individual
@@ -2529,7 +2529,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 alloc_flags |= ALLOC_HARDER;
 /*
 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
- * comment for __cpuset_node_allowed_softwall().
+ * comment for __cpuset_node_allowed().
 */
 alloc_flags &= ~ALLOC_CPUSET;
 } else if (unlikely(rt_task(current)) && !in_interrupt())
...
mm/slab.c

@@ -3015,7 +3015,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 nid = zone_to_nid(zone);
-if (cpuset_zone_allowed_hardwall(zone, flags) &&
+if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL) &&
 get_node(cache, nid) &&
 get_node(cache, nid)->free_objects) {
 obj = ____cache_alloc_node(cache,
...
mm/slub.c

@@ -1665,7 +1665,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 n = get_node(s, zone_to_nid(zone));
-if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+if (n && cpuset_zone_allowed(zone,
+flags | __GFP_HARDWALL) &&
 n->nr_partial > s->min_partial) {
 object = get_partial_node(s, n, c, flags);
 if (object) {
...
mm/vmscan.c

@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 * to global LRU.
 */
 if (global_reclaim(sc)) {
-if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+if (!cpuset_zone_allowed(zone,
+GFP_KERNEL | __GFP_HARDWALL))
 continue;
 lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 if (!populated_zone(zone))
 return;
-if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
 return;
 pgdat = zone->zone_pgdat;
 if (pgdat->kswapd_max_order < order) {
...
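
The mm/ call sites above illustrate the simplified cpuset API: the separate *_softwall()/*_hardwall() helpers are gone, and callers that want the hardwall check now pass __GFP_HARDWALL in the gfp mask instead. A minimal sketch of the resulting calling convention (not from the commit; may_alloc_from_zone is a made-up helper):

/* Illustrative sketch only -- not from this commit. */
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/types.h>

/* made-up helper: decide whether zone @z is usable under the caller's cpuset */
static bool may_alloc_from_zone(struct zone *z, gfp_t gfp_mask, bool hardwall)
{
        if (hardwall)
                gfp_mask |= __GFP_HARDWALL;     /* was cpuset_zone_allowed_hardwall() */

        /* without __GFP_HARDWALL this matches the old *_softwall() behaviour */
        return cpuset_zone_allowed(z, gfp_mask);
}
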