Commit 0ed17f01 authored by Dave Chinner, committed by Darrick J. Wong

xfs: introduce all-mounts list for cpu hotplug notifications

The inode inactivation and CIL tracking percpu structures are
per-xfs_mount structures. That means that when we get a CPU dead
notification, we need to iterate all the per-cpu structure instances
to process them. Rather than having each subsystem keep its own
linked list of per-cpu structures, add a single list of all
xfs_mounts that the generic xfs_cpu_dead() function can iterate,
calling into each subsystem as appropriate.

This allows us to handle both per-mount and global XFS percpu state
from xfs_cpu_dead(), and avoids the need to put subsystem structures
on global lists of their own when they can easily be found from the
xfs_mount.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
[djwong: expand some comments about mount list setup ordering rules]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent f1653c2e
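
A note on the design the diff below enables, in contrast with the
alternative: without a shared all-mounts list, every subsystem owning
per-mount percpu state would have to keep its own global registry
purely so a CPU-dead handler could find that state. A hypothetical
sketch of what this patch avoids (these names are illustrative, not
from the patch):

	/* per-subsystem registry that would otherwise be needed */
	static LIST_HEAD(xfs_inodegc_pcp_list);
	static DEFINE_SPINLOCK(xfs_inodegc_pcp_lock);

	/*
	 * ...plus matching add/del calls in every subsystem's init and
	 * teardown paths, and a dedicated hotplug callback per subsystem.
	 */

With the all-mounts list, xfs_cpu_dead() hands each subsystem the
xfs_mount itself, from which the subsystem's percpu state is trivially
reachable.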

fs/xfs/xfs_mount.h:

@@ -82,6 +82,7 @@ typedef struct xfs_mount {
 	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
 	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
 	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
+	struct list_head	m_mount_list;	/* global mount list */
 	/*
 	 * Optional cache of rt summary level per bitmap block with the
 	 * invariant that m_rsum_cache[bbno] <= the minimum i for which

fs/xfs/xfs_super.c:

@@ -49,6 +49,28 @@ static struct kset *xfs_kset;	/* top-level xfs sysfs dir */
 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(xfs_mount_list);
+static DEFINE_SPINLOCK(xfs_mount_list_lock);
+
+static inline void xfs_mount_list_add(struct xfs_mount *mp)
+{
+	spin_lock(&xfs_mount_list_lock);
+	list_add(&mp->m_mount_list, &xfs_mount_list);
+	spin_unlock(&xfs_mount_list_lock);
+}
+
+static inline void xfs_mount_list_del(struct xfs_mount *mp)
+{
+	spin_lock(&xfs_mount_list_lock);
+	list_del(&mp->m_mount_list);
+	spin_unlock(&xfs_mount_list_lock);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
+static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
+#endif
+
 enum xfs_dax_mode {
 	XFS_DAX_INODE = 0,
 	XFS_DAX_ALWAYS = 1,
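
A note on the !CONFIG_HOTPLUG_CPU stubs above: because the add/del
helpers are empty static inlines in that configuration, the call sites
added below in xfs_fs_put_super() and xfs_fs_fill_super() need no
preprocessor guards of their own; the compiler elides the calls
entirely. For example, the unmount path can simply write:

	xfs_mount_list_del(mp);	/* compiles to nothing without HOTPLUG_CPU */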

@@ -1038,6 +1060,7 @@ xfs_fs_put_super(
 	xfs_freesb(mp);
 	free_percpu(mp->m_stats.xs_stats);
+	xfs_mount_list_del(mp);
 	xfs_destroy_percpu_counters(mp);
 	xfs_destroy_mount_workqueues(mp);
 	xfs_close_devices(mp);

@@ -1409,6 +1432,13 @@ xfs_fs_fill_super(
 	if (error)
 		goto out_destroy_workqueues;
 
+	/*
+	 * All percpu data structures requiring cleanup when a cpu goes offline
+	 * must be allocated before adding this @mp to the cpu-dead handler's
+	 * mount list.
+	 */
+	xfs_mount_list_add(mp);
+
 	/* Allocate stats memory before we do operations that might use it */
 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
 	if (!mp->m_stats.xs_stats) {
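
The ordering comment becomes load-bearing once percpu structures that
xfs_cpu_dead() must clean up actually exist. A hedged sketch of how a
later patch in this series might slot in; xfs_inodegc_init_percpu() is
an assumed later helper, not something this patch adds, and the error
labels are elided because they would need reshuffling so that
xfs_mount_list_del() only runs once the mount is actually on the list:

	/*
	 * Allocate any percpu state the cpu-dead handler will touch
	 * first...  (hypothetical later addition)
	 */
	error = xfs_inodegc_init_percpu(mp);	/* assumed helper name */
	...
	/* ...and only then make this mount visible to xfs_cpu_dead() */
	xfs_mount_list_add(mp);

The error path below mirrors this rule in the other direction:
xfs_mount_list_del() runs before the percpu counters are destroyed, so
xfs_cpu_dead() can never see a mount whose percpu state is already gone.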

@@ -1617,6 +1647,7 @@ xfs_fs_fill_super(
  out_free_stats:
 	free_percpu(mp->m_stats.xs_stats);
  out_destroy_counters:
+	xfs_mount_list_del(mp);
 	xfs_destroy_percpu_counters(mp);
  out_destroy_workqueues:
 	xfs_destroy_mount_workqueues(mp);

@@ -2116,6 +2147,15 @@ static int
 xfs_cpu_dead(
 	unsigned int		cpu)
 {
+	struct xfs_mount	*mp, *n;
+
+	spin_lock(&xfs_mount_list_lock);
+	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
+		spin_unlock(&xfs_mount_list_lock);
+		/* xfs_subsys_dead(mp, cpu); */
+		spin_lock(&xfs_mount_list_lock);
+	}
+	spin_unlock(&xfs_mount_list_lock);
 	return 0;
 }
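
The lock juggling in the loop is deliberate: the per-mount callback may
need to block or take other locks, so xfs_mount_list_lock is dropped
around it, and list_for_each_entry_safe() caches the next entry so that
@mp itself may be unlinked from the list while the lock is dropped. The
commented-out xfs_subsys_dead() call marks where later patches hook in;
a hedged sketch of a filled-in loop body, where xfs_inodegc_cpu_dead()
is a later patch's hook shown only as an assumption:

		spin_unlock(&xfs_mount_list_lock);
		/* migrate the dead CPU's queued inodegc work elsewhere */
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);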