Commit 69307ade authored by Linus Torvalds

Merge tag 'xfs-5.9-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
 "Two small fixes that have come in during the past week:

   - Fix duplicated words in comments

   - Fix an ubsan complaint about null pointer arithmetic"

* tag 'xfs-5.9-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: Fix UBSAN null-ptr-deref in xfs_sysfs_init
  xfs: delete duplicated words + other fixes
parents ff419b61 96cf2a2c
@@ -600,7 +600,7 @@ xfs_sb_quota_to_disk(
	 * disk. If neither are active, we should NULL the inode.
	 *
	 * In all cases, the separate pquotino must remain 0 because it
-	 * it beyond the "end" of the valid non-pquotino superblock.
+	 * is beyond the "end" of the valid non-pquotino superblock.
	 */
	if (from->sb_qflags & XFS_GQUOTA_ACCT)
		to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
...
@@ -44,7 +44,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
 /*
  * Copy out entries of shortform attribute lists for attr_list().
  * Shortform attribute lists are not stored in hashval sorted order.
- * If the output buffer is not large enough to hold them all, then we
+ * If the output buffer is not large enough to hold them all, then
  * we have to calculate each entries' hashvalue and sort them before
  * we can begin returning them to the user.
  */
...
@@ -127,7 +127,7 @@ xfs_buf_item_size_segment(
  * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
  * in a single iovec.
  *
- * Discontiguous buffers need a format structure per region that that is being
+ * Discontiguous buffers need a format structure per region that is being
  * logged. This makes the changes in the buffer appear to log recovery as though
  * they came from separate buffers, just like would occur if multiple buffers
  * were used instead of a single discontiguous buffer. This enables
...
@@ -948,7 +948,7 @@ xlog_recover_buf_commit_pass2(
	 * or inode_cluster_size bytes, whichever is bigger. The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
-	 * running with a different inode cluster size. Regardless, if the
+	 * running with a different inode cluster size. Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
	 * for *our* value of inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
...
@@ -807,7 +807,7 @@ xfs_qm_dqget_checks(
 }
 
 /*
- * Given the file system, id, and type (UDQUOT/GDQUOT), return a a locked
+ * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
  * dquot, doing an allocation (if requested) as needed.
  */
 int
...
@@ -56,7 +56,7 @@ xfs_fs_encode_fh(
		fileid_type = FILEID_INO32_GEN_PARENT;
 
	/*
-	 * If the the filesystem may contain 64bit inode numbers, we need
+	 * If the filesystem may contain 64bit inode numbers, we need
	 * to use larger file handles that can represent them.
	 *
	 * While we only allocate inodes that do not fit into 32 bits any
...
@@ -451,7 +451,7 @@ xfs_lock_inodes(
	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
-	 * inodes depend on the the type of locking and the limits placed by
+	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder. These are all checked by
	 * the asserts.
	 */
@@ -3105,7 +3105,7 @@ xfs_cross_rename(
 /*
  * xfs_rename_alloc_whiteout()
  *
- * Return a referenced, unlinked, unlocked inode that that can be used as a
+ * Return a referenced, unlinked, unlocked inode that can be used as a
  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
  * crash between allocating the inode and linking it into the rename transaction
  * recovery will free the inode and we won't leak it.
...
@@ -191,7 +191,7 @@ xfs_inode_item_format_data_fork(
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
-			 * The underlying memory is guaranteed to
+			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
...@@ -275,7 +275,7 @@ xfs_inode_item_format_attr_fork( ...@@ -275,7 +275,7 @@ xfs_inode_item_format_attr_fork(
ip->i_afp->if_bytes > 0) { ip->i_afp->if_bytes > 0) {
/* /*
* Round i_bytes up to a word boundary. * Round i_bytes up to a word boundary.
* The underlying memory is guaranteed to * The underlying memory is guaranteed
* to be there by xfs_idata_realloc(). * to be there by xfs_idata_realloc().
*/ */
data_bytes = roundup(ip->i_afp->if_bytes, 4); data_bytes = roundup(ip->i_afp->if_bytes, 4);
......
@@ -865,7 +865,7 @@ xfs_buffered_write_iomap_begin(
	}
 
	/*
-	 * Search the data fork fork first to look up our source mapping. We
+	 * Search the data fork first to look up our source mapping. We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
...
@@ -239,7 +239,7 @@ xfs_cil_prepare_item(
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
-	 * shadow buffer, so update the the pointer to it appropriately.
+	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
...
@@ -1100,7 +1100,7 @@ xlog_verify_head(
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
-		 * matches the the current cycle.
+		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
...
@@ -485,7 +485,7 @@ xfs_cui_item_recover(
	 * transaction. Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update. However, we're in log recovery here, so we
-	 * we use the passed in defer_ops and to finish up any work that
+	 * use the passed in defer_ops and to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
...
@@ -721,7 +721,7 @@ xfs_reflink_end_cow(
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
-	 * If we're being called by writeback then the the pages will still
+	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate. Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
...
@@ -32,9 +32,11 @@ xfs_sysfs_init(
	struct xfs_kobj		*parent_kobj,
	const char		*name)
 {
+	struct kobject		*parent;
+
+	parent = parent_kobj ? &parent_kobj->kobject : NULL;
	init_completion(&kobj->complete);
-	return kobject_init_and_add(&kobj->kobject, ktype,
-					&parent_kobj->kobject, "%s", name);
+	return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
 }
 
 static inline void
...
@@ -480,7 +480,7 @@ xfsaild_push(
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
-			 * We do not want to to stop flushing just because lots
+			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
@@ -515,7 +515,7 @@ xfsaild_push(
	/*
	 * Are there too many items we can't do anything with?
	 *
-	 * If we we are skipping too many items because we can't flush
+	 * If we are skipping too many items because we can't flush
	 * them or they are already being flushed, we back off and
	 * given them time to complete whatever operation is being
	 * done. i.e. remove pressure from the AIL while we can't make
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment