linux-next: manual merge of the vfs-brauner tree with the btrfs tree

Hi all,

Today's linux-next merge of the vfs-brauner tree got a conflict in:

  fs/btrfs/inode.c

between commits:

  530eec3e0f6e ("btrfs: simplify range tracking in cow_file_range()")
  25e40a35c927 ("btrfs: extract the inner loop of cow_file_range() to enhance the error handling")

from the btrfs tree and commit:

  a6752a6e7fb0 ("btrfs: Switch from using the private_2 flag to owner_2")

from the vfs-brauner tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc fs/btrfs/inode.c
index 6ec223f9987f,c14e37438a0b..000000000000
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@@ -1293,147 -1320,6 +1293,147 @@@ u64 btrfs_get_extent_allocation_hint(st
  	return alloc_hint;
  }
  
 +/*
 + * Run one COW range, which includes:
 + *
 + * - Reserve the data extent
 + * - Create the io extent map
 + * - Create the ordered extent
 + *
 + * @ins is updated no matter the return value. If it ends up non-empty
 + * (ins->offset > 0), the range is already covered by an ordered extent and
 + * must be skipped during error handling.
 + *
 + * Return 0 if everything is fine.
 + * Return -EAGAIN if btrfs_reserve_extent() failed on a zoned filesystem, in
 + * which case the caller needs some extra handling.
 + * Return <0 for other errors.
 + */
 +static int run_one_cow_range(struct btrfs_inode *inode,
 +			     struct folio *locked_folio,
 +			     struct btrfs_key *ins,
 +			     u64 start,
 +			     u64 end, u64 *alloc_hint, bool keep_locked)
 +{
 +	struct btrfs_root *root = inode->root;
 +	struct btrfs_fs_info *fs_info = root->fs_info;
 +	struct btrfs_ordered_extent *ordered;
 +	struct btrfs_file_extent file_extent = { 0 };
 +	struct extent_state *cached = NULL;
 +	struct extent_map *em = NULL;
 +	unsigned long page_ops;
 +	const u64 len = end + 1 - start;
 +	u32 min_alloc_size;
 +	int ret;
 +
 +	ASSERT(IS_ALIGNED(start, fs_info->sectorsize));
 +	ASSERT(IS_ALIGNED(end + 1, fs_info->sectorsize));
 +
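 +	/* Clear @ins so the caller can tell no extent is reserved yet. */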
 +	ins->offset = 0;
 +	ins->objectid = 0;
 +
 +	/*
 +	 * Relocation relies on the relocated extents to have exactly the same
 +	 * size as the original extents. Normally writeback for relocation data
 +	 * extents follows a NOCOW path because relocation preallocates the
 +	 * extents. However, due to an operation such as scrub turning a block
 +	 * group to RO mode, it may fall back to COW mode, so we must make sure
 +	 * an extent allocated during COW has exactly the requested size and can
 +	 * not be split into smaller extents, otherwise relocation breaks and
 +	 * fails during the stage where it updates the bytenr of file extent
 +	 * items.
 +	 */
 +	if (btrfs_is_data_reloc_root(root))
 +		min_alloc_size = len;
 +	else
 +		min_alloc_size = fs_info->sectorsize;
 +
 +	ret = btrfs_reserve_extent(root, len, len, min_alloc_size, 0,
 +				   *alloc_hint, ins, 1, 1);
 +	if (ret < 0)
 +		return ret;
 +
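 +	/* Describe the reserved space as a single uncompressed regular extent. */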
 +	file_extent.disk_bytenr = ins->objectid;
 +	file_extent.disk_num_bytes = ins->offset;
 +	file_extent.num_bytes = ins->offset;
 +	file_extent.ram_bytes = ins->offset;
 +	file_extent.offset = 0;
 +	file_extent.compression = BTRFS_COMPRESS_NONE;
 +
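 +	/*
 +	 * Lock the range in the io tree while we attach the extent map
 +	 * and the ordered extent to it.
 +	 */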
 +	lock_extent(&inode->io_tree, start, start + ins->offset - 1,
 +		    &cached);
 +
 +	em = btrfs_create_io_em(inode, start, &file_extent,
 +				BTRFS_ORDERED_REGULAR);
 +	if (IS_ERR(em)) {
 +		unlock_extent(&inode->io_tree, start,
 +			      start + ins->offset - 1, &cached);
 +		ret = PTR_ERR(em);
 +		goto out_free_reserved;
 +	}
 +	free_extent_map(em);
 +
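 +	/* The ordered extent tracks this range until writeback completes. */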
 +	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
 +					     1 << BTRFS_ORDERED_REGULAR);
 +	if (IS_ERR(ordered)) {
 +		unlock_extent(&inode->io_tree, start,
 +			      start + ins->offset - 1, &cached);
 +		ret = PTR_ERR(ordered);
 +		goto out_drop_em;
 +	}
 +
 +	if (btrfs_is_data_reloc_root(root)) {
 +		ret = btrfs_reloc_clone_csums(ordered);
 +
 +		/*
 +		 * Only drop the extent map cache here, and process as
 +		 * normal.
 +		 *
 +		 * We must not allow extent_clear_unlock_delalloc()
 +		 * during error handling to free the metadata of this
 +		 * ordered extent, as that should be freed by
 +		 * btrfs_finish_ordered_io().
 +		 *
 +		 * So we must continue until @start is increased to
 +		 * skip the current ordered extent.
 +		 */
 +		if (ret < 0)
 +			btrfs_drop_extent_map_range(inode, start,
 +						    start + ins->offset - 1,
 +						    false);
 +	}
 +	btrfs_put_ordered_extent(ordered);
 +
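 +	/* Release the block group reservation taken by btrfs_reserve_extent(). */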
 +	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
 +
 +	/*
 +	 * We're not doing compressed IO; don't unlock the first page
 +	 * (which the caller expects to stay locked), don't clear any
 +	 * dirty bits and don't set any writeback bits.
 +	 *
- 	 * Do set the Ordered (Private2) bit so we know this page was
++	 * Do set the Ordered flag so we know this page was
 +	 * properly set up for writepage.
 +	 */
 +	page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
 +	page_ops |= PAGE_SET_ORDERED;
 +
 +	extent_clear_unlock_delalloc(inode, start, start + ins->offset - 1,
 +				     locked_folio, &cached,
 +				     EXTENT_LOCKED | EXTENT_DELALLOC,
 +				     page_ops);
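 +	/* Hint the next allocation to start right after this extent. */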
 +	*alloc_hint = ins->objectid + ins->offset;
 +	return ret;
 +
 +out_drop_em:
 +	btrfs_drop_extent_map_range(inode, start, start + ins->offset - 1, false);
 +out_free_reserved:
 +	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
 +	btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, 1);
 +	/* -EAGAIN is reserved for btrfs_reserve_extent() failures. */
 +	ASSERT(ret != -EAGAIN);
 +	ins->offset = 0;
 +	ins->objectid = 0;
 +	return ret;
 +}
 +
  /*
   * when extent_io.c finds a delayed allocation range in the file,
   * the call backs end up in this code.  The basic idea is to