[PATCH v2] fs: make insert_inode_locked() wait for inode destruction
Posted by Mateusz Guzik 3 weeks, 3 days ago
This is the only routine which skipped such inodes instead of waiting.

The current behavior is arguably a bug as it results in a corner case
where the inode hash can have *two* matching inodes, one of which is on
its way out.

Ironing out this difference is an incremental step towards sanitizing
the API.
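
For illustration, here is a minimal sketch of the caller pattern this
affects (a hypothetical fs_new_inode() loosely modeled on ext2/ext4-style
inode allocation; names and error handling are illustrative, not part of
this patch):

	struct inode *fs_new_inode(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = new_inode(sb);

		if (!inode)
			return ERR_PTR(-ENOMEM);

		inode->i_ino = ino;
		/*
		 * A still-hashed I_FREEING/I_WILL_FREE inode with a
		 * matching ino used to be skipped here, briefly leaving
		 * two matching inodes in the hash. With this patch the
		 * insert instead waits for the old inode's teardown.
		 */
		if (insert_inode_locked(inode) < 0) {
			iput(inode);
			return ERR_PTR(-EBUSY);
		}
		return inode;
	}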

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
---

v2:
- add a way to avoid the rcu dance in __wait_on_freeing_inode
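
  The extra argument lets the caller state whether it is inside an RCU
  read-side section; the resulting call convention, sketched with the
  names used in this patch:

	/* hash lookups (find_inode(), find_inode_fast()) run under RCU: */
	__wait_on_freeing_inode(inode, hash_locked, true);

	/* insert_inode_locked() holds inode_hash_lock, no rcu_read_lock(): */
	__wait_on_freeing_inode(old, true, false);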


 fs/inode.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 8a47c4da603f..a4cfe9182a7c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1028,19 +1028,20 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
 	return freed;
 }
 
-static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
+static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked);
+
 /*
  * Called with the inode lock held.
  */
 static struct inode *find_inode(struct super_block *sb,
 				struct hlist_head *head,
 				int (*test)(struct inode *, void *),
-				void *data, bool is_inode_hash_locked,
+				void *data, bool hash_locked,
 				bool *isnew)
 {
 	struct inode *inode = NULL;
 
-	if (is_inode_hash_locked)
+	if (hash_locked)
 		lockdep_assert_held(&inode_hash_lock);
 	else
 		lockdep_assert_not_held(&inode_hash_lock);
@@ -1054,7 +1055,7 @@ static struct inode *find_inode(struct super_block *sb,
 			continue;
 		spin_lock(&inode->i_lock);
 		if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
-			__wait_on_freeing_inode(inode, is_inode_hash_locked);
+			__wait_on_freeing_inode(inode, hash_locked, true);
 			goto repeat;
 		}
 		if (unlikely(inode_state_read(inode) & I_CREATING)) {
@@ -1078,11 +1079,11 @@ static struct inode *find_inode(struct super_block *sb,
  */
 static struct inode *find_inode_fast(struct super_block *sb,
 				struct hlist_head *head, unsigned long ino,
-				bool is_inode_hash_locked, bool *isnew)
+				bool hash_locked, bool *isnew)
 {
 	struct inode *inode = NULL;
 
-	if (is_inode_hash_locked)
+	if (hash_locked)
 		lockdep_assert_held(&inode_hash_lock);
 	else
 		lockdep_assert_not_held(&inode_hash_lock);
@@ -1096,7 +1097,7 @@ static struct inode *find_inode_fast(struct super_block *sb,
 			continue;
 		spin_lock(&inode->i_lock);
 		if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
-			__wait_on_freeing_inode(inode, is_inode_hash_locked);
+			__wait_on_freeing_inode(inode, hash_locked, true);
 			goto repeat;
 		}
 		if (unlikely(inode_state_read(inode) & I_CREATING)) {
@@ -1832,16 +1833,13 @@ int insert_inode_locked(struct inode *inode)
 	while (1) {
 		struct inode *old = NULL;
 		spin_lock(&inode_hash_lock);
+repeat:
 		hlist_for_each_entry(old, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
 			if (old->i_sb != sb)
 				continue;
 			spin_lock(&old->i_lock);
-			if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
-				spin_unlock(&old->i_lock);
-				continue;
-			}
 			break;
 		}
 		if (likely(!old)) {
@@ -1852,6 +1850,11 @@ int insert_inode_locked(struct inode *inode)
 			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
+		if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
+			__wait_on_freeing_inode(old, true, false);
+			old = NULL;
+			goto repeat;
+		}
 		if (unlikely(inode_state_read(old) & I_CREATING)) {
 			spin_unlock(&old->i_lock);
 			spin_unlock(&inode_hash_lock);
@@ -2522,16 +2525,18 @@ EXPORT_SYMBOL(inode_needs_sync);
  * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
  * will DTRT.
  */
-static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
+static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked)
 {
 	struct wait_bit_queue_entry wqe;
 	struct wait_queue_head *wq_head;
 
+	VFS_BUG_ON(!hash_locked && !rcu_locked);
+
 	/*
 	 * Handle racing against evict(), see that routine for more details.
 	 */
 	if (unlikely(inode_unhashed(inode))) {
-		WARN_ON(is_inode_hash_locked);
+		WARN_ON(hash_locked);
 		spin_unlock(&inode->i_lock);
 		return;
 	}
@@ -2539,14 +2544,16 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
 	wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
 	prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->i_lock);
-	rcu_read_unlock();
-	if (is_inode_hash_locked)
+	if (rcu_locked)
+		rcu_read_unlock();
+	if (hash_locked)
 		spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq_head, &wqe.wq_entry);
-	if (is_inode_hash_locked)
+	if (hash_locked)
 		spin_lock(&inode_hash_lock);
-	rcu_read_lock();
+	if (rcu_locked)
+		rcu_read_lock();
 }
 
 static __initdata unsigned long ihash_entries;
-- 
2.48.1
Re: [PATCH v2] fs: make insert_inode_locked() wait for inode destruction
Posted by Jan Kara 3 weeks, 2 days ago
On Wed 14-01-26 10:47:16, Mateusz Guzik wrote:
> This is the only routine which skipped such inodes instead of waiting.
> 
> The current behavior is arguably a bug as it results in a corner case
> where the inode hash can have *two* matching inodes, one of which is on
> its way out.
> 
> Ironing out this difference is an incremental step towards sanitizing
> the API.
> 
> Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

-- 
Jan Kara <jack@suse.com>
SUSE Labs, CR
Re: [PATCH v2] fs: make insert_inode_locked() wait for inode destruction
Posted by Christian Brauner 3 weeks, 2 days ago
On Wed, Jan 14, 2026 at 10:47:16AM +0100, Mateusz Guzik wrote:
> This is the only routine which skipped such inodes instead of waiting.
> 
> The current behavior is arguably a bug as it results in a corner case
> where the inode hash can have *two* matching inodes, one of which is on
> its way out.
> 
> Ironing out this difference is an incremental step towards sanitizing
> the API.
> 
> Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
> ---

Still reviewing, but if it's ok I'll replace the buggy version.