At present, object->file suffers from a NULL pointer dereference problem in
ondemand-mode. The root cause is that the lifetimes of the allocated fd and
object->file are inconsistent, and the user-space invocation through anon_fd
uses object->file. Following is the process that triggers the issue:
[write fd] [umount]
cachefiles_ondemand_fd_write_iter
fscache_cookie_state_machine
cachefiles_withdraw_cookie
if (!file) return -ENOBUFS
cachefiles_clean_up_object
cachefiles_unmark_inode_in_use
fput(object->file)
object->file = NULL
// file NULL pointer dereference!
__cachefiles_write(..., file, ...)
Fix this issue by taking an additional reference count on object->file
before write/llseek, and dropping it after the operation finishes.
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
fs/cachefiles/interface.c | 3 +++
fs/cachefiles/ondemand.c | 30 ++++++++++++++++++++++++------
2 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 35ba2117a6f6..d30127ead911 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -342,10 +342,13 @@ static void cachefiles_clean_up_object(struct cachefiles_object *object,
}
cachefiles_unmark_inode_in_use(object, object->file);
+
+ spin_lock(&object->lock);
if (object->file) {
fput(object->file);
object->file = NULL;
}
+ spin_unlock(&object->lock);
}
/*
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 38ca6dce8ef2..fe3de9ad57bf 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -60,20 +60,26 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
{
struct cachefiles_object *object = kiocb->ki_filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
- struct file *file = object->file;
+ struct file *file;
size_t len = iter->count, aligned_len = len;
loff_t pos = kiocb->ki_pos;
const struct cred *saved_cred;
int ret;
- if (!file)
+ spin_lock(&object->lock);
+ file = object->file;
+ if (!file) {
+ spin_unlock(&object->lock);
return -ENOBUFS;
+ }
+ get_file(file);
+ spin_unlock(&object->lock);
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, file, &pos, &aligned_len, len, true);
cachefiles_end_secure(cache, saved_cred);
if (ret < 0)
- return ret;
+ goto out;
trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
@@ -82,6 +88,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
kiocb->ki_pos += ret;
}
+out:
+ fput(file);
return ret;
}
@@ -89,12 +97,22 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
int whence)
{
struct cachefiles_object *object = filp->private_data;
- struct file *file = object->file;
+ struct file *file;
+ loff_t ret;
- if (!file)
+ spin_lock(&object->lock);
+ file = object->file;
+ if (!file) {
+ spin_unlock(&object->lock);
return -ENOBUFS;
+ }
+ get_file(file);
+ spin_unlock(&object->lock);
- return vfs_llseek(file, pos, whence);
+ ret = vfs_llseek(file, pos, whence);
+ fput(file);
+
+ return ret;
}
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
--
2.39.2
Zizhi Wo <wozizhi@huawei.com> wrote:
> + spin_lock(&object->lock);
> if (object->file) {
> fput(object->file);
> object->file = NULL;
> }
> + spin_unlock(&object->lock);
I would suggest stashing the file pointer in a local var and then doing the
fput() outside of the locks.
David
在 2024/10/10 19:26, David Howells 写道:
> Zizhi Wo <wozizhi@huawei.com> wrote:
>
>> + spin_lock(&object->lock);
>> if (object->file) {
>> fput(object->file);
>> object->file = NULL;
>> }
>> + spin_unlock(&object->lock);
>
> I would suggest stashing the file pointer in a local var and then doing the
> fput() outside of the locks.
>
> David
>
>
If fput() is executed outside the lock, I am currently unsure how to
guarantee that file in __cachefiles_write() does not trigger null
pointer dereference...
Thanks,
Zizhi Wo
Zizhi Wo <wozizhi@huawei.com> wrote:
> 在 2024/10/10 19:26, David Howells 写道:
> > Zizhi Wo <wozizhi@huawei.com> wrote:
> >
> >> + spin_lock(&object->lock);
> >> if (object->file) {
> >> fput(object->file);
> >> object->file = NULL;
> >> }
> >> + spin_unlock(&object->lock);
> > I would suggest stashing the file pointer in a local var and then doing the
> > fput() outside of the locks.
> > David
> >
>
> If fput() is executed outside the lock, I am currently unsure how to
> guarantee that file in __cachefiles_write() does not trigger null
> pointer dereference...
I'm not sure why there's a problem here. I was thinking along the lines of:
struct file *tmp;
spin_lock(&object->lock);
	tmp = object->file;
object->file = NULL;
spin_unlock(&object->lock);
if (tmp)
fput(tmp);
Note that fput() may defer the actual work if the counter hits zero, so the
cleanup may not happen inside the lock; further, the cleanup done by __fput()
may sleep.
David
在 2024/10/10 22:52, David Howells 写道:
> Zizhi Wo <wozizhi@huawei.com> wrote:
>
>> 在 2024/10/10 19:26, David Howells 写道:
>>> Zizhi Wo <wozizhi@huawei.com> wrote:
>>>
>>>> + spin_lock(&object->lock);
>>>> if (object->file) {
>>>> fput(object->file);
>>>> object->file = NULL;
>>>> }
>>>> + spin_unlock(&object->lock);
>>> I would suggest stashing the file pointer in a local var and then doing the
>>> fput() outside of the locks.
>>> David
>>>
>>
>> If fput() is executed outside the lock, I am currently unsure how to
>> guarantee that file in __cachefiles_write() does not trigger null
>> pointer dereference...
>
> I'm not sure why there's a problem here. I was thinking along the lines of:
>
> struct file *tmp;
> spin_lock(&object->lock);
> 	tmp = object->file;
> object->file = NULL;
> spin_unlock(&object->lock);
> if (tmp)
> fput(tmp);
>
> Note that fput() may defer the actual work if the counter hits zero, so the
> cleanup may not happen inside the lock; further, the cleanup done by __fput()
> may sleep.
>
> David
>
>
Oh, I see what you mean. I will sort it out and issue the second patch
as soon as possible.
Thanks,
Zizhi Wo
© 2016 - 2026 Red Hat, Inc.