drivers/scsi/lpfc/lpfc_nvmet.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
Fix a use-after-free window by correcting the buffer release sequence in
the deferred receive path. The code freed the RQ buffer first and only
then cleared the context pointer under the lock. Concurrent paths
(e.g., ABTS and the repost path) also inspect and release the same
pointer under the lock, so the old order could lead to double-free/UAF.
Note that the repost path already uses the correct pattern: detach the
pointer under the lock, then free it after dropping the lock. The deferred
path should do the same.
Fixes: 472e146d1cf3 ("scsi: lpfc: Correct upcalling nvmet_fc transport during io done downcall")
Cc: stable@vger.kernel.org
Signed-off-by: John Evans <evans1210144@gmail.com>
---
drivers/scsi/lpfc/lpfc_nvmet.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..766319543680 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1264,10 +1264,15 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
 
 	/* Free the nvmebuf since a new buffer already replaced it */
-	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 	spin_lock_irqsave(&ctxp->ctxlock, iflag);
-	ctxp->rqb_buffer = NULL;
-	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+	nvmebuf = ctxp->rqb_buffer;
+	if (nvmebuf) {
+		ctxp->rqb_buffer = NULL;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+	} else {
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+	}
 }
/**
--
2.43.0
Hi John,

nvmebuf NULL ptr check is already performed earlier in this routine.

Maybe this patch should rather look like:

diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..cd527324eae7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1264,10 +1264,10 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
 
 	/* Free the nvmebuf since a new buffer already replaced it */
-	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 	ctxp->rqb_buffer = NULL;
 	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 }

Regards,
Justin Tee
> nvmebuf NULL ptr check is already performed earlier in this routine. > > Maybe this patch should rather look like: > > diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c > index fba2e62027b7..cd527324eae7 100644 > --- a/drivers/scsi/lpfc/lpfc_nvmet.c > +++ b/drivers/scsi/lpfc/lpfc_nvmet.c > @@ -1264,10 +1264,10 @@ lpfc_nvmet_defer_rcv(struct > nvmet_fc_target_port *tgtport, > atomic_inc(&tgtp->rcv_fcp_cmd_defer); > > /* Free the nvmebuf since a new buffer already replaced it */ > - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); > spin_lock_irqsave(&ctxp->ctxlock, iflag); > ctxp->rqb_buffer = NULL; > spin_unlock_irqrestore(&ctxp->ctxlock, iflag); > + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); > } > > Regards, > Justin Tee Hi Justin, Moving free() after clearing is necessary but not sufficient. The race is between the load of ctxp->rqb_buffer and the clear under ctxp->ctxlock. If we load nvmebuf without the lock, another path can free+NULL it before we take the lock: T1 (defer_rcv): nvmebuf = ctxp->rqb_buffer; // no lock T2 (ctxbuf_post): [lock] nvmebuf' = ctxp->rqb_buffer; ctxp->rqb_buffer = NULL; free(nvmebuf'); [unlock] T1 (defer_rcv): [lock] ctxp->rqb_buffer = NULL; [unlock]; free(nvmebuf); // UAF/double free
Hi John,

Ah okay, then how about moving where we take the ctxp->ctxlock?

diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..3d8ab456ced5 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_async_xchg_ctx *ctxp =
 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
-	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_hba *phba = ctxp->phba;
 	unsigned long iflag;
 
@@ -1251,11 +1251,14 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	nvmebuf = ctxp->rqb_buffer;
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 		return;
 	}
 
@@ -1264,10 +1267,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
 
 	/* Free the nvmebuf since a new buffer already replaced it */
-	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 	ctxp->rqb_buffer = NULL;
 	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 }

Regards,
Justin
> Hi John, > > Ah okay, then how about moving where we take the ctxp->ctxlock? > > diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c > index fba2e62027b7..3d8ab456ced5 100644 > --- a/drivers/scsi/lpfc/lpfc_nvmet.c > +++ b/drivers/scsi/lpfc/lpfc_nvmet.c > @@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, > struct lpfc_nvmet_tgtport *tgtp; > struct lpfc_async_xchg_ctx *ctxp = > container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); > - struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; > + struct rqb_dmabuf *nvmebuf; > struct lpfc_hba *phba = ctxp->phba; > unsigned long iflag; > > @@ -1251,11 +1251,14 @@ lpfc_nvmet_defer_rcv(struct > nvmet_fc_target_port *tgtport, > lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", > ctxp->oxid, ctxp->size, raw_smp_processor_id()); > > + spin_lock_irqsave(&ctxp->ctxlock, iflag); > + nvmebuf = ctxp->rqb_buffer; > if (!nvmebuf) { > lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, > "6425 Defer rcv: no buffer oxid x%x: " > "flg %x ste %x\n", > ctxp->oxid, ctxp->flag, ctxp->state); > + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); > return; > } > > @@ -1264,10 +1267,9 @@ lpfc_nvmet_defer_rcv(struct > nvmet_fc_target_port *tgtport, > atomic_inc(&tgtp->rcv_fcp_cmd_defer); > > /* Free the nvmebuf since a new buffer already replaced it */ > - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); > - spin_lock_irqsave(&ctxp->ctxlock, iflag); > ctxp->rqb_buffer = NULL; > spin_unlock_irqrestore(&ctxp->ctxlock, iflag); > + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); > } > > Regards, > Justin Hi Justin, Fixed in the patch v2. Thanks! Regards, John
© 2016 - 2025 Red Hat, Inc.