Implement the SCSI-specific io_uring command handler for BSG using
struct bsg_uring_cmd.
The handler builds a SCSI request from the io_uring command, maps user
buffers (including fixed buffers), and completes asynchronously via a
request end_io callback and task_work. Completion returns a 32-bit
status and packed residual/sense information via CQE res and res2, and
supports IO_URING_F_NONBLOCK.
Signed-off-by: Yang Xiuwei <yangxiuwei@kylinos.cn>
---
drivers/scsi/scsi_bsg.c | 198 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 197 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 4d57e524e141..95b2427cc0bf 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -10,10 +10,206 @@
#define uptr64(val) ((void __user *)(uintptr_t)(val))
+/*
+ * Per-command BSG SCSI PDU stored in io_uring_cmd.pdu[32].
+ * Holds temporary state between submission, completion and task_work.
+ * Lifetime: filled in at submission (scsi_bsg_uring_cmd), status fields
+ * updated in the end_io callback, consumed and torn down in task_work.
+ * NOTE(review): must fit within the 32-byte io_uring_cmd pdu area —
+ * worth enforcing with a static assert if not already done elsewhere.
+ */
+struct scsi_bsg_uring_cmd_pdu {
+ struct bio *bio; /* mapped user buffer, unmap in task work */
+ struct request *req; /* block request, freed in task work */
+ u64 response_addr; /* user space response buffer address */
+ u32 resid_len; /* residual transfer length */
+ /* Protocol-specific status fields using union for extensibility */
+ union {
+ struct {
+ u8 device_status; /* SCSI device status (low 8 bits of result) */
+ u8 driver_status; /* SCSI driver status (DRIVER_SENSE if check) */
+ u8 host_status; /* SCSI host status (host_byte of result) */
+ u8 sense_len_wr; /* actual sense data length written */
+ } scsi;
+ /* Future protocols can add their own status layouts here */
+ };
+};
+
+/* Typed accessor for the per-command PDU embedded in the io_uring_cmd. */
+static inline struct scsi_bsg_uring_cmd_pdu *scsi_bsg_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return io_uring_cmd_to_pdu(ioucmd, struct scsi_bsg_uring_cmd_pdu);
+}
+
+/*
+ * Task work callback executed in process context.
+ * Builds res2 with status information and copies sense data to user space.
+ * res2 layout (64-bit):
+ * 0-7: device_status
+ * 8-15: driver_status
+ * 16-23: host_status
+ * 24-31: sense_len_wr
+ * 32-63: resid_len
+ */
+static void scsi_bsg_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);
+ struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+ struct scsi_cmnd *scmd;
+ struct request *rq = pdu->req;
+ int ret = 0;
+ u64 res2;
+
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ /* Unmap the user buffer before the request (and its bios) is freed. */
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ /* Build res2 with status information */
+ res2 = ((u64)pdu->resid_len << 32) |
+ ((u64)(pdu->scsi.sense_len_wr & 0xff) << 24) |
+ ((u64)(pdu->scsi.host_status & 0xff) << 16) |
+ ((u64)(pdu->scsi.driver_status & 0xff) << 8) |
+ (pdu->scsi.device_status & 0xff);
+
+ /* copy_to_user may fault — this is why we are in task_work context */
+ if (pdu->scsi.sense_len_wr && pdu->response_addr) {
+ if (copy_to_user(uptr64(pdu->response_addr), scmd->sense_buffer,
+ pdu->scsi.sense_len_wr))
+ ret = -EFAULT;
+ }
+
+ /* scmd/sense_buffer are invalid once the request is freed: free last */
+ blk_mq_free_request(rq);
+ io_uring_cmd_done32(ioucmd, ret, res2,
+ IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
+}
+
+/*
+ * Request end_io callback, invoked in completion (possibly atomic)
+ * context. Records SCSI status/residual into the PDU and defers all
+ * user-memory work (sense copy, unmap, free, CQE post) to task_work.
+ *
+ * Fix: rq_end_io_fn in this tree takes a third const struct
+ * io_comp_batch * argument (see lkp report); it is unused here.
+ */
+static enum rq_end_io_ret scsi_bsg_uring_cmd_done(struct request *req,
+ blk_status_t status, const struct io_comp_batch *iob)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+
+ /* Pack SCSI status fields into union */
+ pdu->scsi.device_status = scmd->result & 0xff;
+ pdu->scsi.host_status = host_byte(scmd->result);
+ pdu->scsi.driver_status = 0;
+ pdu->scsi.sense_len_wr = 0;
+
+ if (scsi_status_is_check_condition(scmd->result)) {
+ pdu->scsi.driver_status = DRIVER_SENSE;
+ if (pdu->response_addr)
+ pdu->scsi.sense_len_wr = min_t(u8, scmd->sense_len, SCSI_SENSE_BUFFERSIZE);
+ }
+
+ pdu->resid_len = scmd->resid_len;
+
+ /* Request is freed in the task_work callback, not here. */
+ io_uring_cmd_do_in_task_lazy(ioucmd, scsi_bsg_uring_task_cb);
+ return RQ_END_IO_NONE;
+}
+
+/*
+ * Map the user data buffer (regular user pointer, or an io_uring
+ * fixed/registered buffer) into the request. Exactly one direction is
+ * mapped; BIDI requests were rejected before this is called.
+ *
+ * Fix: io_uring_sqe_cmd() is a two-argument macro (sqe, type) in this
+ * tree (see lkp report); pass the command type explicitly.
+ */
+static int scsi_bsg_map_user_buffer(struct request *req,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags, gfp_t gfp_mask)
+{
+ const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe,
+ struct bsg_uring_cmd);
+ struct iov_iter iter;
+ bool is_write = cmd->dout_xfer_len > 0;
+ u64 buf_addr = is_write ? cmd->dout_xferp : cmd->din_xferp;
+ unsigned long buf_len = is_write ? cmd->dout_xfer_len : cmd->din_xfer_len;
+ int ret;
+
+ if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+ ret = io_uring_cmd_import_fixed(buf_addr, buf_len,
+ is_write ? WRITE : READ,
+ &iter, ioucmd, issue_flags);
+ if (ret < 0)
+ return ret;
+ ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
+ } else {
+ ret = blk_rq_map_user(req->q, req, NULL, uptr64(buf_addr),
+ buf_len, gfp_mask);
+ }
+
+ return ret;
+}
+
static int scsi_bsg_uring_cmd(struct request_queue *q, struct io_uring_cmd *ioucmd,
unsigned int issue_flags, bool open_for_write)
{
- return -EOPNOTSUPP;
+ struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
+ /* Fix: io_uring_sqe_cmd() takes (sqe, type) in this tree (lkp report) */
+ const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe,
+ struct bsg_uring_cmd);
+ struct scsi_cmnd *scmd;
+ struct request *req;
+ blk_mq_req_flags_t blk_flags = 0;
+ gfp_t gfp_mask = GFP_KERNEL;
+ int ret = 0;
+
+ if (cmd->protocol != BSG_PROTOCOL_SCSI ||
+ cmd->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+ return -EINVAL;
+
+ if (!cmd->request || cmd->request_len == 0)
+ return -EINVAL;
+
+ if (cmd->dout_xfer_len && cmd->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* iovec-based transfers are not supported via the io_uring path */
+ if (cmd->dout_iovec_count > 0 || cmd->din_iovec_count > 0)
+ return -EOPNOTSUPP;
+
+ /* Honor IO_URING_F_NONBLOCK: never sleep on allocation */
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ blk_flags = BLK_MQ_REQ_NOWAIT;
+ gfp_mask = GFP_NOWAIT;
+ }
+
+ req = scsi_alloc_request(q, cmd->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, blk_flags);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = cmd->request_len;
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_free_req;
+ }
+ scmd->allowed = SG_DEFAULT_RETRIES;
+
+ if (copy_from_user(scmd->cmnd, uptr64(cmd->request), cmd->request_len)) {
+ ret = -EFAULT;
+ goto out_free_req;
+ }
+
+ if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) {
+ ret = -EPERM;
+ goto out_free_req;
+ }
+
+ pdu->response_addr = cmd->response;
+ /* min_t avoids the mixed signed/unsigned comparison plain min() makes */
+ scmd->sense_len = cmd->max_response_len ?
+ min_t(u32, cmd->max_response_len, SCSI_SENSE_BUFFERSIZE) :
+ SCSI_SENSE_BUFFERSIZE;
+
+ if (cmd->dout_xfer_len || cmd->din_xfer_len) {
+ ret = scsi_bsg_map_user_buffer(req, ioucmd, issue_flags, gfp_mask);
+ if (ret)
+ goto out_free_req;
+ pdu->bio = req->bio;
+ } else {
+ pdu->bio = NULL;
+ }
+
+ req->timeout = cmd->timeout_ms ?
+ msecs_to_jiffies(cmd->timeout_ms) : BLK_DEFAULT_SG_TIMEOUT;
+
+ /* After blk_execute_rq_nowait() completion may run at any time */
+ req->end_io = scsi_bsg_uring_cmd_done;
+ req->end_io_data = ioucmd;
+ pdu->req = req;
+
+ blk_execute_rq_nowait(req, false);
+ return -EIOCBQUEUED;
+
+out_free_req:
+ blk_mq_free_request(req);
+ return ret;
}
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
--
2.25.1
Hi Yang,
kernel test robot noticed the following build errors:
[auto build test ERROR on axboe/for-next]
[also build test ERROR on jejb-scsi/for-next mkp-scsi/for-next linus/master v7.0-rc2 next-20260303]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Yang-Xiuwei/bsg-add-bsg_uring_cmd-uapi-structure/20260304-160717
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git for-next
patch link: https://lore.kernel.org/r/20260304080313.675768-4-yangxiuwei%40kylinos.cn
patch subject: [PATCH v5 3/3] scsi: bsg: add io_uring passthrough handler
config: x86_64-rhel-9.4 (https://download.01.org/0day-ci/archive/20260304/202603041450.tuj48h9Q-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260304/202603041450.tuj48h9Q-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603041450.tuj48h9Q-lkp@intel.com/
All errors (new ones prefixed by >>):
drivers/scsi/scsi_bsg.c: In function 'scsi_bsg_map_user_buffer':
>> drivers/scsi/scsi_bsg.c:111:71: error: macro "io_uring_sqe_cmd" requires 2 arguments, but only 1 given
111 | const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
| ^
In file included from drivers/scsi/scsi_bsg.c:3:
include/linux/io_uring/cmd.h:29:9: note: macro "io_uring_sqe_cmd" defined here
29 | #define io_uring_sqe_cmd(sqe, type) ({ \
| ^~~~~~~~~~~~~~~~
>> drivers/scsi/scsi_bsg.c:111:43: error: 'io_uring_sqe_cmd' undeclared (first use in this function); did you mean 'io_uring_sqe'?
111 | const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
| ^~~~~~~~~~~~~~~~
| io_uring_sqe
drivers/scsi/scsi_bsg.c:111:43: note: each undeclared identifier is reported only once for each function it appears in
drivers/scsi/scsi_bsg.c: In function 'scsi_bsg_uring_cmd':
drivers/scsi/scsi_bsg.c:137:71: error: macro "io_uring_sqe_cmd" requires 2 arguments, but only 1 given
137 | const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
| ^
include/linux/io_uring/cmd.h:29:9: note: macro "io_uring_sqe_cmd" defined here
29 | #define io_uring_sqe_cmd(sqe, type) ({ \
| ^~~~~~~~~~~~~~~~
drivers/scsi/scsi_bsg.c:137:43: error: 'io_uring_sqe_cmd' undeclared (first use in this function); did you mean 'io_uring_sqe'?
137 | const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
| ^~~~~~~~~~~~~~~~
| io_uring_sqe
drivers/scsi/scsi_bsg.c:203:21: error: assignment to 'enum rq_end_io_ret (*)(struct request *, blk_status_t, const struct io_comp_batch *)' {aka 'enum rq_end_io_ret (*)(struct request *, unsigned char, const struct io_comp_batch *)'} from incompatible pointer type 'enum rq_end_io_ret (*)(struct request *, blk_status_t)' {aka 'enum rq_end_io_ret (*)(struct request *, unsigned char)'} [-Wincompatible-pointer-types]
203 | req->end_io = scsi_bsg_uring_cmd_done;
| ^
vim +/io_uring_sqe_cmd +111 drivers/scsi/scsi_bsg.c
106
107 static int scsi_bsg_map_user_buffer(struct request *req,
108 struct io_uring_cmd *ioucmd,
109 unsigned int issue_flags, gfp_t gfp_mask)
110 {
> 111 const struct bsg_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
112 struct iov_iter iter;
113 bool is_write = cmd->dout_xfer_len > 0;
114 u64 buf_addr = is_write ? cmd->dout_xferp : cmd->din_xferp;
115 unsigned long buf_len = is_write ? cmd->dout_xfer_len : cmd->din_xfer_len;
116 int ret;
117
118 if (ioucmd->flags & IORING_URING_CMD_FIXED) {
119 ret = io_uring_cmd_import_fixed(buf_addr, buf_len,
120 is_write ? WRITE : READ,
121 &iter, ioucmd, issue_flags);
122 if (ret < 0)
123 return ret;
124 ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
125 } else {
126 ret = blk_rq_map_user(req->q, req, NULL, uptr64(buf_addr),
127 buf_len, gfp_mask);
128 }
129
130 return ret;
131 }
132
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
© 2016 - 2026 Red Hat, Inc.