A host that has more than one path connecting to an nvme subsystem
typically has an nvme controller associated with every path. This is
mostly applicable to nvmeof. If one path goes down, inflight IOs on that
path should not be retried immediately on another path because this
could lead to data corruption as described in TP4129. TP8028 defines
cross-controller reset mechanism that can be used by host to terminate
IOs on the failed path using one of the remaining healthy paths. Only
after IOs are terminated, or long enough time passes as defined by
TP4129, inflight IOs should be retried on another path. Implement core
cross-controller reset shared logic to be used by the transports.
Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
---
drivers/nvme/host/constants.c | 1 +
drivers/nvme/host/core.c | 141 ++++++++++++++++++++++++++++++++++
drivers/nvme/host/nvme.h | 9 +++
3 files changed, 151 insertions(+)
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index dc90df9e13a2..f679efd5110e 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -46,6 +46,7 @@ static const char * const nvme_admin_ops[] = {
[nvme_admin_virtual_mgmt] = "Virtual Management",
[nvme_admin_nvme_mi_send] = "NVMe Send MI",
[nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+ [nvme_admin_cross_ctrl_reset] = "Cross Controller Reset",
[nvme_admin_dbbuf] = "Doorbell Buffer Config",
[nvme_admin_format_nvm] = "Format NVM",
[nvme_admin_security_send] = "Security Send",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 231d402e9bfb..765b1524b3ed 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,6 +554,146 @@ void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+static struct nvme_ctrl *nvme_find_ctrl_ccr(struct nvme_ctrl *ictrl,
+ u32 min_cntlid)
+{
+ struct nvme_subsystem *subsys = ictrl->subsys;
+ struct nvme_ctrl *ctrl, *sctrl = NULL;
+ unsigned long flags;
+
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->cntlid < min_cntlid)
+ continue;
+
+ if (atomic_dec_if_positive(&ctrl->ccr_limit) < 0)
+ continue;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->state != NVME_CTRL_LIVE) {
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ atomic_inc(&ctrl->ccr_limit);
+ continue;
+ }
+
+ /*
+ * We got a good candidate source controller that is locked and
+ * LIVE. However, no guarantee ctrl will not be deleted after
+ * ctrl->lock is released. Get a ref of both ctrl and admin_q
+ * so they do not disappear until we are done with them.
+ */
+ WARN_ON_ONCE(!blk_get_queue(ctrl->admin_q));
+ nvme_get_ctrl(ctrl);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ sctrl = ctrl;
+ break;
+ }
+ mutex_unlock(&nvme_subsystems_lock);
+ return sctrl;
+}
+
+static void nvme_put_ctrl_ccr(struct nvme_ctrl *sctrl)
+{
+ atomic_inc(&sctrl->ccr_limit);
+ blk_put_queue(sctrl->admin_q);
+ nvme_put_ctrl(sctrl);
+}
+
+static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
+{
+ struct nvme_ccr_entry ccr = { };
+ union nvme_result res = { 0 };
+ struct nvme_command c = { };
+ unsigned long flags, tmo;
+ bool completed = false;
+ int ret = 0;
+ u32 result;
+
+ init_completion(&ccr.complete);
+ ccr.ictrl = ictrl;
+
+ spin_lock_irqsave(&sctrl->lock, flags);
+ list_add_tail(&ccr.list, &sctrl->ccr_list);
+ spin_unlock_irqrestore(&sctrl->lock, flags);
+
+ c.ccr.opcode = nvme_admin_cross_ctrl_reset;
+ c.ccr.ciu = ictrl->ciu;
+ c.ccr.icid = cpu_to_le16(ictrl->cntlid);
+ c.ccr.cirn = cpu_to_le64(ictrl->cirn);
+ ret = __nvme_submit_sync_cmd(sctrl->admin_q, &c, &res,
+ NULL, 0, NVME_QID_ANY, 0);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ result = le32_to_cpu(res.u32);
+ if (result & 0x01) /* Immediate Reset Successful */
+ goto out;
+
+ tmo = secs_to_jiffies(ictrl->kato);
+ if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ completed = true;
+
+out:
+ spin_lock_irqsave(&sctrl->lock, flags);
+ list_del(&ccr.list);
+ spin_unlock_irqrestore(&sctrl->lock, flags);
+ if (completed) {
+ if (ccr.ccrs == NVME_CCR_STATUS_SUCCESS)
+ return 0;
+ return -EREMOTEIO;
+ }
+ return ret;
+}
+
+unsigned long nvme_fence_ctrl(struct nvme_ctrl *ictrl)
+{
+ unsigned long deadline, now, timeout;
+ struct nvme_ctrl *sctrl;
+ u32 min_cntlid = 0;
+ int ret;
+
+ timeout = nvme_fence_timeout_ms(ictrl);
+ dev_info(ictrl->device, "attempting CCR, timeout %lums\n", timeout);
+
+ now = jiffies;
+ deadline = now + msecs_to_jiffies(timeout);
+ while (time_before(now, deadline)) {
+ sctrl = nvme_find_ctrl_ccr(ictrl, min_cntlid);
+ if (!sctrl) {
+ /* CCR failed, switch to time-based recovery */
+ return deadline - now;
+ }
+
+ ret = nvme_issue_wait_ccr(sctrl, ictrl);
+ if (!ret) {
+ dev_info(ictrl->device, "CCR succeeded using %s\n",
+ dev_name(sctrl->device));
+ nvme_put_ctrl_ccr(sctrl);
+ return 0;
+ }
+
+ min_cntlid = sctrl->cntlid + 1;
+ nvme_put_ctrl_ccr(sctrl);
+ now = jiffies;
+
+ if (ret == -EIO) /* CCR command failed */
+ continue;
+
+ /* CCR operation failed or timed out */
+ return time_before(now, deadline) ? deadline - now : 0;
+ }
+
+ dev_info(ictrl->device, "CCR reached timeout, call it done\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fence_ctrl);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -5121,6 +5261,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
+ INIT_LIST_HEAD(&ctrl->ccr_list);
xa_init(&ctrl->cels);
ctrl->dev = dev;
ctrl->ops = ops;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b1c37eb3379e..f3ab9411cac5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -279,6 +279,13 @@ enum nvme_ctrl_flags {
NVME_CTRL_FROZEN = 6,
};
+struct nvme_ccr_entry {
+ struct list_head list;
+ struct completion complete;
+ struct nvme_ctrl *ictrl;
+ u8 ccrs;
+};
+
struct nvme_ctrl {
bool comp_seen;
bool identified;
@@ -296,6 +303,7 @@ struct nvme_ctrl {
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
+ struct list_head ccr_list;
struct mutex namespaces_lock;
struct srcu_struct srcu;
struct device ctrl_device;
@@ -813,6 +821,7 @@ blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
+unsigned long nvme_fence_ctrl(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
--
2.52.0
On Fri, Feb 13, 2026 at 8:28 PM Mohamed Khalfella
<mkhalfella@purestorage.com> wrote:
>
> A host that has more than one path connecting to an nvme subsystem
> typically has an nvme controller associated with every path. This is
> mostly applicable to nvmeof. If one path goes down, inflight IOs on that
> path should not be retried immediately on another path because this
> could lead to data corruption as described in TP4129. TP8028 defines
> cross-controller reset mechanism that can be used by host to terminate
> IOs on the failed path using one of the remaining healthy paths. Only
> after IOs are terminated, or long enough time passes as defined by
> TP4129, inflight IOs should be retried on another path. Implement core
> cross-controller reset shared logic to be used by the transports.
>
> Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
> +static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
> + if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
> + ret = -ETIMEDOUT;
> + goto out;
> + }
The more I look at this, the less I can ignore that this tmo should be
capped by deadline - now.
> +unsigned long nvme_fence_ctrl(struct nvme_ctrl *ictrl)
> + deadline = now + msecs_to_jiffies(timeout);
> + while (time_before(now, deadline)) {
...
> + ret = nvme_issue_wait_ccr(sctrl, ictrl);
...
> + }
Sincerely,
Randy Jennings
On Wed 2026-02-25 18:37:44 -0800, Randy Jennings wrote:
> On Fri, Feb 13, 2026 at 8:28 PM Mohamed Khalfella
> <mkhalfella@purestorage.com> wrote:
> >
> > A host that has more than one path connecting to an nvme subsystem
> > typically has an nvme controller associated with every path. This is
> > mostly applicable to nvmeof. If one path goes down, inflight IOs on that
> > path should not be retried immediately on another path because this
> > could lead to data corruption as described in TP4129. TP8028 defines
> > cross-controller reset mechanism that can be used by host to terminate
> > IOs on the failed path using one of the remaining healthy paths. Only
> > after IOs are terminated, or long enough time passes as defined by
> > TP4129, inflight IOs should be retried on another path. Implement core
> > cross-controller reset shared logic to be used by the transports.
> >
> > Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
> > +static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
> > + if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
> > + ret = -ETIMEDOUT;
> > + goto out;
> > + }
> The more I look at this, the less I can ignore that this tmo should be
> capped by deadline - now..
I updated nvme_issue_wait_ccr() to do that.
>
> > +unsigned long nvme_fence_ctrl(struct nvme_ctrl *ictrl)
> > + deadline = now + msecs_to_jiffies(timeout);
> > + while (time_before(now, deadline)) {
> ...
> > + ret = nvme_issue_wait_ccr(sctrl, ictrl);
> ...
> > + }
> Sincerely,
> Randy Jennings
On 2/14/26 05:25, Mohamed Khalfella wrote:
> A host that has more than one path connecting to an nvme subsystem
> typically has an nvme controller associated with every path. This is
> mostly applicable to nvmeof. If one path goes down, inflight IOs on that
> path should not be retried immediately on another path because this
> could lead to data corruption as described in TP4129. TP8028 defines
> cross-controller reset mechanism that can be used by host to terminate
> IOs on the failed path using one of the remaining healthy paths. Only
> after IOs are terminated, or long enough time passes as defined by
> TP4129, inflight IOs should be retried on another path. Implement core
> cross-controller reset shared logic to be used by the transports.
>
> Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
> ---
> drivers/nvme/host/constants.c | 1 +
> drivers/nvme/host/core.c | 141 ++++++++++++++++++++++++++++++++++
> drivers/nvme/host/nvme.h | 9 +++
> 3 files changed, 151 insertions(+)
>
> diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
> index dc90df9e13a2..f679efd5110e 100644
> --- a/drivers/nvme/host/constants.c
> +++ b/drivers/nvme/host/constants.c
> @@ -46,6 +46,7 @@ static const char * const nvme_admin_ops[] = {
> [nvme_admin_virtual_mgmt] = "Virtual Management",
> [nvme_admin_nvme_mi_send] = "NVMe Send MI",
> [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
> + [nvme_admin_cross_ctrl_reset] = "Cross Controller Reset",
> [nvme_admin_dbbuf] = "Doorbell Buffer Config",
> [nvme_admin_format_nvm] = "Format NVM",
> [nvme_admin_security_send] = "Security Send",
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 231d402e9bfb..765b1524b3ed 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -554,6 +554,146 @@ void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
> }
> EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
>
> +static struct nvme_ctrl *nvme_find_ctrl_ccr(struct nvme_ctrl *ictrl,
> + u32 min_cntlid)
> +{
> + struct nvme_subsystem *subsys = ictrl->subsys;
> + struct nvme_ctrl *ctrl, *sctrl = NULL;
> + unsigned long flags;
> +
> + mutex_lock(&nvme_subsystems_lock);
> + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
> + if (ctrl->cntlid < min_cntlid)
> + continue;
> +
> + if (atomic_dec_if_positive(&ctrl->ccr_limit) < 0)
> + continue;
> +
> + spin_lock_irqsave(&ctrl->lock, flags);
> + if (ctrl->state != NVME_CTRL_LIVE) {
> + spin_unlock_irqrestore(&ctrl->lock, flags);
> + atomic_inc(&ctrl->ccr_limit);
> + continue;
> + }
> +
> + /*
> + * We got a good candidate source controller that is locked and
> + * LIVE. However, no guarantee ctrl will not be deleted after
> + * ctrl->lock is released. Get a ref of both ctrl and admin_q
> + * so they do not disappear until we are done with them.
> + */
> + WARN_ON_ONCE(!blk_get_queue(ctrl->admin_q));
> + nvme_get_ctrl(ctrl);
> + spin_unlock_irqrestore(&ctrl->lock, flags);
> + sctrl = ctrl;
> + break;
> + }
> + mutex_unlock(&nvme_subsystems_lock);
> + return sctrl;
> +}
> +
> +static void nvme_put_ctrl_ccr(struct nvme_ctrl *sctrl)
> +{
> + atomic_inc(&sctrl->ccr_limit);
> + blk_put_queue(sctrl->admin_q);
> + nvme_put_ctrl(sctrl);
> +}
> +
> +static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
> +{
> + struct nvme_ccr_entry ccr = { };
> + union nvme_result res = { 0 };
> + struct nvme_command c = { };
> + unsigned long flags, tmo;
> + bool completed = false;
> + int ret = 0;
> + u32 result;
> +
> + init_completion(&ccr.complete);
> + ccr.ictrl = ictrl;
> +
> + spin_lock_irqsave(&sctrl->lock, flags);
> + list_add_tail(&ccr.list, &sctrl->ccr_list);
> + spin_unlock_irqrestore(&sctrl->lock, flags);
> +
> + c.ccr.opcode = nvme_admin_cross_ctrl_reset;
> + c.ccr.ciu = ictrl->ciu;
> + c.ccr.icid = cpu_to_le16(ictrl->cntlid);
> + c.ccr.cirn = cpu_to_le64(ictrl->cirn);
> + ret = __nvme_submit_sync_cmd(sctrl->admin_q, &c, &res,
> + NULL, 0, NVME_QID_ANY, 0);
> + if (ret) {
> + ret = -EIO;
> + goto out;
> + }
> +
> + result = le32_to_cpu(res.u32);
> + if (result & 0x01) /* Immediate Reset Successful */
> + goto out;
> +
> + tmo = secs_to_jiffies(ictrl->kato);
> + if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
> + ret = -ETIMEDOUT;
> + goto out;
> + }
> +
That will be tricky. The 'ccr' command will be sent with the default
command queue timeout, which is decoupled from KATO.
So you really should set the command timeout for the 'ccr' command
to ctrl->kato to ensure it'll be terminated correctly.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
On Mon 2026-02-16 13:41:39 +0100, Hannes Reinecke wrote:
> On 2/14/26 05:25, Mohamed Khalfella wrote:
> > A host that has more than one path connecting to an nvme subsystem
> > typically has an nvme controller associated with every path. This is
> > mostly applicable to nvmeof. If one path goes down, inflight IOs on that
> > path should not be retried immediately on another path because this
> > could lead to data corruption as described in TP4129. TP8028 defines
> > cross-controller reset mechanism that can be used by host to terminate
> > IOs on the failed path using one of the remaining healthy paths. Only
> > after IOs are terminated, or long enough time passes as defined by
> > TP4129, inflight IOs should be retried on another path. Implement core
> > cross-controller reset shared logic to be used by the transports.
> >
> > Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
> > ---
> > drivers/nvme/host/constants.c | 1 +
> > drivers/nvme/host/core.c | 141 ++++++++++++++++++++++++++++++++++
> > drivers/nvme/host/nvme.h | 9 +++
> > 3 files changed, 151 insertions(+)
> >
> > diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
> > index dc90df9e13a2..f679efd5110e 100644
> > --- a/drivers/nvme/host/constants.c
> > +++ b/drivers/nvme/host/constants.c
> > @@ -46,6 +46,7 @@ static const char * const nvme_admin_ops[] = {
> > [nvme_admin_virtual_mgmt] = "Virtual Management",
> > [nvme_admin_nvme_mi_send] = "NVMe Send MI",
> > [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
> > + [nvme_admin_cross_ctrl_reset] = "Cross Controller Reset",
> > [nvme_admin_dbbuf] = "Doorbell Buffer Config",
> > [nvme_admin_format_nvm] = "Format NVM",
> > [nvme_admin_security_send] = "Security Send",
> > diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> > index 231d402e9bfb..765b1524b3ed 100644
> > --- a/drivers/nvme/host/core.c
> > +++ b/drivers/nvme/host/core.c
> > @@ -554,6 +554,146 @@ void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
> > }
> > EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
> >
> > +static struct nvme_ctrl *nvme_find_ctrl_ccr(struct nvme_ctrl *ictrl,
> > + u32 min_cntlid)
> > +{
> > + struct nvme_subsystem *subsys = ictrl->subsys;
> > + struct nvme_ctrl *ctrl, *sctrl = NULL;
> > + unsigned long flags;
> > +
> > + mutex_lock(&nvme_subsystems_lock);
> > + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
> > + if (ctrl->cntlid < min_cntlid)
> > + continue;
> > +
> > + if (atomic_dec_if_positive(&ctrl->ccr_limit) < 0)
> > + continue;
> > +
> > + spin_lock_irqsave(&ctrl->lock, flags);
> > + if (ctrl->state != NVME_CTRL_LIVE) {
> > + spin_unlock_irqrestore(&ctrl->lock, flags);
> > + atomic_inc(&ctrl->ccr_limit);
> > + continue;
> > + }
> > +
> > + /*
> > + * We got a good candidate source controller that is locked and
> > + * LIVE. However, no guarantee ctrl will not be deleted after
> > + * ctrl->lock is released. Get a ref of both ctrl and admin_q
> > + * so they do not disappear until we are done with them.
> > + */
> > + WARN_ON_ONCE(!blk_get_queue(ctrl->admin_q));
> > + nvme_get_ctrl(ctrl);
> > + spin_unlock_irqrestore(&ctrl->lock, flags);
> > + sctrl = ctrl;
> > + break;
> > + }
> > + mutex_unlock(&nvme_subsystems_lock);
> > + return sctrl;
> > +}
> > +
> > +static void nvme_put_ctrl_ccr(struct nvme_ctrl *sctrl)
> > +{
> > + atomic_inc(&sctrl->ccr_limit);
> > + blk_put_queue(sctrl->admin_q);
> > + nvme_put_ctrl(sctrl);
> > +}
> > +
> > +static int nvme_issue_wait_ccr(struct nvme_ctrl *sctrl, struct nvme_ctrl *ictrl)
> > +{
> > + struct nvme_ccr_entry ccr = { };
> > + union nvme_result res = { 0 };
> > + struct nvme_command c = { };
> > + unsigned long flags, tmo;
> > + bool completed = false;
> > + int ret = 0;
> > + u32 result;
> > +
> > + init_completion(&ccr.complete);
> > + ccr.ictrl = ictrl;
> > +
> > + spin_lock_irqsave(&sctrl->lock, flags);
> > + list_add_tail(&ccr.list, &sctrl->ccr_list);
> > + spin_unlock_irqrestore(&sctrl->lock, flags);
> > +
> > + c.ccr.opcode = nvme_admin_cross_ctrl_reset;
> > + c.ccr.ciu = ictrl->ciu;
> > + c.ccr.icid = cpu_to_le16(ictrl->cntlid);
> > + c.ccr.cirn = cpu_to_le64(ictrl->cirn);
> > + ret = __nvme_submit_sync_cmd(sctrl->admin_q, &c, &res,
> > + NULL, 0, NVME_QID_ANY, 0);
> > + if (ret) {
> > + ret = -EIO;
> > + goto out;
> > + }
> > +
> > + result = le32_to_cpu(res.u32);
> > + if (result & 0x01) /* Immediate Reset Successful */
> > + goto out;
> > +
> > + tmo = secs_to_jiffies(ictrl->kato);
> > + if (!wait_for_completion_timeout(&ccr.complete, tmo)) {
> > + ret = -ETIMEDOUT;
> > + goto out;
> > + }
> > +
> That will be tricky. The 'ccr' command will be sent with the default
> command queue timeout, which is decoupled from KATO.
> So you really should set the command timeout for the 'ccr' command
> to ctrl->kato to ensure it'll be terminated correctly.
>
Agreed. The timeout for the CCR request should be ctrl->kato, just like
what we do for the keep-alive request. The easiest way IMO to do this is
to extend __nvme_submit_sync_cmd() to take a request timeout. I do not
want to make this change in this patchset.
Is it okay if I make this change after this patchset gets merged?
© 2016 - 2026 Red Hat, Inc.