From: Shiju Jose <shiju.jose@huawei.com>
CXL spec 3.1 section 8.2.9.9.11.1 describes the device patrol scrub control
feature. The device patrol scrub proactively locates and corrects errors on
a regular cycle. The patrol scrub control allows the requester to configure
the patrol scrub input parameters.

The patrol scrub control allows the requester to specify the number of
hours in which the patrol scrub cycle must be completed, provided that
the requested number is not less than the minimum number of hours for the
patrol scrub cycle that the device is capable of. In addition, the patrol
scrub control allows the host to disable and enable the feature, for
example when the feature must be turned off for performance-aware
operations that require background operations to be disabled.
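
For illustration only (not part of this patch): the scrub_cycle field added
below packs the current cycle in hours into bits [7:0] and the device's
minimum supported cycle into bits [15:8], so the "not less than the minimum"
rule above amounts to a check like the following sketch (the helper name is
hypothetical).

/*
 * Sketch: validate a requested patrol scrub cycle against the device
 * minimum, given the scrub_cycle encoding used in this patch.
 */
static bool patrol_scrub_cycle_in_range(uint16_t scrub_cycle,
                                        uint8_t requested_hr)
{
    uint8_t min_hr = (scrub_cycle >> 8) & 0xFF; /* minimum cycle, in hours */

    return requested_hr >= min_hr; /* reject anything below the minimum */
}
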
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Shiju Jose <shiju.jose@huawei.com>
---
hw/cxl/cxl-mailbox-utils.c | 97 +++++++++++++++++++++++++++++++++++++-
1 file changed, 96 insertions(+), 1 deletion(-)
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 1bbc9a48a6..5a6f4e4029 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -809,6 +809,7 @@ typedef struct CXLSupportedFeatureEntry {
} QEMU_PACKED CXLSupportedFeatureEntry;
enum CXL_SUPPORTED_FEATURES_LIST {
+ CXL_FEATURE_PATROL_SCRUB = 0,
CXL_FEATURE_MAX
};
@@ -849,6 +850,37 @@ enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
+/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
+static const QemuUUID patrol_scrub_uuid = {
+    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
+                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
+};
+
+#define CXL_MEMDEV_PS_GET_FEATURE_VERSION 0x01
+#define CXL_MEMDEV_PS_SET_FEATURE_VERSION 0x01
+#define CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT BIT(0)
+#define CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT BIT(1)
+#define CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT 12
+#define CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT 1
+#define CXL_MEMDEV_PS_ENABLE_DEFAULT 0
+
+/* CXL memdev patrol scrub control attributes */
+struct CXLMemPatrolScrubReadAttrbs {
+    uint8_t scrub_cycle_cap;
+    uint16_t scrub_cycle;
+    uint8_t scrub_flags;
+} QEMU_PACKED cxl_memdev_ps_feat_read_attrbs;
+
+typedef struct CXLMemPatrolScrubWriteAttrbs {
+    uint8_t scrub_cycle_hr;
+    uint8_t scrub_flags;
+} QEMU_PACKED CXLMemPatrolScrubWriteAttrbs;
+
+typedef struct CXLMemPatrolScrubSetFeature {
+    CXLSetFeatureInHeader hdr;
+    CXLMemPatrolScrubWriteAttrbs feat_data;
+} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;
+
/* CXL r3.0 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
uint8_t *payload_in,
@@ -872,7 +904,7 @@ static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
uint16_t feat_entries = 0;
if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
- get_feats_in->start_index > CXL_FEATURE_MAX) {
+ get_feats_in->start_index >= CXL_FEATURE_MAX) {
return CXL_MBOX_INVALID_INPUT;
}
req_entries = (get_feats_in->count -
@@ -884,6 +916,31 @@ static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
entry = 0;
while (entry < req_entries) {
switch (index) {
+        case CXL_FEATURE_PATROL_SCRUB:
+            /* Fill supported feature entry for device patrol scrub control */
+            get_feats_out->feat_entries[entry] =
+                (struct CXLSupportedFeatureEntry) {
+                .uuid = patrol_scrub_uuid,
+                .feat_index = index,
+                .get_feat_size = sizeof(cxl_memdev_ps_feat_read_attrbs),
+                .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrbs),
+                /* Bit[0] : 1, feature attributes changeable */
+                .attrb_flags = 0x1,
+                .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
+                .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
+                .set_feat_effects = 0,
+            };
+            feat_entries++;
+            /* Set default value for device patrol scrub read attributes */
+            cxl_memdev_ps_feat_read_attrbs.scrub_cycle_cap =
+                CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
+                CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT;
+            cxl_memdev_ps_feat_read_attrbs.scrub_cycle =
+                CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
+                (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8);
+            cxl_memdev_ps_feat_read_attrbs.scrub_flags =
+                CXL_MEMDEV_PS_ENABLE_DEFAULT;
+            break;
default:
break;
}
@@ -924,6 +981,21 @@ static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
return CXL_MBOX_INVALID_INPUT;
}
+    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
+        if (get_feature->offset >= sizeof(cxl_memdev_ps_feat_read_attrbs)) {
+            return CXL_MBOX_INVALID_INPUT;
+        }
+        bytes_to_copy = sizeof(cxl_memdev_ps_feat_read_attrbs) -
+                        get_feature->offset;
+        bytes_to_copy = (bytes_to_copy > get_feature->count) ?
+                        get_feature->count : bytes_to_copy;
+        memcpy(payload_out,
+               &cxl_memdev_ps_feat_read_attrbs + get_feature->offset,
+               bytes_to_copy);
+    } else {
+        return CXL_MBOX_UNSUPPORTED;
+    }
+
*len_out = bytes_to_copy;
return CXL_MBOX_SUCCESS;
@@ -937,6 +1009,29 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
size_t *len_out,
CXLCCI *cci)
{
+    CXLMemPatrolScrubWriteAttrbs *ps_write_attrbs;
+    CXLMemPatrolScrubSetFeature *ps_set_feature;
+    CXLSetFeatureInHeader *hdr = (void *)payload_in;
+
+    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
+        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION ||
+            (hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK) !=
+            CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER) {
+            return CXL_MBOX_UNSUPPORTED;
+        }
+
+        ps_set_feature = (void *)payload_in;
+        ps_write_attrbs = &ps_set_feature->feat_data;
+        cxl_memdev_ps_feat_read_attrbs.scrub_cycle &= ~0xFF;
+        cxl_memdev_ps_feat_read_attrbs.scrub_cycle |=
+            ps_write_attrbs->scrub_cycle_hr & 0xFF;
+        cxl_memdev_ps_feat_read_attrbs.scrub_flags &= ~0x1;
+        cxl_memdev_ps_feat_read_attrbs.scrub_flags |=
+            ps_write_attrbs->scrub_flags & 0x1;
+    } else {
+        return CXL_MBOX_UNSUPPORTED;
+    }
+
return CXL_MBOX_SUCCESS;
}
--
2.34.1
On Fri, 24 Nov 2023, shiju.jose@huawei.com wrote:

>+/* CXL memdev patrol scrub control attributes */
>+struct CXLMemPatrolScrubReadAttrbs {
>+    uint8_t scrub_cycle_cap;
>+    uint16_t scrub_cycle;
>+    uint8_t scrub_flags;
>+} QEMU_PACKED cxl_memdev_ps_feat_read_attrbs;
>+
>+typedef struct CXLMemPatrolScrubWriteAttrbs {
>+    uint8_t scrub_cycle_hr;
>+    uint8_t scrub_flags;
>+} QEMU_PACKED CXLMemPatrolScrubWriteAttrbs;

fyi there is an ask, which I certainly agree with, to make these static
here instead of at runtime.

https://lore.kernel.org/linux-cxl/20240119175006.00007f74@Huawei.com/

Also, this series probably needs rebasing per Jonathan's latest branch
with lots of updates.

>+        bytes_to_copy = sizeof(cxl_memdev_ps_feat_read_attrbs) -
>+                        get_feature->offset;
>+        bytes_to_copy = (bytes_to_copy > get_feature->count) ?
>+                        get_feature->count : bytes_to_copy;

min()?
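
The min() suggestion maps onto QEMU's MIN() macro; a minimal sketch of how
that assignment could read (assuming MIN() is available in this file, as it
is throughout QEMU via "qemu/osdep.h"):

        /* Sketch only: clamp the copy length with MIN() instead of a ternary */
        bytes_to_copy = MIN(sizeof(cxl_memdev_ps_feat_read_attrbs) -
                                get_feature->offset,
                            get_feature->count);
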
Hi Davidlohr,

Thanks for the feedback. Please find reply inline.

>-----Original Message-----
>From: Davidlohr Bueso <dave@stgolabs.net>
>Sent: 15 February 2024 20:56
>Subject: Re: [PATCH v2 2/3] hw/cxl/cxl-mailbox-utils: Add device patrol
>scrub control feature
>
>fyi there is an ask, which I certainly agree with, to make these static
>here instead of at runtime.
>
>https://lore.kernel.org/linux-cxl/20240119175006.00007f74@Huawei.com/

I will make cxl_memdev_ps_feat_read_attrbs static; however, it can't be made
const because cxl_memdev_ps_feat_read_attrbs is also used to store the
attributes written by the Set Feature command. Maybe rename
cxl_memdev_ps_feat_read_attrbs to cxl_memdev_ps_feat_attrbs to avoid
confusion?

>Also, this series probably needs rebasing per Jonathan's latest branch
>with lots of updates.

The v3 posted recently was rebased to Jonathan's recent branch
https://gitlab.com/jic23/qemu/-/tree/cxl-2024-02-05-draft
Looks like the latest branch is cxl-2024-02-14. I will rebase.
https://lore.kernel.org/qemu-devel/20240215110146.1444-1-shiju.jose@huawei.com/T/#t

Thanks,
Shiju
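
A minimal sketch of the static definition discussed above, with the defaults
applied at definition time rather than inside cmd_features_get_supported()
and using the cxl_memdev_ps_feat_attrbs rename suggested here (sketch only,
not the final implementation):

/* CXL memdev patrol scrub control attributes, with defaults at definition */
static struct CXLMemPatrolScrubReadAttrbs {
    uint8_t scrub_cycle_cap;
    uint16_t scrub_cycle;
    uint8_t scrub_flags;
} QEMU_PACKED cxl_memdev_ps_feat_attrbs = {
    .scrub_cycle_cap = CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
                       CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT,
    .scrub_cycle = CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
                   (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8),
    .scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT,
};

It stays non-const, since cmd_features_set_feature() writes the cycle and
enable bits back into it.
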
On Fri, Nov 24, 2023 at 09:53:36PM +0800, shiju.jose@huawei.com wrote:
> From: Shiju Jose <shiju.jose@huawei.com>
>
> Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
> Signed-off-by: Shiju Jose <shiju.jose@huawei.com>
> ---

LGTM except for some minor comments inlined.

>      if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
> -        get_feats_in->start_index > CXL_FEATURE_MAX) {
> +        get_feats_in->start_index >= CXL_FEATURE_MAX) {

Not totally sure about this, the spec says "...Greater than..." although
I also think it should be >=. Similar things for the offset usage below.

Fan
Hi Fan,

>-----Original Message-----
>From: fan <nifan.cxl@gmail.com>
>Sent: 15 February 2024 18:47
>Subject: Re: [PATCH v2 2/3] hw/cxl/cxl-mailbox-utils: Add device patrol
>scrub control feature
>
>>      if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
>> -        get_feats_in->start_index > CXL_FEATURE_MAX) {
>> +        get_feats_in->start_index >= CXL_FEATURE_MAX) {
>
>Not totally sure about this, the spec says "...Greater than..." although
>I also think it should be >=. Similar things for the offset usage below.

Spec r3.1 describes this in Table 8-95, Get Supported Features Input
Payload, as:
"Starting Feature Index: Index of the first requested Supported Feature
Entry. Feature index is a zero-based value."
Thus I believe get_feats_in->start_index >= CXL_FEATURE_MAX is correct
because the feature index is a zero-based value.

Regarding the offset usage mentioned, can you point to which code?
Is it get_feature->offset?

Thanks,
Shiju
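
As a small worked example of the zero-based bound discussed above (sketch
only): valid starting indices run from 0 to CXL_FEATURE_MAX - 1, so with the
current enum (CXL_FEATURE_PATROL_SCRUB = 0, CXL_FEATURE_MAX = 1) only index 0
is accepted.

    /* Sketch: zero-based bound check as it behaves with the current enum */
    if (get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT; /* e.g. start_index == 1 is rejected */
    }
    /* start_index == 0 selects the patrol scrub feature entry */
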
On Fri, Feb 16, 2024 at 10:16:12AM +0000, Shiju Jose wrote:
> Spec r3.1 describes this in Table 8-95, Get Supported Features Input
> Payload, as:
> "Starting Feature Index: Index of the first requested Supported Feature
> Entry. Feature index is a zero-based value."
> Thus I believe get_feats_in->start_index >= CXL_FEATURE_MAX is correct
> because the feature index is a zero-based value.

That is also my understanding.

> Regarding the offset usage mentioned, can you point to which code?
> Is it get_feature->offset?

Yea. Maybe not an issue as long as we keep all consistent.

Fan