block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- qapi/block-core.json | 5 +- 2 files changed, 192 insertions(+), 15 deletions(-)
This patch adds the support of preallocation (off/full) for the RBD
block driver.
If rbd_writesame() is available and supports zeroed buffers, we use
it to quickly fill the image when full preallocation is required.
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
v3:
- rebased on master
- filled with zeroed buffer [Max]
- used rbd_writesame() only when we can disable the discard of zeroed
buffers
- added 'since: 4.2' in qapi/block-core.json [Max]
- used buffer as large as the "stripe unit"
---
block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++---
qapi/block-core.json | 5 +-
2 files changed, 192 insertions(+), 15 deletions(-)
diff --git a/block/rbd.c b/block/rbd.c
index 59757b3120..d923a5a26c 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -64,6 +64,7 @@
#define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER)
#define RBD_MAX_SNAPS 100
+#define RBD_DEFAULT_CONCURRENT_OPS 10
/* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */
#ifdef LIBRBD_SUPPORTS_IOVEC
@@ -104,6 +105,7 @@ typedef struct BDRVRBDState {
char *image_name;
char *snap;
uint64_t image_size;
+ bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */
} BDRVRBDState;
static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
@@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs)
}
}
+static int qemu_rbd_get_max_concurrent_ops(rados_t cluster)
+{
+ char buf[16];
+ int ret, max_concurrent_ops;
+
+ ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf,
+ sizeof(buf));
+ if (ret < 0) {
+ return RBD_DEFAULT_CONCURRENT_OPS;
+ }
+
+ ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops);
+ if (ret < 0) {
+ return RBD_DEFAULT_CONCURRENT_OPS;
+ }
+
+ return max_concurrent_ops;
+}
+
+static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image,
+ int64_t offset, PreallocMode prealloc,
+ bool ws_zero_supported, Error **errp)
+{
+ uint64_t current_length;
+ char *buf = NULL;
+ int ret;
+
+ ret = rbd_get_size(image, &current_length);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get file length");
+ goto out;
+ }
+
+ if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
+ error_setg(errp, "Cannot use preallocation for shrinking files");
+ ret = -ENOTSUP;
+ goto out;
+ }
+
+ switch (prealloc) {
+ case PREALLOC_MODE_FULL: {
+ uint64_t buf_size, current_offset = current_length;
+ ssize_t bytes;
+
+ ret = rbd_get_stripe_unit(image, &buf_size);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to get stripe unit");
+ goto out;
+ }
+
+ ret = rbd_resize(image, offset);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to resize file");
+ goto out;
+ }
+
+ buf = g_malloc0(buf_size);
+
+#ifdef LIBRBD_SUPPORTS_WRITESAME
+ if (ws_zero_supported) {
+ uint64_t writesame_max_size;
+ int max_concurrent_ops;
+
+ max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster);
+ /*
+ * We limit the rbd_writesame() size to avoid spawning more than
+ * 'rbd_concurrent_management_ops' concurrent operations.
+ */
+ writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX);
+
+ while (offset - current_offset > buf_size) {
+ bytes = MIN(offset - current_offset, writesame_max_size);
+ /*
+ * rbd_writesame() supports only requests where the size of the
+ * operation is a multiple of the buffer size.
+ */
+ bytes -= bytes % buf_size;
+
+ bytes = rbd_writesame(image, current_offset, bytes, buf,
+ buf_size, 0);
+ if (bytes < 0) {
+ ret = bytes;
+ error_setg_errno(errp, -ret,
+ "Failed to write for preallocation");
+ goto out;
+ }
+
+ current_offset += bytes;
+ }
+ }
+#endif /* LIBRBD_SUPPORTS_WRITESAME */
+
+ while (current_offset < offset) {
+ bytes = rbd_write(image, current_offset,
+ MIN(offset - current_offset, buf_size), buf);
+ if (bytes < 0) {
+ ret = bytes;
+ error_setg_errno(errp, -ret,
+ "Failed to write for preallocation");
+ goto out;
+ }
+
+ current_offset += bytes;
+ }
+
+ ret = rbd_flush(image);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to flush the file");
+ goto out;
+ }
+
+ break;
+ }
+ case PREALLOC_MODE_OFF:
+ ret = rbd_resize(image, offset);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed to resize file");
+ goto out;
+ }
+ break;
+ default:
+ error_setg(errp, "Unsupported preallocation mode: %s",
+ PreallocMode_str(prealloc));
+ ret = -ENOTSUP;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ g_free(buf);
+ return ret;
+}
+
+static bool qemu_rbd_writesame_zero_supported(rados_t *cluster)
+{
+ int ret = 1;
+
+#ifdef LIBRBD_SUPPORTS_WRITESAME
+ /*
+ * When the "rbd_discard_on_zeroed_write_same" option is not available,
+ * rbd_writesame() may discard requests with a zeroed buffer.
+ */
+ ret = rados_conf_set(*cluster, "rbd_discard_on_zeroed_write_same", "false");
+#endif
+
+ return ret == 0;
+}
+
static QemuOptsList runtime_opts = {
.name = "rbd",
.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
@@ -378,6 +529,7 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options,
BlockdevCreateOptionsRbd *opts = &options->u.rbd;
rados_t cluster;
rados_ioctx_t io_ctx;
+ rbd_image_t image;
int obj_order = 0;
int ret;
@@ -406,13 +558,23 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options,
return ret;
}
- ret = rbd_create(io_ctx, opts->location->image, opts->size, &obj_order);
+ ret = rbd_create(io_ctx, opts->location->image, 0, &obj_order);
if (ret < 0) {
error_setg_errno(errp, -ret, "error rbd create");
goto out;
}
- ret = 0;
+ ret = rbd_open(io_ctx, opts->location->image, &image, NULL);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "error rbd open");
+ goto out;
+ }
+
+ ret = qemu_rbd_do_truncate(cluster, image, opts->size, opts->preallocation,
+ qemu_rbd_writesame_zero_supported(&cluster),
+ errp);
+
+ rbd_close(image);
out:
rados_ioctx_destroy(io_ctx);
rados_shutdown(cluster);
@@ -433,6 +595,7 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename,
BlockdevOptionsRbd *loc;
Error *local_err = NULL;
const char *keypairs, *password_secret;
+ char *prealloc;
QDict *options = NULL;
int ret = 0;
@@ -451,6 +614,16 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename,
BLOCK_OPT_CLUSTER_SIZE, 0);
rbd_opts->has_cluster_size = (rbd_opts->cluster_size != 0);
+ prealloc = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
+ rbd_opts->preallocation = qapi_enum_parse(&PreallocMode_lookup, prealloc,
+ PREALLOC_MODE_OFF, &local_err);
+ g_free(prealloc);
+ if (local_err) {
+ ret = -EINVAL;
+ error_propagate(errp, local_err);
+ goto exit;
+ }
+
options = qdict_new();
qemu_rbd_parse_filename(filename, options, &local_err);
if (local_err) {
@@ -770,6 +943,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
s->snap = g_strdup(opts->snapshot);
s->image_name = g_strdup(opts->image);
+ s->ws_zero_supported = qemu_rbd_writesame_zero_supported(&s->cluster);
/* rbd_open is always r/w */
r = rbd_open(s->io_ctx, s->image_name, &s->image, s->snap);
@@ -1089,21 +1263,16 @@ static int coroutine_fn qemu_rbd_co_truncate(BlockDriverState *bs,
PreallocMode prealloc,
Error **errp)
{
- int r;
-
- if (prealloc != PREALLOC_MODE_OFF) {
- error_setg(errp, "Unsupported preallocation mode '%s'",
- PreallocMode_str(prealloc));
- return -ENOTSUP;
- }
+ BDRVRBDState *s = bs->opaque;
+ int ret;
- r = qemu_rbd_resize(bs, offset);
- if (r < 0) {
- error_setg_errno(errp, -r, "Failed to resize file");
- return r;
+ ret = qemu_rbd_do_truncate(s->cluster, s->image, offset, prealloc,
+ s->ws_zero_supported, errp);
+ if (ret == 0) {
+ s->image_size = offset;
}
- return 0;
+ return ret;
}
static int qemu_rbd_snap_create(BlockDriverState *bs,
@@ -1256,6 +1425,11 @@ static QemuOptsList qemu_rbd_create_opts = {
.type = QEMU_OPT_SIZE,
.help = "RBD object size"
},
+ {
+ .name = BLOCK_OPT_PREALLOC,
+ .type = QEMU_OPT_STRING,
+ .help = "Preallocation mode (allowed values: off, full)"
+ },
{
.name = "password-secret",
.type = QEMU_OPT_STRING,
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 0d43d4f37c..ff55171f8d 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -4346,13 +4346,16 @@
# point to a snapshot.
# @size Size of the virtual disk in bytes
# @cluster-size RBD object size
+# @preallocation Preallocation mode for the new image (since: 4.2)
+# (default: off; allowed values: off, full)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsRbd',
'data': { 'location': 'BlockdevOptionsRbd',
'size': 'size',
- '*cluster-size' : 'size' } }
+ '*cluster-size' : 'size',
+ '*preallocation': 'PreallocMode' } }
##
# @BlockdevVmdkSubformat:
--
2.20.1
On Tue, 2019-07-23 at 09:13 +0200, Stefano Garzarella wrote: > This patch adds the support of preallocation (off/full) for the RBD > block driver. > If rbd_writesame() is available and supports zeroed buffers, we use > it to quickly fill the image when full preallocation is required. > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > --- > v3: > - rebased on master > - filled with zeroed buffer [Max] > - used rbd_writesame() only when we can disable the discard of zeroed > buffers > - added 'since: 4.2' in qapi/block-core.json [Max] > - used buffer as large as the "stripe unit" > --- > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > qapi/block-core.json | 5 +- > 2 files changed, 192 insertions(+), 15 deletions(-) > > diff --git a/block/rbd.c b/block/rbd.c > index 59757b3120..d923a5a26c 100644 > --- a/block/rbd.c > +++ b/block/rbd.c > @@ -64,6 +64,7 @@ > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > #define RBD_MAX_SNAPS 100 > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > #ifdef LIBRBD_SUPPORTS_IOVEC > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > char *image_name; > char *snap; > uint64_t image_size; > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > } BDRVRBDState; > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > } > } > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > +{ > + char buf[16]; > + int ret, max_concurrent_ops; > + > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > + sizeof(buf)); > + if (ret < 0) { > + return RBD_DEFAULT_CONCURRENT_OPS; > + } > + > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > + if (ret < 0) { > + return RBD_DEFAULT_CONCURRENT_OPS; > + } > + > + return max_concurrent_ops; > +} > + > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > + 
int64_t offset, PreallocMode prealloc, > + bool ws_zero_supported, Error **errp) > +{ > + uint64_t current_length; > + char *buf = NULL; > + int ret; > + > + ret = rbd_get_size(image, ¤t_length); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to get file length"); > + goto out; > + } > + > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > + error_setg(errp, "Cannot use preallocation for shrinking files"); > + ret = -ENOTSUP; > + goto out; > + } > + > + switch (prealloc) { > + case PREALLOC_MODE_FULL: { > + uint64_t buf_size, current_offset = current_length; > + ssize_t bytes; > + > + ret = rbd_get_stripe_unit(image, &buf_size); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > + goto out; > + } > + > + ret = rbd_resize(image, offset); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to resize file"); > + goto out; > + } > + > + buf = g_malloc0(buf_size); > + > +#ifdef LIBRBD_SUPPORTS_WRITESAME > + if (ws_zero_supported) { > + uint64_t writesame_max_size; > + int max_concurrent_ops; > + > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > + /* > + * We limit the rbd_writesame() size to avoid to spawn more then > + * 'rbd_concurrent_management_ops' concurrent operations. > + */ > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > + > + while (offset - current_offset > buf_size) { > + bytes = MIN(offset - current_offset, writesame_max_size); > + /* > + * rbd_writesame() supports only request where the size of the > + * operation is multiple of buffer size. 
> + */ > + bytes -= bytes % buf_size; > + > + bytes = rbd_writesame(image, current_offset, bytes, buf, > + buf_size, 0); > + if (bytes < 0) { > + ret = bytes; > + error_setg_errno(errp, -ret, > + "Failed to write for preallocation"); > + goto out; > + } > + > + current_offset += bytes; > + } > + } > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > + > + while (current_offset < offset) { > + bytes = rbd_write(image, current_offset, > + MIN(offset - current_offset, buf_size), buf); > + if (bytes < 0) { > + ret = bytes; > + error_setg_errno(errp, -ret, > + "Failed to write for preallocation"); > + goto out; > + } > + > + current_offset += bytes; > + } > + > + ret = rbd_flush(image); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to flush the file"); > + goto out; > + } > + > + break; > + } > + case PREALLOC_MODE_OFF: > + ret = rbd_resize(image, offset); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to resize file"); > + goto out; > + } > + break; > + default: > + error_setg(errp, "Unsupported preallocation mode: %s", > + PreallocMode_str(prealloc)); > + ret = -ENOTSUP; > + goto out; > + } > + > + ret = 0; > + > +out: > + g_free(buf); > + return ret; > +} > + > +static bool qemu_rbd_writesame_zero_supported(rados_t *cluster) > +{ > + int ret = 1; > + > +#ifdef LIBRBD_SUPPORTS_WRITESAME > + /* > + * When "rbd_discard_on_zeroed_write_same" is not available, rbd_writesame() > + * can discard requests with zeroed buffer. 
> + */ > + ret = rados_conf_set(*cluster, "rbd_discard_on_zeroed_write_same", "false"); > +#endif > + > + return ret == 0; > +} > + > static QemuOptsList runtime_opts = { > .name = "rbd", > .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), > @@ -378,6 +529,7 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, > BlockdevCreateOptionsRbd *opts = &options->u.rbd; > rados_t cluster; > rados_ioctx_t io_ctx; > + rbd_image_t image; > int obj_order = 0; > int ret; > > @@ -406,13 +558,23 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, > return ret; > } > > - ret = rbd_create(io_ctx, opts->location->image, opts->size, &obj_order); > + ret = rbd_create(io_ctx, opts->location->image, 0, &obj_order); > if (ret < 0) { > error_setg_errno(errp, -ret, "error rbd create"); > goto out; > } > > - ret = 0; > + ret = rbd_open(io_ctx, opts->location->image, &image, NULL); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "error rbd open"); > + goto out; > + } > + > + ret = qemu_rbd_do_truncate(cluster, image, opts->size, opts->preallocation, > + qemu_rbd_writesame_zero_supported(&cluster), > + errp); > + > + rbd_close(image); > out: > rados_ioctx_destroy(io_ctx); > rados_shutdown(cluster); > @@ -433,6 +595,7 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename, > BlockdevOptionsRbd *loc; > Error *local_err = NULL; > const char *keypairs, *password_secret; > + char *prealloc; > QDict *options = NULL; > int ret = 0; > > @@ -451,6 +614,16 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename, > BLOCK_OPT_CLUSTER_SIZE, 0); > rbd_opts->has_cluster_size = (rbd_opts->cluster_size != 0); > > + prealloc = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); > + rbd_opts->preallocation = qapi_enum_parse(&PreallocMode_lookup, prealloc, > + PREALLOC_MODE_OFF, &local_err); > + g_free(prealloc); > + if (local_err) { > + ret = -EINVAL; > + error_propagate(errp, local_err); > + goto exit; > + } > + > options = qdict_new(); > 
qemu_rbd_parse_filename(filename, options, &local_err); > if (local_err) { > @@ -770,6 +943,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, > > s->snap = g_strdup(opts->snapshot); > s->image_name = g_strdup(opts->image); > + s->ws_zero_supported = qemu_rbd_writesame_zero_supported(&s->cluster); > > /* rbd_open is always r/w */ > r = rbd_open(s->io_ctx, s->image_name, &s->image, s->snap); > @@ -1089,21 +1263,16 @@ static int coroutine_fn qemu_rbd_co_truncate(BlockDriverState *bs, > PreallocMode prealloc, > Error **errp) > { > - int r; > - > - if (prealloc != PREALLOC_MODE_OFF) { > - error_setg(errp, "Unsupported preallocation mode '%s'", > - PreallocMode_str(prealloc)); > - return -ENOTSUP; > - } > + BDRVRBDState *s = bs->opaque; > + int ret; > > - r = qemu_rbd_resize(bs, offset); > - if (r < 0) { > - error_setg_errno(errp, -r, "Failed to resize file"); > - return r; > + ret = qemu_rbd_do_truncate(s->cluster, s->image, offset, prealloc, > + s->ws_zero_supported, errp); > + if (ret == 0) { > + s->image_size = offset; > } > > - return 0; > + return ret; > } > > static int qemu_rbd_snap_create(BlockDriverState *bs, > @@ -1256,6 +1425,11 @@ static QemuOptsList qemu_rbd_create_opts = { > .type = QEMU_OPT_SIZE, > .help = "RBD object size" > }, > + { > + .name = BLOCK_OPT_PREALLOC, > + .type = QEMU_OPT_STRING, > + .help = "Preallocation mode (allowed values: off, full)" > + }, > { > .name = "password-secret", > .type = QEMU_OPT_STRING, > diff --git a/qapi/block-core.json b/qapi/block-core.json > index 0d43d4f37c..ff55171f8d 100644 > --- a/qapi/block-core.json > +++ b/qapi/block-core.json > @@ -4346,13 +4346,16 @@ > # point to a snapshot. 
> # @size Size of the virtual disk in bytes > # @cluster-size RBD object size > +# @preallocation Preallocation mode for the new image (since: 4.2) > +# (default: off; allowed values: off, full) > # > # Since: 2.12 > ## > { 'struct': 'BlockdevCreateOptionsRbd', > 'data': { 'location': 'BlockdevOptionsRbd', > 'size': 'size', > - '*cluster-size' : 'size' } } > + '*cluster-size' : 'size', > + '*preallocation': 'PreallocMode' } } > > ## > # @BlockdevVmdkSubformat: I think I don't see anything obviously wrong, but note that I don't know ceph yet, thus I might have missed something. So: Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> Best regards, Maxim Levitsky
On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > This patch adds the support of preallocation (off/full) for the RBD > block driver. > If rbd_writesame() is available and supports zeroed buffers, we use > it to quickly fill the image when full preallocation is required. > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > --- > v3: > - rebased on master > - filled with zeroed buffer [Max] > - used rbd_writesame() only when we can disable the discard of zeroed > buffers > - added 'since: 4.2' in qapi/block-core.json [Max] > - used buffer as large as the "stripe unit" > --- > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > qapi/block-core.json | 5 +- > 2 files changed, 192 insertions(+), 15 deletions(-) > > diff --git a/block/rbd.c b/block/rbd.c > index 59757b3120..d923a5a26c 100644 > --- a/block/rbd.c > +++ b/block/rbd.c > @@ -64,6 +64,7 @@ > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > #define RBD_MAX_SNAPS 100 > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > #ifdef LIBRBD_SUPPORTS_IOVEC > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > char *image_name; > char *snap; > uint64_t image_size; > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > } BDRVRBDState; > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > } > } > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > +{ > + char buf[16]; > + int ret, max_concurrent_ops; > + > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > + sizeof(buf)); > + if (ret < 0) { > + return RBD_DEFAULT_CONCURRENT_OPS; > + } > + > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > + if (ret < 0) { > + return RBD_DEFAULT_CONCURRENT_OPS; > + } > + > + return max_concurrent_ops; > +} > + > +static int qemu_rbd_do_truncate(rados_t cluster, 
rbd_image_t image, > + int64_t offset, PreallocMode prealloc, > + bool ws_zero_supported, Error **errp) > +{ > + uint64_t current_length; > + char *buf = NULL; > + int ret; > + > + ret = rbd_get_size(image, ¤t_length); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to get file length"); > + goto out; > + } > + > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > + error_setg(errp, "Cannot use preallocation for shrinking files"); > + ret = -ENOTSUP; > + goto out; > + } > + > + switch (prealloc) { > + case PREALLOC_MODE_FULL: { > + uint64_t buf_size, current_offset = current_length; > + ssize_t bytes; > + > + ret = rbd_get_stripe_unit(image, &buf_size); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > + goto out; > + } > + > + ret = rbd_resize(image, offset); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to resize file"); > + goto out; > + } > + > + buf = g_malloc0(buf_size); > + > +#ifdef LIBRBD_SUPPORTS_WRITESAME > + if (ws_zero_supported) { > + uint64_t writesame_max_size; > + int max_concurrent_ops; > + > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > + /* > + * We limit the rbd_writesame() size to avoid to spawn more then > + * 'rbd_concurrent_management_ops' concurrent operations. > + */ > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); In the most efficient world, the 'buf_size' would be some small, fixed power of 2 value (like 512 bytes) since there isn't much need to send extra zeroes. You would then want to writesame the full stripe period (if possible), where a stripe period is the data block object size (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. In this case, the stripe count becomes the number of in-flight IOs. Therefore, you could substitute its value w/ the max_concurrent_ops to ensure you are issuing exactly max_concurrent_ops IOs per rbd_writesame call. 
> + > + while (offset - current_offset > buf_size) { > + bytes = MIN(offset - current_offset, writesame_max_size); > + /* > + * rbd_writesame() supports only request where the size of the > + * operation is multiple of buffer size. > + */ > + bytes -= bytes % buf_size; > + > + bytes = rbd_writesame(image, current_offset, bytes, buf, > + buf_size, 0); If the RBD in-memory cache is enabled during this operation, the writesame will effectively just be turned into a write. Therefore, when pre-allocating, you will want to disable the cache. > + if (bytes < 0) { > + ret = bytes; > + error_setg_errno(errp, -ret, > + "Failed to write for preallocation"); > + goto out; > + } > + > + current_offset += bytes; > + } > + } > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > + > + while (current_offset < offset) { > + bytes = rbd_write(image, current_offset, > + MIN(offset - current_offset, buf_size), buf); > + if (bytes < 0) { > + ret = bytes; > + error_setg_errno(errp, -ret, > + "Failed to write for preallocation"); > + goto out; > + } > + > + current_offset += bytes; > + } > + > + ret = rbd_flush(image); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to flush the file"); > + goto out; > + } > + > + break; > + } > + case PREALLOC_MODE_OFF: > + ret = rbd_resize(image, offset); I'm not familiar enough w/ the QEMU block code, but why would the PREALLOC_MODE_FULL case not need to resize the image? > + if (ret < 0) { > + error_setg_errno(errp, -ret, "Failed to resize file"); > + goto out; > + } > + break; > + default: > + error_setg(errp, "Unsupported preallocation mode: %s", > + PreallocMode_str(prealloc)); > + ret = -ENOTSUP; > + goto out; > + } > + > + ret = 0; > + > +out: > + g_free(buf); > + return ret; > +} > + > +static bool qemu_rbd_writesame_zero_supported(rados_t *cluster) > +{ > + int ret = 1; > + > +#ifdef LIBRBD_SUPPORTS_WRITESAME > + /* > + * When "rbd_discard_on_zeroed_write_same" is not available, rbd_writesame() > + * can discard requests with zeroed buffer. 
> + */ > + ret = rados_conf_set(*cluster, "rbd_discard_on_zeroed_write_same", "false"); > +#endif > + > + return ret == 0; > +} > + > static QemuOptsList runtime_opts = { > .name = "rbd", > .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), > @@ -378,6 +529,7 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, > BlockdevCreateOptionsRbd *opts = &options->u.rbd; > rados_t cluster; > rados_ioctx_t io_ctx; > + rbd_image_t image; > int obj_order = 0; > int ret; > > @@ -406,13 +558,23 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, > return ret; > } > > - ret = rbd_create(io_ctx, opts->location->image, opts->size, &obj_order); > + ret = rbd_create(io_ctx, opts->location->image, 0, &obj_order); > if (ret < 0) { > error_setg_errno(errp, -ret, "error rbd create"); > goto out; > } > > - ret = 0; > + ret = rbd_open(io_ctx, opts->location->image, &image, NULL); > + if (ret < 0) { > + error_setg_errno(errp, -ret, "error rbd open"); > + goto out; > + } > + > + ret = qemu_rbd_do_truncate(cluster, image, opts->size, opts->preallocation, > + qemu_rbd_writesame_zero_supported(&cluster), > + errp); > + > + rbd_close(image); > out: > rados_ioctx_destroy(io_ctx); > rados_shutdown(cluster); > @@ -433,6 +595,7 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename, > BlockdevOptionsRbd *loc; > Error *local_err = NULL; > const char *keypairs, *password_secret; > + char *prealloc; > QDict *options = NULL; > int ret = 0; > > @@ -451,6 +614,16 @@ static int coroutine_fn qemu_rbd_co_create_opts(const char *filename, > BLOCK_OPT_CLUSTER_SIZE, 0); > rbd_opts->has_cluster_size = (rbd_opts->cluster_size != 0); > > + prealloc = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); > + rbd_opts->preallocation = qapi_enum_parse(&PreallocMode_lookup, prealloc, > + PREALLOC_MODE_OFF, &local_err); > + g_free(prealloc); > + if (local_err) { > + ret = -EINVAL; > + error_propagate(errp, local_err); > + goto exit; > + } > + > options = qdict_new(); > 
qemu_rbd_parse_filename(filename, options, &local_err); > if (local_err) { > @@ -770,6 +943,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, > > s->snap = g_strdup(opts->snapshot); > s->image_name = g_strdup(opts->image); > + s->ws_zero_supported = qemu_rbd_writesame_zero_supported(&s->cluster); > > /* rbd_open is always r/w */ > r = rbd_open(s->io_ctx, s->image_name, &s->image, s->snap); > @@ -1089,21 +1263,16 @@ static int coroutine_fn qemu_rbd_co_truncate(BlockDriverState *bs, > PreallocMode prealloc, > Error **errp) > { > - int r; > - > - if (prealloc != PREALLOC_MODE_OFF) { > - error_setg(errp, "Unsupported preallocation mode '%s'", > - PreallocMode_str(prealloc)); > - return -ENOTSUP; > - } > + BDRVRBDState *s = bs->opaque; > + int ret; > > - r = qemu_rbd_resize(bs, offset); > - if (r < 0) { > - error_setg_errno(errp, -r, "Failed to resize file"); > - return r; > + ret = qemu_rbd_do_truncate(s->cluster, s->image, offset, prealloc, > + s->ws_zero_supported, errp); > + if (ret == 0) { > + s->image_size = offset; > } > > - return 0; > + return ret; > } > > static int qemu_rbd_snap_create(BlockDriverState *bs, > @@ -1256,6 +1425,11 @@ static QemuOptsList qemu_rbd_create_opts = { > .type = QEMU_OPT_SIZE, > .help = "RBD object size" > }, > + { > + .name = BLOCK_OPT_PREALLOC, > + .type = QEMU_OPT_STRING, > + .help = "Preallocation mode (allowed values: off, full)" > + }, > { > .name = "password-secret", > .type = QEMU_OPT_STRING, > diff --git a/qapi/block-core.json b/qapi/block-core.json > index 0d43d4f37c..ff55171f8d 100644 > --- a/qapi/block-core.json > +++ b/qapi/block-core.json > @@ -4346,13 +4346,16 @@ > # point to a snapshot. 
> # @size Size of the virtual disk in bytes > # @cluster-size RBD object size > +# @preallocation Preallocation mode for the new image (since: 4.2) > +# (default: off; allowed values: off, full) > # > # Since: 2.12 > ## > { 'struct': 'BlockdevCreateOptionsRbd', > 'data': { 'location': 'BlockdevOptionsRbd', > 'size': 'size', > - '*cluster-size' : 'size' } } > + '*cluster-size' : 'size', > + '*preallocation': 'PreallocMode' } } > > ## > # @BlockdevVmdkSubformat: > -- > 2.20.1 > -- Jason
On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > This patch adds the support of preallocation (off/full) for the RBD > > block driver. > > If rbd_writesame() is available and supports zeroed buffers, we use > > it to quickly fill the image when full preallocation is required. > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > --- > > v3: > > - rebased on master > > - filled with zeroed buffer [Max] > > - used rbd_writesame() only when we can disable the discard of zeroed > > buffers > > - added 'since: 4.2' in qapi/block-core.json [Max] > > - used buffer as large as the "stripe unit" > > --- > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > qapi/block-core.json | 5 +- > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > diff --git a/block/rbd.c b/block/rbd.c > > index 59757b3120..d923a5a26c 100644 > > --- a/block/rbd.c > > +++ b/block/rbd.c > > @@ -64,6 +64,7 @@ > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > #define RBD_MAX_SNAPS 100 > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > #ifdef LIBRBD_SUPPORTS_IOVEC > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > char *image_name; > > char *snap; > > uint64_t image_size; > > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > } BDRVRBDState; > > > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > } > > } > > > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > +{ > > + char buf[16]; > > + int ret, max_concurrent_ops; > > + > > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > > + sizeof(buf)); > > + if (ret < 0) { > > + return RBD_DEFAULT_CONCURRENT_OPS; > > + } > > + > > + ret = qemu_strtoi(buf, NULL, 10, 
&max_concurrent_ops); > > + if (ret < 0) { > > + return RBD_DEFAULT_CONCURRENT_OPS; > > + } > > + > > + return max_concurrent_ops; > > +} > > + > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > + int64_t offset, PreallocMode prealloc, > > + bool ws_zero_supported, Error **errp) > > +{ > > + uint64_t current_length; > > + char *buf = NULL; > > + int ret; > > + > > + ret = rbd_get_size(image, ¤t_length); > > + if (ret < 0) { > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > + goto out; > > + } > > + > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > + ret = -ENOTSUP; > > + goto out; > > + } > > + > > + switch (prealloc) { > > + case PREALLOC_MODE_FULL: { > > + uint64_t buf_size, current_offset = current_length; > > + ssize_t bytes; > > + > > + ret = rbd_get_stripe_unit(image, &buf_size); > > + if (ret < 0) { > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > + goto out; > > + } > > + > > + ret = rbd_resize(image, offset); > > + if (ret < 0) { > > + error_setg_errno(errp, -ret, "Failed to resize file"); > > + goto out; > > + } > > + > > + buf = g_malloc0(buf_size); > > + > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > + if (ws_zero_supported) { > > + uint64_t writesame_max_size; > > + int max_concurrent_ops; > > + > > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > > + /* > > + * We limit the rbd_writesame() size to avoid to spawn more then > > + * 'rbd_concurrent_management_ops' concurrent operations. > > + */ > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > In the most efficient world, the 'buf_size' would be some small, fixed > power of 2 value (like 512 bytes) since there isn't much need to send > extra zeroes. 
You would then want to writesame the full stripe period > (if possible), where a stripe period is the data block object size > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > In this case, the stripe count becomes the number of in-flight IOs. > Therefore, you could substitute its value w/ the max_concurrent_ops to > ensure you are issuing exactly max_concurrent_ops IOs per > rbd_writesame call. > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when we didn't use writesame, the rbd_write() was very slow, so I used the stripe unit as a buffer size. Do you think is better to have a small buffer (512 byte) when we use writesame or a 'stripe unit' buffer when we can't use it? > > + > > + while (offset - current_offset > buf_size) { > > + bytes = MIN(offset - current_offset, writesame_max_size); > > + /* > > + * rbd_writesame() supports only request where the size of the > > + * operation is multiple of buffer size. > > + */ > > + bytes -= bytes % buf_size; > > + > > + bytes = rbd_writesame(image, current_offset, bytes, buf, > > + buf_size, 0); > > If the RBD in-memory cache is enabled during this operation, the > writesame will effectively just be turned into a write. Therefore, > when pre-allocating, you will want to disable the cache. > During the creation, when preallocation is often used, we disable the cache: static int qemu_rbd_do_create(BlockdevCreateOptions *options, const char *keypairs, const char *password_secret, Error **errp) { ... ret = qemu_rbd_connect(&cluster, &io_ctx, opts->location, false, keypairs, ^^ cache param ... } Do you think I should disable it in any case during the preallocation? 
> > + if (bytes < 0) { > > + ret = bytes; > > + error_setg_errno(errp, -ret, > > + "Failed to write for preallocation"); > > + goto out; > > + } > > + > > + current_offset += bytes; > > + } > > + } > > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > > + > > + while (current_offset < offset) { > > + bytes = rbd_write(image, current_offset, > > + MIN(offset - current_offset, buf_size), buf); > > + if (bytes < 0) { > > + ret = bytes; > > + error_setg_errno(errp, -ret, > > + "Failed to write for preallocation"); > > + goto out; > > + } > > + > > + current_offset += bytes; > > + } > > + > > + ret = rbd_flush(image); > > + if (ret < 0) { > > + error_setg_errno(errp, -ret, "Failed to flush the file"); > > + goto out; > > + } > > + > > + break; > > + } > > + case PREALLOC_MODE_OFF: > > + ret = rbd_resize(image, offset); > > I'm not familiar enough w/ the QEMU block code, but why would the > PREALLOC_MODE_FULL case not need to resize the image? PREALLOC_MODE_FULL needs it too, I did it just before the g_malloc0() in this patch :-) Thanks, Stefano
On Thu, Jul 25, 2019 at 4:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > This patch adds the support of preallocation (off/full) for the RBD > > > block driver. > > > If rbd_writesame() is available and supports zeroed buffers, we use > > > it to quickly fill the image when full preallocation is required. > > > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > > --- > > > v3: > > > - rebased on master > > > - filled with zeroed buffer [Max] > > > - used rbd_writesame() only when we can disable the discard of zeroed > > > buffers > > > - added 'since: 4.2' in qapi/block-core.json [Max] > > > - used buffer as large as the "stripe unit" > > > --- > > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > > qapi/block-core.json | 5 +- > > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > > > diff --git a/block/rbd.c b/block/rbd.c > > > index 59757b3120..d923a5a26c 100644 > > > --- a/block/rbd.c > > > +++ b/block/rbd.c > > > @@ -64,6 +64,7 @@ > > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > > > #define RBD_MAX_SNAPS 100 > > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > > #ifdef LIBRBD_SUPPORTS_IOVEC > > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > > char *image_name; > > > char *snap; > > > uint64_t image_size; > > > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > > } BDRVRBDState; > > > > > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > > } > > > } > > > > > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > > +{ > > > + char buf[16]; > > > + int ret, max_concurrent_ops; > > > + > > > + ret = rados_conf_get(cluster, 
"rbd_concurrent_management_ops", buf, > > > + sizeof(buf)); > > > + if (ret < 0) { > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > + } > > > + > > > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > > > + if (ret < 0) { > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > + } > > > + > > > + return max_concurrent_ops; > > > +} > > > + > > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > > + int64_t offset, PreallocMode prealloc, > > > + bool ws_zero_supported, Error **errp) > > > +{ > > > + uint64_t current_length; > > > + char *buf = NULL; > > > + int ret; > > > + > > > + ret = rbd_get_size(image, ¤t_length); > > > + if (ret < 0) { > > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > > + goto out; > > > + } > > > + > > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > > + ret = -ENOTSUP; > > > + goto out; > > > + } > > > + > > > + switch (prealloc) { > > > + case PREALLOC_MODE_FULL: { > > > + uint64_t buf_size, current_offset = current_length; > > > + ssize_t bytes; > > > + > > > + ret = rbd_get_stripe_unit(image, &buf_size); > > > + if (ret < 0) { > > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > > + goto out; > > > + } > > > + > > > + ret = rbd_resize(image, offset); > > > + if (ret < 0) { > > > + error_setg_errno(errp, -ret, "Failed to resize file"); > > > + goto out; > > > + } > > > + > > > + buf = g_malloc0(buf_size); > > > + > > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > > + if (ws_zero_supported) { > > > + uint64_t writesame_max_size; > > > + int max_concurrent_ops; > > > + > > > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > > > + /* > > > + * We limit the rbd_writesame() size to avoid to spawn more then > > > + * 'rbd_concurrent_management_ops' concurrent operations. 
> > > + */ > > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > > > In the most efficient world, the 'buf_size' would be some small, fixed > > power of 2 value (like 512 bytes) since there isn't much need to send > > extra zeroes. You would then want to writesame the full stripe period > > (if possible), where a stripe period is the data block object size > > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > > In this case, the stripe count becomes the number of in-flight IOs. > > Therefore, you could substitute its value w/ the max_concurrent_ops to > > ensure you are issuing exactly max_concurrent_ops IOs per > > rbd_writesame call. > > > > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when > we didn't use writesame, the rbd_write() was very slow, so I used the > stripe unit as a buffer size. > > Do you think is better to have a small buffer (512 byte) when we use > writesame or a 'stripe unit' buffer when we can't use it? I'd use a small buffer for rbd_writesame and then just reallocate the buffer to a larger size for "rbd_write". It would be most efficient to allocate a "object size * max concurrent ops" -sized buffer (up to some reasonable maximum) for the standard rbd_write. Just make sure your "rbd_writes" offsets / length is aligned to the stripe period for the most efficient IO (i.e. the initial write might be smaller than the stripe period if the starting offset is unaligned). > > > + > > > + while (offset - current_offset > buf_size) { > > > + bytes = MIN(offset - current_offset, writesame_max_size); > > > + /* > > > + * rbd_writesame() supports only request where the size of the > > > + * operation is multiple of buffer size. 
> > > + */ > > > + bytes -= bytes % buf_size; > > > + > > > + bytes = rbd_writesame(image, current_offset, bytes, buf, > > > + buf_size, 0); > > > > If the RBD in-memory cache is enabled during this operation, the > > writesame will effectively just be turned into a write. Therefore, > > when pre-allocating, you will want to disable the cache. > > > > During the creation, when preallocation is often used, we disable the cache: > > static int qemu_rbd_do_create(BlockdevCreateOptions *options, > const char *keypairs, const char *password_secret, > Error **errp) > { > ... > > ret = qemu_rbd_connect(&cluster, &io_ctx, opts->location, false, keypairs, > ^^ cache param > ... > } > > > Do you think I should disable it in any case during the preallocation? Assuming you can grow an image w/ full preallocation, I would definitely want to ensure that the cache is disabled since otherwise you would just be performing regular (non-offloaded) writes. > > > > > + if (bytes < 0) { > > > + ret = bytes; > > > + error_setg_errno(errp, -ret, > > > + "Failed to write for preallocation"); > > > + goto out; > > > + } > > > + > > > + current_offset += bytes; > > > + } > > > + } > > > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > > > + > > > + while (current_offset < offset) { > > > + bytes = rbd_write(image, current_offset, > > > + MIN(offset - current_offset, buf_size), buf); > > > + if (bytes < 0) { > > > + ret = bytes; > > > + error_setg_errno(errp, -ret, > > > + "Failed to write for preallocation"); > > > + goto out; > > > + } > > > + > > > + current_offset += bytes; > > > + } > > > + > > > + ret = rbd_flush(image); > > > + if (ret < 0) { > > > + error_setg_errno(errp, -ret, "Failed to flush the file"); > > > + goto out; > > > + } > > > + > > > + break; > > > + } > > > + case PREALLOC_MODE_OFF: > > > + ret = rbd_resize(image, offset); > > > > I'm not familiar enough w/ the QEMU block code, but why would the > > PREALLOC_MODE_FULL case not need to resize the image? 
> > PREALLOC_MODE_FULL need too, I did it just before the g_malloc0() in > this patch :-) Sorry I missed it. Would it make more sense to just do it before the switch statement so that you don't duplicate the code and resulting error handling? I guess just validate that the prealloc mode is supported before issuing the resize. > Thanks, > Stefano -- Jason
On Thu, Jul 25, 2019 at 09:30:30AM -0400, Jason Dillaman wrote: > On Thu, Jul 25, 2019 at 4:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > > > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > This patch adds the support of preallocation (off/full) for the RBD > > > > block driver. > > > > If rbd_writesame() is available and supports zeroed buffers, we use > > > > it to quickly fill the image when full preallocation is required. > > > > > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > > > --- > > > > v3: > > > > - rebased on master > > > > - filled with zeroed buffer [Max] > > > > - used rbd_writesame() only when we can disable the discard of zeroed > > > > buffers > > > > - added 'since: 4.2' in qapi/block-core.json [Max] > > > > - used buffer as large as the "stripe unit" > > > > --- > > > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > > > qapi/block-core.json | 5 +- > > > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > > > > > diff --git a/block/rbd.c b/block/rbd.c > > > > index 59757b3120..d923a5a26c 100644 > > > > --- a/block/rbd.c > > > > +++ b/block/rbd.c > > > > @@ -64,6 +64,7 @@ > > > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > > > > > #define RBD_MAX_SNAPS 100 > > > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > > > #ifdef LIBRBD_SUPPORTS_IOVEC > > > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > > > char *image_name; > > > > char *snap; > > > > uint64_t image_size; > > > > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > > > } BDRVRBDState; > > > > > > > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > > > } > > > > } > > > > > > > > +static int 
qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > > > +{ > > > > + char buf[16]; > > > > + int ret, max_concurrent_ops; > > > > + > > > > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > > > > + sizeof(buf)); > > > > + if (ret < 0) { > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > + } > > > > + > > > > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > > > > + if (ret < 0) { > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > + } > > > > + > > > > + return max_concurrent_ops; > > > > +} > > > > + > > > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > > > + int64_t offset, PreallocMode prealloc, > > > > + bool ws_zero_supported, Error **errp) > > > > +{ > > > > + uint64_t current_length; > > > > + char *buf = NULL; > > > > + int ret; > > > > + > > > > + ret = rbd_get_size(image, ¤t_length); > > > > + if (ret < 0) { > > > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > > > + goto out; > > > > + } > > > > + > > > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > > > + ret = -ENOTSUP; > > > > + goto out; > > > > + } > > > > + > > > > + switch (prealloc) { > > > > + case PREALLOC_MODE_FULL: { > > > > + uint64_t buf_size, current_offset = current_length; > > > > + ssize_t bytes; > > > > + > > > > + ret = rbd_get_stripe_unit(image, &buf_size); > > > > + if (ret < 0) { > > > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > > > + goto out; > > > > + } > > > > + > > > > + ret = rbd_resize(image, offset); > > > > + if (ret < 0) { > > > > + error_setg_errno(errp, -ret, "Failed to resize file"); > > > > + goto out; > > > > + } > > > > + > > > > + buf = g_malloc0(buf_size); > > > > + > > > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > > > + if (ws_zero_supported) { > > > > + uint64_t writesame_max_size; > > > > + int max_concurrent_ops; > > > > + > > > > + max_concurrent_ops = 
qemu_rbd_get_max_concurrent_ops(cluster); > > > > + /* > > > > + * We limit the rbd_writesame() size to avoid to spawn more then > > > > + * 'rbd_concurrent_management_ops' concurrent operations. > > > > + */ > > > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > > > > > In the most efficient world, the 'buf_size' would be some small, fixed > > > power of 2 value (like 512 bytes) since there isn't much need to send > > > extra zeroes. You would then want to writesame the full stripe period > > > (if possible), where a stripe period is the data block object size > > > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > > > In this case, the stripe count becomes the number of in-flight IOs. > > > Therefore, you could substitute its value w/ the max_concurrent_ops to > > > ensure you are issuing exactly max_concurrent_ops IOs per > > > rbd_writesame call. > > > > > > > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when > > we didn't use writesame, the rbd_write() was very slow, so I used the > > stripe unit as a buffer size. > > > > Do you think is better to have a small buffer (512 byte) when we use > > writesame or a 'stripe unit' buffer when we can't use it? > > I'd use a small buffer for rbd_writesame and then just reallocate the My idea was to allocate a small buffer for rbd_writesame and use the same to write the remaining bytes that should be a few. If the buffer was not allocated (so we didn't use the rbd_writesame), I'll allocate the big one: if (ws_zero_supported) { buf_size = 512; buf = g_malloc0(buf_size); ... } if (!buf) { buf_size = object_size * max_concurrent_ops; buf = g_malloc0(buf_size); } while (current_offset < offset) { bytes = rbd_write(...) ... } > buffer to a larger size for "rbd_write". It would be most efficient to > allocate a "object size * max concurrent ops" -sized buffer (up to Why "object size * max concurrent ops" and not "stripe_unit * max concurrent ops"? 
IIUC stripe_unit can be smaller than object size. > some reasonable maximum) for the standard rbd_write. Just make sure > your "rbd_writes" offsets / length is aligned to the stripe period for > the most efficient IO (i.e. the initial write might be smaller than > the stripe period if the starting offset is unaligned). > Okay, I'll do the first small write to align the next writes to the stripe period (obj size * stripe_count). > > > > + > > > > + while (offset - current_offset > buf_size) { > > > > + bytes = MIN(offset - current_offset, writesame_max_size); > > > > + /* > > > > + * rbd_writesame() supports only request where the size of the > > > > + * operation is multiple of buffer size. > > > > + */ > > > > + bytes -= bytes % buf_size; > > > > + > > > > + bytes = rbd_writesame(image, current_offset, bytes, buf, > > > > + buf_size, 0); > > > > > > If the RBD in-memory cache is enabled during this operation, the > > > writesame will effectively just be turned into a write. Therefore, > > > when pre-allocating, you will want to disable the cache. > > > > > > > During the creation, when preallocation is often used, we disable the cache: > > > > static int qemu_rbd_do_create(BlockdevCreateOptions *options, > > const char *keypairs, const char *password_secret, > > Error **errp) > > { > > ... > > > > ret = qemu_rbd_connect(&cluster, &io_ctx, opts->location, false, keypairs, > > ^^ cache param > > ... > > } > > > > > > Do you think I should disable it in any case during the preallocation? > > Assuming you can grow an image w/ full preallocation, I would > definitely want to ensure that the cache is disabled since otherwise > you would just be performing regular (non-offloaded) writes. > Yes, make sense, I'll disable the cache. 
> > > > > > > > + if (bytes < 0) { > > > > + ret = bytes; > > > > + error_setg_errno(errp, -ret, > > > > + "Failed to write for preallocation"); > > > > + goto out; > > > > + } > > > > + > > > > + current_offset += bytes; > > > > + } > > > > + } > > > > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > > > > + > > > > + while (current_offset < offset) { > > > > + bytes = rbd_write(image, current_offset, > > > > + MIN(offset - current_offset, buf_size), buf); > > > > + if (bytes < 0) { > > > > + ret = bytes; > > > > + error_setg_errno(errp, -ret, > > > > + "Failed to write for preallocation"); > > > > + goto out; > > > > + } > > > > + > > > > + current_offset += bytes; > > > > + } > > > > + > > > > + ret = rbd_flush(image); > > > > + if (ret < 0) { > > > > + error_setg_errno(errp, -ret, "Failed to flush the file"); > > > > + goto out; > > > > + } > > > > + > > > > + break; > > > > + } > > > > + case PREALLOC_MODE_OFF: > > > > + ret = rbd_resize(image, offset); > > > > > > I'm not familiar enough w/ the QEMU block code, but why would the > > > PREALLOC_MODE_FULL case not need to resize the image? > > > > PREALLOC_MODE_FULL need too, I did it just before the g_malloc0() in > > this patch :-) > > Sorry I missed it. Would it make more sense to just do it before the > switch statement so that you don't duplicate the code and resulting > error handling? I guess just validate that the prealloc mode is > supported before issuing the resize. > Sure, it's more robust in this way. Thanks, Stefano
On Fri, Jul 26, 2019 at 4:48 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > On Thu, Jul 25, 2019 at 09:30:30AM -0400, Jason Dillaman wrote: > > On Thu, Jul 25, 2019 at 4:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > > > > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > > > This patch adds the support of preallocation (off/full) for the RBD > > > > > block driver. > > > > > If rbd_writesame() is available and supports zeroed buffers, we use > > > > > it to quickly fill the image when full preallocation is required. > > > > > > > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > > > > --- > > > > > v3: > > > > > - rebased on master > > > > > - filled with zeroed buffer [Max] > > > > > - used rbd_writesame() only when we can disable the discard of zeroed > > > > > buffers > > > > > - added 'since: 4.2' in qapi/block-core.json [Max] > > > > > - used buffer as large as the "stripe unit" > > > > > --- > > > > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > > > > qapi/block-core.json | 5 +- > > > > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > > > > > > > diff --git a/block/rbd.c b/block/rbd.c > > > > > index 59757b3120..d923a5a26c 100644 > > > > > --- a/block/rbd.c > > > > > +++ b/block/rbd.c > > > > > @@ -64,6 +64,7 @@ > > > > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > > > > > > > #define RBD_MAX_SNAPS 100 > > > > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > > > > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > > > > #ifdef LIBRBD_SUPPORTS_IOVEC > > > > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > > > > char *image_name; > > > > > char *snap; > > > > > uint64_t image_size; > > > > > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > > > > } BDRVRBDState; > > > > > > > > > > static int 
qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > > > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > > > > } > > > > > } > > > > > > > > > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > > > > +{ > > > > > + char buf[16]; > > > > > + int ret, max_concurrent_ops; > > > > > + > > > > > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > > > > > + sizeof(buf)); > > > > > + if (ret < 0) { > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > + } > > > > > + > > > > > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > > > > > + if (ret < 0) { > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > + } > > > > > + > > > > > + return max_concurrent_ops; > > > > > +} > > > > > + > > > > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > > > > + int64_t offset, PreallocMode prealloc, > > > > > + bool ws_zero_supported, Error **errp) > > > > > +{ > > > > > + uint64_t current_length; > > > > > + char *buf = NULL; > > > > > + int ret; > > > > > + > > > > > + ret = rbd_get_size(image, ¤t_length); > > > > > + if (ret < 0) { > > > > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > > > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > > > > + ret = -ENOTSUP; > > > > > + goto out; > > > > > + } > > > > > + > > > > > + switch (prealloc) { > > > > > + case PREALLOC_MODE_FULL: { > > > > > + uint64_t buf_size, current_offset = current_length; > > > > > + ssize_t bytes; > > > > > + > > > > > + ret = rbd_get_stripe_unit(image, &buf_size); > > > > > + if (ret < 0) { > > > > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + ret = rbd_resize(image, offset); > > > > > + if (ret < 0) { > > > > > + error_setg_errno(errp, -ret, "Failed to 
resize file"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + buf = g_malloc0(buf_size); > > > > > + > > > > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > > > > + if (ws_zero_supported) { > > > > > + uint64_t writesame_max_size; > > > > > + int max_concurrent_ops; > > > > > + > > > > > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > > > > > + /* > > > > > + * We limit the rbd_writesame() size to avoid to spawn more then > > > > > + * 'rbd_concurrent_management_ops' concurrent operations. > > > > > + */ > > > > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > > > > > > > In the most efficient world, the 'buf_size' would be some small, fixed > > > > power of 2 value (like 512 bytes) since there isn't much need to send > > > > extra zeroes. You would then want to writesame the full stripe period > > > > (if possible), where a stripe period is the data block object size > > > > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > > > > In this case, the stripe count becomes the number of in-flight IOs. > > > > Therefore, you could substitute its value w/ the max_concurrent_ops to > > > > ensure you are issuing exactly max_concurrent_ops IOs per > > > > rbd_writesame call. > > > > > > > > > > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when > > > we didn't use writesame, the rbd_write() was very slow, so I used the > > > stripe unit as a buffer size. > > > > > > Do you think is better to have a small buffer (512 byte) when we use > > > writesame or a 'stripe unit' buffer when we can't use it? > > > > I'd use a small buffer for rbd_writesame and then just reallocate the > > My idea was to allocate a small buffer for rbd_writesame and use the > same to write the remaining bytes that should be a few. > If the buffer was not allocated (so we didn't use the rbd_writesame), > I'll allocate the big one: > > if (ws_zero_supported) { > buf_size = 512; > buf = g_malloc0(buf_size); > ... 
> } > > if (!buf) { > buf_size = object_size * max_concurrent_ops; > buf = g_malloc0(buf_size); > } > > while (current_offset < offset) { > bytes = rbd_write(...) > ... > } > > > buffer to a larger size for "rbd_write". It would be most efficient to > > allocate a "object size * max concurrent ops" -sized buffer (up to > > Why "object size * max concurrent ops" and not > "stripe_unit * max concurrent ops"? > IIUC stripe_unit can be smaller than object size. Correct, stripe unit *must* be smaller than the object size (and both are powers of two). However, it's more efficient to send fewer larger writes to a backing object than sending more small writes -- especially in the case of writesame where you don't have the network overhead of transferring a large zeroed buffer. Replacing the full backing object is even more efficient since it will just need to perform a single backing disk allocation that will be continuous instead of fragmented. > > some reasonable maximum) for the standard rbd_write. Just make sure > > your "rbd_writes" offsets / length is aligned to the stripe period for > > the most efficient IO (i.e. the initial write might be smaller than > > the stripe period if the starting offset is unaligned). > > > > Okay, I'll do the first small write to align the next writes to the > stripe period (obj size * stripe_count). > > > > > > + > > > > > + while (offset - current_offset > buf_size) { > > > > > + bytes = MIN(offset - current_offset, writesame_max_size); > > > > > + /* > > > > > + * rbd_writesame() supports only request where the size of the > > > > > + * operation is multiple of buffer size. > > > > > + */ > > > > > + bytes -= bytes % buf_size; > > > > > + > > > > > + bytes = rbd_writesame(image, current_offset, bytes, buf, > > > > > + buf_size, 0); > > > > > > > > If the RBD in-memory cache is enabled during this operation, the > > > > writesame will effectively just be turned into a write. 
Therefore, > > > > when pre-allocating, you will want to disable the cache. > > > > > > > > > > During the creation, when preallocation is often used, we disable the cache: > > > > > > static int qemu_rbd_do_create(BlockdevCreateOptions *options, > > > const char *keypairs, const char *password_secret, > > > Error **errp) > > > { > > > ... > > > > > > ret = qemu_rbd_connect(&cluster, &io_ctx, opts->location, false, keypairs, > > > ^^ cache param > > > ... > > > } > > > > > > > > > Do you think I should disable it in any case during the preallocation? > > > > Assuming you can grow an image w/ full preallocation, I would > > definitely want to ensure that the cache is disabled since otherwise > > you would just be performing regular (non-offloaded) writes. > > > > Yes, make sense, I'll disable the cache. > > > > > > > > > > > > + if (bytes < 0) { > > > > > + ret = bytes; > > > > > + error_setg_errno(errp, -ret, > > > > > + "Failed to write for preallocation"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + current_offset += bytes; > > > > > + } > > > > > + } > > > > > +#endif /* LIBRBD_SUPPORTS_WRITESAME */ > > > > > + > > > > > + while (current_offset < offset) { > > > > > + bytes = rbd_write(image, current_offset, > > > > > + MIN(offset - current_offset, buf_size), buf); > > > > > + if (bytes < 0) { > > > > > + ret = bytes; > > > > > + error_setg_errno(errp, -ret, > > > > > + "Failed to write for preallocation"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + current_offset += bytes; > > > > > + } > > > > > + > > > > > + ret = rbd_flush(image); > > > > > + if (ret < 0) { > > > > > + error_setg_errno(errp, -ret, "Failed to flush the file"); > > > > > + goto out; > > > > > + } > > > > > + > > > > > + break; > > > > > + } > > > > > + case PREALLOC_MODE_OFF: > > > > > + ret = rbd_resize(image, offset); > > > > > > > > I'm not familiar enough w/ the QEMU block code, but why would the > > > > PREALLOC_MODE_FULL case not need to resize the image? 
> > > > > > PREALLOC_MODE_FULL need too, I did it just before the g_malloc0() in > > > this patch :-) > > > > Sorry I missed it. Would it make more sense to just do it before the > > switch statement so that you don't duplicate the code and resulting > > error handling? I guess just validate that the prealloc mode is > > supported before issuing the resize. > > > > Sure, it's more robust in this way. > > Thanks, > Stefano -- Jason
On Fri, Jul 26, 2019 at 08:46:56AM -0400, Jason Dillaman wrote: > On Fri, Jul 26, 2019 at 4:48 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > On Thu, Jul 25, 2019 at 09:30:30AM -0400, Jason Dillaman wrote: > > > On Thu, Jul 25, 2019 at 4:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > > > > > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > > > > > This patch adds the support of preallocation (off/full) for the RBD > > > > > > block driver. > > > > > > If rbd_writesame() is available and supports zeroed buffers, we use > > > > > > it to quickly fill the image when full preallocation is required. > > > > > > > > > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > > > > > --- > > > > > > v3: > > > > > > - rebased on master > > > > > > - filled with zeroed buffer [Max] > > > > > > - used rbd_writesame() only when we can disable the discard of zeroed > > > > > > buffers > > > > > > - added 'since: 4.2' in qapi/block-core.json [Max] > > > > > > - used buffer as large as the "stripe unit" > > > > > > --- > > > > > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > > > > > qapi/block-core.json | 5 +- > > > > > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > > > > > > > > > diff --git a/block/rbd.c b/block/rbd.c > > > > > > index 59757b3120..d923a5a26c 100644 > > > > > > --- a/block/rbd.c > > > > > > +++ b/block/rbd.c > > > > > > @@ -64,6 +64,7 @@ > > > > > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > > > > > > > > > #define RBD_MAX_SNAPS 100 > > > > > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > > > > > > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > > > > > #ifdef LIBRBD_SUPPORTS_IOVEC > > > > > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > > > > > char *image_name; > > > > > > char *snap; > > > > > > uint64_t image_size; > > > > > 
> + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > > > > > } BDRVRBDState; > > > > > > > > > > > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > > > > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > > > > > } > > > > > > } > > > > > > > > > > > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > > > > > +{ > > > > > > + char buf[16]; > > > > > > + int ret, max_concurrent_ops; > > > > > > + > > > > > > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > > > > > > + sizeof(buf)); > > > > > > + if (ret < 0) { > > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > > + } > > > > > > + > > > > > > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > > > > > > + if (ret < 0) { > > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > > + } > > > > > > + > > > > > > + return max_concurrent_ops; > > > > > > +} > > > > > > + > > > > > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > > > > > + int64_t offset, PreallocMode prealloc, > > > > > > + bool ws_zero_supported, Error **errp) > > > > > > +{ > > > > > > + uint64_t current_length; > > > > > > + char *buf = NULL; > > > > > > + int ret; > > > > > > + > > > > > > + ret = rbd_get_size(image, ¤t_length); > > > > > > + if (ret < 0) { > > > > > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > > > > > + goto out; > > > > > > + } > > > > > > + > > > > > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > > > > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > > > > > + ret = -ENOTSUP; > > > > > > + goto out; > > > > > > + } > > > > > > + > > > > > > + switch (prealloc) { > > > > > > + case PREALLOC_MODE_FULL: { > > > > > > + uint64_t buf_size, current_offset = current_length; > > > > > > + ssize_t bytes; > > > > > > + > > > > > > + ret = rbd_get_stripe_unit(image, &buf_size); > > > > > > + if (ret < 0) { > > > 
> > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > > > > > + goto out; > > > > > > + } > > > > > > + > > > > > > + ret = rbd_resize(image, offset); > > > > > > + if (ret < 0) { > > > > > > + error_setg_errno(errp, -ret, "Failed to resize file"); > > > > > > + goto out; > > > > > > + } > > > > > > + > > > > > > + buf = g_malloc0(buf_size); > > > > > > + > > > > > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > > > > > + if (ws_zero_supported) { > > > > > > + uint64_t writesame_max_size; > > > > > > + int max_concurrent_ops; > > > > > > + > > > > > > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > > > > > > + /* > > > > > > + * We limit the rbd_writesame() size to avoid to spawn more then > > > > > > + * 'rbd_concurrent_management_ops' concurrent operations. > > > > > > + */ > > > > > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > > > > > > > > > In the most efficient world, the 'buf_size' would be some small, fixed > > > > > power of 2 value (like 512 bytes) since there isn't much need to send > > > > > extra zeroes. You would then want to writesame the full stripe period > > > > > (if possible), where a stripe period is the data block object size > > > > > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > > > > > In this case, the stripe count becomes the number of in-flight IOs. > > > > > Therefore, you could substitute its value w/ the max_concurrent_ops to > > > > > ensure you are issuing exactly max_concurrent_ops IOs per > > > > > rbd_writesame call. > > > > > > > > > > > > > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when > > > > we didn't use writesame, the rbd_write() was very slow, so I used the > > > > stripe unit as a buffer size. > > > > > > > > Do you think is better to have a small buffer (512 byte) when we use > > > > writesame or a 'stripe unit' buffer when we can't use it? 
> > > > > > I'd use a small buffer for rbd_writesame and then just reallocate the > > > > My idea was to allocate a small buffer for rbd_writesame and use the > > same to write the remaining bytes that should be a few. > > If the buffer was not allocated (so we didn't use the rbd_writesame), > > I'll allocate the big one: > > > > if (ws_zero_supported) { > > buf_size = 512; > > buf = g_malloc0(buf_size); > > ... > > } > > > > if (!buf) { > > buf_size = object_size * max_concurrent_ops; > > buf = g_malloc0(buf_size); > > } > > > > while (current_offset < offset) { > > bytes = rbd_write(...) > > ... > > } > > > > > buffer to a larger size for "rbd_write". It would be most efficient to > > > allocate a "object size * max concurrent ops" -sized buffer (up to > > > > Why "object size * max concurrent ops" and not > > "stripe_unit * max concurrent ops"? > > IIUC stripe_unit can be smaller than object size. > > Correct, stripe unit *must* be smaller than the object size (and both > are powers of two). However, it's more efficient to send fewer larger > writes to a backing object than sending more small writes -- > especially in the case of writesame where you don't have the network > overhead of transferring a large zeroed buffer. Replacing the full > backing object is even more efficient since it will just need to > perform a single backing disk allocation that will be continuous > instead of fragmented. > Okay, so IIUC I should do the following: - if we can use rbd_writesame ~ allocates a buffer of 512 bytes ~ writes "object size * max concurrent ops" bytes per call - if we use rbd_write ~ allocates a buffer of "object size * max concurrent ops" bytes ~ writes the entire buffer per call (aligning to the stripe unit) Is that correct? Thanks, Stefano
On Mon, Jul 29, 2019 at 5:40 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > On Fri, Jul 26, 2019 at 08:46:56AM -0400, Jason Dillaman wrote: > > On Fri, Jul 26, 2019 at 4:48 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > On Thu, Jul 25, 2019 at 09:30:30AM -0400, Jason Dillaman wrote: > > > > On Thu, Jul 25, 2019 at 4:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > > > On Wed, Jul 24, 2019 at 01:48:42PM -0400, Jason Dillaman wrote: > > > > > > On Tue, Jul 23, 2019 at 3:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote: > > > > > > > > > > > > > > This patch adds the support of preallocation (off/full) for the RBD > > > > > > > block driver. > > > > > > > If rbd_writesame() is available and supports zeroed buffers, we use > > > > > > > it to quickly fill the image when full preallocation is required. > > > > > > > > > > > > > > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> > > > > > > > --- > > > > > > > v3: > > > > > > > - rebased on master > > > > > > > - filled with zeroed buffer [Max] > > > > > > > - used rbd_writesame() only when we can disable the discard of zeroed > > > > > > > buffers > > > > > > > - added 'since: 4.2' in qapi/block-core.json [Max] > > > > > > > - used buffer as large as the "stripe unit" > > > > > > > --- > > > > > > > block/rbd.c | 202 ++++++++++++++++++++++++++++++++++++++++--- > > > > > > > qapi/block-core.json | 5 +- > > > > > > > 2 files changed, 192 insertions(+), 15 deletions(-) > > > > > > > > > > > > > > diff --git a/block/rbd.c b/block/rbd.c > > > > > > > index 59757b3120..d923a5a26c 100644 > > > > > > > --- a/block/rbd.c > > > > > > > +++ b/block/rbd.c > > > > > > > @@ -64,6 +64,7 @@ > > > > > > > #define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) > > > > > > > > > > > > > > #define RBD_MAX_SNAPS 100 > > > > > > > +#define RBD_DEFAULT_CONCURRENT_OPS 10 > > > > > > > > > > > > > > /* The LIBRBD_SUPPORTS_IOVEC is defined in librbd.h */ > > > > > > > #ifdef 
LIBRBD_SUPPORTS_IOVEC > > > > > > > @@ -104,6 +105,7 @@ typedef struct BDRVRBDState { > > > > > > > char *image_name; > > > > > > > char *snap; > > > > > > > uint64_t image_size; > > > > > > > + bool ws_zero_supported; /* rbd_writesame() supports zeroed buffers */ > > > > > > > } BDRVRBDState; > > > > > > > > > > > > > > static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, > > > > > > > @@ -333,6 +335,155 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs) > > > > > > > } > > > > > > > } > > > > > > > > > > > > > > +static int qemu_rbd_get_max_concurrent_ops(rados_t cluster) > > > > > > > +{ > > > > > > > + char buf[16]; > > > > > > > + int ret, max_concurrent_ops; > > > > > > > + > > > > > > > + ret = rados_conf_get(cluster, "rbd_concurrent_management_ops", buf, > > > > > > > + sizeof(buf)); > > > > > > > + if (ret < 0) { > > > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > > > + } > > > > > > > + > > > > > > > + ret = qemu_strtoi(buf, NULL, 10, &max_concurrent_ops); > > > > > > > + if (ret < 0) { > > > > > > > + return RBD_DEFAULT_CONCURRENT_OPS; > > > > > > > + } > > > > > > > + > > > > > > > + return max_concurrent_ops; > > > > > > > +} > > > > > > > + > > > > > > > +static int qemu_rbd_do_truncate(rados_t cluster, rbd_image_t image, > > > > > > > + int64_t offset, PreallocMode prealloc, > > > > > > > + bool ws_zero_supported, Error **errp) > > > > > > > +{ > > > > > > > + uint64_t current_length; > > > > > > > + char *buf = NULL; > > > > > > > + int ret; > > > > > > > + > > > > > > > + ret = rbd_get_size(image, ¤t_length); > > > > > > > + if (ret < 0) { > > > > > > > + error_setg_errno(errp, -ret, "Failed to get file length"); > > > > > > > + goto out; > > > > > > > + } > > > > > > > + > > > > > > > + if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { > > > > > > > + error_setg(errp, "Cannot use preallocation for shrinking files"); > > > > > > > + ret = -ENOTSUP; > > > > > > > + goto out; > > > > > > > + } > > > > > 
> > + > > > > > > > + switch (prealloc) { > > > > > > > + case PREALLOC_MODE_FULL: { > > > > > > > + uint64_t buf_size, current_offset = current_length; > > > > > > > + ssize_t bytes; > > > > > > > + > > > > > > > + ret = rbd_get_stripe_unit(image, &buf_size); > > > > > > > + if (ret < 0) { > > > > > > > + error_setg_errno(errp, -ret, "Failed to get stripe unit"); > > > > > > > + goto out; > > > > > > > + } > > > > > > > + > > > > > > > + ret = rbd_resize(image, offset); > > > > > > > + if (ret < 0) { > > > > > > > + error_setg_errno(errp, -ret, "Failed to resize file"); > > > > > > > + goto out; > > > > > > > + } > > > > > > > + > > > > > > > + buf = g_malloc0(buf_size); > > > > > > > + > > > > > > > +#ifdef LIBRBD_SUPPORTS_WRITESAME > > > > > > > + if (ws_zero_supported) { > > > > > > > + uint64_t writesame_max_size; > > > > > > > + int max_concurrent_ops; > > > > > > > + > > > > > > > + max_concurrent_ops = qemu_rbd_get_max_concurrent_ops(cluster); > > > > > > > + /* > > > > > > > + * We limit the rbd_writesame() size to avoid to spawn more then > > > > > > > + * 'rbd_concurrent_management_ops' concurrent operations. > > > > > > > + */ > > > > > > > + writesame_max_size = MIN(buf_size * max_concurrent_ops, INT_MAX); > > > > > > > > > > > > In the most efficient world, the 'buf_size' would be some small, fixed > > > > > > power of 2 value (like 512 bytes) since there isn't much need to send > > > > > > extra zeroes. You would then want to writesame the full stripe period > > > > > > (if possible), where a stripe period is the data block object size > > > > > > (defaults to 4MiB and is availble via 'rbd_stat') * the stripe count. > > > > > > In this case, the stripe count becomes the number of in-flight IOs. > > > > > > Therefore, you could substitute its value w/ the max_concurrent_ops to > > > > > > ensure you are issuing exactly max_concurrent_ops IOs per > > > > > > rbd_writesame call. 
> > > > > > > > > > > > > > > > Initially, I had a fixed buffer size to 4 KiB, but I noted that, when > > > > > we didn't use writesame, the rbd_write() was very slow, so I used the > > > > > stripe unit as a buffer size. > > > > > > > > > > Do you think is better to have a small buffer (512 byte) when we use > > > > > writesame or a 'stripe unit' buffer when we can't use it? > > > > > > > > I'd use a small buffer for rbd_writesame and then just reallocate the > > > > > > My idea was to allocate a small buffer for rbd_writesame and use the > > > same to write the remaining bytes that should be a few. > > > If the buffer was not allocated (so we didn't use the rbd_writesame), > > > I'll allocate the big one: > > > > > > if (ws_zero_supported) { > > > buf_size = 512; > > > buf = g_malloc0(buf_size); > > > ... > > > } > > > > > > if (!buf) { > > > buf_size = object_size * max_concurrent_ops; > > > buf = g_malloc0(buf_size); > > > } > > > > > > while (current_offset < offset) { > > > bytes = rbd_write(...) > > > ... > > > } > > > > > > > buffer to a larger size for "rbd_write". It would be most efficient to > > > > allocate a "object size * max concurrent ops" -sized buffer (up to > > > > > > Why "object size * max concurrent ops" and not > > > "stripe_unit * max concurrent ops"? > > > IIUC stripe_unit can be smaller than object size. > > > > Correct, stripe unit *must* be smaller than the object size (and both > > are powers of two). However, it's more efficient to send fewer larger > > writes to a backing object than sending more small writes -- > > especially in the case of writesame where you don't have the network > > overhead of transferring a large zeroed buffer. Replacing the full > > backing object is even more efficient since it will just need to > > perform a single backing disk allocation that will be continuous > > instead of fragmented. 
> > > > Okay, so IIUC I should do the following: > - if we can use rbd_writesame > ~ allocates a buffer of 512 bytes > ~ writes "object size * max concurrent ops" bytes per call > - if we use rbd_write > ~ allocates a buffer of "object size * max concurrent ops" bytes > ~ writes the entire buffer per call (aligning to the stripe unit) > > Is that correct? Yes, that is correct. > Thanks, > Stefano -- Jason
© 2016 - 2024 Red Hat, Inc.