1 | The following changes since commit ca4e667dbf431d4a2a5a619cde79d30dd2ac3eb2: | 1 | The following changes since commit 248b23735645f7cbb503d9be6f5bf825f2a603ab: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/kraxel/tags/usb-20170717-pull-request' into staging (2017-07-17 17:54:17 +0100) | 3 | Update version for v2.10.0-rc4 release (2017-08-24 17:34:26 +0100) |
4 | 4 | ||
5 | are available in the git repository at: | 5 | are available in the git repository at: |
6 | 6 | ||
7 | git://github.com/codyprime/qemu-kvm-jtc.git tags/block-pull-request | 7 | git://github.com/stefanha/qemu.git tags/block-pull-request |
8 | 8 | ||
9 | for you to fetch changes up to 8508eee740c78d1465e25dad7c3e06137485dfbc: | 9 | for you to fetch changes up to 3e4c705212abfe8c9882a00beb2d1466a8a53cec: |
10 | 10 | ||
11 | live-block-ops.txt: Rename, rewrite, and improve it (2017-07-18 00:11:01 -0400) | 11 | qcow2: allocate cluster_cache/cluster_data on demand (2017-08-30 18:02:10 +0100) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Block patches (documentation) | 14 | |
15 | ---------------------------------------------------------------- | 15 | ---------------------------------------------------------------- |
16 | 16 | ||
17 | Kashyap Chamarthy (2): | 17 | Alberto Garcia (8): |
18 | bitmaps.md: Convert to rST; move it into 'interop' dir | 18 | throttle: Fix wrong variable name in the header documentation |
19 | live-block-ops.txt: Rename, rewrite, and improve it | 19 | throttle: Update the throttle_fix_bucket() documentation |
20 | throttle: Make throttle_is_valid() a bit less verbose | ||
21 | throttle: Remove throttle_fix_bucket() / throttle_unfix_bucket() | ||
22 | throttle: Make LeakyBucket.avg and LeakyBucket.max integer types | ||
23 | throttle: Make burst_length 64bit and add range checks | ||
24 | throttle: Test the valid range of config values | ||
25 | misc: Remove unused Error variables | ||
20 | 26 | ||
21 | docs/devel/bitmaps.md | 505 --------------- | 27 | Dan Aloni (1): |
22 | docs/interop/bitmaps.rst | 555 ++++++++++++++++ | 28 | nvme: Fix get/set number of queues feature, again |
23 | docs/interop/live-block-operations.rst | 1088 ++++++++++++++++++++++++++++++++ | 29 | |
24 | docs/live-block-ops.txt | 72 --- | 30 | Eduardo Habkost (1): |
25 | 4 files changed, 1643 insertions(+), 577 deletions(-) | 31 | oslib-posix: Print errors before aborting on qemu_alloc_stack() |
26 | delete mode 100644 docs/devel/bitmaps.md | 32 | |
27 | create mode 100644 docs/interop/bitmaps.rst | 33 | Fred Rolland (1): |
28 | create mode 100644 docs/interop/live-block-operations.rst | 34 | qemu-doc: Add UUID support in initiator name |
29 | delete mode 100644 docs/live-block-ops.txt | 35 | |
36 | Stefan Hajnoczi (4): | ||
37 | scripts: add argparse module for Python 2.6 compatibility | ||
38 | docker.py: Python 2.6 argparse compatibility | ||
39 | tests: migration/guestperf Python 2.6 argparse compatibility | ||
40 | qcow2: allocate cluster_cache/cluster_data on demand | ||
41 | |||
42 | include/qemu/throttle.h | 8 +- | ||
43 | block/qcow.c | 12 +- | ||
44 | block/qcow2-cluster.c | 17 + | ||
45 | block/qcow2.c | 20 +- | ||
46 | dump.c | 4 +- | ||
47 | hw/block/nvme.c | 4 +- | ||
48 | tests/test-throttle.c | 80 +- | ||
49 | util/oslib-posix.c | 2 + | ||
50 | util/throttle.c | 86 +- | ||
51 | COPYING.PYTHON | 270 ++++ | ||
52 | qemu-doc.texi | 5 +- | ||
53 | scripts/argparse.py | 2406 ++++++++++++++++++++++++++++++++++++ | ||
54 | tests/docker/docker.py | 4 +- | ||
55 | tests/migration/guestperf/shell.py | 8 +- | ||
56 | 14 files changed, 2831 insertions(+), 95 deletions(-) | ||
57 | create mode 100644 COPYING.PYTHON | ||
58 | create mode 100644 scripts/argparse.py | ||
30 | 59 | ||
31 | -- | 60 | -- |
32 | 2.9.4 | 61 | 2.13.5 |
33 | 62 | ||
34 | 63 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Dan Aloni <dan@kernelim.com> | ||
1 | 2 | ||
3 | The number of queues that should be return by the admin command should: | ||
4 | |||
5 | 1) Only mention the number of non-admin queues. | ||
6 | 2) It is zero-based, meaning that '0 == one non-admin queue', | ||
7 | '1 == two non-admin queues', and so forth. | ||
8 | |||
9 | Because our `num_queues` means the number of queues _plus_ the admin | ||
10 | queue, then the right calculation for the number returned from the admin | ||
11 | command is `num_queues - 2`, combining the two requirements mentioned. | ||
12 | |||
13 | The issue was discovered by reducing num_queues from 64 to 8 and running | ||
14 | a Linux VM with an SMP parameter larger than that (e.g. 22). It tries to | ||
15 | utilize all queues, and therefore fails with an invalid queue number | ||
16 | when trying to queue I/Os on the last queue. | ||
17 | |||
18 | Signed-off-by: Dan Aloni <dan@kernelim.com> | ||
19 | CC: Alex Friedman <alex@e8storage.com> | ||
20 | CC: Keith Busch <keith.busch@intel.com> | ||
21 | CC: Stefan Hajnoczi <stefanha@redhat.com> | ||
22 | Reviewed-by: Keith Busch <keith.busch@intel.com> | ||
23 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
24 | --- | ||
25 | hw/block/nvme.c | 4 ++-- | ||
26 | 1 file changed, 2 insertions(+), 2 deletions(-) | ||
27 | |||
28 | diff --git a/hw/block/nvme.c b/hw/block/nvme.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/hw/block/nvme.c | ||
31 | +++ b/hw/block/nvme.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) | ||
33 | result = blk_enable_write_cache(n->conf.blk); | ||
34 | break; | ||
35 | case NVME_NUMBER_OF_QUEUES: | ||
36 | - result = cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16)); | ||
37 | + result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); | ||
38 | break; | ||
39 | default: | ||
40 | return NVME_INVALID_FIELD | NVME_DNR; | ||
41 | @@ -XXX,XX +XXX,XX @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) | ||
42 | break; | ||
43 | case NVME_NUMBER_OF_QUEUES: | ||
44 | req->cqe.result = | ||
45 | - cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16)); | ||
46 | + cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); | ||
47 | break; | ||
48 | default: | ||
49 | return NVME_INVALID_FIELD | NVME_DNR; | ||
50 | -- | ||
51 | 2.13.5 | ||
52 | |||
53 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | The level of the burst bucket is stored in bkt.burst_level, not | ||
4 | bkt.burst_length. | ||
5 | |||
6 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
7 | Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr> | ||
8 | Message-id: 49aab2711d02f285567f3b3b13a113847af33812.1503580370.git.berto@igalia.com | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | include/qemu/throttle.h | 2 +- | ||
12 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/include/qemu/throttle.h | ||
17 | +++ b/include/qemu/throttle.h | ||
18 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
19 | * - The bkt.avg rate does not apply until the bucket is full, | ||
20 | * allowing the user to do bursts until then. The I/O limit during | ||
21 | * bursts is bkt.max. To enforce this limit we keep an additional | ||
22 | - * bucket in bkt.burst_length that leaks at a rate of bkt.max units | ||
23 | + * bucket in bkt.burst_level that leaks at a rate of bkt.max units | ||
24 | * per second. | ||
25 | * | ||
26 | * - Because of all of the above, the user can perform I/O at a | ||
27 | -- | ||
28 | 2.13.5 | ||
29 | |||
30 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | The way the throttling algorithm works is that requests start being | ||
4 | throttled once the bucket level exceeds the burst limit. When we get | ||
5 | there the bucket leaks at the level set by the user (bkt->avg), and | ||
6 | that leak rate is what prevents guest I/O from exceeding the desired | ||
7 | limit. | ||
8 | |||
9 | If we don't allow bursts (i.e. bkt->max == 0) then we can start | ||
10 | throttling requests immediately. The problem with keeping the | ||
11 | threshold at 0 is that it only allows one request at a time, and as | ||
12 | soon as there's a bit of I/O from the guest every other request will | ||
13 | be throttled and performance will suffer considerably. That can even | ||
14 | make the guest unable to reach the throttle limit if that limit is | ||
15 | high enough, and that happens regardless of the block scheduler used | ||
16 | by the guest. | ||
17 | |||
18 | Increasing that threshold gives flexibility to the guest, allowing it | ||
19 | to perform short bursts of I/O before being throttled. Increasing the | ||
20 | threshold too much does not make a difference in the long run (because | ||
21 | it's the leak rate what defines the actual throughput) but it does | ||
22 | allow the guest to perform longer initial bursts and exceed the | ||
23 | throttle limit for a short while. | ||
24 | |||
25 | A burst value of bkt->avg / 10 allows the guest to perform 100ms' | ||
26 | worth of I/O at the target rate without being throttled. | ||
27 | |||
28 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
29 | Message-id: 31aae6645f0d1fbf3860fb2b528b757236f0c0a7.1503580370.git.berto@igalia.com | ||
30 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
31 | --- | ||
32 | util/throttle.c | 11 +++-------- | ||
33 | 1 file changed, 3 insertions(+), 8 deletions(-) | ||
34 | |||
35 | diff --git a/util/throttle.c b/util/throttle.c | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/util/throttle.c | ||
38 | +++ b/util/throttle.c | ||
39 | @@ -XXX,XX +XXX,XX @@ static void throttle_fix_bucket(LeakyBucket *bkt) | ||
40 | /* zero bucket level */ | ||
41 | bkt->level = bkt->burst_level = 0; | ||
42 | |||
43 | - /* The following is done to cope with the Linux CFQ block scheduler | ||
44 | - * which regroup reads and writes by block of 100ms in the guest. | ||
45 | - * When they are two process one making reads and one making writes cfq | ||
46 | - * make a pattern looking like the following: | ||
47 | - * WWWWWWWWWWWRRRRRRRRRRRRRRWWWWWWWWWWWWWwRRRRRRRRRRRRRRRRR | ||
48 | - * Having a max burst value of 100ms of the average will help smooth the | ||
49 | - * throttling | ||
50 | - */ | ||
51 | + /* If bkt->max is 0 we still want to allow short bursts of I/O | ||
52 | + * from the guest, otherwise every other request will be throttled | ||
53 | + * and performance will suffer considerably. */ | ||
54 | min = bkt->avg / 10; | ||
55 | if (bkt->avg && !bkt->max) { | ||
56 | bkt->max = min; | ||
57 | -- | ||
58 | 2.13.5 | ||
59 | |||
60 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | Use a pointer to the bucket instead of repeating cfg->buckets[i] all | ||
4 | the time. This makes the code more concise and will help us expand the | ||
5 | checks later and save a few line breaks. | ||
6 | |||
7 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
8 | Message-id: 763ffc40a26b17d54cf93f5a999e4656049fcf0c.1503580370.git.berto@igalia.com | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | util/throttle.c | 15 +++++++-------- | ||
12 | 1 file changed, 7 insertions(+), 8 deletions(-) | ||
13 | |||
14 | diff --git a/util/throttle.c b/util/throttle.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/util/throttle.c | ||
17 | +++ b/util/throttle.c | ||
18 | @@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp) | ||
19 | } | ||
20 | |||
21 | for (i = 0; i < BUCKETS_COUNT; i++) { | ||
22 | - if (cfg->buckets[i].avg < 0 || | ||
23 | - cfg->buckets[i].max < 0 || | ||
24 | - cfg->buckets[i].avg > THROTTLE_VALUE_MAX || | ||
25 | - cfg->buckets[i].max > THROTTLE_VALUE_MAX) { | ||
26 | + LeakyBucket *bkt = &cfg->buckets[i]; | ||
27 | + if (bkt->avg < 0 || bkt->max < 0 || | ||
28 | + bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) { | ||
29 | error_setg(errp, "bps/iops/max values must be within [0, %lld]", | ||
30 | THROTTLE_VALUE_MAX); | ||
31 | return false; | ||
32 | } | ||
33 | |||
34 | - if (!cfg->buckets[i].burst_length) { | ||
35 | + if (!bkt->burst_length) { | ||
36 | error_setg(errp, "the burst length cannot be 0"); | ||
37 | return false; | ||
38 | } | ||
39 | |||
40 | - if (cfg->buckets[i].burst_length > 1 && !cfg->buckets[i].max) { | ||
41 | + if (bkt->burst_length > 1 && !bkt->max) { | ||
42 | error_setg(errp, "burst length set without burst rate"); | ||
43 | return false; | ||
44 | } | ||
45 | |||
46 | - if (cfg->buckets[i].max && !cfg->buckets[i].avg) { | ||
47 | + if (bkt->max && !bkt->avg) { | ||
48 | error_setg(errp, "bps_max/iops_max require corresponding" | ||
49 | " bps/iops values"); | ||
50 | return false; | ||
51 | } | ||
52 | |||
53 | - if (cfg->buckets[i].max && cfg->buckets[i].max < cfg->buckets[i].avg) { | ||
54 | + if (bkt->max && bkt->max < bkt->avg) { | ||
55 | error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops"); | ||
56 | return false; | ||
57 | } | ||
58 | -- | ||
59 | 2.13.5 | ||
60 | |||
61 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | The throttling code can change internally the value of bkt->max if it | ||
4 | hasn't been set by the user. The problem with this is that if we want | ||
5 | to retrieve the original value we have to undo this change first. This | ||
6 | is ugly and unnecessary: this patch removes the throttle_fix_bucket() | ||
7 | and throttle_unfix_bucket() functions completely and moves the logic | ||
8 | to throttle_compute_wait(). | ||
9 | |||
10 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
11 | Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr> | ||
12 | Message-id: 5b0b9e1ac6eb208d709eddc7b09e7669a523bff3.1503580370.git.berto@igalia.com | ||
13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | --- | ||
15 | util/throttle.c | 62 +++++++++++++++++++++------------------------------------ | ||
16 | 1 file changed, 23 insertions(+), 39 deletions(-) | ||
17 | |||
18 | diff --git a/util/throttle.c b/util/throttle.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/util/throttle.c | ||
21 | +++ b/util/throttle.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static int64_t throttle_do_compute_wait(double limit, double extra) | ||
23 | int64_t throttle_compute_wait(LeakyBucket *bkt) | ||
24 | { | ||
25 | double extra; /* the number of extra units blocking the io */ | ||
26 | + double bucket_size; /* I/O before throttling to bkt->avg */ | ||
27 | + double burst_bucket_size; /* Before throttling to bkt->max */ | ||
28 | |||
29 | if (!bkt->avg) { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | - /* If the bucket is full then we have to wait */ | ||
34 | - extra = bkt->level - bkt->max * bkt->burst_length; | ||
35 | + if (!bkt->max) { | ||
36 | + /* If bkt->max is 0 we still want to allow short bursts of I/O | ||
37 | + * from the guest, otherwise every other request will be throttled | ||
38 | + * and performance will suffer considerably. */ | ||
39 | + bucket_size = bkt->avg / 10; | ||
40 | + burst_bucket_size = 0; | ||
41 | + } else { | ||
42 | + /* If we have a burst limit then we have to wait until all I/O | ||
43 | + * at burst rate has finished before throttling to bkt->avg */ | ||
44 | + bucket_size = bkt->max * bkt->burst_length; | ||
45 | + burst_bucket_size = bkt->max / 10; | ||
46 | + } | ||
47 | + | ||
48 | + /* If the main bucket is full then we have to wait */ | ||
49 | + extra = bkt->level - bucket_size; | ||
50 | if (extra > 0) { | ||
51 | return throttle_do_compute_wait(bkt->avg, extra); | ||
52 | } | ||
53 | |||
54 | - /* If the bucket is not full yet we have to make sure that we | ||
55 | - * fulfill the goal of bkt->max units per second. */ | ||
56 | + /* If the main bucket is not full yet we still have to check the | ||
57 | + * burst bucket in order to enforce the burst limit */ | ||
58 | if (bkt->burst_length > 1) { | ||
59 | - /* We use 1/10 of the max value to smooth the throttling. | ||
60 | - * See throttle_fix_bucket() for more details. */ | ||
61 | - extra = bkt->burst_level - bkt->max / 10; | ||
62 | + extra = bkt->burst_level - burst_bucket_size; | ||
63 | if (extra > 0) { | ||
64 | return throttle_do_compute_wait(bkt->max, extra); | ||
65 | } | ||
66 | @@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp) | ||
67 | return true; | ||
68 | } | ||
69 | |||
70 | -/* fix bucket parameters */ | ||
71 | -static void throttle_fix_bucket(LeakyBucket *bkt) | ||
72 | -{ | ||
73 | - double min; | ||
74 | - | ||
75 | - /* zero bucket level */ | ||
76 | - bkt->level = bkt->burst_level = 0; | ||
77 | - | ||
78 | - /* If bkt->max is 0 we still want to allow short bursts of I/O | ||
79 | - * from the guest, otherwise every other request will be throttled | ||
80 | - * and performance will suffer considerably. */ | ||
81 | - min = bkt->avg / 10; | ||
82 | - if (bkt->avg && !bkt->max) { | ||
83 | - bkt->max = min; | ||
84 | - } | ||
85 | -} | ||
86 | - | ||
87 | -/* undo internal bucket parameter changes (see throttle_fix_bucket()) */ | ||
88 | -static void throttle_unfix_bucket(LeakyBucket *bkt) | ||
89 | -{ | ||
90 | - if (bkt->max < bkt->avg) { | ||
91 | - bkt->max = 0; | ||
92 | - } | ||
93 | -} | ||
94 | - | ||
95 | /* Used to configure the throttle | ||
96 | * | ||
97 | * @ts: the throttle state we are working on | ||
98 | @@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts, | ||
99 | |||
100 | ts->cfg = *cfg; | ||
101 | |||
102 | + /* Zero bucket level */ | ||
103 | for (i = 0; i < BUCKETS_COUNT; i++) { | ||
104 | - throttle_fix_bucket(&ts->cfg.buckets[i]); | ||
105 | + ts->cfg.buckets[i].level = 0; | ||
106 | + ts->cfg.buckets[i].burst_level = 0; | ||
107 | } | ||
108 | |||
109 | ts->previous_leak = qemu_clock_get_ns(clock_type); | ||
110 | @@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts, | ||
111 | */ | ||
112 | void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg) | ||
113 | { | ||
114 | - int i; | ||
115 | - | ||
116 | *cfg = ts->cfg; | ||
117 | - | ||
118 | - for (i = 0; i < BUCKETS_COUNT; i++) { | ||
119 | - throttle_unfix_bucket(&cfg->buckets[i]); | ||
120 | - } | ||
121 | } | ||
122 | |||
123 | |||
124 | -- | ||
125 | 2.13.5 | ||
126 | |||
127 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | Both the throttling limits set with the throttling.iops-* and | ||
4 | throttling.bps-* options and their QMP equivalents defined in the | ||
5 | BlockIOThrottle struct are integer values. | ||
6 | |||
7 | Those limits are also reported in the BlockDeviceInfo struct and they | ||
8 | are integers there as well. | ||
9 | |||
10 | Therefore there's no reason to store them internally as double and do | ||
11 | the conversion everytime we're setting or querying them, so this patch | ||
12 | uses uint64_t for those types. Let's also use an unsigned type because | ||
13 | we don't allow negative values anyway. | ||
14 | |||
15 | LeakyBucket.level and LeakyBucket.burst_level do however remain double | ||
16 | because their value changes depending on the fraction of time elapsed | ||
17 | since the previous I/O operation. | ||
18 | |||
19 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
20 | Message-id: f29b840422767b5be2c41c2dfdbbbf6c5f8fedf8.1503580370.git.berto@igalia.com | ||
21 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
22 | --- | ||
23 | include/qemu/throttle.h | 4 ++-- | ||
24 | tests/test-throttle.c | 3 ++- | ||
25 | util/throttle.c | 7 +++---- | ||
26 | 3 files changed, 7 insertions(+), 7 deletions(-) | ||
27 | |||
28 | diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/include/qemu/throttle.h | ||
31 | +++ b/include/qemu/throttle.h | ||
32 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
33 | */ | ||
34 | |||
35 | typedef struct LeakyBucket { | ||
36 | - double avg; /* average goal in units per second */ | ||
37 | - double max; /* leaky bucket max burst in units */ | ||
38 | + uint64_t avg; /* average goal in units per second */ | ||
39 | + uint64_t max; /* leaky bucket max burst in units */ | ||
40 | double level; /* bucket level in units */ | ||
41 | double burst_level; /* bucket level in units (for computing bursts) */ | ||
42 | unsigned burst_length; /* max length of the burst period, in seconds */ | ||
43 | diff --git a/tests/test-throttle.c b/tests/test-throttle.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/tests/test-throttle.c | ||
46 | +++ b/tests/test-throttle.c | ||
47 | @@ -XXX,XX +XXX,XX @@ static void test_enabled(void) | ||
48 | for (i = 0; i < BUCKETS_COUNT; i++) { | ||
49 | throttle_config_init(&cfg); | ||
50 | set_cfg_value(false, i, 150); | ||
51 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
52 | g_assert(throttle_enabled(&cfg)); | ||
53 | } | ||
54 | |||
55 | for (i = 0; i < BUCKETS_COUNT; i++) { | ||
56 | throttle_config_init(&cfg); | ||
57 | set_cfg_value(false, i, -150); | ||
58 | - g_assert(!throttle_enabled(&cfg)); | ||
59 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | diff --git a/util/throttle.c b/util/throttle.c | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/util/throttle.c | ||
66 | +++ b/util/throttle.c | ||
67 | @@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt) | ||
68 | /* If bkt->max is 0 we still want to allow short bursts of I/O | ||
69 | * from the guest, otherwise every other request will be throttled | ||
70 | * and performance will suffer considerably. */ | ||
71 | - bucket_size = bkt->avg / 10; | ||
72 | + bucket_size = (double) bkt->avg / 10; | ||
73 | burst_bucket_size = 0; | ||
74 | } else { | ||
75 | /* If we have a burst limit then we have to wait until all I/O | ||
76 | * at burst rate has finished before throttling to bkt->avg */ | ||
77 | bucket_size = bkt->max * bkt->burst_length; | ||
78 | - burst_bucket_size = bkt->max / 10; | ||
79 | + burst_bucket_size = (double) bkt->max / 10; | ||
80 | } | ||
81 | |||
82 | /* If the main bucket is full then we have to wait */ | ||
83 | @@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp) | ||
84 | |||
85 | for (i = 0; i < BUCKETS_COUNT; i++) { | ||
86 | LeakyBucket *bkt = &cfg->buckets[i]; | ||
87 | - if (bkt->avg < 0 || bkt->max < 0 || | ||
88 | - bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) { | ||
89 | + if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) { | ||
90 | error_setg(errp, "bps/iops/max values must be within [0, %lld]", | ||
91 | THROTTLE_VALUE_MAX); | ||
92 | return false; | ||
93 | -- | ||
94 | 2.13.5 | ||
95 | |||
96 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | LeakyBucket.burst_length is defined as an unsigned integer but the | ||
4 | code never checks for overflows and it only makes sure that the value | ||
5 | is not 0. | ||
6 | |||
7 | In practice this means that the user can set something like | ||
8 | throttling.iops-total-max-length=4294967300 despite being larger than | ||
9 | UINT_MAX and the final value after casting to unsigned int will be 4. | ||
10 | |||
11 | This patch changes the data type to uint64_t. This does not increase | ||
12 | the storage size of LeakyBucket, and allows us to assign the value | ||
13 | directly from qemu_opt_get_number() or BlockIOThrottle and then do the | ||
14 | checks directly in throttle_is_valid(). | ||
15 | |||
16 | The value of burst_length does not have a specific upper limit, | ||
17 | but since the bucket size is defined by max * burst_length we have | ||
18 | to prevent overflows. Instead of going for UINT64_MAX or something | ||
19 | similar this patch reuses THROTTLE_VALUE_MAX, which allows I/O bursts | ||
20 | of 1 GiB/s for 10 days in a row. | ||
21 | |||
22 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
23 | Message-id: 1b2e3049803f71cafb2e1fa1be4fb47147a0d398.1503580370.git.berto@igalia.com | ||
24 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
25 | --- | ||
26 | include/qemu/throttle.h | 2 +- | ||
27 | util/throttle.c | 5 +++++ | ||
28 | 2 files changed, 6 insertions(+), 1 deletion(-) | ||
29 | |||
30 | diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/include/qemu/throttle.h | ||
33 | +++ b/include/qemu/throttle.h | ||
34 | @@ -XXX,XX +XXX,XX @@ typedef struct LeakyBucket { | ||
35 | uint64_t max; /* leaky bucket max burst in units */ | ||
36 | double level; /* bucket level in units */ | ||
37 | double burst_level; /* bucket level in units (for computing bursts) */ | ||
38 | - unsigned burst_length; /* max length of the burst period, in seconds */ | ||
39 | + uint64_t burst_length; /* max length of the burst period, in seconds */ | ||
40 | } LeakyBucket; | ||
41 | |||
42 | /* The following structure is used to configure a ThrottleState | ||
43 | diff --git a/util/throttle.c b/util/throttle.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/util/throttle.c | ||
46 | +++ b/util/throttle.c | ||
47 | @@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp) | ||
48 | return false; | ||
49 | } | ||
50 | |||
51 | + if (bkt->max && bkt->burst_length > THROTTLE_VALUE_MAX / bkt->max) { | ||
52 | + error_setg(errp, "burst length too high for this burst rate"); | ||
53 | + return false; | ||
54 | + } | ||
55 | + | ||
56 | if (bkt->max && !bkt->avg) { | ||
57 | error_setg(errp, "bps_max/iops_max require corresponding" | ||
58 | " bps/iops values"); | ||
59 | -- | ||
60 | 2.13.5 | ||
61 | |||
62 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
4 | Message-id: a57dd6274e1b6dc9c28769fec4c7ea543be5c5e3.1503580370.git.berto@igalia.com | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | --- | ||
7 | tests/test-throttle.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++ | ||
8 | 1 file changed, 77 insertions(+) | ||
9 | |||
10 | diff --git a/tests/test-throttle.c b/tests/test-throttle.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tests/test-throttle.c | ||
13 | +++ b/tests/test-throttle.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void test_is_valid(void) | ||
15 | test_is_valid_for_value(1, true); | ||
16 | } | ||
17 | |||
18 | +static void test_ranges(void) | ||
19 | +{ | ||
20 | + int i; | ||
21 | + | ||
22 | + for (i = 0; i < BUCKETS_COUNT; i++) { | ||
23 | + LeakyBucket *b = &cfg.buckets[i]; | ||
24 | + throttle_config_init(&cfg); | ||
25 | + | ||
26 | + /* avg = 0 means throttling is disabled, but the config is valid */ | ||
27 | + b->avg = 0; | ||
28 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
29 | + g_assert(!throttle_enabled(&cfg)); | ||
30 | + | ||
31 | + /* These are valid configurations (values <= THROTTLE_VALUE_MAX) */ | ||
32 | + b->avg = 1; | ||
33 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
34 | + | ||
35 | + b->avg = THROTTLE_VALUE_MAX; | ||
36 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
37 | + | ||
38 | + b->avg = THROTTLE_VALUE_MAX; | ||
39 | + b->max = THROTTLE_VALUE_MAX; | ||
40 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
41 | + | ||
42 | + /* Values over THROTTLE_VALUE_MAX are not allowed */ | ||
43 | + b->avg = THROTTLE_VALUE_MAX + 1; | ||
44 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
45 | + | ||
46 | + b->avg = THROTTLE_VALUE_MAX; | ||
47 | + b->max = THROTTLE_VALUE_MAX + 1; | ||
48 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
49 | + | ||
50 | + /* burst_length must be between 1 and THROTTLE_VALUE_MAX */ | ||
51 | + b->avg = 1; | ||
52 | + b->max = 1; | ||
53 | + b->burst_length = 0; | ||
54 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
55 | + | ||
56 | + b->avg = 1; | ||
57 | + b->max = 1; | ||
58 | + b->burst_length = 1; | ||
59 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
60 | + | ||
61 | + b->avg = 1; | ||
62 | + b->max = 1; | ||
63 | + b->burst_length = THROTTLE_VALUE_MAX; | ||
64 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
65 | + | ||
66 | + b->avg = 1; | ||
67 | + b->max = 1; | ||
68 | + b->burst_length = THROTTLE_VALUE_MAX + 1; | ||
69 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
70 | + | ||
71 | + /* burst_length * max cannot exceed THROTTLE_VALUE_MAX */ | ||
72 | + b->avg = 1; | ||
73 | + b->max = 2; | ||
74 | + b->burst_length = THROTTLE_VALUE_MAX / 2; | ||
75 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
76 | + | ||
77 | + b->avg = 1; | ||
78 | + b->max = 3; | ||
79 | + b->burst_length = THROTTLE_VALUE_MAX / 2; | ||
80 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
81 | + | ||
82 | + b->avg = 1; | ||
83 | + b->max = THROTTLE_VALUE_MAX; | ||
84 | + b->burst_length = 1; | ||
85 | + g_assert(throttle_is_valid(&cfg, NULL)); | ||
86 | + | ||
87 | + b->avg = 1; | ||
88 | + b->max = THROTTLE_VALUE_MAX; | ||
89 | + b->burst_length = 2; | ||
90 | + g_assert(!throttle_is_valid(&cfg, NULL)); | ||
91 | + } | ||
92 | +} | ||
93 | + | ||
94 | static void test_max_is_missing_limit(void) | ||
95 | { | ||
96 | int i; | ||
97 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
98 | g_test_add_func("/throttle/config/enabled", test_enabled); | ||
99 | g_test_add_func("/throttle/config/conflicting", test_conflicting_config); | ||
100 | g_test_add_func("/throttle/config/is_valid", test_is_valid); | ||
101 | + g_test_add_func("/throttle/config/ranges", test_ranges); | ||
102 | g_test_add_func("/throttle/config/max", test_max_is_missing_limit); | ||
103 | g_test_add_func("/throttle/config/iops_size", | ||
104 | test_iops_size_is_missing_limit); | ||
105 | -- | ||
106 | 2.13.5 | ||
107 | |||
108 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Eduardo Habkost <ehabkost@redhat.com> | ||
1 | 2 | ||
3 | If QEMU is running on a system that's out of memory and mmap() | ||
4 | fails, QEMU aborts with no error message at all, making it hard | ||
5 | to debug the reason for the failure. | ||
6 | |||
7 | Add perror() calls that will print error information before | ||
8 | aborting. | ||
9 | |||
10 | Signed-off-by: Eduardo Habkost <ehabkost@redhat.com> | ||
11 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
12 | Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
13 | Message-id: 20170829212053.6003-1-ehabkost@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | --- | ||
16 | util/oslib-posix.c | 2 ++ | ||
17 | 1 file changed, 2 insertions(+) | ||
18 | |||
19 | diff --git a/util/oslib-posix.c b/util/oslib-posix.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/util/oslib-posix.c | ||
22 | +++ b/util/oslib-posix.c | ||
23 | @@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz) | ||
24 | ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, | ||
25 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | ||
26 | if (ptr == MAP_FAILED) { | ||
27 | + perror("failed to allocate memory for stack"); | ||
28 | abort(); | ||
29 | } | ||
30 | |||
31 | @@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz) | ||
32 | guardpage = ptr; | ||
33 | #endif | ||
34 | if (mprotect(guardpage, pagesz, PROT_NONE) != 0) { | ||
35 | + perror("failed to set up stack guard page"); | ||
36 | abort(); | ||
37 | } | ||
38 | |||
39 | -- | ||
40 | 2.13.5 | ||
41 | |||
42 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Alberto Garcia <berto@igalia.com> | ||
1 | 2 | ||
3 | There are a few cases in which we're passing an Error pointer to a function | ||
4 | only to discard it immediately afterwards without checking it. In | ||
5 | these cases we can simply remove the variable and pass NULL instead. | ||
6 | |||
7 | Signed-off-by: Alberto Garcia <berto@igalia.com> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
10 | Message-id: 20170829120836.16091-1-berto@igalia.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/qcow.c | 12 +++--------- | ||
14 | block/qcow2.c | 8 ++------ | ||
15 | dump.c | 4 +--- | ||
16 | 3 files changed, 6 insertions(+), 18 deletions(-) | ||
17 | |||
18 | diff --git a/block/qcow.c b/block/qcow.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/qcow.c | ||
21 | +++ b/block/qcow.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static uint64_t get_cluster_offset(BlockDriverState *bs, | ||
23 | start_sect = (offset & ~(s->cluster_size - 1)) >> 9; | ||
24 | for(i = 0; i < s->cluster_sectors; i++) { | ||
25 | if (i < n_start || i >= n_end) { | ||
26 | - Error *err = NULL; | ||
27 | memset(s->cluster_data, 0x00, 512); | ||
28 | if (qcrypto_block_encrypt(s->crypto, start_sect + i, | ||
29 | s->cluster_data, | ||
30 | BDRV_SECTOR_SIZE, | ||
31 | - &err) < 0) { | ||
32 | - error_free(err); | ||
33 | + NULL) < 0) { | ||
34 | errno = EIO; | ||
35 | return -1; | ||
36 | } | ||
37 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num, | ||
38 | QEMUIOVector hd_qiov; | ||
39 | uint8_t *buf; | ||
40 | void *orig_buf; | ||
41 | - Error *err = NULL; | ||
42 | |||
43 | if (qiov->niov > 1) { | ||
44 | buf = orig_buf = qemu_try_blockalign(bs, qiov->size); | ||
45 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num, | ||
46 | if (bs->encrypted) { | ||
47 | assert(s->crypto); | ||
48 | if (qcrypto_block_decrypt(s->crypto, sector_num, buf, | ||
49 | - n * BDRV_SECTOR_SIZE, &err) < 0) { | ||
50 | + n * BDRV_SECTOR_SIZE, NULL) < 0) { | ||
51 | goto fail; | ||
52 | } | ||
53 | } | ||
54 | @@ -XXX,XX +XXX,XX @@ done: | ||
55 | return ret; | ||
56 | |||
57 | fail: | ||
58 | - error_free(err); | ||
59 | ret = -EIO; | ||
60 | goto done; | ||
61 | } | ||
62 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num, | ||
63 | break; | ||
64 | } | ||
65 | if (bs->encrypted) { | ||
66 | - Error *err = NULL; | ||
67 | assert(s->crypto); | ||
68 | if (qcrypto_block_encrypt(s->crypto, sector_num, buf, | ||
69 | - n * BDRV_SECTOR_SIZE, &err) < 0) { | ||
70 | - error_free(err); | ||
71 | + n * BDRV_SECTOR_SIZE, NULL) < 0) { | ||
72 | ret = -EIO; | ||
73 | break; | ||
74 | } | ||
75 | diff --git a/block/qcow2.c b/block/qcow2.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | ||
77 | --- a/block/qcow2.c | ||
78 | +++ b/block/qcow2.c | ||
79 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, | ||
80 | assert(s->crypto); | ||
81 | assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); | ||
82 | assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); | ||
83 | - Error *err = NULL; | ||
84 | if (qcrypto_block_decrypt(s->crypto, | ||
85 | (s->crypt_physical_offset ? | ||
86 | cluster_offset + offset_in_cluster : | ||
87 | offset) >> BDRV_SECTOR_BITS, | ||
88 | cluster_data, | ||
89 | cur_bytes, | ||
90 | - &err) < 0) { | ||
91 | - error_free(err); | ||
92 | + NULL) < 0) { | ||
93 | ret = -EIO; | ||
94 | goto fail; | ||
95 | } | ||
96 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
97 | qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); | ||
98 | |||
99 | if (bs->encrypted) { | ||
100 | - Error *err = NULL; | ||
101 | assert(s->crypto); | ||
102 | if (!cluster_data) { | ||
103 | cluster_data = qemu_try_blockalign(bs->file->bs, | ||
104 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
105 | cluster_offset + offset_in_cluster : | ||
106 | offset) >> BDRV_SECTOR_BITS, | ||
107 | cluster_data, | ||
108 | - cur_bytes, &err) < 0) { | ||
109 | - error_free(err); | ||
110 | + cur_bytes, NULL) < 0) { | ||
111 | ret = -EIO; | ||
112 | goto fail; | ||
113 | } | ||
114 | diff --git a/dump.c b/dump.c | ||
115 | index XXXXXXX..XXXXXXX 100644 | ||
116 | --- a/dump.c | ||
117 | +++ b/dump.c | ||
118 | @@ -XXX,XX +XXX,XX @@ static void dump_process(DumpState *s, Error **errp) | ||
119 | |||
120 | static void *dump_thread(void *data) | ||
121 | { | ||
122 | - Error *err = NULL; | ||
123 | DumpState *s = (DumpState *)data; | ||
124 | - dump_process(s, &err); | ||
125 | - error_free(err); | ||
126 | + dump_process(s, NULL); | ||
127 | return NULL; | ||
128 | } | ||
129 | |||
130 | -- | ||
131 | 2.13.5 | ||
132 | |||
133 | diff view generated by jsdifflib |
1 | From: Kashyap Chamarthy <kchamart@redhat.com> | 1 | The minimum Python version supported by QEMU is 2.6. The argparse |
---|---|---|---|
2 | standard library module was only added in Python 2.7. Many scripts | ||
3 | would like to use argparse because it supports command-line | ||
4 | sub-commands. | ||
2 | 5 | ||
3 | This is part of the ongoing effort to convert QEMU upstream | 6 | This patch adds argparse. See the top of argparse.py for details. |
4 | documentation syntax to reStructuredText (rST). | ||
5 | 7 | ||
6 | The conversion to rST was done using: | 8 | Suggested-by: Daniel P. Berrange <berrange@redhat.com> |
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Acked-by: John Snow <jsnow@redhat.com> | ||
11 | Message-id: 20170825155732.15665-2-stefanha@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | --- | ||
14 | COPYING.PYTHON | 270 ++++++ | ||
15 | scripts/argparse.py | 2406 +++++++++++++++++++++++++++++++++++++++++++++++++++ | ||
16 | 2 files changed, 2676 insertions(+) | ||
17 | create mode 100644 COPYING.PYTHON | ||
18 | create mode 100644 scripts/argparse.py | ||
7 | 19 | ||
8 | $ pandoc -f markdown -t rst bitmaps.md -o bitmaps.rst | 20 | diff --git a/COPYING.PYTHON b/COPYING.PYTHON |
9 | |||
10 | Then, make a couple of small syntactical adjustments. While at it, | ||
11 | reword a statement to avoid ambiguity. Addressing the feedback from | ||
12 | this thread: | ||
13 | |||
14 | https://lists.nongnu.org/archive/html/qemu-devel/2017-06/msg05428.html | ||
15 | |||
16 | Signed-off-by: Kashyap Chamarthy <kchamart@redhat.com> | ||
17 | Reviewed-by: John Snow <jsnow@redhat.com> | ||
18 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
19 | Message-id: 20170717105205.32639-2-kchamart@redhat.com | ||
20 | Signed-off-by: Jeff Cody <jcody@redhat.com> | ||
21 | --- | ||
22 | docs/devel/bitmaps.md | 505 ------------------------------------------ | ||
23 | docs/interop/bitmaps.rst | 555 +++++++++++++++++++++++++++++++++++++++++++++++ | ||
24 | 2 files changed, 555 insertions(+), 505 deletions(-) | ||
25 | delete mode 100644 docs/devel/bitmaps.md | ||
26 | create mode 100644 docs/interop/bitmaps.rst | ||
27 | |||
28 | diff --git a/docs/devel/bitmaps.md b/docs/devel/bitmaps.md | ||
29 | deleted file mode 100644 | ||
30 | index XXXXXXX..XXXXXXX | ||
31 | --- a/docs/devel/bitmaps.md | ||
32 | +++ /dev/null | ||
33 | @@ -XXX,XX +XXX,XX @@ | ||
34 | -<!-- | ||
35 | -Copyright 2015 John Snow <jsnow@redhat.com> and Red Hat, Inc. | ||
36 | -All rights reserved. | ||
37 | - | ||
38 | -This file is licensed via The FreeBSD Documentation License, the full text of | ||
39 | -which is included at the end of this document. | ||
40 | ---> | ||
41 | - | ||
42 | -# Dirty Bitmaps and Incremental Backup | ||
43 | - | ||
44 | -* Dirty Bitmaps are objects that track which data needs to be backed up for the | ||
45 | - next incremental backup. | ||
46 | - | ||
47 | -* Dirty bitmaps can be created at any time and attached to any node | ||
48 | - (not just complete drives.) | ||
49 | - | ||
50 | -## Dirty Bitmap Names | ||
51 | - | ||
52 | -* A dirty bitmap's name is unique to the node, but bitmaps attached to different | ||
53 | - nodes can share the same name. | ||
54 | - | ||
55 | -* Dirty bitmaps created for internal use by QEMU may be anonymous and have no | ||
56 | - name, but any user-created bitmaps may not be. There can be any number of | ||
57 | - anonymous bitmaps per node. | ||
58 | - | ||
59 | -* The name of a user-created bitmap must not be empty (""). | ||
60 | - | ||
61 | -## Bitmap Modes | ||
62 | - | ||
63 | -* A Bitmap can be "frozen," which means that it is currently in-use by a backup | ||
64 | - operation and cannot be deleted, renamed, written to, reset, | ||
65 | - etc. | ||
66 | - | ||
67 | -* The normal operating mode for a bitmap is "active." | ||
68 | - | ||
69 | -## Basic QMP Usage | ||
70 | - | ||
71 | -### Supported Commands ### | ||
72 | - | ||
73 | -* block-dirty-bitmap-add | ||
74 | -* block-dirty-bitmap-remove | ||
75 | -* block-dirty-bitmap-clear | ||
76 | - | ||
77 | -### Creation | ||
78 | - | ||
79 | -* To create a new bitmap, enabled, on the drive with id=drive0: | ||
80 | - | ||
81 | -```json | ||
82 | -{ "execute": "block-dirty-bitmap-add", | ||
83 | - "arguments": { | ||
84 | - "node": "drive0", | ||
85 | - "name": "bitmap0" | ||
86 | - } | ||
87 | -} | ||
88 | -``` | ||
89 | - | ||
90 | -* This bitmap will have a default granularity that matches the cluster size of | ||
91 | - its associated drive, if available, clamped to between [4KiB, 64KiB]. | ||
92 | - The current default for qcow2 is 64KiB. | ||
93 | - | ||
94 | -* To create a new bitmap that tracks changes in 32KiB segments: | ||
95 | - | ||
96 | -```json | ||
97 | -{ "execute": "block-dirty-bitmap-add", | ||
98 | - "arguments": { | ||
99 | - "node": "drive0", | ||
100 | - "name": "bitmap0", | ||
101 | - "granularity": 32768 | ||
102 | - } | ||
103 | -} | ||
104 | -``` | ||
105 | - | ||
106 | -### Deletion | ||
107 | - | ||
108 | -* Bitmaps that are frozen cannot be deleted. | ||
109 | - | ||
110 | -* Deleting the bitmap does not impact any other bitmaps attached to the same | ||
111 | - node, nor does it affect any backups already created from this node. | ||
112 | - | ||
113 | -* Because bitmaps are only unique to the node to which they are attached, | ||
114 | - you must specify the node/drive name here, too. | ||
115 | - | ||
116 | -```json | ||
117 | -{ "execute": "block-dirty-bitmap-remove", | ||
118 | - "arguments": { | ||
119 | - "node": "drive0", | ||
120 | - "name": "bitmap0" | ||
121 | - } | ||
122 | -} | ||
123 | -``` | ||
124 | - | ||
125 | -### Resetting | ||
126 | - | ||
127 | -* Resetting a bitmap will clear all information it holds. | ||
128 | - | ||
129 | -* An incremental backup created from an empty bitmap will copy no data, | ||
130 | - as if nothing has changed. | ||
131 | - | ||
132 | -```json | ||
133 | -{ "execute": "block-dirty-bitmap-clear", | ||
134 | - "arguments": { | ||
135 | - "node": "drive0", | ||
136 | - "name": "bitmap0" | ||
137 | - } | ||
138 | -} | ||
139 | -``` | ||
140 | - | ||
141 | -## Transactions | ||
142 | - | ||
143 | -### Justification | ||
144 | - | ||
145 | -Bitmaps can be safely modified when the VM is paused or halted by using | ||
146 | -the basic QMP commands. For instance, you might perform the following actions: | ||
147 | - | ||
148 | -1. Boot the VM in a paused state. | ||
149 | -2. Create a full drive backup of drive0. | ||
150 | -3. Create a new bitmap attached to drive0. | ||
151 | -4. Resume execution of the VM. | ||
152 | -5. Incremental backups are ready to be created. | ||
153 | - | ||
154 | -At this point, the bitmap and drive backup would be correctly in sync, | ||
155 | -and incremental backups made from this point forward would be correctly aligned | ||
156 | -to the full drive backup. | ||
157 | - | ||
158 | -This is not particularly useful if we decide we want to start incremental | ||
159 | -backups after the VM has been running for a while, for which we will need to | ||
160 | -perform actions such as the following: | ||
161 | - | ||
162 | -1. Boot the VM and begin execution. | ||
163 | -2. Using a single transaction, perform the following operations: | ||
164 | - * Create bitmap0. | ||
165 | - * Create a full drive backup of drive0. | ||
166 | -3. Incremental backups are now ready to be created. | ||
167 | - | ||
168 | -### Supported Bitmap Transactions | ||
169 | - | ||
170 | -* block-dirty-bitmap-add | ||
171 | -* block-dirty-bitmap-clear | ||
172 | - | ||
173 | -The usages are identical to their respective QMP commands, but see below | ||
174 | -for examples. | ||
175 | - | ||
176 | -### Example: New Incremental Backup | ||
177 | - | ||
178 | -As outlined in the justification, perhaps we want to create a new incremental | ||
179 | -backup chain attached to a drive. | ||
180 | - | ||
181 | -```json | ||
182 | -{ "execute": "transaction", | ||
183 | - "arguments": { | ||
184 | - "actions": [ | ||
185 | - {"type": "block-dirty-bitmap-add", | ||
186 | - "data": {"node": "drive0", "name": "bitmap0"} }, | ||
187 | - {"type": "drive-backup", | ||
188 | - "data": {"device": "drive0", "target": "/path/to/full_backup.img", | ||
189 | - "sync": "full", "format": "qcow2"} } | ||
190 | - ] | ||
191 | - } | ||
192 | -} | ||
193 | -``` | ||
194 | - | ||
195 | -### Example: New Incremental Backup Anchor Point | ||
196 | - | ||
197 | -Maybe we just want to create a new full backup with an existing bitmap and | ||
198 | -want to reset the bitmap to track the new chain. | ||
199 | - | ||
200 | -```json | ||
201 | -{ "execute": "transaction", | ||
202 | - "arguments": { | ||
203 | - "actions": [ | ||
204 | - {"type": "block-dirty-bitmap-clear", | ||
205 | - "data": {"node": "drive0", "name": "bitmap0"} }, | ||
206 | - {"type": "drive-backup", | ||
207 | - "data": {"device": "drive0", "target": "/path/to/new_full_backup.img", | ||
208 | - "sync": "full", "format": "qcow2"} } | ||
209 | - ] | ||
210 | - } | ||
211 | -} | ||
212 | -``` | ||
213 | - | ||
214 | -## Incremental Backups | ||
215 | - | ||
216 | -The star of the show. | ||
217 | - | ||
218 | -**Nota Bene!** Only incremental backups of entire drives are supported for now. | ||
219 | -So despite the fact that you can attach a bitmap to any arbitrary node, they are | ||
220 | -only currently useful when attached to the root node. This is because | ||
221 | -drive-backup only supports drives/devices instead of arbitrary nodes. | ||
222 | - | ||
223 | -### Example: First Incremental Backup | ||
224 | - | ||
225 | -1. Create a full backup and sync it to the dirty bitmap, as in the transactional | ||
226 | -examples above; or with the VM offline, manually create a full copy and then | ||
227 | -create a new bitmap before the VM begins execution. | ||
228 | - | ||
229 | - * Let's assume the full backup is named 'full_backup.img'. | ||
230 | - * Let's assume the bitmap you created is 'bitmap0' attached to 'drive0'. | ||
231 | - | ||
232 | -2. Create a destination image for the incremental backup that utilizes the | ||
233 | -full backup as a backing image. | ||
234 | - | ||
235 | - * Let's assume it is named 'incremental.0.img'. | ||
236 | - | ||
237 | - ```sh | ||
238 | - # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | ||
239 | - ``` | ||
240 | - | ||
241 | -3. Issue the incremental backup command: | ||
242 | - | ||
243 | - ```json | ||
244 | - { "execute": "drive-backup", | ||
245 | - "arguments": { | ||
246 | - "device": "drive0", | ||
247 | - "bitmap": "bitmap0", | ||
248 | - "target": "incremental.0.img", | ||
249 | - "format": "qcow2", | ||
250 | - "sync": "incremental", | ||
251 | - "mode": "existing" | ||
252 | - } | ||
253 | - } | ||
254 | - ``` | ||
255 | - | ||
256 | -### Example: Second Incremental Backup | ||
257 | - | ||
258 | -1. Create a new destination image for the incremental backup that points to the | ||
259 | - previous one, e.g.: 'incremental.1.img' | ||
260 | - | ||
261 | - ```sh | ||
262 | - # qemu-img create -f qcow2 incremental.1.img -b incremental.0.img -F qcow2 | ||
263 | - ``` | ||
264 | - | ||
265 | -2. Issue a new incremental backup command. The only difference here is that we | ||
266 | - have changed the target image below. | ||
267 | - | ||
268 | - ```json | ||
269 | - { "execute": "drive-backup", | ||
270 | - "arguments": { | ||
271 | - "device": "drive0", | ||
272 | - "bitmap": "bitmap0", | ||
273 | - "target": "incremental.1.img", | ||
274 | - "format": "qcow2", | ||
275 | - "sync": "incremental", | ||
276 | - "mode": "existing" | ||
277 | - } | ||
278 | - } | ||
279 | - ``` | ||
280 | - | ||
281 | -## Errors | ||
282 | - | ||
283 | -* In the event of an error that occurs after a backup job is successfully | ||
284 | - launched, either by a direct QMP command or a QMP transaction, the user | ||
285 | - will receive a BLOCK_JOB_COMPLETE event with a failure message, accompanied | ||
286 | - by a BLOCK_JOB_ERROR event. | ||
287 | - | ||
288 | -* In the case of an event being cancelled, the user will receive a | ||
289 | - BLOCK_JOB_CANCELLED event instead of a pair of COMPLETE and ERROR events. | ||
290 | - | ||
291 | -* In either case, the incremental backup data contained within the bitmap is | ||
292 | - safely rolled back, and the data within the bitmap is not lost. The image | ||
293 | - file created for the failed attempt can be safely deleted. | ||
294 | - | ||
295 | -* Once the underlying problem is fixed (e.g. more storage space is freed up), | ||
296 | - you can simply retry the incremental backup command with the same bitmap. | ||
297 | - | ||
298 | -### Example | ||
299 | - | ||
300 | -1. Create a target image: | ||
301 | - | ||
302 | - ```sh | ||
303 | - # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | ||
304 | - ``` | ||
305 | - | ||
306 | -2. Attempt to create an incremental backup via QMP: | ||
307 | - | ||
308 | - ```json | ||
309 | - { "execute": "drive-backup", | ||
310 | - "arguments": { | ||
311 | - "device": "drive0", | ||
312 | - "bitmap": "bitmap0", | ||
313 | - "target": "incremental.0.img", | ||
314 | - "format": "qcow2", | ||
315 | - "sync": "incremental", | ||
316 | - "mode": "existing" | ||
317 | - } | ||
318 | - } | ||
319 | - ``` | ||
320 | - | ||
321 | -3. Receive an event notifying us of failure: | ||
322 | - | ||
323 | - ```json | ||
324 | - { "timestamp": { "seconds": 1424709442, "microseconds": 844524 }, | ||
325 | - "data": { "speed": 0, "offset": 0, "len": 67108864, | ||
326 | - "error": "No space left on device", | ||
327 | - "device": "drive1", "type": "backup" }, | ||
328 | - "event": "BLOCK_JOB_COMPLETED" } | ||
329 | - ``` | ||
330 | - | ||
331 | -4. Delete the failed incremental, and re-create the image. | ||
332 | - | ||
333 | - ```sh | ||
334 | - # rm incremental.0.img | ||
335 | - # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | ||
336 | - ``` | ||
337 | - | ||
338 | -5. Retry the command after fixing the underlying problem, | ||
339 | - such as freeing up space on the backup volume: | ||
340 | - | ||
341 | - ```json | ||
342 | - { "execute": "drive-backup", | ||
343 | - "arguments": { | ||
344 | - "device": "drive0", | ||
345 | - "bitmap": "bitmap0", | ||
346 | - "target": "incremental.0.img", | ||
347 | - "format": "qcow2", | ||
348 | - "sync": "incremental", | ||
349 | - "mode": "existing" | ||
350 | - } | ||
351 | - } | ||
352 | - ``` | ||
353 | - | ||
354 | -6. Receive confirmation that the job completed successfully: | ||
355 | - | ||
356 | - ```json | ||
357 | - { "timestamp": { "seconds": 1424709668, "microseconds": 526525 }, | ||
358 | - "data": { "device": "drive1", "type": "backup", | ||
359 | - "speed": 0, "len": 67108864, "offset": 67108864}, | ||
360 | - "event": "BLOCK_JOB_COMPLETED" } | ||
361 | - ``` | ||
362 | - | ||
363 | -### Partial Transactional Failures | ||
364 | - | ||
365 | -* Sometimes, a transaction will succeed in launching and return success, | ||
366 | - but then later the backup jobs themselves may fail. It is possible that | ||
367 | - a management application may have to deal with a partial backup failure | ||
368 | - after a successful transaction. | ||
369 | - | ||
370 | -* If multiple backup jobs are specified in a single transaction, when one of | ||
371 | - them fails, it will not interact with the other backup jobs in any way. | ||
372 | - | ||
373 | -* The job(s) that succeeded will clear the dirty bitmap associated with the | ||
374 | - operation, but the job(s) that failed will not. It is not "safe" to delete | ||
375 | - any incremental backups that were created successfully in this scenario, | ||
376 | - even though others failed. | ||
377 | - | ||
378 | -#### Example | ||
379 | - | ||
380 | -* QMP example highlighting two backup jobs: | ||
381 | - | ||
382 | - ```json | ||
383 | - { "execute": "transaction", | ||
384 | - "arguments": { | ||
385 | - "actions": [ | ||
386 | - { "type": "drive-backup", | ||
387 | - "data": { "device": "drive0", "bitmap": "bitmap0", | ||
388 | - "format": "qcow2", "mode": "existing", | ||
389 | - "sync": "incremental", "target": "d0-incr-1.qcow2" } }, | ||
390 | - { "type": "drive-backup", | ||
391 | - "data": { "device": "drive1", "bitmap": "bitmap1", | ||
392 | - "format": "qcow2", "mode": "existing", | ||
393 | - "sync": "incremental", "target": "d1-incr-1.qcow2" } }, | ||
394 | - ] | ||
395 | - } | ||
396 | - } | ||
397 | - ``` | ||
398 | - | ||
399 | -* QMP example response, highlighting one success and one failure: | ||
400 | - * Acknowledgement that the Transaction was accepted and jobs were launched: | ||
401 | - ```json | ||
402 | - { "return": {} } | ||
403 | - ``` | ||
404 | - | ||
405 | - * Later, QEMU sends notice that the first job was completed: | ||
406 | - ```json | ||
407 | - { "timestamp": { "seconds": 1447192343, "microseconds": 615698 }, | ||
408 | - "data": { "device": "drive0", "type": "backup", | ||
409 | - "speed": 0, "len": 67108864, "offset": 67108864 }, | ||
410 | - "event": "BLOCK_JOB_COMPLETED" | ||
411 | - } | ||
412 | - ``` | ||
413 | - | ||
414 | - * Later yet, QEMU sends notice that the second job has failed: | ||
415 | - ```json | ||
416 | - { "timestamp": { "seconds": 1447192399, "microseconds": 683015 }, | ||
417 | - "data": { "device": "drive1", "action": "report", | ||
418 | - "operation": "read" }, | ||
419 | - "event": "BLOCK_JOB_ERROR" } | ||
420 | - ``` | ||
421 | - | ||
422 | - ```json | ||
423 | - { "timestamp": { "seconds": 1447192399, "microseconds": 685853 }, | ||
424 | - "data": { "speed": 0, "offset": 0, "len": 67108864, | ||
425 | - "error": "Input/output error", | ||
426 | - "device": "drive1", "type": "backup" }, | ||
427 | - "event": "BLOCK_JOB_COMPLETED" } | ||
428 | - | ||
429 | -* In the above example, "d0-incr-1.qcow2" is valid and must be kept, | ||
430 | - but "d1-incr-1.qcow2" is invalid and should be deleted. If a VM-wide | ||
431 | - incremental backup of all drives at a point-in-time is to be made, | ||
432 | - new backups for both drives will need to be made, taking into account | ||
433 | - that a new incremental backup for drive0 needs to be based on top of | ||
434 | - "d0-incr-1.qcow2." | ||
435 | - | ||
436 | -### Grouped Completion Mode | ||
437 | - | ||
438 | -* While jobs launched by transactions normally complete or fail on their own, | ||
439 | - it is possible to instruct them to complete or fail together as a group. | ||
440 | - | ||
441 | -* QMP transactions take an optional properties structure that can affect | ||
442 | - the semantics of the transaction. | ||
443 | - | ||
444 | -* The "completion-mode" transaction property can be either "individual" | ||
445 | - which is the default, legacy behavior described above, or "grouped," | ||
446 | - a new behavior detailed below. | ||
447 | - | ||
448 | -* Delayed Completion: In grouped completion mode, no jobs will report | ||
449 | - success until all jobs are ready to report success. | ||
450 | - | ||
451 | -* Grouped failure: If any job fails in grouped completion mode, all remaining | ||
452 | - jobs will be cancelled. Any incremental backups will restore their dirty | ||
453 | - bitmap objects as if no backup command was ever issued. | ||
454 | - | ||
455 | - * Regardless of if QEMU reports a particular incremental backup job as | ||
456 | - CANCELLED or as an ERROR, the in-memory bitmap will be restored. | ||
457 | - | ||
458 | -#### Example | ||
459 | - | ||
460 | -* Here's the same example scenario from above with the new property: | ||
461 | - | ||
462 | - ```json | ||
463 | - { "execute": "transaction", | ||
464 | - "arguments": { | ||
465 | - "actions": [ | ||
466 | - { "type": "drive-backup", | ||
467 | - "data": { "device": "drive0", "bitmap": "bitmap0", | ||
468 | - "format": "qcow2", "mode": "existing", | ||
469 | - "sync": "incremental", "target": "d0-incr-1.qcow2" } }, | ||
470 | - { "type": "drive-backup", | ||
471 | - "data": { "device": "drive1", "bitmap": "bitmap1", | ||
472 | - "format": "qcow2", "mode": "existing", | ||
473 | - "sync": "incremental", "target": "d1-incr-1.qcow2" } }, | ||
474 | - ], | ||
475 | - "properties": { | ||
476 | - "completion-mode": "grouped" | ||
477 | - } | ||
478 | - } | ||
479 | - } | ||
480 | - ``` | ||
481 | - | ||
482 | -* QMP example response, highlighting a failure for drive2: | ||
483 | - * Acknowledgement that the Transaction was accepted and jobs were launched: | ||
484 | - ```json | ||
485 | - { "return": {} } | ||
486 | - ``` | ||
487 | - | ||
488 | - * Later, QEMU sends notice that the second job has errored out, | ||
489 | - but that the first job was also cancelled: | ||
490 | - ```json | ||
491 | - { "timestamp": { "seconds": 1447193702, "microseconds": 632377 }, | ||
492 | - "data": { "device": "drive1", "action": "report", | ||
493 | - "operation": "read" }, | ||
494 | - "event": "BLOCK_JOB_ERROR" } | ||
495 | - ``` | ||
496 | - | ||
497 | - ```json | ||
498 | - { "timestamp": { "seconds": 1447193702, "microseconds": 640074 }, | ||
499 | - "data": { "speed": 0, "offset": 0, "len": 67108864, | ||
500 | - "error": "Input/output error", | ||
501 | - "device": "drive1", "type": "backup" }, | ||
502 | - "event": "BLOCK_JOB_COMPLETED" } | ||
503 | - ``` | ||
504 | - | ||
505 | - ```json | ||
506 | - { "timestamp": { "seconds": 1447193702, "microseconds": 640163 }, | ||
507 | - "data": { "device": "drive0", "type": "backup", "speed": 0, | ||
508 | - "len": 67108864, "offset": 16777216 }, | ||
509 | - "event": "BLOCK_JOB_CANCELLED" } | ||
510 | - ``` | ||
511 | - | ||
512 | -<!-- | ||
513 | -The FreeBSD Documentation License | ||
514 | - | ||
515 | -Redistribution and use in source (Markdown) and 'compiled' forms (SGML, HTML, | ||
516 | -PDF, PostScript, RTF and so forth) with or without modification, are permitted | ||
517 | -provided that the following conditions are met: | ||
518 | - | ||
519 | -Redistributions of source code (Markdown) must retain the above copyright | ||
520 | -notice, this list of conditions and the following disclaimer of this file | ||
521 | -unmodified. | ||
522 | - | ||
523 | -Redistributions in compiled form (transformed to other DTDs, converted to PDF, | ||
524 | -PostScript, RTF and other formats) must reproduce the above copyright notice, | ||
525 | -this list of conditions and the following disclaimer in the documentation and/or | ||
526 | -other materials provided with the distribution. | ||
527 | - | ||
528 | -THIS DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
529 | -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
530 | -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
531 | -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | ||
532 | -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
533 | -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | ||
534 | -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
535 | -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
536 | -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
537 | -THIS DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
538 | ---> | ||
539 | diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst | ||
540 | new file mode 100644 | 21 | new file mode 100644 |
541 | index XXXXXXX..XXXXXXX | 22 | index XXXXXXX..XXXXXXX |
542 | --- /dev/null | 23 | --- /dev/null |
543 | +++ b/docs/interop/bitmaps.rst | 24 | +++ b/COPYING.PYTHON |
544 | @@ -XXX,XX +XXX,XX @@ | 25 | @@ -XXX,XX +XXX,XX @@ |
545 | +.. | 26 | +A. HISTORY OF THE SOFTWARE |
546 | + Copyright 2015 John Snow <jsnow@redhat.com> and Red Hat, Inc. | 27 | +========================== |
547 | + All rights reserved. | 28 | + |
548 | + | 29 | +Python was created in the early 1990s by Guido van Rossum at Stichting |
549 | + This file is licensed via The FreeBSD Documentation License, the full | 30 | +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands |
550 | + text of which is included at the end of this document. | 31 | +as a successor of a language called ABC. Guido remains Python's |
551 | + | 32 | +principal author, although it includes many contributions from others. |
552 | +==================================== | 33 | + |
553 | +Dirty Bitmaps and Incremental Backup | 34 | +In 1995, Guido continued his work on Python at the Corporation for |
554 | +==================================== | 35 | +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) |
555 | + | 36 | +in Reston, Virginia where he released several versions of the |
556 | +- Dirty Bitmaps are objects that track which data needs to be backed up | 37 | +software. |
557 | + for the next incremental backup. | 38 | + |
558 | + | 39 | +In May 2000, Guido and the Python core development team moved to |
559 | +- Dirty bitmaps can be created at any time and attached to any node | 40 | +BeOpen.com to form the BeOpen PythonLabs team. In October of the same |
560 | + (not just complete drives). | 41 | +year, the PythonLabs team moved to Digital Creations (now Zope |
561 | + | 42 | +Corporation, see http://www.zope.com). In 2001, the Python Software |
562 | +.. contents:: | 43 | +Foundation (PSF, see http://www.python.org/psf/) was formed, a |
563 | + | 44 | +non-profit organization created specifically to own Python-related |
564 | +Dirty Bitmap Names | 45 | +Intellectual Property. Zope Corporation is a sponsoring member of |
565 | +------------------ | 46 | +the PSF. |
566 | + | 47 | + |
567 | +- A dirty bitmap's name is unique to the node, but bitmaps attached to | 48 | +All Python releases are Open Source (see http://www.opensource.org for |
568 | + different nodes can share the same name. | 49 | +the Open Source Definition). Historically, most, but not all, Python |
569 | + | 50 | +releases have also been GPL-compatible; the table below summarizes |
570 | +- Dirty bitmaps created for internal use by QEMU may be anonymous and | 51 | +the various releases. |
571 | + have no name, but any user-created bitmaps must have a name. There | 52 | + |
572 | + can be any number of anonymous bitmaps per node. | 53 | + Release Derived Year Owner GPL- |
573 | + | 54 | + from compatible? (1) |
574 | +- The name of a user-created bitmap must not be empty (""). | 55 | + |
575 | + | 56 | + 0.9.0 thru 1.2 1991-1995 CWI yes |
576 | +Bitmap Modes | 57 | + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes |
577 | +------------ | 58 | + 1.6 1.5.2 2000 CNRI no |
578 | + | 59 | + 2.0 1.6 2000 BeOpen.com no |
579 | +- A bitmap can be "frozen," which means that it is currently in-use by | 60 | + 1.6.1 1.6 2001 CNRI yes (2) |
580 | + a backup operation and cannot be deleted, renamed, written to, reset, | 61 | + 2.1 2.0+1.6.1 2001 PSF no |
581 | + etc. | 62 | + 2.0.1 2.0+1.6.1 2001 PSF yes |
582 | + | 63 | + 2.1.1 2.1+2.0.1 2001 PSF yes |
583 | +- The normal operating mode for a bitmap is "active." | 64 | + 2.2 2.1.1 2001 PSF yes |
584 | + | 65 | + 2.1.2 2.1.1 2002 PSF yes |
585 | +Basic QMP Usage | 66 | + 2.1.3 2.1.2 2002 PSF yes |
586 | +--------------- | 67 | + 2.2.1 2.2 2002 PSF yes |
587 | + | 68 | + 2.2.2 2.2.1 2002 PSF yes |
588 | +Supported Commands | 69 | + 2.2.3 2.2.2 2003 PSF yes |
589 | +~~~~~~~~~~~~~~~~~~ | 70 | + 2.3 2.2.2 2002-2003 PSF yes |
590 | + | 71 | + 2.3.1 2.3 2002-2003 PSF yes |
591 | +- ``block-dirty-bitmap-add`` | 72 | + 2.3.2 2.3.1 2002-2003 PSF yes |
592 | +- ``block-dirty-bitmap-remove`` | 73 | + 2.3.3 2.3.2 2002-2003 PSF yes |
593 | +- ``block-dirty-bitmap-clear`` | 74 | + 2.3.4 2.3.3 2004 PSF yes |
594 | + | 75 | + 2.3.5 2.3.4 2005 PSF yes |
595 | +Creation | 76 | + 2.4 2.3 2004 PSF yes |
596 | +~~~~~~~~ | 77 | + 2.4.1 2.4 2005 PSF yes |
597 | + | 78 | + 2.4.2 2.4.1 2005 PSF yes |
598 | +- To create a new bitmap, enabled, on the drive with id=drive0: | 79 | + 2.4.3 2.4.2 2006 PSF yes |
599 | + | 80 | + 2.5 2.4 2006 PSF yes |
600 | +.. code:: json | 81 | + 2.7 2.6 2010 PSF yes |
601 | + | 82 | + |
602 | + { "execute": "block-dirty-bitmap-add", | 83 | +Footnotes: |
603 | + "arguments": { | 84 | + |
604 | + "node": "drive0", | 85 | +(1) GPL-compatible doesn't mean that we're distributing Python under |
605 | + "name": "bitmap0" | 86 | + the GPL. All Python licenses, unlike the GPL, let you distribute |
606 | + } | 87 | + a modified version without making your changes open source. The |
607 | + } | 88 | + GPL-compatible licenses make it possible to combine Python with |
608 | + | 89 | + other software that is released under the GPL; the others don't. |
609 | +- This bitmap will have a default granularity that matches the cluster | 90 | + |
610 | + size of its associated drive, if available, clamped to between [4KiB, | 91 | +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, |
611 | + 64KiB]. The current default for qcow2 is 64KiB. | 92 | + because its license has a choice of law clause. According to |
612 | + | 93 | + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 |
613 | +- To create a new bitmap that tracks changes in 32KiB segments: | 94 | + is "not incompatible" with the GPL. |
614 | + | 95 | + |
615 | +.. code:: json | 96 | +Thanks to the many outside volunteers who have worked under Guido's |
616 | + | 97 | +direction to make these releases possible. |
617 | + { "execute": "block-dirty-bitmap-add", | 98 | + |
618 | + "arguments": { | 99 | + |
619 | + "node": "drive0", | 100 | +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON |
620 | + "name": "bitmap0", | 101 | +=============================================================== |
621 | + "granularity": 32768 | 102 | + |
622 | + } | 103 | +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 |
623 | + } | 104 | +-------------------------------------------- |
624 | + | 105 | + |
625 | +Deletion | 106 | +1. This LICENSE AGREEMENT is between the Python Software Foundation |
626 | +~~~~~~~~ | 107 | +("PSF"), and the Individual or Organization ("Licensee") accessing and |
627 | + | 108 | +otherwise using this software ("Python") in source or binary form and |
628 | +- Bitmaps that are frozen cannot be deleted. | 109 | +its associated documentation. |
629 | + | 110 | + |
630 | +- Deleting the bitmap does not impact any other bitmaps attached to the | 111 | +2. Subject to the terms and conditions of this License Agreement, PSF |
631 | + same node, nor does it affect any backups already created from this | 112 | +hereby grants Licensee a nonexclusive, royalty-free, world-wide |
632 | + node. | 113 | +license to reproduce, analyze, test, perform and/or display publicly, |
633 | + | 114 | +prepare derivative works, distribute, and otherwise use Python |
634 | +- Because bitmaps are only unique to the node to which they are | 115 | +alone or in any derivative version, provided, however, that PSF's |
635 | + attached, you must specify the node/drive name here, too. | 116 | +License Agreement and PSF's notice of copyright, i.e., "Copyright (c) |
636 | + | 117 | +2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights |
637 | +.. code:: json | 118 | +Reserved" are retained in Python alone or in any derivative version |
638 | + | 119 | +prepared by Licensee. |
639 | + { "execute": "block-dirty-bitmap-remove", | 120 | + |
640 | + "arguments": { | 121 | +3. In the event Licensee prepares a derivative work that is based on |
641 | + "node": "drive0", | 122 | +or incorporates Python or any part thereof, and wants to make |
642 | + "name": "bitmap0" | 123 | +the derivative work available to others as provided herein, then |
643 | + } | 124 | +Licensee hereby agrees to include in any such work a brief summary of |
644 | + } | 125 | +the changes made to Python. |
645 | + | 126 | + |
646 | +Resetting | 127 | +4. PSF is making Python available to Licensee on an "AS IS" |
647 | +~~~~~~~~~ | 128 | +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR |
648 | + | 129 | +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND |
649 | +- Resetting a bitmap will clear all information it holds. | 130 | +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS |
650 | + | 131 | +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT |
651 | +- An incremental backup created from an empty bitmap will copy no data, | 132 | +INFRINGE ANY THIRD PARTY RIGHTS. |
652 | + as if nothing has changed. | 133 | + |
653 | + | 134 | +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON |
654 | +.. code:: json | 135 | +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS |
655 | + | 136 | +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, |
656 | + { "execute": "block-dirty-bitmap-clear", | 137 | +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. |
657 | + "arguments": { | 138 | + |
658 | + "node": "drive0", | 139 | +6. This License Agreement will automatically terminate upon a material |
659 | + "name": "bitmap0" | 140 | +breach of its terms and conditions. |
660 | + } | 141 | + |
661 | + } | 142 | +7. Nothing in this License Agreement shall be deemed to create any |
662 | + | 143 | +relationship of agency, partnership, or joint venture between PSF and |
663 | +Transactions | 144 | +Licensee. This License Agreement does not grant permission to use PSF |
664 | +------------ | 145 | +trademarks or trade name in a trademark sense to endorse or promote |
665 | + | 146 | +products or services of Licensee, or any third party. |
666 | +Justification | 147 | + |
667 | +~~~~~~~~~~~~~ | 148 | +8. By copying, installing or otherwise using Python, Licensee |
668 | + | 149 | +agrees to be bound by the terms and conditions of this License |
669 | +Bitmaps can be safely modified when the VM is paused or halted by using | 150 | +Agreement. |
670 | +the basic QMP commands. For instance, you might perform the following | 151 | + |
671 | +actions: | 152 | + |
672 | + | 153 | +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 |
673 | +1. Boot the VM in a paused state. | 154 | +------------------------------------------- |
674 | +2. Create a full drive backup of drive0. | 155 | + |
675 | +3. Create a new bitmap attached to drive0. | 156 | +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 |
676 | +4. Resume execution of the VM. | 157 | + |
677 | +5. Incremental backups are ready to be created. | 158 | +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an |
678 | + | 159 | +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the |
679 | +At this point, the bitmap and drive backup would be correctly in sync, | 160 | +Individual or Organization ("Licensee") accessing and otherwise using |
680 | +and incremental backups made from this point forward would be correctly | 161 | +this software in source or binary form and its associated |
681 | +aligned to the full drive backup. | 162 | +documentation ("the Software"). |
682 | + | 163 | + |
683 | +This is not particularly useful if we decide we want to start | 164 | +2. Subject to the terms and conditions of this BeOpen Python License |
684 | +incremental backups after the VM has been running for a while, for which | 165 | +Agreement, BeOpen hereby grants Licensee a non-exclusive, |
685 | +we will need to perform actions such as the following: | 166 | +royalty-free, world-wide license to reproduce, analyze, test, perform |
686 | + | 167 | +and/or display publicly, prepare derivative works, distribute, and |
687 | +1. Boot the VM and begin execution. | 168 | +otherwise use the Software alone or in any derivative version, |
688 | +2. Using a single transaction, perform the following operations: | 169 | +provided, however, that the BeOpen Python License is retained in the |
689 | + | 170 | +Software, alone or in any derivative version prepared by Licensee. |
690 | + - Create ``bitmap0``. | 171 | + |
691 | + - Create a full drive backup of ``drive0``. | 172 | +3. BeOpen is making the Software available to Licensee on an "AS IS" |
692 | + | 173 | +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR |
693 | +3. Incremental backups are now ready to be created. | 174 | +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND |
694 | + | 175 | +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS |
695 | +Supported Bitmap Transactions | 176 | +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT |
696 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 177 | +INFRINGE ANY THIRD PARTY RIGHTS. |
697 | + | 178 | + |
698 | +- ``block-dirty-bitmap-add`` | 179 | +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE |
699 | +- ``block-dirty-bitmap-clear`` | 180 | +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS |
700 | + | 181 | +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY |
701 | +The usages are identical to their respective QMP commands, but see below | 182 | +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. |
702 | +for examples. | 183 | + |
703 | + | 184 | +5. This License Agreement will automatically terminate upon a material |
704 | +Example: New Incremental Backup | 185 | +breach of its terms and conditions. |
705 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 186 | + |
706 | + | 187 | +6. This License Agreement shall be governed by and interpreted in all |
707 | +As outlined in the justification, perhaps we want to create a new | 188 | +respects by the law of the State of California, excluding conflict of |
708 | +incremental backup chain attached to a drive. | 189 | +law provisions. Nothing in this License Agreement shall be deemed to |
709 | + | 190 | +create any relationship of agency, partnership, or joint venture |
710 | +.. code:: json | 191 | +between BeOpen and Licensee. This License Agreement does not grant |
711 | + | 192 | +permission to use BeOpen trademarks or trade names in a trademark |
712 | + { "execute": "transaction", | 193 | +sense to endorse or promote products or services of Licensee, or any |
713 | + "arguments": { | 194 | +third party. As an exception, the "BeOpen Python" logos available at |
714 | + "actions": [ | 195 | +http://www.pythonlabs.com/logos.html may be used according to the |
715 | + {"type": "block-dirty-bitmap-add", | 196 | +permissions granted on that web page. |
716 | + "data": {"node": "drive0", "name": "bitmap0"} }, | 197 | + |
717 | + {"type": "drive-backup", | 198 | +7. By copying, installing or otherwise using the software, Licensee |
718 | + "data": {"device": "drive0", "target": "/path/to/full_backup.img", | 199 | +agrees to be bound by the terms and conditions of this License |
719 | + "sync": "full", "format": "qcow2"} } | 200 | +Agreement. |
201 | + | ||
202 | + | ||
203 | +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 | ||
204 | +--------------------------------------- | ||
205 | + | ||
206 | +1. This LICENSE AGREEMENT is between the Corporation for National | ||
207 | +Research Initiatives, having an office at 1895 Preston White Drive, | ||
208 | +Reston, VA 20191 ("CNRI"), and the Individual or Organization | ||
209 | +("Licensee") accessing and otherwise using Python 1.6.1 software in | ||
210 | +source or binary form and its associated documentation. | ||
211 | + | ||
212 | +2. Subject to the terms and conditions of this License Agreement, CNRI | ||
213 | +hereby grants Licensee a nonexclusive, royalty-free, world-wide | ||
214 | +license to reproduce, analyze, test, perform and/or display publicly, | ||
215 | +prepare derivative works, distribute, and otherwise use Python 1.6.1 | ||
216 | +alone or in any derivative version, provided, however, that CNRI's | ||
217 | +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) | ||
218 | +1995-2001 Corporation for National Research Initiatives; All Rights | ||
219 | +Reserved" are retained in Python 1.6.1 alone or in any derivative | ||
220 | +version prepared by Licensee. Alternately, in lieu of CNRI's License | ||
221 | +Agreement, Licensee may substitute the following text (omitting the | ||
222 | +quotes): "Python 1.6.1 is made available subject to the terms and | ||
223 | +conditions in CNRI's License Agreement. This Agreement together with | ||
224 | +Python 1.6.1 may be located on the Internet using the following | ||
225 | +unique, persistent identifier (known as a handle): 1895.22/1013. This | ||
226 | +Agreement may also be obtained from a proxy server on the Internet | ||
227 | +using the following URL: http://hdl.handle.net/1895.22/1013". | ||
228 | + | ||
229 | +3. In the event Licensee prepares a derivative work that is based on | ||
230 | +or incorporates Python 1.6.1 or any part thereof, and wants to make | ||
231 | +the derivative work available to others as provided herein, then | ||
232 | +Licensee hereby agrees to include in any such work a brief summary of | ||
233 | +the changes made to Python 1.6.1. | ||
234 | + | ||
235 | +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" | ||
236 | +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR | ||
237 | +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND | ||
238 | +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS | ||
239 | +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT | ||
240 | +INFRINGE ANY THIRD PARTY RIGHTS. | ||
241 | + | ||
242 | +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON | ||
243 | +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS | ||
244 | +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, | ||
245 | +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. | ||
246 | + | ||
247 | +6. This License Agreement will automatically terminate upon a material | ||
248 | +breach of its terms and conditions. | ||
249 | + | ||
250 | +7. This License Agreement shall be governed by the federal | ||
251 | +intellectual property law of the United States, including without | ||
252 | +limitation the federal copyright law, and, to the extent such | ||
253 | +U.S. federal law does not apply, by the law of the Commonwealth of | ||
254 | +Virginia, excluding Virginia's conflict of law provisions. | ||
255 | +Notwithstanding the foregoing, with regard to derivative works based | ||
256 | +on Python 1.6.1 that incorporate non-separable material that was | ||
257 | +previously distributed under the GNU General Public License (GPL), the | ||
258 | +law of the Commonwealth of Virginia shall govern this License | ||
259 | +Agreement only as to issues arising under or with respect to | ||
260 | +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this | ||
261 | +License Agreement shall be deemed to create any relationship of | ||
262 | +agency, partnership, or joint venture between CNRI and Licensee. This | ||
263 | +License Agreement does not grant permission to use CNRI trademarks or | ||
264 | +trade name in a trademark sense to endorse or promote products or | ||
265 | +services of Licensee, or any third party. | ||
266 | + | ||
267 | +8. By clicking on the "ACCEPT" button where indicated, or by copying, | ||
268 | +installing or otherwise using Python 1.6.1, Licensee agrees to be | ||
269 | +bound by the terms and conditions of this License Agreement. | ||
270 | + | ||
271 | + ACCEPT | ||
272 | + | ||
273 | + | ||
274 | +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 | ||
275 | +-------------------------------------------------- | ||
276 | + | ||
277 | +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, | ||
278 | +The Netherlands. All rights reserved. | ||
279 | + | ||
280 | +Permission to use, copy, modify, and distribute this software and its | ||
281 | +documentation for any purpose and without fee is hereby granted, | ||
282 | +provided that the above copyright notice appear in all copies and that | ||
283 | +both that copyright notice and this permission notice appear in | ||
284 | +supporting documentation, and that the name of Stichting Mathematisch | ||
285 | +Centrum or CWI not be used in advertising or publicity pertaining to | ||
286 | +distribution of the software without specific, written prior | ||
287 | +permission. | ||
288 | + | ||
289 | +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO | ||
290 | +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND | ||
291 | +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE | ||
292 | +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
293 | +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
294 | +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT | ||
295 | +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
296 | diff --git a/scripts/argparse.py b/scripts/argparse.py | ||
297 | new file mode 100644 | ||
298 | index XXXXXXX..XXXXXXX | ||
299 | --- /dev/null | ||
300 | +++ b/scripts/argparse.py | ||
301 | @@ -XXX,XX +XXX,XX @@ | ||
302 | +# This is a local copy of the standard library argparse module taken from PyPI. | ||
303 | +# It is licensed under the Python Software Foundation License. This is a | ||
304 | +# fallback for Python 2.6 which does not include this module. Python 2.7+ and | ||
305 | +# 3+ will never load this module because built-in modules are loaded before | ||
306 | +# anything in sys.path. | ||
307 | +# | ||
308 | +# If your script is not located in the same directory as this file, import it | ||
309 | +# like this: | ||
310 | +# | ||
311 | +# import os | ||
312 | +# import sys | ||
313 | +# sys.path.append(os.path.join(os.path.dirname(__file__), ..., 'scripts')) | ||
314 | +# import argparse | ||
315 | + | ||
316 | +# Author: Steven J. Bethard <steven.bethard@gmail.com>. | ||
317 | +# Maintainer: Thomas Waldmann <tw@waldmann-edv.de> | ||
318 | + | ||
319 | +"""Command-line parsing library | ||
320 | + | ||
321 | +This module is an optparse-inspired command-line parsing library that: | ||
322 | + | ||
323 | + - handles both optional and positional arguments | ||
324 | + - produces highly informative usage messages | ||
325 | + - supports parsers that dispatch to sub-parsers | ||
326 | + | ||
327 | +The following is a simple usage example that sums integers from the | ||
328 | +command-line and writes the result to a file:: | ||
329 | + | ||
330 | + parser = argparse.ArgumentParser( | ||
331 | + description='sum the integers at the command line') | ||
332 | + parser.add_argument( | ||
333 | + 'integers', metavar='int', nargs='+', type=int, | ||
334 | + help='an integer to be summed') | ||
335 | + parser.add_argument( | ||
336 | + '--log', default=sys.stdout, type=argparse.FileType('w'), | ||
337 | + help='the file where the sum should be written') | ||
338 | + args = parser.parse_args() | ||
339 | + args.log.write('%s' % sum(args.integers)) | ||
340 | + args.log.close() | ||
341 | + | ||
342 | +The module contains the following public classes: | ||
343 | + | ||
344 | + - ArgumentParser -- The main entry point for command-line parsing. As the | ||
345 | + example above shows, the add_argument() method is used to populate | ||
346 | + the parser with actions for optional and positional arguments. Then | ||
347 | + the parse_args() method is invoked to convert the args at the | ||
348 | + command-line into an object with attributes. | ||
349 | + | ||
350 | + - ArgumentError -- The exception raised by ArgumentParser objects when | ||
351 | + there are errors with the parser's actions. Errors raised while | ||
352 | + parsing the command-line are caught by ArgumentParser and emitted | ||
353 | + as command-line messages. | ||
354 | + | ||
355 | + - FileType -- A factory for defining types of files to be created. As the | ||
356 | + example above shows, instances of FileType are typically passed as | ||
357 | + the type= argument of add_argument() calls. | ||
358 | + | ||
359 | + - Action -- The base class for parser actions. Typically actions are | ||
360 | + selected by passing strings like 'store_true' or 'append_const' to | ||
361 | + the action= argument of add_argument(). However, for greater | ||
362 | + customization of ArgumentParser actions, subclasses of Action may | ||
363 | + be defined and passed as the action= argument. | ||
364 | + | ||
365 | + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, | ||
366 | + ArgumentDefaultsHelpFormatter -- Formatter classes which | ||
367 | + may be passed as the formatter_class= argument to the | ||
368 | + ArgumentParser constructor. HelpFormatter is the default, | ||
369 | + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser | ||
370 | + not to change the formatting for help text, and | ||
371 | + ArgumentDefaultsHelpFormatter adds information about argument defaults | ||
372 | + to the help. | ||
373 | + | ||
374 | +All other classes in this module are considered implementation details. | ||
375 | +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only | ||
376 | +considered public as object names -- the API of the formatter objects is | ||
377 | +still considered an implementation detail.) | ||
378 | +""" | ||
379 | + | ||
380 | +__version__ = '1.4.0' # we use our own version number independant of the | ||
381 | + # one in stdlib and we release this on pypi. | ||
382 | + | ||
383 | +__external_lib__ = True # to make sure the tests really test THIS lib, | ||
384 | + # not the builtin one in Python stdlib | ||
385 | + | ||
386 | +__all__ = [ | ||
387 | + 'ArgumentParser', | ||
388 | + 'ArgumentError', | ||
389 | + 'ArgumentTypeError', | ||
390 | + 'FileType', | ||
391 | + 'HelpFormatter', | ||
392 | + 'ArgumentDefaultsHelpFormatter', | ||
393 | + 'RawDescriptionHelpFormatter', | ||
394 | + 'RawTextHelpFormatter', | ||
395 | + 'Namespace', | ||
396 | + 'Action', | ||
397 | + 'ONE_OR_MORE', | ||
398 | + 'OPTIONAL', | ||
399 | + 'PARSER', | ||
400 | + 'REMAINDER', | ||
401 | + 'SUPPRESS', | ||
402 | + 'ZERO_OR_MORE', | ||
403 | +] | ||
404 | + | ||
405 | + | ||
406 | +import copy as _copy | ||
407 | +import os as _os | ||
408 | +import re as _re | ||
409 | +import sys as _sys | ||
410 | +import textwrap as _textwrap | ||
411 | + | ||
412 | +from gettext import gettext as _ | ||
413 | + | ||
414 | +try: | ||
415 | + set | ||
416 | +except NameError: | ||
417 | + # for python < 2.4 compatibility (sets module is there since 2.3): | ||
418 | + from sets import Set as set | ||
419 | + | ||
420 | +try: | ||
421 | + basestring | ||
422 | +except NameError: | ||
423 | + basestring = str | ||
424 | + | ||
425 | +try: | ||
426 | + sorted | ||
427 | +except NameError: | ||
428 | + # for python < 2.4 compatibility: | ||
429 | + def sorted(iterable, reverse=False): | ||
430 | + result = list(iterable) | ||
431 | + result.sort() | ||
432 | + if reverse: | ||
433 | + result.reverse() | ||
434 | + return result | ||
435 | + | ||
436 | + | ||
437 | +def _callable(obj): | ||
438 | + return hasattr(obj, '__call__') or hasattr(obj, '__bases__') | ||
439 | + | ||
440 | + | ||
441 | +SUPPRESS = '==SUPPRESS==' | ||
442 | + | ||
443 | +OPTIONAL = '?' | ||
444 | +ZERO_OR_MORE = '*' | ||
445 | +ONE_OR_MORE = '+' | ||
446 | +PARSER = 'A...' | ||
447 | +REMAINDER = '...' | ||
448 | +_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' | ||
449 | + | ||
450 | +# ============================= | ||
451 | +# Utility functions and classes | ||
452 | +# ============================= | ||
453 | + | ||
454 | +class _AttributeHolder(object): | ||
455 | + """Abstract base class that provides __repr__. | ||
456 | + | ||
457 | + The __repr__ method returns a string in the format:: | ||
458 | + ClassName(attr=name, attr=name, ...) | ||
459 | + The attributes are determined either by a class-level attribute, | ||
460 | + '_kwarg_names', or by inspecting the instance __dict__. | ||
461 | + """ | ||
462 | + | ||
463 | + def __repr__(self): | ||
464 | + type_name = type(self).__name__ | ||
465 | + arg_strings = [] | ||
466 | + for arg in self._get_args(): | ||
467 | + arg_strings.append(repr(arg)) | ||
468 | + for name, value in self._get_kwargs(): | ||
469 | + arg_strings.append('%s=%r' % (name, value)) | ||
470 | + return '%s(%s)' % (type_name, ', '.join(arg_strings)) | ||
471 | + | ||
472 | + def _get_kwargs(self): | ||
473 | + return sorted(self.__dict__.items()) | ||
474 | + | ||
475 | + def _get_args(self): | ||
476 | + return [] | ||
477 | + | ||
478 | + | ||
479 | +def _ensure_value(namespace, name, value): | ||
480 | + if getattr(namespace, name, None) is None: | ||
481 | + setattr(namespace, name, value) | ||
482 | + return getattr(namespace, name) | ||
483 | + | ||
484 | + | ||
485 | +# =============== | ||
486 | +# Formatting Help | ||
487 | +# =============== | ||
488 | + | ||
489 | +class HelpFormatter(object): | ||
490 | + """Formatter for generating usage messages and argument help strings. | ||
491 | + | ||
492 | + Only the name of this class is considered a public API. All the methods | ||
493 | + provided by the class are considered an implementation detail. | ||
494 | + """ | ||
495 | + | ||
496 | + def __init__(self, | ||
497 | + prog, | ||
498 | + indent_increment=2, | ||
499 | + max_help_position=24, | ||
500 | + width=None): | ||
501 | + | ||
502 | + # default setting for width | ||
503 | + if width is None: | ||
504 | + try: | ||
505 | + width = int(_os.environ['COLUMNS']) | ||
506 | + except (KeyError, ValueError): | ||
507 | + width = 80 | ||
508 | + width -= 2 | ||
509 | + | ||
510 | + self._prog = prog | ||
511 | + self._indent_increment = indent_increment | ||
512 | + self._max_help_position = max_help_position | ||
513 | + self._width = width | ||
514 | + | ||
515 | + self._current_indent = 0 | ||
516 | + self._level = 0 | ||
517 | + self._action_max_length = 0 | ||
518 | + | ||
519 | + self._root_section = self._Section(self, None) | ||
520 | + self._current_section = self._root_section | ||
521 | + | ||
522 | + self._whitespace_matcher = _re.compile(r'\s+') | ||
523 | + self._long_break_matcher = _re.compile(r'\n\n\n+') | ||
524 | + | ||
525 | + # =============================== | ||
526 | + # Section and indentation methods | ||
527 | + # =============================== | ||
528 | + def _indent(self): | ||
529 | + self._current_indent += self._indent_increment | ||
530 | + self._level += 1 | ||
531 | + | ||
532 | + def _dedent(self): | ||
533 | + self._current_indent -= self._indent_increment | ||
534 | + assert self._current_indent >= 0, 'Indent decreased below 0.' | ||
535 | + self._level -= 1 | ||
536 | + | ||
537 | + class _Section(object): | ||
538 | + | ||
539 | + def __init__(self, formatter, parent, heading=None): | ||
540 | + self.formatter = formatter | ||
541 | + self.parent = parent | ||
542 | + self.heading = heading | ||
543 | + self.items = [] | ||
544 | + | ||
545 | + def format_help(self): | ||
546 | + # format the indented section | ||
547 | + if self.parent is not None: | ||
548 | + self.formatter._indent() | ||
549 | + join = self.formatter._join_parts | ||
550 | + for func, args in self.items: | ||
551 | + func(*args) | ||
552 | + item_help = join([func(*args) for func, args in self.items]) | ||
553 | + if self.parent is not None: | ||
554 | + self.formatter._dedent() | ||
555 | + | ||
556 | + # return nothing if the section was empty | ||
557 | + if not item_help: | ||
558 | + return '' | ||
559 | + | ||
560 | + # add the heading if the section was non-empty | ||
561 | + if self.heading is not SUPPRESS and self.heading is not None: | ||
562 | + current_indent = self.formatter._current_indent | ||
563 | + heading = '%*s%s:\n' % (current_indent, '', self.heading) | ||
564 | + else: | ||
565 | + heading = '' | ||
566 | + | ||
567 | + # join the section-initial newline, the heading and the help | ||
568 | + return join(['\n', heading, item_help, '\n']) | ||
569 | + | ||
570 | + def _add_item(self, func, args): | ||
571 | + self._current_section.items.append((func, args)) | ||
572 | + | ||
573 | + # ======================== | ||
574 | + # Message building methods | ||
575 | + # ======================== | ||
576 | + def start_section(self, heading): | ||
577 | + self._indent() | ||
578 | + section = self._Section(self, self._current_section, heading) | ||
579 | + self._add_item(section.format_help, []) | ||
580 | + self._current_section = section | ||
581 | + | ||
582 | + def end_section(self): | ||
583 | + self._current_section = self._current_section.parent | ||
584 | + self._dedent() | ||
585 | + | ||
586 | + def add_text(self, text): | ||
587 | + if text is not SUPPRESS and text is not None: | ||
588 | + self._add_item(self._format_text, [text]) | ||
589 | + | ||
590 | + def add_usage(self, usage, actions, groups, prefix=None): | ||
591 | + if usage is not SUPPRESS: | ||
592 | + args = usage, actions, groups, prefix | ||
593 | + self._add_item(self._format_usage, args) | ||
594 | + | ||
595 | + def add_argument(self, action): | ||
596 | + if action.help is not SUPPRESS: | ||
597 | + | ||
598 | + # find all invocations | ||
599 | + get_invocation = self._format_action_invocation | ||
600 | + invocations = [get_invocation(action)] | ||
601 | + for subaction in self._iter_indented_subactions(action): | ||
602 | + invocations.append(get_invocation(subaction)) | ||
603 | + | ||
604 | + # update the maximum item length | ||
605 | + invocation_length = max([len(s) for s in invocations]) | ||
606 | + action_length = invocation_length + self._current_indent | ||
607 | + self._action_max_length = max(self._action_max_length, | ||
608 | + action_length) | ||
609 | + | ||
610 | + # add the item to the list | ||
611 | + self._add_item(self._format_action, [action]) | ||
612 | + | ||
613 | + def add_arguments(self, actions): | ||
614 | + for action in actions: | ||
615 | + self.add_argument(action) | ||
616 | + | ||
617 | + # ======================= | ||
618 | + # Help-formatting methods | ||
619 | + # ======================= | ||
620 | + def format_help(self): | ||
621 | + help = self._root_section.format_help() | ||
622 | + if help: | ||
623 | + help = self._long_break_matcher.sub('\n\n', help) | ||
624 | + help = help.strip('\n') + '\n' | ||
625 | + return help | ||
626 | + | ||
627 | + def _join_parts(self, part_strings): | ||
628 | + return ''.join([part | ||
629 | + for part in part_strings | ||
630 | + if part and part is not SUPPRESS]) | ||
631 | + | ||
632 | + def _format_usage(self, usage, actions, groups, prefix): | ||
633 | + if prefix is None: | ||
634 | + prefix = _('usage: ') | ||
635 | + | ||
636 | + # if usage is specified, use that | ||
637 | + if usage is not None: | ||
638 | + usage = usage % dict(prog=self._prog) | ||
639 | + | ||
640 | + # if no optionals or positionals are available, usage is just prog | ||
641 | + elif usage is None and not actions: | ||
642 | + usage = '%(prog)s' % dict(prog=self._prog) | ||
643 | + | ||
644 | + # if optionals and positionals are available, calculate usage | ||
645 | + elif usage is None: | ||
646 | + prog = '%(prog)s' % dict(prog=self._prog) | ||
647 | + | ||
648 | + # split optionals from positionals | ||
649 | + optionals = [] | ||
650 | + positionals = [] | ||
651 | + for action in actions: | ||
652 | + if action.option_strings: | ||
653 | + optionals.append(action) | ||
654 | + else: | ||
655 | + positionals.append(action) | ||
656 | + | ||
657 | + # build full usage string | ||
658 | + format = self._format_actions_usage | ||
659 | + action_usage = format(optionals + positionals, groups) | ||
660 | + usage = ' '.join([s for s in [prog, action_usage] if s]) | ||
661 | + | ||
662 | + # wrap the usage parts if it's too long | ||
663 | + text_width = self._width - self._current_indent | ||
664 | + if len(prefix) + len(usage) > text_width: | ||
665 | + | ||
666 | + # break usage into wrappable parts | ||
667 | + part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' | ||
668 | + opt_usage = format(optionals, groups) | ||
669 | + pos_usage = format(positionals, groups) | ||
670 | + opt_parts = _re.findall(part_regexp, opt_usage) | ||
671 | + pos_parts = _re.findall(part_regexp, pos_usage) | ||
672 | + assert ' '.join(opt_parts) == opt_usage | ||
673 | + assert ' '.join(pos_parts) == pos_usage | ||
674 | + | ||
675 | + # helper for wrapping lines | ||
676 | + def get_lines(parts, indent, prefix=None): | ||
677 | + lines = [] | ||
678 | + line = [] | ||
679 | + if prefix is not None: | ||
680 | + line_len = len(prefix) - 1 | ||
681 | + else: | ||
682 | + line_len = len(indent) - 1 | ||
683 | + for part in parts: | ||
684 | + if line_len + 1 + len(part) > text_width: | ||
685 | + lines.append(indent + ' '.join(line)) | ||
686 | + line = [] | ||
687 | + line_len = len(indent) - 1 | ||
688 | + line.append(part) | ||
689 | + line_len += len(part) + 1 | ||
690 | + if line: | ||
691 | + lines.append(indent + ' '.join(line)) | ||
692 | + if prefix is not None: | ||
693 | + lines[0] = lines[0][len(indent):] | ||
694 | + return lines | ||
695 | + | ||
696 | + # if prog is short, follow it with optionals or positionals | ||
697 | + if len(prefix) + len(prog) <= 0.75 * text_width: | ||
698 | + indent = ' ' * (len(prefix) + len(prog) + 1) | ||
699 | + if opt_parts: | ||
700 | + lines = get_lines([prog] + opt_parts, indent, prefix) | ||
701 | + lines.extend(get_lines(pos_parts, indent)) | ||
702 | + elif pos_parts: | ||
703 | + lines = get_lines([prog] + pos_parts, indent, prefix) | ||
704 | + else: | ||
705 | + lines = [prog] | ||
706 | + | ||
707 | + # if prog is long, put it on its own line | ||
708 | + else: | ||
709 | + indent = ' ' * len(prefix) | ||
710 | + parts = opt_parts + pos_parts | ||
711 | + lines = get_lines(parts, indent) | ||
712 | + if len(lines) > 1: | ||
713 | + lines = [] | ||
714 | + lines.extend(get_lines(opt_parts, indent)) | ||
715 | + lines.extend(get_lines(pos_parts, indent)) | ||
716 | + lines = [prog] + lines | ||
717 | + | ||
718 | + # join lines into usage | ||
719 | + usage = '\n'.join(lines) | ||
720 | + | ||
721 | + # prefix with 'usage:' | ||
722 | + return '%s%s\n\n' % (prefix, usage) | ||
723 | + | ||
724 | + def _format_actions_usage(self, actions, groups): | ||
725 | + # find group indices and identify actions in groups | ||
726 | + group_actions = set() | ||
727 | + inserts = {} | ||
728 | + for group in groups: | ||
729 | + try: | ||
730 | + start = actions.index(group._group_actions[0]) | ||
731 | + except ValueError: | ||
732 | + continue | ||
733 | + else: | ||
734 | + end = start + len(group._group_actions) | ||
735 | + if actions[start:end] == group._group_actions: | ||
736 | + for action in group._group_actions: | ||
737 | + group_actions.add(action) | ||
738 | + if not group.required: | ||
739 | + if start in inserts: | ||
740 | + inserts[start] += ' [' | ||
741 | + else: | ||
742 | + inserts[start] = '[' | ||
743 | + inserts[end] = ']' | ||
744 | + else: | ||
745 | + if start in inserts: | ||
746 | + inserts[start] += ' (' | ||
747 | + else: | ||
748 | + inserts[start] = '(' | ||
749 | + inserts[end] = ')' | ||
750 | + for i in range(start + 1, end): | ||
751 | + inserts[i] = '|' | ||
752 | + | ||
753 | + # collect all actions format strings | ||
754 | + parts = [] | ||
755 | + for i, action in enumerate(actions): | ||
756 | + | ||
757 | + # suppressed arguments are marked with None | ||
758 | + # remove | separators for suppressed arguments | ||
759 | + if action.help is SUPPRESS: | ||
760 | + parts.append(None) | ||
761 | + if inserts.get(i) == '|': | ||
762 | + inserts.pop(i) | ||
763 | + elif inserts.get(i + 1) == '|': | ||
764 | + inserts.pop(i + 1) | ||
765 | + | ||
766 | + # produce all arg strings | ||
767 | + elif not action.option_strings: | ||
768 | + part = self._format_args(action, action.dest) | ||
769 | + | ||
770 | + # if it's in a group, strip the outer [] | ||
771 | + if action in group_actions: | ||
772 | + if part[0] == '[' and part[-1] == ']': | ||
773 | + part = part[1:-1] | ||
774 | + | ||
775 | + # add the action string to the list | ||
776 | + parts.append(part) | ||
777 | + | ||
778 | + # produce the first way to invoke the option in brackets | ||
779 | + else: | ||
780 | + option_string = action.option_strings[0] | ||
781 | + | ||
782 | + # if the Optional doesn't take a value, format is: | ||
783 | + # -s or --long | ||
784 | + if action.nargs == 0: | ||
785 | + part = '%s' % option_string | ||
786 | + | ||
787 | + # if the Optional takes a value, format is: | ||
788 | + # -s ARGS or --long ARGS | ||
789 | + else: | ||
790 | + default = action.dest.upper() | ||
791 | + args_string = self._format_args(action, default) | ||
792 | + part = '%s %s' % (option_string, args_string) | ||
793 | + | ||
794 | + # make it look optional if it's not required or in a group | ||
795 | + if not action.required and action not in group_actions: | ||
796 | + part = '[%s]' % part | ||
797 | + | ||
798 | + # add the action string to the list | ||
799 | + parts.append(part) | ||
800 | + | ||
801 | + # insert things at the necessary indices | ||
802 | + for i in sorted(inserts, reverse=True): | ||
803 | + parts[i:i] = [inserts[i]] | ||
804 | + | ||
805 | + # join all the action items with spaces | ||
806 | + text = ' '.join([item for item in parts if item is not None]) | ||
807 | + | ||
808 | + # clean up separators for mutually exclusive groups | ||
809 | + open = r'[\[(]' | ||
810 | + close = r'[\])]' | ||
811 | + text = _re.sub(r'(%s) ' % open, r'\1', text) | ||
812 | + text = _re.sub(r' (%s)' % close, r'\1', text) | ||
813 | + text = _re.sub(r'%s *%s' % (open, close), r'', text) | ||
814 | + text = _re.sub(r'\(([^|]*)\)', r'\1', text) | ||
815 | + text = text.strip() | ||
816 | + | ||
817 | + # return the text | ||
818 | + return text | ||
819 | + | ||
820 | + def _format_text(self, text): | ||
821 | + if '%(prog)' in text: | ||
822 | + text = text % dict(prog=self._prog) | ||
823 | + text_width = self._width - self._current_indent | ||
824 | + indent = ' ' * self._current_indent | ||
825 | + return self._fill_text(text, text_width, indent) + '\n\n' | ||
826 | + | ||
827 | + def _format_action(self, action): | ||
828 | + # determine the required width and the entry label | ||
829 | + help_position = min(self._action_max_length + 2, | ||
830 | + self._max_help_position) | ||
831 | + help_width = self._width - help_position | ||
832 | + action_width = help_position - self._current_indent - 2 | ||
833 | + action_header = self._format_action_invocation(action) | ||
834 | + | ||
835 | + # ho nelp; start on same line and add a final newline | ||
836 | + if not action.help: | ||
837 | + tup = self._current_indent, '', action_header | ||
838 | + action_header = '%*s%s\n' % tup | ||
839 | + | ||
840 | + # short action name; start on the same line and pad two spaces | ||
841 | + elif len(action_header) <= action_width: | ||
842 | + tup = self._current_indent, '', action_width, action_header | ||
843 | + action_header = '%*s%-*s ' % tup | ||
844 | + indent_first = 0 | ||
845 | + | ||
846 | + # long action name; start on the next line | ||
847 | + else: | ||
848 | + tup = self._current_indent, '', action_header | ||
849 | + action_header = '%*s%s\n' % tup | ||
850 | + indent_first = help_position | ||
851 | + | ||
852 | + # collect the pieces of the action help | ||
853 | + parts = [action_header] | ||
854 | + | ||
855 | + # if there was help for the action, add lines of help text | ||
856 | + if action.help: | ||
857 | + help_text = self._expand_help(action) | ||
858 | + help_lines = self._split_lines(help_text, help_width) | ||
859 | + parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) | ||
860 | + for line in help_lines[1:]: | ||
861 | + parts.append('%*s%s\n' % (help_position, '', line)) | ||
862 | + | ||
863 | + # or add a newline if the description doesn't end with one | ||
864 | + elif not action_header.endswith('\n'): | ||
865 | + parts.append('\n') | ||
866 | + | ||
867 | + # if there are any sub-actions, add their help as well | ||
868 | + for subaction in self._iter_indented_subactions(action): | ||
869 | + parts.append(self._format_action(subaction)) | ||
870 | + | ||
871 | + # return a single string | ||
872 | + return self._join_parts(parts) | ||
873 | + | ||
874 | + def _format_action_invocation(self, action): | ||
875 | + if not action.option_strings: | ||
876 | + metavar, = self._metavar_formatter(action, action.dest)(1) | ||
877 | + return metavar | ||
878 | + | ||
879 | + else: | ||
880 | + parts = [] | ||
881 | + | ||
882 | + # if the Optional doesn't take a value, format is: | ||
883 | + # -s, --long | ||
884 | + if action.nargs == 0: | ||
885 | + parts.extend(action.option_strings) | ||
886 | + | ||
887 | + # if the Optional takes a value, format is: | ||
888 | + # -s ARGS, --long ARGS | ||
889 | + else: | ||
890 | + default = action.dest.upper() | ||
891 | + args_string = self._format_args(action, default) | ||
892 | + for option_string in action.option_strings: | ||
893 | + parts.append('%s %s' % (option_string, args_string)) | ||
894 | + | ||
895 | + return ', '.join(parts) | ||
896 | + | ||
897 | + def _metavar_formatter(self, action, default_metavar): | ||
898 | + if action.metavar is not None: | ||
899 | + result = action.metavar | ||
900 | + elif action.choices is not None: | ||
901 | + choice_strs = [str(choice) for choice in action.choices] | ||
902 | + result = '{%s}' % ','.join(choice_strs) | ||
903 | + else: | ||
904 | + result = default_metavar | ||
905 | + | ||
906 | + def format(tuple_size): | ||
907 | + if isinstance(result, tuple): | ||
908 | + return result | ||
909 | + else: | ||
910 | + return (result, ) * tuple_size | ||
911 | + return format | ||
912 | + | ||
913 | + def _format_args(self, action, default_metavar): | ||
914 | + get_metavar = self._metavar_formatter(action, default_metavar) | ||
915 | + if action.nargs is None: | ||
916 | + result = '%s' % get_metavar(1) | ||
917 | + elif action.nargs == OPTIONAL: | ||
918 | + result = '[%s]' % get_metavar(1) | ||
919 | + elif action.nargs == ZERO_OR_MORE: | ||
920 | + result = '[%s [%s ...]]' % get_metavar(2) | ||
921 | + elif action.nargs == ONE_OR_MORE: | ||
922 | + result = '%s [%s ...]' % get_metavar(2) | ||
923 | + elif action.nargs == REMAINDER: | ||
924 | + result = '...' | ||
925 | + elif action.nargs == PARSER: | ||
926 | + result = '%s ...' % get_metavar(1) | ||
927 | + else: | ||
928 | + formats = ['%s' for _ in range(action.nargs)] | ||
929 | + result = ' '.join(formats) % get_metavar(action.nargs) | ||
930 | + return result | ||
931 | + | ||
932 | + def _expand_help(self, action): | ||
933 | + params = dict(vars(action), prog=self._prog) | ||
934 | + for name in list(params): | ||
935 | + if params[name] is SUPPRESS: | ||
936 | + del params[name] | ||
937 | + for name in list(params): | ||
938 | + if hasattr(params[name], '__name__'): | ||
939 | + params[name] = params[name].__name__ | ||
940 | + if params.get('choices') is not None: | ||
941 | + choices_str = ', '.join([str(c) for c in params['choices']]) | ||
942 | + params['choices'] = choices_str | ||
943 | + return self._get_help_string(action) % params | ||
944 | + | ||
945 | + def _iter_indented_subactions(self, action): | ||
946 | + try: | ||
947 | + get_subactions = action._get_subactions | ||
948 | + except AttributeError: | ||
949 | + pass | ||
950 | + else: | ||
951 | + self._indent() | ||
952 | + for subaction in get_subactions(): | ||
953 | + yield subaction | ||
954 | + self._dedent() | ||
955 | + | ||
956 | + def _split_lines(self, text, width): | ||
957 | + text = self._whitespace_matcher.sub(' ', text).strip() | ||
958 | + return _textwrap.wrap(text, width) | ||
959 | + | ||
960 | + def _fill_text(self, text, width, indent): | ||
961 | + text = self._whitespace_matcher.sub(' ', text).strip() | ||
962 | + return _textwrap.fill(text, width, initial_indent=indent, | ||
963 | + subsequent_indent=indent) | ||
964 | + | ||
965 | + def _get_help_string(self, action): | ||
966 | + return action.help | ||
967 | + | ||
968 | + | ||
969 | +class RawDescriptionHelpFormatter(HelpFormatter): | ||
970 | + """Help message formatter which retains any formatting in descriptions. | ||
971 | + | ||
972 | + Only the name of this class is considered a public API. All the methods | ||
973 | + provided by the class are considered an implementation detail. | ||
974 | + """ | ||
975 | + | ||
976 | + def _fill_text(self, text, width, indent): | ||
977 | + return ''.join([indent + line for line in text.splitlines(True)]) | ||
978 | + | ||
979 | + | ||
980 | +class RawTextHelpFormatter(RawDescriptionHelpFormatter): | ||
981 | + """Help message formatter which retains formatting of all help text. | ||
982 | + | ||
983 | + Only the name of this class is considered a public API. All the methods | ||
984 | + provided by the class are considered an implementation detail. | ||
985 | + """ | ||
986 | + | ||
987 | + def _split_lines(self, text, width): | ||
988 | + return text.splitlines() | ||
989 | + | ||
990 | + | ||
991 | +class ArgumentDefaultsHelpFormatter(HelpFormatter): | ||
992 | + """Help message formatter which adds default values to argument help. | ||
993 | + | ||
994 | + Only the name of this class is considered a public API. All the methods | ||
995 | + provided by the class are considered an implementation detail. | ||
996 | + """ | ||
997 | + | ||
998 | + def _get_help_string(self, action): | ||
999 | + help = action.help | ||
1000 | + if '%(default)' not in action.help: | ||
1001 | + if action.default is not SUPPRESS: | ||
1002 | + defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] | ||
1003 | + if action.option_strings or action.nargs in defaulting_nargs: | ||
1004 | + help += ' (default: %(default)s)' | ||
1005 | + return help | ||
1006 | + | ||
1007 | + | ||
1008 | +# ===================== | ||
1009 | +# Options and Arguments | ||
1010 | +# ===================== | ||
1011 | + | ||
1012 | +def _get_action_name(argument): | ||
1013 | + if argument is None: | ||
1014 | + return None | ||
1015 | + elif argument.option_strings: | ||
1016 | + return '/'.join(argument.option_strings) | ||
1017 | + elif argument.metavar not in (None, SUPPRESS): | ||
1018 | + return argument.metavar | ||
1019 | + elif argument.dest not in (None, SUPPRESS): | ||
1020 | + return argument.dest | ||
1021 | + else: | ||
1022 | + return None | ||
1023 | + | ||
1024 | + | ||
1025 | +class ArgumentError(Exception): | ||
1026 | + """An error from creating or using an argument (optional or positional). | ||
1027 | + | ||
1028 | + The string value of this exception is the message, augmented with | ||
1029 | + information about the argument that caused it. | ||
1030 | + """ | ||
1031 | + | ||
1032 | + def __init__(self, argument, message): | ||
1033 | + self.argument_name = _get_action_name(argument) | ||
1034 | + self.message = message | ||
1035 | + | ||
1036 | + def __str__(self): | ||
1037 | + if self.argument_name is None: | ||
1038 | + format = '%(message)s' | ||
1039 | + else: | ||
1040 | + format = 'argument %(argument_name)s: %(message)s' | ||
1041 | + return format % dict(message=self.message, | ||
1042 | + argument_name=self.argument_name) | ||
1043 | + | ||
1044 | + | ||
1045 | +class ArgumentTypeError(Exception): | ||
1046 | + """An error from trying to convert a command line string to a type.""" | ||
1047 | + pass | ||
1048 | + | ||
1049 | + | ||
1050 | +# ============== | ||
1051 | +# Action classes | ||
1052 | +# ============== | ||
1053 | + | ||
1054 | +class Action(_AttributeHolder): | ||
1055 | + """Information about how to convert command line strings to Python objects. | ||
1056 | + | ||
1057 | + Action objects are used by an ArgumentParser to represent the information | ||
1058 | + needed to parse a single argument from one or more strings from the | ||
1059 | + command line. The keyword arguments to the Action constructor are also | ||
1060 | + all attributes of Action instances. | ||
1061 | + | ||
1062 | + Keyword Arguments: | ||
1063 | + | ||
1064 | + - option_strings -- A list of command-line option strings which | ||
1065 | + should be associated with this action. | ||
1066 | + | ||
1067 | + - dest -- The name of the attribute to hold the created object(s) | ||
1068 | + | ||
1069 | + - nargs -- The number of command-line arguments that should be | ||
1070 | + consumed. By default, one argument will be consumed and a single | ||
1071 | + value will be produced. Other values include: | ||
1072 | + - N (an integer) consumes N arguments (and produces a list) | ||
1073 | + - '?' consumes zero or one arguments | ||
1074 | + - '*' consumes zero or more arguments (and produces a list) | ||
1075 | + - '+' consumes one or more arguments (and produces a list) | ||
1076 | + Note that the difference between the default and nargs=1 is that | ||
1077 | + with the default, a single value will be produced, while with | ||
1078 | + nargs=1, a list containing a single value will be produced. | ||
1079 | + | ||
1080 | + - const -- The value to be produced if the option is specified and the | ||
1081 | + option uses an action that takes no values. | ||
1082 | + | ||
1083 | + - default -- The value to be produced if the option is not specified. | ||
1084 | + | ||
1085 | + - type -- The type which the command-line arguments should be converted | ||
1086 | + to, should be one of 'string', 'int', 'float', 'complex' or a | ||
1087 | + callable object that accepts a single string argument. If None, | ||
1088 | + 'string' is assumed. | ||
1089 | + | ||
1090 | + - choices -- A container of values that should be allowed. If not None, | ||
1091 | + after a command-line argument has been converted to the appropriate | ||
1092 | + type, an exception will be raised if it is not a member of this | ||
1093 | + collection. | ||
1094 | + | ||
1095 | + - required -- True if the action must always be specified at the | ||
1096 | + command line. This is only meaningful for optional command-line | ||
1097 | + arguments. | ||
1098 | + | ||
1099 | + - help -- The help string describing the argument. | ||
1100 | + | ||
1101 | + - metavar -- The name to be used for the option's argument with the | ||
1102 | + help string. If None, the 'dest' value will be used as the name. | ||
1103 | + """ | ||
1104 | + | ||
1105 | + def __init__(self, | ||
1106 | + option_strings, | ||
1107 | + dest, | ||
1108 | + nargs=None, | ||
1109 | + const=None, | ||
1110 | + default=None, | ||
1111 | + type=None, | ||
1112 | + choices=None, | ||
1113 | + required=False, | ||
1114 | + help=None, | ||
1115 | + metavar=None): | ||
1116 | + self.option_strings = option_strings | ||
1117 | + self.dest = dest | ||
1118 | + self.nargs = nargs | ||
1119 | + self.const = const | ||
1120 | + self.default = default | ||
1121 | + self.type = type | ||
1122 | + self.choices = choices | ||
1123 | + self.required = required | ||
1124 | + self.help = help | ||
1125 | + self.metavar = metavar | ||
1126 | + | ||
1127 | + def _get_kwargs(self): | ||
1128 | + names = [ | ||
1129 | + 'option_strings', | ||
1130 | + 'dest', | ||
1131 | + 'nargs', | ||
1132 | + 'const', | ||
1133 | + 'default', | ||
1134 | + 'type', | ||
1135 | + 'choices', | ||
1136 | + 'help', | ||
1137 | + 'metavar', | ||
720 | + ] | 1138 | + ] |
721 | + } | 1139 | + return [(name, getattr(self, name)) for name in names] |
722 | + } | 1140 | + |
723 | + | 1141 | + def __call__(self, parser, namespace, values, option_string=None): |
724 | +Example: New Incremental Backup Anchor Point | 1142 | + raise NotImplementedError(_('.__call__() not defined')) |
725 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 1143 | + |
726 | + | 1144 | + |
727 | +Maybe we just want to create a new full backup with an existing bitmap | 1145 | +class _StoreAction(Action): |
728 | +and want to reset the bitmap to track the new chain. | 1146 | + |
729 | + | 1147 | + def __init__(self, |
730 | +.. code:: json | 1148 | + option_strings, |
731 | + | 1149 | + dest, |
732 | + { "execute": "transaction", | 1150 | + nargs=None, |
733 | + "arguments": { | 1151 | + const=None, |
734 | + "actions": [ | 1152 | + default=None, |
735 | + {"type": "block-dirty-bitmap-clear", | 1153 | + type=None, |
736 | + "data": {"node": "drive0", "name": "bitmap0"} }, | 1154 | + choices=None, |
737 | + {"type": "drive-backup", | 1155 | + required=False, |
738 | + "data": {"device": "drive0", "target": "/path/to/new_full_backup.img", | 1156 | + help=None, |
739 | + "sync": "full", "format": "qcow2"} } | 1157 | + metavar=None): |
1158 | + if nargs == 0: | ||
1159 | + raise ValueError('nargs for store actions must be > 0; if you ' | ||
1160 | + 'have nothing to store, actions such as store ' | ||
1161 | + 'true or store const may be more appropriate') | ||
1162 | + if const is not None and nargs != OPTIONAL: | ||
1163 | + raise ValueError('nargs must be %r to supply const' % OPTIONAL) | ||
1164 | + super(_StoreAction, self).__init__( | ||
1165 | + option_strings=option_strings, | ||
1166 | + dest=dest, | ||
1167 | + nargs=nargs, | ||
1168 | + const=const, | ||
1169 | + default=default, | ||
1170 | + type=type, | ||
1171 | + choices=choices, | ||
1172 | + required=required, | ||
1173 | + help=help, | ||
1174 | + metavar=metavar) | ||
1175 | + | ||
1176 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1177 | + setattr(namespace, self.dest, values) | ||
1178 | + | ||
1179 | + | ||
1180 | +class _StoreConstAction(Action): | ||
1181 | + | ||
1182 | + def __init__(self, | ||
1183 | + option_strings, | ||
1184 | + dest, | ||
1185 | + const, | ||
1186 | + default=None, | ||
1187 | + required=False, | ||
1188 | + help=None, | ||
1189 | + metavar=None): | ||
1190 | + super(_StoreConstAction, self).__init__( | ||
1191 | + option_strings=option_strings, | ||
1192 | + dest=dest, | ||
1193 | + nargs=0, | ||
1194 | + const=const, | ||
1195 | + default=default, | ||
1196 | + required=required, | ||
1197 | + help=help) | ||
1198 | + | ||
1199 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1200 | + setattr(namespace, self.dest, self.const) | ||
1201 | + | ||
1202 | + | ||
1203 | +class _StoreTrueAction(_StoreConstAction): | ||
1204 | + | ||
1205 | + def __init__(self, | ||
1206 | + option_strings, | ||
1207 | + dest, | ||
1208 | + default=False, | ||
1209 | + required=False, | ||
1210 | + help=None): | ||
1211 | + super(_StoreTrueAction, self).__init__( | ||
1212 | + option_strings=option_strings, | ||
1213 | + dest=dest, | ||
1214 | + const=True, | ||
1215 | + default=default, | ||
1216 | + required=required, | ||
1217 | + help=help) | ||
1218 | + | ||
1219 | + | ||
1220 | +class _StoreFalseAction(_StoreConstAction): | ||
1221 | + | ||
1222 | + def __init__(self, | ||
1223 | + option_strings, | ||
1224 | + dest, | ||
1225 | + default=True, | ||
1226 | + required=False, | ||
1227 | + help=None): | ||
1228 | + super(_StoreFalseAction, self).__init__( | ||
1229 | + option_strings=option_strings, | ||
1230 | + dest=dest, | ||
1231 | + const=False, | ||
1232 | + default=default, | ||
1233 | + required=required, | ||
1234 | + help=help) | ||
1235 | + | ||
1236 | + | ||
1237 | +class _AppendAction(Action): | ||
1238 | + | ||
1239 | + def __init__(self, | ||
1240 | + option_strings, | ||
1241 | + dest, | ||
1242 | + nargs=None, | ||
1243 | + const=None, | ||
1244 | + default=None, | ||
1245 | + type=None, | ||
1246 | + choices=None, | ||
1247 | + required=False, | ||
1248 | + help=None, | ||
1249 | + metavar=None): | ||
1250 | + if nargs == 0: | ||
1251 | + raise ValueError('nargs for append actions must be > 0; if arg ' | ||
1252 | + 'strings are not supplying the value to append, ' | ||
1253 | + 'the append const action may be more appropriate') | ||
1254 | + if const is not None and nargs != OPTIONAL: | ||
1255 | + raise ValueError('nargs must be %r to supply const' % OPTIONAL) | ||
1256 | + super(_AppendAction, self).__init__( | ||
1257 | + option_strings=option_strings, | ||
1258 | + dest=dest, | ||
1259 | + nargs=nargs, | ||
1260 | + const=const, | ||
1261 | + default=default, | ||
1262 | + type=type, | ||
1263 | + choices=choices, | ||
1264 | + required=required, | ||
1265 | + help=help, | ||
1266 | + metavar=metavar) | ||
1267 | + | ||
1268 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1269 | + items = _copy.copy(_ensure_value(namespace, self.dest, [])) | ||
1270 | + items.append(values) | ||
1271 | + setattr(namespace, self.dest, items) | ||
1272 | + | ||
1273 | + | ||
1274 | +class _AppendConstAction(Action): | ||
1275 | + | ||
1276 | + def __init__(self, | ||
1277 | + option_strings, | ||
1278 | + dest, | ||
1279 | + const, | ||
1280 | + default=None, | ||
1281 | + required=False, | ||
1282 | + help=None, | ||
1283 | + metavar=None): | ||
1284 | + super(_AppendConstAction, self).__init__( | ||
1285 | + option_strings=option_strings, | ||
1286 | + dest=dest, | ||
1287 | + nargs=0, | ||
1288 | + const=const, | ||
1289 | + default=default, | ||
1290 | + required=required, | ||
1291 | + help=help, | ||
1292 | + metavar=metavar) | ||
1293 | + | ||
1294 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1295 | + items = _copy.copy(_ensure_value(namespace, self.dest, [])) | ||
1296 | + items.append(self.const) | ||
1297 | + setattr(namespace, self.dest, items) | ||
1298 | + | ||
1299 | + | ||
1300 | +class _CountAction(Action): | ||
1301 | + | ||
1302 | + def __init__(self, | ||
1303 | + option_strings, | ||
1304 | + dest, | ||
1305 | + default=None, | ||
1306 | + required=False, | ||
1307 | + help=None): | ||
1308 | + super(_CountAction, self).__init__( | ||
1309 | + option_strings=option_strings, | ||
1310 | + dest=dest, | ||
1311 | + nargs=0, | ||
1312 | + default=default, | ||
1313 | + required=required, | ||
1314 | + help=help) | ||
1315 | + | ||
1316 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1317 | + new_count = _ensure_value(namespace, self.dest, 0) + 1 | ||
1318 | + setattr(namespace, self.dest, new_count) | ||
1319 | + | ||
1320 | + | ||
1321 | +class _HelpAction(Action): | ||
1322 | + | ||
1323 | + def __init__(self, | ||
1324 | + option_strings, | ||
1325 | + dest=SUPPRESS, | ||
1326 | + default=SUPPRESS, | ||
1327 | + help=None): | ||
1328 | + super(_HelpAction, self).__init__( | ||
1329 | + option_strings=option_strings, | ||
1330 | + dest=dest, | ||
1331 | + default=default, | ||
1332 | + nargs=0, | ||
1333 | + help=help) | ||
1334 | + | ||
1335 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1336 | + parser.print_help() | ||
1337 | + parser.exit() | ||
1338 | + | ||
1339 | + | ||
1340 | +class _VersionAction(Action): | ||
1341 | + | ||
1342 | + def __init__(self, | ||
1343 | + option_strings, | ||
1344 | + version=None, | ||
1345 | + dest=SUPPRESS, | ||
1346 | + default=SUPPRESS, | ||
1347 | + help="show program's version number and exit"): | ||
1348 | + super(_VersionAction, self).__init__( | ||
1349 | + option_strings=option_strings, | ||
1350 | + dest=dest, | ||
1351 | + default=default, | ||
1352 | + nargs=0, | ||
1353 | + help=help) | ||
1354 | + self.version = version | ||
1355 | + | ||
1356 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1357 | + version = self.version | ||
1358 | + if version is None: | ||
1359 | + version = parser.version | ||
1360 | + formatter = parser._get_formatter() | ||
1361 | + formatter.add_text(version) | ||
1362 | + parser.exit(message=formatter.format_help()) | ||
1363 | + | ||
1364 | + | ||
1365 | +class _SubParsersAction(Action): | ||
1366 | + | ||
1367 | + class _ChoicesPseudoAction(Action): | ||
1368 | + | ||
1369 | + def __init__(self, name, aliases, help): | ||
1370 | + metavar = dest = name | ||
1371 | + if aliases: | ||
1372 | + metavar += ' (%s)' % ', '.join(aliases) | ||
1373 | + sup = super(_SubParsersAction._ChoicesPseudoAction, self) | ||
1374 | + sup.__init__(option_strings=[], dest=dest, help=help, | ||
1375 | + metavar=metavar) | ||
1376 | + | ||
1377 | + def __init__(self, | ||
1378 | + option_strings, | ||
1379 | + prog, | ||
1380 | + parser_class, | ||
1381 | + dest=SUPPRESS, | ||
1382 | + help=None, | ||
1383 | + metavar=None): | ||
1384 | + | ||
1385 | + self._prog_prefix = prog | ||
1386 | + self._parser_class = parser_class | ||
1387 | + self._name_parser_map = {} | ||
1388 | + self._choices_actions = [] | ||
1389 | + | ||
1390 | + super(_SubParsersAction, self).__init__( | ||
1391 | + option_strings=option_strings, | ||
1392 | + dest=dest, | ||
1393 | + nargs=PARSER, | ||
1394 | + choices=self._name_parser_map, | ||
1395 | + help=help, | ||
1396 | + metavar=metavar) | ||
1397 | + | ||
1398 | + def add_parser(self, name, **kwargs): | ||
1399 | + # set prog from the existing prefix | ||
1400 | + if kwargs.get('prog') is None: | ||
1401 | + kwargs['prog'] = '%s %s' % (self._prog_prefix, name) | ||
1402 | + | ||
1403 | + aliases = kwargs.pop('aliases', ()) | ||
1404 | + | ||
1405 | + # create a pseudo-action to hold the choice help | ||
1406 | + if 'help' in kwargs: | ||
1407 | + help = kwargs.pop('help') | ||
1408 | + choice_action = self._ChoicesPseudoAction(name, aliases, help) | ||
1409 | + self._choices_actions.append(choice_action) | ||
1410 | + | ||
1411 | + # create the parser and add it to the map | ||
1412 | + parser = self._parser_class(**kwargs) | ||
1413 | + self._name_parser_map[name] = parser | ||
1414 | + | ||
1415 | + # make parser available under aliases also | ||
1416 | + for alias in aliases: | ||
1417 | + self._name_parser_map[alias] = parser | ||
1418 | + | ||
1419 | + return parser | ||
1420 | + | ||
1421 | + def _get_subactions(self): | ||
1422 | + return self._choices_actions | ||
1423 | + | ||
1424 | + def __call__(self, parser, namespace, values, option_string=None): | ||
1425 | + parser_name = values[0] | ||
1426 | + arg_strings = values[1:] | ||
1427 | + | ||
1428 | + # set the parser name if requested | ||
1429 | + if self.dest is not SUPPRESS: | ||
1430 | + setattr(namespace, self.dest, parser_name) | ||
1431 | + | ||
1432 | + # select the parser | ||
1433 | + try: | ||
1434 | + parser = self._name_parser_map[parser_name] | ||
1435 | + except KeyError: | ||
1436 | + tup = parser_name, ', '.join(self._name_parser_map) | ||
1437 | + msg = _('unknown parser %r (choices: %s)' % tup) | ||
1438 | + raise ArgumentError(self, msg) | ||
1439 | + | ||
1440 | + # parse all the remaining options into the namespace | ||
1441 | + # store any unrecognized options on the object, so that the top | ||
1442 | + # level parser can decide what to do with them | ||
1443 | + namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) | ||
1444 | + if arg_strings: | ||
1445 | + vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) | ||
1446 | + getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) | ||
1447 | + | ||
1448 | + | ||
1449 | +# ============== | ||
1450 | +# Type classes | ||
1451 | +# ============== | ||
1452 | + | ||
1453 | +class FileType(object): | ||
1454 | + """Factory for creating file object types | ||
1455 | + | ||
1456 | + Instances of FileType are typically passed as type= arguments to the | ||
1457 | + ArgumentParser add_argument() method. | ||
1458 | + | ||
1459 | + Keyword Arguments: | ||
1460 | + - mode -- A string indicating how the file is to be opened. Accepts the | ||
1461 | + same values as the builtin open() function. | ||
1462 | + - bufsize -- The file's desired buffer size. Accepts the same values as | ||
1463 | + the builtin open() function. | ||
1464 | + """ | ||
1465 | + | ||
1466 | + def __init__(self, mode='r', bufsize=None): | ||
1467 | + self._mode = mode | ||
1468 | + self._bufsize = bufsize | ||
1469 | + | ||
1470 | + def __call__(self, string): | ||
1471 | + # the special argument "-" means sys.std{in,out} | ||
1472 | + if string == '-': | ||
1473 | + if 'r' in self._mode: | ||
1474 | + return _sys.stdin | ||
1475 | + elif 'w' in self._mode: | ||
1476 | + return _sys.stdout | ||
1477 | + else: | ||
1478 | + msg = _('argument "-" with mode %r' % self._mode) | ||
1479 | + raise ValueError(msg) | ||
1480 | + | ||
1481 | + try: | ||
1482 | + # all other arguments are used as file names | ||
1483 | + if self._bufsize: | ||
1484 | + return open(string, self._mode, self._bufsize) | ||
1485 | + else: | ||
1486 | + return open(string, self._mode) | ||
1487 | + except IOError: | ||
1488 | + err = _sys.exc_info()[1] | ||
1489 | + message = _("can't open '%s': %s") | ||
1490 | + raise ArgumentTypeError(message % (string, err)) | ||
1491 | + | ||
1492 | + def __repr__(self): | ||
1493 | + args = [self._mode, self._bufsize] | ||
1494 | + args_str = ', '.join([repr(arg) for arg in args if arg is not None]) | ||
1495 | + return '%s(%s)' % (type(self).__name__, args_str) | ||
1496 | + | ||
1497 | +# =========================== | ||
1498 | +# Optional and Positional Parsing | ||
1499 | +# =========================== | ||
1500 | + | ||
1501 | +class Namespace(_AttributeHolder): | ||
1502 | + """Simple object for storing attributes. | ||
1503 | + | ||
1504 | + Implements equality by attribute names and values, and provides a simple | ||
1505 | + string representation. | ||
1506 | + """ | ||
1507 | + | ||
1508 | + def __init__(self, **kwargs): | ||
1509 | + for name in kwargs: | ||
1510 | + setattr(self, name, kwargs[name]) | ||
1511 | + | ||
1512 | + __hash__ = None | ||
1513 | + | ||
1514 | + def __eq__(self, other): | ||
1515 | + return vars(self) == vars(other) | ||
1516 | + | ||
1517 | + def __ne__(self, other): | ||
1518 | + return not (self == other) | ||
1519 | + | ||
1520 | + def __contains__(self, key): | ||
1521 | + return key in self.__dict__ | ||
1522 | + | ||
1523 | + | ||
1524 | +class _ActionsContainer(object): | ||
1525 | + | ||
1526 | + def __init__(self, | ||
1527 | + description, | ||
1528 | + prefix_chars, | ||
1529 | + argument_default, | ||
1530 | + conflict_handler): | ||
1531 | + super(_ActionsContainer, self).__init__() | ||
1532 | + | ||
1533 | + self.description = description | ||
1534 | + self.argument_default = argument_default | ||
1535 | + self.prefix_chars = prefix_chars | ||
1536 | + self.conflict_handler = conflict_handler | ||
1537 | + | ||
1538 | + # set up registries | ||
1539 | + self._registries = {} | ||
1540 | + | ||
1541 | + # register actions | ||
1542 | + self.register('action', None, _StoreAction) | ||
1543 | + self.register('action', 'store', _StoreAction) | ||
1544 | + self.register('action', 'store_const', _StoreConstAction) | ||
1545 | + self.register('action', 'store_true', _StoreTrueAction) | ||
1546 | + self.register('action', 'store_false', _StoreFalseAction) | ||
1547 | + self.register('action', 'append', _AppendAction) | ||
1548 | + self.register('action', 'append_const', _AppendConstAction) | ||
1549 | + self.register('action', 'count', _CountAction) | ||
1550 | + self.register('action', 'help', _HelpAction) | ||
1551 | + self.register('action', 'version', _VersionAction) | ||
1552 | + self.register('action', 'parsers', _SubParsersAction) | ||
1553 | + | ||
1554 | + # raise an exception if the conflict handler is invalid | ||
1555 | + self._get_handler() | ||
1556 | + | ||
1557 | + # action storage | ||
1558 | + self._actions = [] | ||
1559 | + self._option_string_actions = {} | ||
1560 | + | ||
1561 | + # groups | ||
1562 | + self._action_groups = [] | ||
1563 | + self._mutually_exclusive_groups = [] | ||
1564 | + | ||
1565 | + # defaults storage | ||
1566 | + self._defaults = {} | ||
1567 | + | ||
1568 | + # determines whether an "option" looks like a negative number | ||
1569 | + self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') | ||
1570 | + | ||
1571 | + # whether or not there are any optionals that look like negative | ||
1572 | + # numbers -- uses a list so it can be shared and edited | ||
1573 | + self._has_negative_number_optionals = [] | ||
1574 | + | ||
1575 | + # ==================== | ||
1576 | + # Registration methods | ||
1577 | + # ==================== | ||
1578 | + def register(self, registry_name, value, object): | ||
1579 | + registry = self._registries.setdefault(registry_name, {}) | ||
1580 | + registry[value] = object | ||
1581 | + | ||
1582 | + def _registry_get(self, registry_name, value, default=None): | ||
1583 | + return self._registries[registry_name].get(value, default) | ||
1584 | + | ||
1585 | + # ================================== | ||
1586 | + # Namespace default accessor methods | ||
1587 | + # ================================== | ||
1588 | + def set_defaults(self, **kwargs): | ||
1589 | + self._defaults.update(kwargs) | ||
1590 | + | ||
1591 | + # if these defaults match any existing arguments, replace | ||
1592 | + # the previous default on the object with the new one | ||
1593 | + for action in self._actions: | ||
1594 | + if action.dest in kwargs: | ||
1595 | + action.default = kwargs[action.dest] | ||
1596 | + | ||
1597 | + def get_default(self, dest): | ||
1598 | + for action in self._actions: | ||
1599 | + if action.dest == dest and action.default is not None: | ||
1600 | + return action.default | ||
1601 | + return self._defaults.get(dest, None) | ||
1602 | + | ||
1603 | + | ||
1604 | + # ======================= | ||
1605 | + # Adding argument actions | ||
1606 | + # ======================= | ||
1607 | + def add_argument(self, *args, **kwargs): | ||
1608 | + """ | ||
1609 | + add_argument(dest, ..., name=value, ...) | ||
1610 | + add_argument(option_string, option_string, ..., name=value, ...) | ||
1611 | + """ | ||
1612 | + | ||
1613 | + # if no positional args are supplied or only one is supplied and | ||
1614 | + # it doesn't look like an option string, parse a positional | ||
1615 | + # argument | ||
1616 | + chars = self.prefix_chars | ||
1617 | + if not args or len(args) == 1 and args[0][0] not in chars: | ||
1618 | + if args and 'dest' in kwargs: | ||
1619 | + raise ValueError('dest supplied twice for positional argument') | ||
1620 | + kwargs = self._get_positional_kwargs(*args, **kwargs) | ||
1621 | + | ||
1622 | + # otherwise, we're adding an optional argument | ||
1623 | + else: | ||
1624 | + kwargs = self._get_optional_kwargs(*args, **kwargs) | ||
1625 | + | ||
1626 | + # if no default was supplied, use the parser-level default | ||
1627 | + if 'default' not in kwargs: | ||
1628 | + dest = kwargs['dest'] | ||
1629 | + if dest in self._defaults: | ||
1630 | + kwargs['default'] = self._defaults[dest] | ||
1631 | + elif self.argument_default is not None: | ||
1632 | + kwargs['default'] = self.argument_default | ||
1633 | + | ||
1634 | + # create the action object, and add it to the parser | ||
1635 | + action_class = self._pop_action_class(kwargs) | ||
1636 | + if not _callable(action_class): | ||
1637 | + raise ValueError('unknown action "%s"' % action_class) | ||
1638 | + action = action_class(**kwargs) | ||
1639 | + | ||
1640 | + # raise an error if the action type is not callable | ||
1641 | + type_func = self._registry_get('type', action.type, action.type) | ||
1642 | + if not _callable(type_func): | ||
1643 | + raise ValueError('%r is not callable' % type_func) | ||
1644 | + | ||
1645 | + return self._add_action(action) | ||
1646 | + | ||
1647 | + def add_argument_group(self, *args, **kwargs): | ||
1648 | + group = _ArgumentGroup(self, *args, **kwargs) | ||
1649 | + self._action_groups.append(group) | ||
1650 | + return group | ||
1651 | + | ||
1652 | + def add_mutually_exclusive_group(self, **kwargs): | ||
1653 | + group = _MutuallyExclusiveGroup(self, **kwargs) | ||
1654 | + self._mutually_exclusive_groups.append(group) | ||
1655 | + return group | ||
1656 | + | ||
1657 | + def _add_action(self, action): | ||
1658 | + # resolve any conflicts | ||
1659 | + self._check_conflict(action) | ||
1660 | + | ||
1661 | + # add to actions list | ||
1662 | + self._actions.append(action) | ||
1663 | + action.container = self | ||
1664 | + | ||
1665 | + # index the action by any option strings it has | ||
1666 | + for option_string in action.option_strings: | ||
1667 | + self._option_string_actions[option_string] = action | ||
1668 | + | ||
1669 | + # set the flag if any option strings look like negative numbers | ||
1670 | + for option_string in action.option_strings: | ||
1671 | + if self._negative_number_matcher.match(option_string): | ||
1672 | + if not self._has_negative_number_optionals: | ||
1673 | + self._has_negative_number_optionals.append(True) | ||
1674 | + | ||
1675 | + # return the created action | ||
1676 | + return action | ||
1677 | + | ||
1678 | + def _remove_action(self, action): | ||
1679 | + self._actions.remove(action) | ||
1680 | + | ||
1681 | + def _add_container_actions(self, container): | ||
1682 | + # collect groups by titles | ||
1683 | + title_group_map = {} | ||
1684 | + for group in self._action_groups: | ||
1685 | + if group.title in title_group_map: | ||
1686 | + msg = _('cannot merge actions - two groups are named %r') | ||
1687 | + raise ValueError(msg % (group.title)) | ||
1688 | + title_group_map[group.title] = group | ||
1689 | + | ||
1690 | + # map each action to its group | ||
1691 | + group_map = {} | ||
1692 | + for group in container._action_groups: | ||
1693 | + | ||
1694 | + # if a group with the title exists, use that, otherwise | ||
1695 | + # create a new group matching the container's group | ||
1696 | + if group.title not in title_group_map: | ||
1697 | + title_group_map[group.title] = self.add_argument_group( | ||
1698 | + title=group.title, | ||
1699 | + description=group.description, | ||
1700 | + conflict_handler=group.conflict_handler) | ||
1701 | + | ||
1702 | + # map the actions to their new group | ||
1703 | + for action in group._group_actions: | ||
1704 | + group_map[action] = title_group_map[group.title] | ||
1705 | + | ||
1706 | + # add container's mutually exclusive groups | ||
1707 | + # NOTE: if add_mutually_exclusive_group ever gains title= and | ||
1708 | + # description= then this code will need to be expanded as above | ||
1709 | + for group in container._mutually_exclusive_groups: | ||
1710 | + mutex_group = self.add_mutually_exclusive_group( | ||
1711 | + required=group.required) | ||
1712 | + | ||
1713 | + # map the actions to their new mutex group | ||
1714 | + for action in group._group_actions: | ||
1715 | + group_map[action] = mutex_group | ||
1716 | + | ||
1717 | + # add all actions to this container or their group | ||
1718 | + for action in container._actions: | ||
1719 | + group_map.get(action, self)._add_action(action) | ||
1720 | + | ||
1721 | + def _get_positional_kwargs(self, dest, **kwargs): | ||
1722 | + # make sure required is not specified | ||
1723 | + if 'required' in kwargs: | ||
1724 | + msg = _("'required' is an invalid argument for positionals") | ||
1725 | + raise TypeError(msg) | ||
1726 | + | ||
1727 | + # mark positional arguments as required if at least one is | ||
1728 | + # always required | ||
1729 | + if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: | ||
1730 | + kwargs['required'] = True | ||
1731 | + if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: | ||
1732 | + kwargs['required'] = True | ||
1733 | + | ||
1734 | + # return the keyword arguments with no option strings | ||
1735 | + return dict(kwargs, dest=dest, option_strings=[]) | ||
1736 | + | ||
1737 | + def _get_optional_kwargs(self, *args, **kwargs): | ||
1738 | + # determine short and long option strings | ||
1739 | + option_strings = [] | ||
1740 | + long_option_strings = [] | ||
1741 | + for option_string in args: | ||
1742 | + # error on strings that don't start with an appropriate prefix | ||
1743 | + if not option_string[0] in self.prefix_chars: | ||
1744 | + msg = _('invalid option string %r: ' | ||
1745 | + 'must start with a character %r') | ||
1746 | + tup = option_string, self.prefix_chars | ||
1747 | + raise ValueError(msg % tup) | ||
1748 | + | ||
1749 | + # strings starting with two prefix characters are long options | ||
1750 | + option_strings.append(option_string) | ||
1751 | + if option_string[0] in self.prefix_chars: | ||
1752 | + if len(option_string) > 1: | ||
1753 | + if option_string[1] in self.prefix_chars: | ||
1754 | + long_option_strings.append(option_string) | ||
1755 | + | ||
1756 | + # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' | ||
1757 | + dest = kwargs.pop('dest', None) | ||
1758 | + if dest is None: | ||
1759 | + if long_option_strings: | ||
1760 | + dest_option_string = long_option_strings[0] | ||
1761 | + else: | ||
1762 | + dest_option_string = option_strings[0] | ||
1763 | + dest = dest_option_string.lstrip(self.prefix_chars) | ||
1764 | + if not dest: | ||
1765 | + msg = _('dest= is required for options like %r') | ||
1766 | + raise ValueError(msg % option_string) | ||
1767 | + dest = dest.replace('-', '_') | ||
1768 | + | ||
1769 | + # return the updated keyword arguments | ||
1770 | + return dict(kwargs, dest=dest, option_strings=option_strings) | ||
1771 | + | ||
1772 | + def _pop_action_class(self, kwargs, default=None): | ||
1773 | + action = kwargs.pop('action', default) | ||
1774 | + return self._registry_get('action', action, action) | ||
1775 | + | ||
1776 | + def _get_handler(self): | ||
1777 | + # determine function from conflict handler string | ||
1778 | + handler_func_name = '_handle_conflict_%s' % self.conflict_handler | ||
1779 | + try: | ||
1780 | + return getattr(self, handler_func_name) | ||
1781 | + except AttributeError: | ||
1782 | + msg = _('invalid conflict_resolution value: %r') | ||
1783 | + raise ValueError(msg % self.conflict_handler) | ||
1784 | + | ||
1785 | + def _check_conflict(self, action): | ||
1786 | + | ||
1787 | + # find all options that conflict with this option | ||
1788 | + confl_optionals = [] | ||
1789 | + for option_string in action.option_strings: | ||
1790 | + if option_string in self._option_string_actions: | ||
1791 | + confl_optional = self._option_string_actions[option_string] | ||
1792 | + confl_optionals.append((option_string, confl_optional)) | ||
1793 | + | ||
1794 | + # resolve any conflicts | ||
1795 | + if confl_optionals: | ||
1796 | + conflict_handler = self._get_handler() | ||
1797 | + conflict_handler(action, confl_optionals) | ||
1798 | + | ||
1799 | + def _handle_conflict_error(self, action, conflicting_actions): | ||
1800 | + message = _('conflicting option string(s): %s') | ||
1801 | + conflict_string = ', '.join([option_string | ||
1802 | + for option_string, action | ||
1803 | + in conflicting_actions]) | ||
1804 | + raise ArgumentError(action, message % conflict_string) | ||
1805 | + | ||
1806 | + def _handle_conflict_resolve(self, action, conflicting_actions): | ||
1807 | + | ||
1808 | + # remove all conflicting options | ||
1809 | + for option_string, action in conflicting_actions: | ||
1810 | + | ||
1811 | + # remove the conflicting option | ||
1812 | + action.option_strings.remove(option_string) | ||
1813 | + self._option_string_actions.pop(option_string, None) | ||
1814 | + | ||
1815 | + # if the option now has no option string, remove it from the | ||
1816 | + # container holding it | ||
1817 | + if not action.option_strings: | ||
1818 | + action.container._remove_action(action) | ||
1819 | + | ||
1820 | + | ||
1821 | +class _ArgumentGroup(_ActionsContainer): | ||
1822 | + | ||
1823 | + def __init__(self, container, title=None, description=None, **kwargs): | ||
1824 | + # add any missing keyword arguments by checking the container | ||
1825 | + update = kwargs.setdefault | ||
1826 | + update('conflict_handler', container.conflict_handler) | ||
1827 | + update('prefix_chars', container.prefix_chars) | ||
1828 | + update('argument_default', container.argument_default) | ||
1829 | + super_init = super(_ArgumentGroup, self).__init__ | ||
1830 | + super_init(description=description, **kwargs) | ||
1831 | + | ||
1832 | + # group attributes | ||
1833 | + self.title = title | ||
1834 | + self._group_actions = [] | ||
1835 | + | ||
1836 | + # share most attributes with the container | ||
1837 | + self._registries = container._registries | ||
1838 | + self._actions = container._actions | ||
1839 | + self._option_string_actions = container._option_string_actions | ||
1840 | + self._defaults = container._defaults | ||
1841 | + self._has_negative_number_optionals = \ | ||
1842 | + container._has_negative_number_optionals | ||
1843 | + | ||
1844 | + def _add_action(self, action): | ||
1845 | + action = super(_ArgumentGroup, self)._add_action(action) | ||
1846 | + self._group_actions.append(action) | ||
1847 | + return action | ||
1848 | + | ||
1849 | + def _remove_action(self, action): | ||
1850 | + super(_ArgumentGroup, self)._remove_action(action) | ||
1851 | + self._group_actions.remove(action) | ||
1852 | + | ||
1853 | + | ||
1854 | +class _MutuallyExclusiveGroup(_ArgumentGroup): | ||
1855 | + | ||
1856 | + def __init__(self, container, required=False): | ||
1857 | + super(_MutuallyExclusiveGroup, self).__init__(container) | ||
1858 | + self.required = required | ||
1859 | + self._container = container | ||
1860 | + | ||
1861 | + def _add_action(self, action): | ||
1862 | + if action.required: | ||
1863 | + msg = _('mutually exclusive arguments must be optional') | ||
1864 | + raise ValueError(msg) | ||
1865 | + action = self._container._add_action(action) | ||
1866 | + self._group_actions.append(action) | ||
1867 | + return action | ||
1868 | + | ||
1869 | + def _remove_action(self, action): | ||
1870 | + self._container._remove_action(action) | ||
1871 | + self._group_actions.remove(action) | ||
1872 | + | ||
1873 | + | ||
1874 | +class ArgumentParser(_AttributeHolder, _ActionsContainer): | ||
1875 | + """Object for parsing command line strings into Python objects. | ||
1876 | + | ||
1877 | + Keyword Arguments: | ||
1878 | + - prog -- The name of the program (default: sys.argv[0]) | ||
1879 | + - usage -- A usage message (default: auto-generated from arguments) | ||
1880 | + - description -- A description of what the program does | ||
1881 | + - epilog -- Text following the argument descriptions | ||
1882 | + - parents -- Parsers whose arguments should be copied into this one | ||
1883 | + - formatter_class -- HelpFormatter class for printing help messages | ||
1884 | + - prefix_chars -- Characters that prefix optional arguments | ||
1885 | + - fromfile_prefix_chars -- Characters that prefix files containing | ||
1886 | + additional arguments | ||
1887 | + - argument_default -- The default value for all arguments | ||
1888 | + - conflict_handler -- String indicating how to handle conflicts | ||
1889 | + - add_help -- Add a -h/-help option | ||
1890 | + """ | ||
1891 | + | ||
1892 | + def __init__(self, | ||
1893 | + prog=None, | ||
1894 | + usage=None, | ||
1895 | + description=None, | ||
1896 | + epilog=None, | ||
1897 | + version=None, | ||
1898 | + parents=[], | ||
1899 | + formatter_class=HelpFormatter, | ||
1900 | + prefix_chars='-', | ||
1901 | + fromfile_prefix_chars=None, | ||
1902 | + argument_default=None, | ||
1903 | + conflict_handler='error', | ||
1904 | + add_help=True): | ||
1905 | + | ||
1906 | + if version is not None: | ||
1907 | + import warnings | ||
1908 | + warnings.warn( | ||
1909 | + """The "version" argument to ArgumentParser is deprecated. """ | ||
1910 | + """Please use """ | ||
1911 | + """"add_argument(..., action='version', version="N", ...)" """ | ||
1912 | + """instead""", DeprecationWarning) | ||
1913 | + | ||
1914 | + superinit = super(ArgumentParser, self).__init__ | ||
1915 | + superinit(description=description, | ||
1916 | + prefix_chars=prefix_chars, | ||
1917 | + argument_default=argument_default, | ||
1918 | + conflict_handler=conflict_handler) | ||
1919 | + | ||
1920 | + # default setting for prog | ||
1921 | + if prog is None: | ||
1922 | + prog = _os.path.basename(_sys.argv[0]) | ||
1923 | + | ||
1924 | + self.prog = prog | ||
1925 | + self.usage = usage | ||
1926 | + self.epilog = epilog | ||
1927 | + self.version = version | ||
1928 | + self.formatter_class = formatter_class | ||
1929 | + self.fromfile_prefix_chars = fromfile_prefix_chars | ||
1930 | + self.add_help = add_help | ||
1931 | + | ||
1932 | + add_group = self.add_argument_group | ||
1933 | + self._positionals = add_group(_('positional arguments')) | ||
1934 | + self._optionals = add_group(_('optional arguments')) | ||
1935 | + self._subparsers = None | ||
1936 | + | ||
1937 | + # register types | ||
1938 | + def identity(string): | ||
1939 | + return string | ||
1940 | + self.register('type', None, identity) | ||
1941 | + | ||
1942 | + # add help and version arguments if necessary | ||
1943 | + # (using explicit default to override global argument_default) | ||
1944 | + if '-' in prefix_chars: | ||
1945 | + default_prefix = '-' | ||
1946 | + else: | ||
1947 | + default_prefix = prefix_chars[0] | ||
1948 | + if self.add_help: | ||
1949 | + self.add_argument( | ||
1950 | + default_prefix+'h', default_prefix*2+'help', | ||
1951 | + action='help', default=SUPPRESS, | ||
1952 | + help=_('show this help message and exit')) | ||
1953 | + if self.version: | ||
1954 | + self.add_argument( | ||
1955 | + default_prefix+'v', default_prefix*2+'version', | ||
1956 | + action='version', default=SUPPRESS, | ||
1957 | + version=self.version, | ||
1958 | + help=_("show program's version number and exit")) | ||
1959 | + | ||
1960 | + # add parent arguments and defaults | ||
1961 | + for parent in parents: | ||
1962 | + self._add_container_actions(parent) | ||
1963 | + try: | ||
1964 | + defaults = parent._defaults | ||
1965 | + except AttributeError: | ||
1966 | + pass | ||
1967 | + else: | ||
1968 | + self._defaults.update(defaults) | ||
1969 | + | ||
1970 | + # ======================= | ||
1971 | + # Pretty __repr__ methods | ||
1972 | + # ======================= | ||
1973 | + def _get_kwargs(self): | ||
1974 | + names = [ | ||
1975 | + 'prog', | ||
1976 | + 'usage', | ||
1977 | + 'description', | ||
1978 | + 'version', | ||
1979 | + 'formatter_class', | ||
1980 | + 'conflict_handler', | ||
1981 | + 'add_help', | ||
740 | + ] | 1982 | + ] |
741 | + } | 1983 | + return [(name, getattr(self, name)) for name in names] |
742 | + } | 1984 | + |
743 | + | 1985 | + # ================================== |
744 | +Incremental Backups | 1986 | + # Optional/Positional adding methods |
745 | +------------------- | 1987 | + # ================================== |
746 | + | 1988 | + def add_subparsers(self, **kwargs): |
747 | +The star of the show. | 1989 | + if self._subparsers is not None: |
748 | + | 1990 | + self.error(_('cannot have multiple subparser arguments')) |
749 | +**Nota Bene!** Only incremental backups of entire drives are supported | 1991 | + |
750 | +for now. So despite the fact that you can attach a bitmap to any | 1992 | + # add the parser class to the arguments if it's not present |
751 | +arbitrary node, they are only currently useful when attached to the root | 1993 | + kwargs.setdefault('parser_class', type(self)) |
752 | +node. This is because drive-backup only supports drives/devices instead | 1994 | + |
753 | +of arbitrary nodes. | 1995 | + if 'title' in kwargs or 'description' in kwargs: |
754 | + | 1996 | + title = _(kwargs.pop('title', 'subcommands')) |
755 | +Example: First Incremental Backup | 1997 | + description = _(kwargs.pop('description', None)) |
756 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 1998 | + self._subparsers = self.add_argument_group(title, description) |
757 | + | 1999 | + else: |
758 | +1. Create a full backup and sync it to the dirty bitmap, as in the | 2000 | + self._subparsers = self._positionals |
759 | + transactional examples above; or with the VM offline, manually create | 2001 | + |
760 | + a full copy and then create a new bitmap before the VM begins | 2002 | + # prog defaults to the usage message of this parser, skipping |
761 | + execution. | 2003 | + # optional arguments and with no "usage:" prefix |
762 | + | 2004 | + if kwargs.get('prog') is None: |
763 | + - Let's assume the full backup is named ``full_backup.img``. | 2005 | + formatter = self._get_formatter() |
764 | + - Let's assume the bitmap you created is ``bitmap0`` attached to | 2006 | + positionals = self._get_positional_actions() |
765 | + ``drive0``. | 2007 | + groups = self._mutually_exclusive_groups |
766 | + | 2008 | + formatter.add_usage(self.usage, positionals, groups, '') |
767 | +2. Create a destination image for the incremental backup that utilizes | 2009 | + kwargs['prog'] = formatter.format_help().strip() |
768 | + the full backup as a backing image. | 2010 | + |
769 | + | 2011 | + # create the parsers action and add it to the positionals list |
770 | + - Let's assume the new incremental image is named | 2012 | + parsers_class = self._pop_action_class(kwargs, 'parsers') |
771 | + ``incremental.0.img``. | 2013 | + action = parsers_class(option_strings=[], **kwargs) |
772 | + | 2014 | + self._subparsers._add_action(action) |
773 | + .. code:: bash | 2015 | + |
774 | + | 2016 | + # return the created parsers action |
775 | + $ qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | 2017 | + return action |
776 | + | 2018 | + |
777 | +3. Issue the incremental backup command: | 2019 | + def _add_action(self, action): |
778 | + | 2020 | + if action.option_strings: |
779 | + .. code:: json | 2021 | + self._optionals._add_action(action) |
780 | + | 2022 | + else: |
781 | + { "execute": "drive-backup", | 2023 | + self._positionals._add_action(action) |
782 | + "arguments": { | 2024 | + return action |
783 | + "device": "drive0", | 2025 | + |
784 | + "bitmap": "bitmap0", | 2026 | + def _get_optional_actions(self): |
785 | + "target": "incremental.0.img", | 2027 | + return [action |
786 | + "format": "qcow2", | 2028 | + for action in self._actions |
787 | + "sync": "incremental", | 2029 | + if action.option_strings] |
788 | + "mode": "existing" | 2030 | + |
789 | + } | 2031 | + def _get_positional_actions(self): |
790 | + } | 2032 | + return [action |
791 | + | 2033 | + for action in self._actions |
792 | +Example: Second Incremental Backup | 2034 | + if not action.option_strings] |
793 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 2035 | + |
794 | + | 2036 | + # ===================================== |
795 | +1. Create a new destination image for the incremental backup that points | 2037 | + # Command line argument parsing methods |
796 | + to the previous one, e.g.: ``incremental.1.img`` | 2038 | + # ===================================== |
797 | + | 2039 | + def parse_args(self, args=None, namespace=None): |
798 | + .. code:: bash | 2040 | + args, argv = self.parse_known_args(args, namespace) |
799 | + | 2041 | + if argv: |
800 | + $ qemu-img create -f qcow2 incremental.1.img -b incremental.0.img -F qcow2 | 2042 | + msg = _('unrecognized arguments: %s') |
801 | + | 2043 | + self.error(msg % ' '.join(argv)) |
802 | +2. Issue a new incremental backup command. The only difference here is | 2044 | + return args |
803 | + that we have changed the target image below. | 2045 | + |
804 | + | 2046 | + def parse_known_args(self, args=None, namespace=None): |
805 | + .. code:: json | 2047 | + # args default to the system args |
806 | + | 2048 | + if args is None: |
807 | + { "execute": "drive-backup", | 2049 | + args = _sys.argv[1:] |
808 | + "arguments": { | 2050 | + |
809 | + "device": "drive0", | 2051 | + # default Namespace built from parser defaults |
810 | + "bitmap": "bitmap0", | 2052 | + if namespace is None: |
811 | + "target": "incremental.1.img", | 2053 | + namespace = Namespace() |
812 | + "format": "qcow2", | 2054 | + |
813 | + "sync": "incremental", | 2055 | + # add any action defaults that aren't present |
814 | + "mode": "existing" | 2056 | + for action in self._actions: |
815 | + } | 2057 | + if action.dest is not SUPPRESS: |
816 | + } | 2058 | + if not hasattr(namespace, action.dest): |
817 | + | 2059 | + if action.default is not SUPPRESS: |
818 | +Errors | 2060 | + setattr(namespace, action.dest, action.default) |
819 | +------ | 2061 | + |
820 | + | 2062 | + # add any parser defaults that aren't present |
821 | +- In the event of an error that occurs after a backup job is | 2063 | + for dest in self._defaults: |
822 | + successfully launched, either by a direct QMP command or a QMP | 2064 | + if not hasattr(namespace, dest): |
823 | + transaction, the user will receive a ``BLOCK_JOB_COMPLETE`` event with | 2065 | + setattr(namespace, dest, self._defaults[dest]) |
824 | + a failure message, accompanied by a ``BLOCK_JOB_ERROR`` event. | 2066 | + |
825 | + | 2067 | + # parse the arguments and exit if there are any errors |
826 | +- In the case of an event being cancelled, the user will receive a | 2068 | + try: |
827 | + ``BLOCK_JOB_CANCELLED`` event instead of a pair of COMPLETE and ERROR | 2069 | + namespace, args = self._parse_known_args(args, namespace) |
828 | + events. | 2070 | + if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): |
829 | + | 2071 | + args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) |
830 | +- In either case, the incremental backup data contained within the | 2072 | + delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) |
831 | + bitmap is safely rolled back, and the data within the bitmap is not | 2073 | + return namespace, args |
832 | + lost. The image file created for the failed attempt can be safely | 2074 | + except ArgumentError: |
833 | + deleted. | 2075 | + err = _sys.exc_info()[1] |
834 | + | 2076 | + self.error(str(err)) |
835 | +- Once the underlying problem is fixed (e.g. more storage space is | 2077 | + |
836 | + freed up), you can simply retry the incremental backup command with | 2078 | + def _parse_known_args(self, arg_strings, namespace): |
837 | + the same bitmap. | 2079 | + # replace arg strings that are file references |
838 | + | 2080 | + if self.fromfile_prefix_chars is not None: |
839 | +Example | 2081 | + arg_strings = self._read_args_from_files(arg_strings) |
840 | +~~~~~~~ | 2082 | + |
841 | + | 2083 | + # map all mutually exclusive arguments to the other arguments |
842 | +1. Create a target image: | 2084 | + # they can't occur with |
843 | + | 2085 | + action_conflicts = {} |
844 | + .. code:: bash | 2086 | + for mutex_group in self._mutually_exclusive_groups: |
845 | + | 2087 | + group_actions = mutex_group._group_actions |
846 | + $ qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | 2088 | + for i, mutex_action in enumerate(mutex_group._group_actions): |
847 | + | 2089 | + conflicts = action_conflicts.setdefault(mutex_action, []) |
848 | +2. Attempt to create an incremental backup via QMP: | 2090 | + conflicts.extend(group_actions[:i]) |
849 | + | 2091 | + conflicts.extend(group_actions[i + 1:]) |
850 | + .. code:: json | 2092 | + |
851 | + | 2093 | + # find all option indices, and determine the arg_string_pattern |
852 | + { "execute": "drive-backup", | 2094 | + # which has an 'O' if there is an option at an index, |
853 | + "arguments": { | 2095 | + # an 'A' if there is an argument, or a '-' if there is a '--' |
854 | + "device": "drive0", | 2096 | + option_string_indices = {} |
855 | + "bitmap": "bitmap0", | 2097 | + arg_string_pattern_parts = [] |
856 | + "target": "incremental.0.img", | 2098 | + arg_strings_iter = iter(arg_strings) |
857 | + "format": "qcow2", | 2099 | + for i, arg_string in enumerate(arg_strings_iter): |
858 | + "sync": "incremental", | 2100 | + |
859 | + "mode": "existing" | 2101 | + # all args after -- are non-options |
860 | + } | 2102 | + if arg_string == '--': |
861 | + } | 2103 | + arg_string_pattern_parts.append('-') |
862 | + | 2104 | + for arg_string in arg_strings_iter: |
863 | +3. Receive an event notifying us of failure: | 2105 | + arg_string_pattern_parts.append('A') |
864 | + | 2106 | + |
865 | + .. code:: json | 2107 | + # otherwise, add the arg to the arg strings |
866 | + | 2108 | + # and note the index if it was an option |
867 | + { "timestamp": { "seconds": 1424709442, "microseconds": 844524 }, | 2109 | + else: |
868 | + "data": { "speed": 0, "offset": 0, "len": 67108864, | 2110 | + option_tuple = self._parse_optional(arg_string) |
869 | + "error": "No space left on device", | 2111 | + if option_tuple is None: |
870 | + "device": "drive1", "type": "backup" }, | 2112 | + pattern = 'A' |
871 | + "event": "BLOCK_JOB_COMPLETED" } | 2113 | + else: |
872 | + | 2114 | + option_string_indices[i] = option_tuple |
873 | +4. Delete the failed incremental, and re-create the image. | 2115 | + pattern = 'O' |
874 | + | 2116 | + arg_string_pattern_parts.append(pattern) |
875 | + .. code:: bash | 2117 | + |
876 | + | 2118 | + # join the pieces together to form the pattern |
877 | + $ rm incremental.0.img | 2119 | + arg_strings_pattern = ''.join(arg_string_pattern_parts) |
878 | + $ qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 | 2120 | + |
879 | + | 2121 | + # converts arg strings to the appropriate and then takes the action |
880 | +5. Retry the command after fixing the underlying problem, such as | 2122 | + seen_actions = set() |
881 | + freeing up space on the backup volume: | 2123 | + seen_non_default_actions = set() |
882 | + | 2124 | + |
883 | + .. code:: json | 2125 | + def take_action(action, argument_strings, option_string=None): |
884 | + | 2126 | + seen_actions.add(action) |
885 | + { "execute": "drive-backup", | 2127 | + argument_values = self._get_values(action, argument_strings) |
886 | + "arguments": { | 2128 | + |
887 | + "device": "drive0", | 2129 | + # error if this argument is not allowed with other previously |
888 | + "bitmap": "bitmap0", | 2130 | + # seen arguments, assuming that actions that use the default |
889 | + "target": "incremental.0.img", | 2131 | + # value don't really count as "present" |
890 | + "format": "qcow2", | 2132 | + if argument_values is not action.default: |
891 | + "sync": "incremental", | 2133 | + seen_non_default_actions.add(action) |
892 | + "mode": "existing" | 2134 | + for conflict_action in action_conflicts.get(action, []): |
893 | + } | 2135 | + if conflict_action in seen_non_default_actions: |
894 | + } | 2136 | + msg = _('not allowed with argument %s') |
895 | + | 2137 | + action_name = _get_action_name(conflict_action) |
896 | +6. Receive confirmation that the job completed successfully: | 2138 | + raise ArgumentError(action, msg % action_name) |
897 | + | 2139 | + |
898 | + .. code:: json | 2140 | + # take the action if we didn't receive a SUPPRESS value |
899 | + | 2141 | + # (e.g. from a default) |
900 | + { "timestamp": { "seconds": 1424709668, "microseconds": 526525 }, | 2142 | + if argument_values is not SUPPRESS: |
901 | + "data": { "device": "drive1", "type": "backup", | 2143 | + action(self, namespace, argument_values, option_string) |
902 | + "speed": 0, "len": 67108864, "offset": 67108864}, | 2144 | + |
903 | + "event": "BLOCK_JOB_COMPLETED" } | 2145 | + # function to convert arg_strings into an optional action |
904 | + | 2146 | + def consume_optional(start_index): |
905 | +Partial Transactional Failures | 2147 | + |
906 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 2148 | + # get the optional identified at this index |
907 | + | 2149 | + option_tuple = option_string_indices[start_index] |
908 | +- Sometimes, a transaction will succeed in launching and return | 2150 | + action, option_string, explicit_arg = option_tuple |
909 | + success, but then later the backup jobs themselves may fail. It is | 2151 | + |
910 | + possible that a management application may have to deal with a | 2152 | + # identify additional optionals in the same arg string |
911 | + partial backup failure after a successful transaction. | 2153 | + # (e.g. -xyz is the same as -x -y -z if no args are required) |
912 | + | 2154 | + match_argument = self._match_argument |
913 | +- If multiple backup jobs are specified in a single transaction, when | 2155 | + action_tuples = [] |
914 | + one of them fails, it will not interact with the other backup jobs in | 2156 | + while True: |
915 | + any way. | 2157 | + |
916 | + | 2158 | + # if we found no optional action, skip it |
917 | +- The job(s) that succeeded will clear the dirty bitmap associated with | 2159 | + if action is None: |
918 | + the operation, but the job(s) that failed will not. It is not "safe" | 2160 | + extras.append(arg_strings[start_index]) |
919 | + to delete any incremental backups that were created successfully in | 2161 | + return start_index + 1 |
920 | + this scenario, even though others failed. | 2162 | + |
921 | + | 2163 | + # if there is an explicit argument, try to match the |
922 | +Example | 2164 | + # optional's string arguments to only this |
923 | +^^^^^^^ | 2165 | + if explicit_arg is not None: |
924 | + | 2166 | + arg_count = match_argument(action, 'A') |
925 | +- QMP example highlighting two backup jobs: | 2167 | + |
926 | + | 2168 | + # if the action is a single-dash option and takes no |
927 | + .. code:: json | 2169 | + # arguments, try to parse more single-dash options out |
928 | + | 2170 | + # of the tail of the option string |
929 | + { "execute": "transaction", | 2171 | + chars = self.prefix_chars |
930 | + "arguments": { | 2172 | + if arg_count == 0 and option_string[1] not in chars: |
931 | + "actions": [ | 2173 | + action_tuples.append((action, [], option_string)) |
932 | + { "type": "drive-backup", | 2174 | + char = option_string[0] |
933 | + "data": { "device": "drive0", "bitmap": "bitmap0", | 2175 | + option_string = char + explicit_arg[0] |
934 | + "format": "qcow2", "mode": "existing", | 2176 | + new_explicit_arg = explicit_arg[1:] or None |
935 | + "sync": "incremental", "target": "d0-incr-1.qcow2" } }, | 2177 | + optionals_map = self._option_string_actions |
936 | + { "type": "drive-backup", | 2178 | + if option_string in optionals_map: |
937 | + "data": { "device": "drive1", "bitmap": "bitmap1", | 2179 | + action = optionals_map[option_string] |
938 | + "format": "qcow2", "mode": "existing", | 2180 | + explicit_arg = new_explicit_arg |
939 | + "sync": "incremental", "target": "d1-incr-1.qcow2" } }, | 2181 | + else: |
940 | + ] | 2182 | + msg = _('ignored explicit argument %r') |
941 | + } | 2183 | + raise ArgumentError(action, msg % explicit_arg) |
942 | + } | 2184 | + |
943 | + | 2185 | + # if the action expect exactly one argument, we've |
944 | +- QMP example response, highlighting one success and one failure: | 2186 | + # successfully matched the option; exit the loop |
945 | + | 2187 | + elif arg_count == 1: |
946 | + - Acknowledgement that the Transaction was accepted and jobs were | 2188 | + stop = start_index + 1 |
947 | + launched: | 2189 | + args = [explicit_arg] |
948 | + | 2190 | + action_tuples.append((action, args, option_string)) |
949 | + .. code:: json | 2191 | + break |
950 | + | 2192 | + |
951 | + { "return": {} } | 2193 | + # error if a double-dash option did not use the |
952 | + | 2194 | + # explicit argument |
953 | + - Later, QEMU sends notice that the first job was completed: | 2195 | + else: |
954 | + | 2196 | + msg = _('ignored explicit argument %r') |
955 | + .. code:: json | 2197 | + raise ArgumentError(action, msg % explicit_arg) |
956 | + | 2198 | + |
957 | + { "timestamp": { "seconds": 1447192343, "microseconds": 615698 }, | 2199 | + # if there is no explicit argument, try to match the |
958 | + "data": { "device": "drive0", "type": "backup", | 2200 | + # optional's string arguments with the following strings |
959 | + "speed": 0, "len": 67108864, "offset": 67108864 }, | 2201 | + # if successful, exit the loop |
960 | + "event": "BLOCK_JOB_COMPLETED" | 2202 | + else: |
961 | + } | 2203 | + start = start_index + 1 |
962 | + | 2204 | + selected_patterns = arg_strings_pattern[start:] |
963 | + - Later yet, QEMU sends notice that the second job has failed: | 2205 | + arg_count = match_argument(action, selected_patterns) |
964 | + | 2206 | + stop = start + arg_count |
965 | + .. code:: json | 2207 | + args = arg_strings[start:stop] |
966 | + | 2208 | + action_tuples.append((action, args, option_string)) |
967 | + { "timestamp": { "seconds": 1447192399, "microseconds": 683015 }, | 2209 | + break |
968 | + "data": { "device": "drive1", "action": "report", | 2210 | + |
969 | + "operation": "read" }, | 2211 | + # add the Optional to the list and return the index at which |
970 | + "event": "BLOCK_JOB_ERROR" } | 2212 | + # the Optional's string args stopped |
971 | + | 2213 | + assert action_tuples |
972 | + .. code:: json | 2214 | + for action, args, option_string in action_tuples: |
973 | + | 2215 | + take_action(action, args, option_string) |
974 | + { "timestamp": { "seconds": 1447192399, "microseconds": | 2216 | + return stop |
975 | + 685853 }, "data": { "speed": 0, "offset": 0, "len": 67108864, | 2217 | + |
976 | + "error": "Input/output error", "device": "drive1", "type": | 2218 | + # the list of Positionals left to be parsed; this is modified |
977 | + "backup" }, "event": "BLOCK_JOB_COMPLETED" } | 2219 | + # by consume_positionals() |
978 | + | 2220 | + positionals = self._get_positional_actions() |
979 | +- In the above example, ``d0-incr-1.qcow2`` is valid and must be kept, | 2221 | + |
980 | + but ``d1-incr-1.qcow2`` is invalid and should be deleted. If a VM-wide | 2222 | + # function to convert arg_strings into positional actions |
981 | + incremental backup of all drives at a point-in-time is to be made, | 2223 | + def consume_positionals(start_index): |
982 | + new backups for both drives will need to be made, taking into account | 2224 | + # match as many Positionals as possible |
983 | + that a new incremental backup for drive0 needs to be based on top of | 2225 | + match_partial = self._match_arguments_partial |
984 | + ``d0-incr-1.qcow2``. | 2226 | + selected_pattern = arg_strings_pattern[start_index:] |
985 | + | 2227 | + arg_counts = match_partial(positionals, selected_pattern) |
986 | +Grouped Completion Mode | 2228 | + |
987 | +~~~~~~~~~~~~~~~~~~~~~~~ | 2229 | + # slice off the appropriate arg strings for each Positional |
988 | + | 2230 | + # and add the Positional and its args to the list |
989 | +- While jobs launched by transactions normally complete or fail on | 2231 | + for action, arg_count in zip(positionals, arg_counts): |
990 | + their own, it is possible to instruct them to complete or fail | 2232 | + args = arg_strings[start_index: start_index + arg_count] |
991 | + together as a group. | 2233 | + start_index += arg_count |
992 | + | 2234 | + take_action(action, args) |
993 | +- QMP transactions take an optional properties structure that can | 2235 | + |
994 | + affect the semantics of the transaction. | 2236 | + # slice off the Positionals that we just parsed and return the |
995 | + | 2237 | + # index at which the Positionals' string args stopped |
996 | +- The "completion-mode" transaction property can be either "individual" | 2238 | + positionals[:] = positionals[len(arg_counts):] |
997 | + which is the default, legacy behavior described above, or "grouped," | 2239 | + return start_index |
998 | + a new behavior detailed below. | 2240 | + |
999 | + | 2241 | + # consume Positionals and Optionals alternately, until we have |
1000 | +- Delayed Completion: In grouped completion mode, no jobs will report | 2242 | + # passed the last option string |
1001 | + success until all jobs are ready to report success. | 2243 | + extras = [] |
1002 | + | 2244 | + start_index = 0 |
1003 | +- Grouped failure: If any job fails in grouped completion mode, all | 2245 | + if option_string_indices: |
1004 | + remaining jobs will be cancelled. Any incremental backups will | 2246 | + max_option_string_index = max(option_string_indices) |
1005 | + restore their dirty bitmap objects as if no backup command was ever | 2247 | + else: |
1006 | + issued. | 2248 | + max_option_string_index = -1 |
1007 | + | 2249 | + while start_index <= max_option_string_index: |
1008 | + - Regardless of if QEMU reports a particular incremental backup job | 2250 | + |
1009 | + as CANCELLED or as an ERROR, the in-memory bitmap will be | 2251 | + # consume any Positionals preceding the next option |
1010 | + restored. | 2252 | + next_option_string_index = min([ |
1011 | + | 2253 | + index |
1012 | +Example | 2254 | + for index in option_string_indices |
1013 | +^^^^^^^ | 2255 | + if index >= start_index]) |
1014 | + | 2256 | + if start_index != next_option_string_index: |
1015 | +- Here's the same example scenario from above with the new property: | 2257 | + positionals_end_index = consume_positionals(start_index) |
1016 | + | 2258 | + |
1017 | + .. code:: json | 2259 | + # only try to parse the next optional if we didn't consume |
1018 | + | 2260 | + # the option string during the positionals parsing |
1019 | + { "execute": "transaction", | 2261 | + if positionals_end_index > start_index: |
1020 | + "arguments": { | 2262 | + start_index = positionals_end_index |
1021 | + "actions": [ | 2263 | + continue |
1022 | + { "type": "drive-backup", | 2264 | + else: |
1023 | + "data": { "device": "drive0", "bitmap": "bitmap0", | 2265 | + start_index = positionals_end_index |
1024 | + "format": "qcow2", "mode": "existing", | 2266 | + |
1025 | + "sync": "incremental", "target": "d0-incr-1.qcow2" } }, | 2267 | + # if we consumed all the positionals we could and we're not |
1026 | + { "type": "drive-backup", | 2268 | + # at the index of an option string, there were extra arguments |
1027 | + "data": { "device": "drive1", "bitmap": "bitmap1", | 2269 | + if start_index not in option_string_indices: |
1028 | + "format": "qcow2", "mode": "existing", | 2270 | + strings = arg_strings[start_index:next_option_string_index] |
1029 | + "sync": "incremental", "target": "d1-incr-1.qcow2" } }, | 2271 | + extras.extend(strings) |
1030 | + ], | 2272 | + start_index = next_option_string_index |
1031 | + "properties": { | 2273 | + |
1032 | + "completion-mode": "grouped" | 2274 | + # consume the next optional and any arguments for it |
1033 | + } | 2275 | + start_index = consume_optional(start_index) |
1034 | + } | 2276 | + |
1035 | + } | 2277 | + # consume any positionals following the last Optional |
1036 | + | 2278 | + stop_index = consume_positionals(start_index) |
1037 | +- QMP example response, highlighting a failure for ``drive2``: | 2279 | + |
1038 | + | 2280 | + # if we didn't consume all the argument strings, there were extras |
1039 | + - Acknowledgement that the Transaction was accepted and jobs were | 2281 | + extras.extend(arg_strings[stop_index:]) |
1040 | + launched: | 2282 | + |
1041 | + | 2283 | + # if we didn't use all the Positional objects, there were too few |
1042 | + .. code:: json | 2284 | + # arg strings supplied. |
1043 | + | 2285 | + if positionals: |
1044 | + { "return": {} } | 2286 | + self.error(_('too few arguments')) |
1045 | + | 2287 | + |
1046 | + - Later, QEMU sends notice that the second job has errored out, but | 2288 | + # make sure all required actions were present, and convert defaults. |
1047 | + that the first job was also cancelled: | 2289 | + for action in self._actions: |
1048 | + | 2290 | + if action not in seen_actions: |
1049 | + .. code:: json | 2291 | + if action.required: |
1050 | + | 2292 | + name = _get_action_name(action) |
1051 | + { "timestamp": { "seconds": 1447193702, "microseconds": 632377 }, | 2293 | + self.error(_('argument %s is required') % name) |
1052 | + "data": { "device": "drive1", "action": "report", | 2294 | + else: |
1053 | + "operation": "read" }, | 2295 | + # Convert action default now instead of doing it before |
1054 | + "event": "BLOCK_JOB_ERROR" } | 2296 | + # parsing arguments to avoid calling convert functions |
1055 | + | 2297 | + # twice (which may fail) if the argument was given, but |
1056 | + .. code:: json | 2298 | + # only if it was defined already in the namespace |
1057 | + | 2299 | + if (action.default is not None and |
1058 | + { "timestamp": { "seconds": 1447193702, "microseconds": 640074 }, | 2300 | + isinstance(action.default, basestring) and |
1059 | + "data": { "speed": 0, "offset": 0, "len": 67108864, | 2301 | + hasattr(namespace, action.dest) and |
1060 | + "error": "Input/output error", | 2302 | + action.default is getattr(namespace, action.dest)): |
1061 | + "device": "drive1", "type": "backup" }, | 2303 | + setattr(namespace, action.dest, |
1062 | + "event": "BLOCK_JOB_COMPLETED" } | 2304 | + self._get_value(action, action.default)) |
1063 | + | 2305 | + |
1064 | + .. code:: json | 2306 | + # make sure all required groups had one option present |
1065 | + | 2307 | + for group in self._mutually_exclusive_groups: |
1066 | + { "timestamp": { "seconds": 1447193702, "microseconds": 640163 }, | 2308 | + if group.required: |
1067 | + "data": { "device": "drive0", "type": "backup", "speed": 0, | 2309 | + for action in group._group_actions: |
1068 | + "len": 67108864, "offset": 16777216 }, | 2310 | + if action in seen_non_default_actions: |
1069 | + "event": "BLOCK_JOB_CANCELLED" } | 2311 | + break |
1070 | + | 2312 | + |
1071 | +.. raw:: html | 2313 | + # if no actions were used, report the error |
1072 | + | 2314 | + else: |
1073 | + <!-- | 2315 | + names = [_get_action_name(action) |
1074 | + The FreeBSD Documentation License | 2316 | + for action in group._group_actions |
1075 | + | 2317 | + if action.help is not SUPPRESS] |
1076 | + Redistribution and use in source (Markdown) and 'compiled' forms (SGML, HTML, | 2318 | + msg = _('one of the arguments %s is required') |
1077 | + PDF, PostScript, RTF and so forth) with or without modification, are permitted | 2319 | + self.error(msg % ' '.join(names)) |
1078 | + provided that the following conditions are met: | 2320 | + |
1079 | + | 2321 | + # return the updated namespace and the extra arguments |
1080 | + Redistributions of source code (Markdown) must retain the above copyright | 2322 | + return namespace, extras |
1081 | + notice, this list of conditions and the following disclaimer of this file | 2323 | + |
1082 | + unmodified. | 2324 | + def _read_args_from_files(self, arg_strings): |
1083 | + | 2325 | + # expand arguments referencing files |
1084 | + Redistributions in compiled form (transformed to other DTDs, converted to PDF, | 2326 | + new_arg_strings = [] |
1085 | + PostScript, RTF and other formats) must reproduce the above copyright notice, | 2327 | + for arg_string in arg_strings: |
1086 | + this list of conditions and the following disclaimer in the documentation and/or | 2328 | + |
1087 | + other materials provided with the distribution. | 2329 | + # for regular arguments, just add them back into the list |
1088 | + | 2330 | + if arg_string[0] not in self.fromfile_prefix_chars: |
1089 | + THIS DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | 2331 | + new_arg_strings.append(arg_string) |
1090 | + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 2332 | + |
1091 | + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | 2333 | + # replace arguments referencing files with the file content |
1092 | + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | 2334 | + else: |
1093 | + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 2335 | + try: |
1094 | + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | 2336 | + args_file = open(arg_string[1:]) |
1095 | + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | 2337 | + try: |
1096 | + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | 2338 | + arg_strings = [] |
1097 | + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 2339 | + for arg_line in args_file.read().splitlines(): |
1098 | + THIS DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 2340 | + for arg in self.convert_arg_line_to_args(arg_line): |
1099 | + --> | 2341 | + arg_strings.append(arg) |
2342 | + arg_strings = self._read_args_from_files(arg_strings) | ||
2343 | + new_arg_strings.extend(arg_strings) | ||
2344 | + finally: | ||
2345 | + args_file.close() | ||
2346 | + except IOError: | ||
2347 | + err = _sys.exc_info()[1] | ||
2348 | + self.error(str(err)) | ||
2349 | + | ||
2350 | + # return the modified argument list | ||
2351 | + return new_arg_strings | ||
2352 | + | ||
2353 | + def convert_arg_line_to_args(self, arg_line): | ||
2354 | + return [arg_line] | ||
2355 | + | ||
2356 | + def _match_argument(self, action, arg_strings_pattern): | ||
2357 | + # match the pattern for this action to the arg strings | ||
2358 | + nargs_pattern = self._get_nargs_pattern(action) | ||
2359 | + match = _re.match(nargs_pattern, arg_strings_pattern) | ||
2360 | + | ||
2361 | + # raise an exception if we weren't able to find a match | ||
2362 | + if match is None: | ||
2363 | + nargs_errors = { | ||
2364 | + None: _('expected one argument'), | ||
2365 | + OPTIONAL: _('expected at most one argument'), | ||
2366 | + ONE_OR_MORE: _('expected at least one argument'), | ||
2367 | + } | ||
2368 | + default = _('expected %s argument(s)') % action.nargs | ||
2369 | + msg = nargs_errors.get(action.nargs, default) | ||
2370 | + raise ArgumentError(action, msg) | ||
2371 | + | ||
2372 | + # return the number of arguments matched | ||
2373 | + return len(match.group(1)) | ||
2374 | + | ||
2375 | + def _match_arguments_partial(self, actions, arg_strings_pattern): | ||
2376 | + # progressively shorten the actions list by slicing off the | ||
2377 | + # final actions until we find a match | ||
2378 | + result = [] | ||
2379 | + for i in range(len(actions), 0, -1): | ||
2380 | + actions_slice = actions[:i] | ||
2381 | + pattern = ''.join([self._get_nargs_pattern(action) | ||
2382 | + for action in actions_slice]) | ||
2383 | + match = _re.match(pattern, arg_strings_pattern) | ||
2384 | + if match is not None: | ||
2385 | + result.extend([len(string) for string in match.groups()]) | ||
2386 | + break | ||
2387 | + | ||
2388 | + # return the list of arg string counts | ||
2389 | + return result | ||
2390 | + | ||
2391 | + def _parse_optional(self, arg_string): | ||
2392 | + # if it's an empty string, it was meant to be a positional | ||
2393 | + if not arg_string: | ||
2394 | + return None | ||
2395 | + | ||
2396 | + # if it doesn't start with a prefix, it was meant to be positional | ||
2397 | + if not arg_string[0] in self.prefix_chars: | ||
2398 | + return None | ||
2399 | + | ||
2400 | + # if the option string is present in the parser, return the action | ||
2401 | + if arg_string in self._option_string_actions: | ||
2402 | + action = self._option_string_actions[arg_string] | ||
2403 | + return action, arg_string, None | ||
2404 | + | ||
2405 | + # if it's just a single character, it was meant to be positional | ||
2406 | + if len(arg_string) == 1: | ||
2407 | + return None | ||
2408 | + | ||
2409 | + # if the option string before the "=" is present, return the action | ||
2410 | + if '=' in arg_string: | ||
2411 | + option_string, explicit_arg = arg_string.split('=', 1) | ||
2412 | + if option_string in self._option_string_actions: | ||
2413 | + action = self._option_string_actions[option_string] | ||
2414 | + return action, option_string, explicit_arg | ||
2415 | + | ||
2416 | + # search through all possible prefixes of the option string | ||
2417 | + # and all actions in the parser for possible interpretations | ||
2418 | + option_tuples = self._get_option_tuples(arg_string) | ||
2419 | + | ||
2420 | + # if multiple actions match, the option string was ambiguous | ||
2421 | + if len(option_tuples) > 1: | ||
2422 | + options = ', '.join([option_string | ||
2423 | + for action, option_string, explicit_arg in option_tuples]) | ||
2424 | + tup = arg_string, options | ||
2425 | + self.error(_('ambiguous option: %s could match %s') % tup) | ||
2426 | + | ||
2427 | + # if exactly one action matched, this segmentation is good, | ||
2428 | + # so return the parsed action | ||
2429 | + elif len(option_tuples) == 1: | ||
2430 | + option_tuple, = option_tuples | ||
2431 | + return option_tuple | ||
2432 | + | ||
2433 | + # if it was not found as an option, but it looks like a negative | ||
2434 | + # number, it was meant to be positional | ||
2435 | + # unless there are negative-number-like options | ||
2436 | + if self._negative_number_matcher.match(arg_string): | ||
2437 | + if not self._has_negative_number_optionals: | ||
2438 | + return None | ||
2439 | + | ||
2440 | + # if it contains a space, it was meant to be a positional | ||
2441 | + if ' ' in arg_string: | ||
2442 | + return None | ||
2443 | + | ||
2444 | + # it was meant to be an optional but there is no such option | ||
2445 | + # in this parser (though it might be a valid option in a subparser) | ||
2446 | + return None, arg_string, None | ||
2447 | + | ||
2448 | + def _get_option_tuples(self, option_string): | ||
2449 | + result = [] | ||
2450 | + | ||
2451 | + # option strings starting with two prefix characters are only | ||
2452 | + # split at the '=' | ||
2453 | + chars = self.prefix_chars | ||
2454 | + if option_string[0] in chars and option_string[1] in chars: | ||
2455 | + if '=' in option_string: | ||
2456 | + option_prefix, explicit_arg = option_string.split('=', 1) | ||
2457 | + else: | ||
2458 | + option_prefix = option_string | ||
2459 | + explicit_arg = None | ||
2460 | + for option_string in self._option_string_actions: | ||
2461 | + if option_string.startswith(option_prefix): | ||
2462 | + action = self._option_string_actions[option_string] | ||
2463 | + tup = action, option_string, explicit_arg | ||
2464 | + result.append(tup) | ||
2465 | + | ||
2466 | + # single character options can be concatenated with their arguments | ||
2467 | + # but multiple character options always have to have their argument | ||
2468 | + # separate | ||
2469 | + elif option_string[0] in chars and option_string[1] not in chars: | ||
2470 | + option_prefix = option_string | ||
2471 | + explicit_arg = None | ||
2472 | + short_option_prefix = option_string[:2] | ||
2473 | + short_explicit_arg = option_string[2:] | ||
2474 | + | ||
2475 | + for option_string in self._option_string_actions: | ||
2476 | + if option_string == short_option_prefix: | ||
2477 | + action = self._option_string_actions[option_string] | ||
2478 | + tup = action, option_string, short_explicit_arg | ||
2479 | + result.append(tup) | ||
2480 | + elif option_string.startswith(option_prefix): | ||
2481 | + action = self._option_string_actions[option_string] | ||
2482 | + tup = action, option_string, explicit_arg | ||
2483 | + result.append(tup) | ||
2484 | + | ||
2485 | + # shouldn't ever get here | ||
2486 | + else: | ||
2487 | + self.error(_('unexpected option string: %s') % option_string) | ||
2488 | + | ||
2489 | + # return the collected option tuples | ||
2490 | + return result | ||
2491 | + | ||
2492 | + def _get_nargs_pattern(self, action): | ||
2493 | + # in all examples below, we have to allow for '--' args | ||
2494 | + # which are represented as '-' in the pattern | ||
2495 | + nargs = action.nargs | ||
2496 | + | ||
2497 | + # the default (None) is assumed to be a single argument | ||
2498 | + if nargs is None: | ||
2499 | + nargs_pattern = '(-*A-*)' | ||
2500 | + | ||
2501 | + # allow zero or one arguments | ||
2502 | + elif nargs == OPTIONAL: | ||
2503 | + nargs_pattern = '(-*A?-*)' | ||
2504 | + | ||
2505 | + # allow zero or more arguments | ||
2506 | + elif nargs == ZERO_OR_MORE: | ||
2507 | + nargs_pattern = '(-*[A-]*)' | ||
2508 | + | ||
2509 | + # allow one or more arguments | ||
2510 | + elif nargs == ONE_OR_MORE: | ||
2511 | + nargs_pattern = '(-*A[A-]*)' | ||
2512 | + | ||
2513 | + # allow any number of options or arguments | ||
2514 | + elif nargs == REMAINDER: | ||
2515 | + nargs_pattern = '([-AO]*)' | ||
2516 | + | ||
2517 | + # allow one argument followed by any number of options or arguments | ||
2518 | + elif nargs == PARSER: | ||
2519 | + nargs_pattern = '(-*A[-AO]*)' | ||
2520 | + | ||
2521 | + # all others should be integers | ||
2522 | + else: | ||
2523 | + nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) | ||
2524 | + | ||
2525 | + # if this is an optional action, -- is not allowed | ||
2526 | + if action.option_strings: | ||
2527 | + nargs_pattern = nargs_pattern.replace('-*', '') | ||
2528 | + nargs_pattern = nargs_pattern.replace('-', '') | ||
2529 | + | ||
2530 | + # return the pattern | ||
2531 | + return nargs_pattern | ||
2532 | + | ||
2533 | + # ======================== | ||
2534 | + # Value conversion methods | ||
2535 | + # ======================== | ||
2536 | + def _get_values(self, action, arg_strings): | ||
2537 | + # for everything but PARSER args, strip out '--' | ||
2538 | + if action.nargs not in [PARSER, REMAINDER]: | ||
2539 | + arg_strings = [s for s in arg_strings if s != '--'] | ||
2540 | + | ||
2541 | + # optional argument produces a default when not present | ||
2542 | + if not arg_strings and action.nargs == OPTIONAL: | ||
2543 | + if action.option_strings: | ||
2544 | + value = action.const | ||
2545 | + else: | ||
2546 | + value = action.default | ||
2547 | + if isinstance(value, basestring): | ||
2548 | + value = self._get_value(action, value) | ||
2549 | + self._check_value(action, value) | ||
2550 | + | ||
2551 | + # when nargs='*' on a positional, if there were no command-line | ||
2552 | + # args, use the default if it is anything other than None | ||
2553 | + elif (not arg_strings and action.nargs == ZERO_OR_MORE and | ||
2554 | + not action.option_strings): | ||
2555 | + if action.default is not None: | ||
2556 | + value = action.default | ||
2557 | + else: | ||
2558 | + value = arg_strings | ||
2559 | + self._check_value(action, value) | ||
2560 | + | ||
2561 | + # single argument or optional argument produces a single value | ||
2562 | + elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: | ||
2563 | + arg_string, = arg_strings | ||
2564 | + value = self._get_value(action, arg_string) | ||
2565 | + self._check_value(action, value) | ||
2566 | + | ||
2567 | + # REMAINDER arguments convert all values, checking none | ||
2568 | + elif action.nargs == REMAINDER: | ||
2569 | + value = [self._get_value(action, v) for v in arg_strings] | ||
2570 | + | ||
2571 | + # PARSER arguments convert all values, but check only the first | ||
2572 | + elif action.nargs == PARSER: | ||
2573 | + value = [self._get_value(action, v) for v in arg_strings] | ||
2574 | + self._check_value(action, value[0]) | ||
2575 | + | ||
2576 | + # all other types of nargs produce a list | ||
2577 | + else: | ||
2578 | + value = [self._get_value(action, v) for v in arg_strings] | ||
2579 | + for v in value: | ||
2580 | + self._check_value(action, v) | ||
2581 | + | ||
2582 | + # return the converted value | ||
2583 | + return value | ||
2584 | + | ||
2585 | + def _get_value(self, action, arg_string): | ||
2586 | + type_func = self._registry_get('type', action.type, action.type) | ||
2587 | + if not _callable(type_func): | ||
2588 | + msg = _('%r is not callable') | ||
2589 | + raise ArgumentError(action, msg % type_func) | ||
2590 | + | ||
2591 | + # convert the value to the appropriate type | ||
2592 | + try: | ||
2593 | + result = type_func(arg_string) | ||
2594 | + | ||
2595 | + # ArgumentTypeErrors indicate errors | ||
2596 | + except ArgumentTypeError: | ||
2597 | + name = getattr(action.type, '__name__', repr(action.type)) | ||
2598 | + msg = str(_sys.exc_info()[1]) | ||
2599 | + raise ArgumentError(action, msg) | ||
2600 | + | ||
2601 | + # TypeErrors or ValueErrors also indicate errors | ||
2602 | + except (TypeError, ValueError): | ||
2603 | + name = getattr(action.type, '__name__', repr(action.type)) | ||
2604 | + msg = _('invalid %s value: %r') | ||
2605 | + raise ArgumentError(action, msg % (name, arg_string)) | ||
2606 | + | ||
2607 | + # return the converted value | ||
2608 | + return result | ||
2609 | + | ||
2610 | + def _check_value(self, action, value): | ||
2611 | + # converted value must be one of the choices (if specified) | ||
2612 | + if action.choices is not None and value not in action.choices: | ||
2613 | + tup = value, ', '.join(map(repr, action.choices)) | ||
2614 | + msg = _('invalid choice: %r (choose from %s)') % tup | ||
2615 | + raise ArgumentError(action, msg) | ||
2616 | + | ||
2617 | + # ======================= | ||
2618 | + # Help-formatting methods | ||
2619 | + # ======================= | ||
2620 | + def format_usage(self): | ||
2621 | + formatter = self._get_formatter() | ||
2622 | + formatter.add_usage(self.usage, self._actions, | ||
2623 | + self._mutually_exclusive_groups) | ||
2624 | + return formatter.format_help() | ||
2625 | + | ||
2626 | + def format_help(self): | ||
2627 | + formatter = self._get_formatter() | ||
2628 | + | ||
2629 | + # usage | ||
2630 | + formatter.add_usage(self.usage, self._actions, | ||
2631 | + self._mutually_exclusive_groups) | ||
2632 | + | ||
2633 | + # description | ||
2634 | + formatter.add_text(self.description) | ||
2635 | + | ||
2636 | + # positionals, optionals and user-defined groups | ||
2637 | + for action_group in self._action_groups: | ||
2638 | + formatter.start_section(action_group.title) | ||
2639 | + formatter.add_text(action_group.description) | ||
2640 | + formatter.add_arguments(action_group._group_actions) | ||
2641 | + formatter.end_section() | ||
2642 | + | ||
2643 | + # epilog | ||
2644 | + formatter.add_text(self.epilog) | ||
2645 | + | ||
2646 | + # determine help from format above | ||
2647 | + return formatter.format_help() | ||
2648 | + | ||
2649 | + def format_version(self): | ||
2650 | + import warnings | ||
2651 | + warnings.warn( | ||
2652 | + 'The format_version method is deprecated -- the "version" ' | ||
2653 | + 'argument to ArgumentParser is no longer supported.', | ||
2654 | + DeprecationWarning) | ||
2655 | + formatter = self._get_formatter() | ||
2656 | + formatter.add_text(self.version) | ||
2657 | + return formatter.format_help() | ||
2658 | + | ||
2659 | + def _get_formatter(self): | ||
2660 | + return self.formatter_class(prog=self.prog) | ||
2661 | + | ||
2662 | + # ===================== | ||
2663 | + # Help-printing methods | ||
2664 | + # ===================== | ||
2665 | + def print_usage(self, file=None): | ||
2666 | + if file is None: | ||
2667 | + file = _sys.stdout | ||
2668 | + self._print_message(self.format_usage(), file) | ||
2669 | + | ||
2670 | + def print_help(self, file=None): | ||
2671 | + if file is None: | ||
2672 | + file = _sys.stdout | ||
2673 | + self._print_message(self.format_help(), file) | ||
2674 | + | ||
2675 | + def print_version(self, file=None): | ||
2676 | + import warnings | ||
2677 | + warnings.warn( | ||
2678 | + 'The print_version method is deprecated -- the "version" ' | ||
2679 | + 'argument to ArgumentParser is no longer supported.', | ||
2680 | + DeprecationWarning) | ||
2681 | + self._print_message(self.format_version(), file) | ||
2682 | + | ||
2683 | + def _print_message(self, message, file=None): | ||
2684 | + if message: | ||
2685 | + if file is None: | ||
2686 | + file = _sys.stderr | ||
2687 | + file.write(message) | ||
2688 | + | ||
2689 | + # =============== | ||
2690 | + # Exiting methods | ||
2691 | + # =============== | ||
2692 | + def exit(self, status=0, message=None): | ||
2693 | + if message: | ||
2694 | + self._print_message(message, _sys.stderr) | ||
2695 | + _sys.exit(status) | ||
2696 | + | ||
2697 | + def error(self, message): | ||
2698 | + """error(message: string) | ||
2699 | + | ||
2700 | + Prints a usage message incorporating the message to stderr and | ||
2701 | + exits. | ||
2702 | + | ||
2703 | + If you override this in a subclass, it should not return -- it | ||
2704 | + should either exit or raise an exception. | ||
2705 | + """ | ||
2706 | + self.print_usage(_sys.stderr) | ||
2707 | + self.exit(2, _('%s: error: %s\n') % (self.prog, message)) | ||
1100 | -- | 2708 | -- |
1101 | 2.9.4 | 2709 | 2.13.5 |
1102 | 2710 | ||
1103 | 2711 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | Add the scripts/ directory to sys.path so Python 2.6 will be able to | ||
2 | import argparse. | ||
1 | 3 | ||
4 | Cc: Fam Zheng <famz@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Acked-by: John Snow <jsnow@redhat.com> | ||
7 | Acked-by: Fam Zheng <famz@redhat.com> | ||
8 | Message-id: 20170825155732.15665-3-stefanha@redhat.com | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | tests/docker/docker.py | 4 +++- | ||
12 | 1 file changed, 3 insertions(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/tests/docker/docker.py b/tests/docker/docker.py | ||
15 | index XXXXXXX..XXXXXXX 100755 | ||
16 | --- a/tests/docker/docker.py | ||
17 | +++ b/tests/docker/docker.py | ||
18 | @@ -XXX,XX +XXX,XX @@ | ||
19 | |||
20 | import os | ||
21 | import sys | ||
22 | +sys.path.append(os.path.join(os.path.dirname(__file__), | ||
23 | + '..', '..', 'scripts')) | ||
24 | +import argparse | ||
25 | import subprocess | ||
26 | import json | ||
27 | import hashlib | ||
28 | import atexit | ||
29 | import uuid | ||
30 | -import argparse | ||
31 | import tempfile | ||
32 | import re | ||
33 | import signal | ||
34 | -- | ||
35 | 2.13.5 | ||
36 | |||
37 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | Add the scripts/ directory to sys.path so Python 2.6 will be able to | ||
2 | import argparse. | ||
1 | 3 | ||
4 | Cc: Daniel P. Berrange <berrange@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Acked-by: John Snow <jsnow@redhat.com> | ||
7 | Acked-by: Fam Zheng <famz@redhat.com> | ||
8 | Message-id: 20170825155732.15665-4-stefanha@redhat.com | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | tests/migration/guestperf/shell.py | 8 +++++--- | ||
12 | 1 file changed, 5 insertions(+), 3 deletions(-) | ||
13 | |||
14 | diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/tests/migration/guestperf/shell.py | ||
17 | +++ b/tests/migration/guestperf/shell.py | ||
18 | @@ -XXX,XX +XXX,XX @@ | ||
19 | # | ||
20 | |||
21 | |||
22 | -import argparse | ||
23 | -import fnmatch | ||
24 | import os | ||
25 | import os.path | ||
26 | -import platform | ||
27 | import sys | ||
28 | +sys.path.append(os.path.join(os.path.dirname(__file__), | ||
29 | + '..', '..', '..', 'scripts')) | ||
30 | +import argparse | ||
31 | +import fnmatch | ||
32 | +import platform | ||
33 | |||
34 | from guestperf.hardware import Hardware | ||
35 | from guestperf.engine import Engine | ||
36 | -- | ||
37 | 2.13.5 | ||
38 | |||
39 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Fred Rolland <rollandf@gmail.com> | ||
1 | 2 | ||
3 | Update doc with the usage of UUID for initiator name. | ||
4 | |||
5 | Related-To: https://bugzilla.redhat.com/1006468 | ||
6 | Signed-off-by: Fred Rolland <frolland@redhat.com> | ||
7 | Message-id: 20170823084830.30500-1-frolland@redhat.com | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | qemu-doc.texi | 5 +++-- | ||
11 | 1 file changed, 3 insertions(+), 2 deletions(-) | ||
12 | |||
13 | diff --git a/qemu-doc.texi b/qemu-doc.texi | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/qemu-doc.texi | ||
16 | +++ b/qemu-doc.texi | ||
17 | @@ -XXX,XX +XXX,XX @@ in a configuration file provided via '-readconfig' or directly on the | ||
18 | command line. | ||
19 | |||
20 | If the initiator-name is not specified qemu will use a default name | ||
21 | -of 'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the | ||
22 | +of 'iqn.2008-11.org.linux-kvm[:<uuid>'] where <uuid> is the UUID of the | ||
23 | +virtual machine. If the UUID is not specified qemu will use | ||
24 | +'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the | ||
25 | virtual machine. | ||
26 | |||
27 | - | ||
28 | @example | ||
29 | Setting a specific initiator name to use when logging in to the target | ||
30 | -iscsi initiator-name=iqn.qemu.test:my-initiator | ||
31 | -- | ||
32 | 2.13.5 | ||
33 | |||
34 | diff view generated by jsdifflib |
1 | From: Kashyap Chamarthy <kchamart@redhat.com> | 1 | Most qcow2 files are uncompressed so it is wasteful to allocate (32 + 1) |
---|---|---|---|
2 | * cluster_size + 512 bytes upfront. Allocate s->cluster_cache and | ||
3 | s->cluster_data when the first read operation is performance on a | ||
4 | compressed cluster. | ||
2 | 5 | ||
3 | This patch documents (including their QMP invocations) all the four | 6 | The buffers are freed in .bdrv_close(). .bdrv_open() no longer has any |
4 | major kinds of live block operations: | 7 | code paths that can allocate these buffers, so remove the free functions |
8 | in the error code path. | ||
5 | 9 | ||
6 | - `block-stream` | 10 | This patch can result in significant memory savings when many qcow2 |
7 | - `block-commit` | 11 | disks are attached or backing file chains are long: |
8 | - `drive-mirror` (& `blockdev-mirror`) | ||
9 | - `drive-backup` (& `blockdev-backup`) | ||
10 | 12 | ||
11 | Things considered while writing this document: | 13 | Before 12.81% (1,023,193,088B) |
14 | After 5.36% (393,893,888B) | ||
12 | 15 | ||
13 | - Use reStructuredText as markup language (with the goal of generating | 16 | Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru> |
14 | the HTML output using the Sphinx Documentation Generator). It is | 17 | Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru> |
15 | gentler on the eye, and can be trivially converted to different | 18 | Reviewed-by: Eric Blake <eblake@redhat.com> |
16 | formats. (Another reason: upstream QEMU is considering to switch to | 19 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
17 | Sphinx, which uses reStructuredText as its markup language.) | 20 | Message-id: 20170821135530.32344-1-stefanha@redhat.com |
21 | Cc: Kevin Wolf <kwolf@redhat.com> | ||
22 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
23 | --- | ||
24 | block/qcow2-cluster.c | 17 +++++++++++++++++ | ||
25 | block/qcow2.c | 12 ------------ | ||
26 | 2 files changed, 17 insertions(+), 12 deletions(-) | ||
18 | 27 | ||
19 | - Raw QMP JSON output vs. 'qmp-shell'. I debated with myself whether | 28 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c |
20 | to only show raw QMP JSON output (as that is the canonical | 29 | index XXXXXXX..XXXXXXX 100644 |
21 | representation), or use 'qmp-shell', which takes key-value pairs. I | 30 | --- a/block/qcow2-cluster.c |
22 | settled on the approach of: for the first occurrence of a command, | 31 | +++ b/block/qcow2-cluster.c |
23 | use raw JSON; for subsequent occurrences, use 'qmp-shell', with an | 32 | @@ -XXX,XX +XXX,XX @@ int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) |
24 | occasional exception. | 33 | nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1; |
25 | 34 | sector_offset = coffset & 511; | |
26 | - Usage of `-blockdev` command-line. | 35 | csize = nb_csectors * 512 - sector_offset; |
27 | |||
28 | - Usage of 'node-name' vs. file path to refer to disks. While we have | ||
29 | `blockdev-{mirror, backup}` as 'node-name'-alternatives for | ||
30 | `drive-{mirror, backup}`, the `block-commit` command still operates | ||
31 | on file names for parameters 'base' and 'top'. So I added a caveat | ||
32 | at the beginning to that effect. | ||
33 | |||
34 | Refer this related thread that I started (where I learnt | ||
35 | `block-stream` was recently reworked to accept 'node-name' for 'top' | ||
36 | and 'base' parameters): | ||
37 | https://lists.nongnu.org/archive/html/qemu-devel/2017-05/msg06466.html | ||
38 | "[RFC] Making 'block-stream', and 'block-commit' accept node-name" | ||
39 | |||
40 | All commands showed in this document were tested while documenting. | ||
41 | |||
42 | Thanks: Eric Blake for the section: "A note on points-in-time vs file | ||
43 | names". This useful bit was originally articulated by Eric in his | ||
44 | KVMForum 2015 presentation, so I included that specific bit in this | ||
45 | document. | ||
46 | |||
47 | Signed-off-by: Kashyap Chamarthy <kchamart@redhat.com> | ||
48 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
49 | Message-id: 20170717105205.32639-3-kchamart@redhat.com | ||
50 | Signed-off-by: Jeff Cody <jcody@redhat.com> | ||
51 | --- | ||
52 | docs/interop/live-block-operations.rst | 1088 ++++++++++++++++++++++++++++++++ | ||
53 | docs/live-block-ops.txt | 72 --- | ||
54 | 2 files changed, 1088 insertions(+), 72 deletions(-) | ||
55 | create mode 100644 docs/interop/live-block-operations.rst | ||
56 | delete mode 100644 docs/live-block-ops.txt | ||
57 | |||
58 | diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst | ||
59 | new file mode 100644 | ||
60 | index XXXXXXX..XXXXXXX | ||
61 | --- /dev/null | ||
62 | +++ b/docs/interop/live-block-operations.rst | ||
63 | @@ -XXX,XX +XXX,XX @@ | ||
64 | +.. | ||
65 | + Copyright (C) 2017 Red Hat Inc. | ||
66 | + | 36 | + |
67 | + This work is licensed under the terms of the GNU GPL, version 2 or | 37 | + /* Allocate buffers on first decompress operation, most images are |
68 | + later. See the COPYING file in the top-level directory. | 38 | + * uncompressed and the memory overhead can be avoided. The buffers |
69 | + | 39 | + * are freed in .bdrv_close(). |
70 | +============================ | 40 | + */ |
71 | +Live Block Device Operations | 41 | + if (!s->cluster_data) { |
72 | +============================ | 42 | + /* one more sector for decompressed data alignment */ |
73 | + | 43 | + s->cluster_data = qemu_try_blockalign(bs->file->bs, |
74 | +QEMU Block Layer currently (as of QEMU 2.9) supports four major kinds of | 44 | + QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512); |
75 | +live block device jobs -- stream, commit, mirror, and backup. These can | 45 | + if (!s->cluster_data) { |
76 | +be used to manipulate disk image chains to accomplish certain tasks, | 46 | + return -ENOMEM; |
77 | +namely: live copy data from backing files into overlays; shorten long | ||
78 | +disk image chains by merging data from overlays into backing files; live | ||
79 | +synchronize data from a disk image chain (including current active disk) | ||
80 | +to another target image; and point-in-time (and incremental) backups of | ||
81 | +a block device. Below is a description of the said block (QMP) | ||
82 | +primitives, and some (non-exhaustive list of) examples to illustrate | ||
83 | +their use. | ||
84 | + | ||
85 | +.. note:: | ||
86 | + The file ``qapi/block-core.json`` in the QEMU source tree has the | ||
87 | + canonical QEMU API (QAPI) schema documentation for the QMP | ||
88 | + primitives discussed here. | ||
89 | + | ||
90 | +.. todo (kashyapc):: Remove the ".. contents::" directive when Sphinx is | ||
91 | + integrated. | ||
92 | + | ||
93 | +.. contents:: | ||
94 | + | ||
95 | +Disk image backing chain notation | ||
96 | +--------------------------------- | ||
97 | + | ||
98 | +A simple disk image chain. (This can be created live using QMP | ||
99 | +``blockdev-snapshot-sync``, or offline via ``qemu-img``):: | ||
100 | + | ||
101 | + (Live QEMU) | ||
102 | + | | ||
103 | + . | ||
104 | + V | ||
105 | + | ||
106 | + [A] <----- [B] | ||
107 | + | ||
108 | + (backing file) (overlay) | ||
109 | + | ||
110 | +The arrow can be read as: Image [A] is the backing file of disk image | ||
111 | +[B]. And live QEMU is currently writing to image [B], consequently, it | ||
112 | +is also referred to as the "active layer". | ||
113 | + | ||
114 | +There are two kinds of terminology that are common when referring to | ||
115 | +files in a disk image backing chain: | ||
116 | + | ||
117 | +(1) Directional: 'base' and 'top'. Given the simple disk image chain | ||
118 | + above, image [A] can be referred to as 'base', and image [B] as | ||
119 | + 'top'. (This terminology can be seen in in QAPI schema file, | ||
120 | + block-core.json.) | ||
121 | + | ||
122 | +(2) Relational: 'backing file' and 'overlay'. Again, taking the same | ||
123 | + simple disk image chain from the above, disk image [A] is referred | ||
124 | + to as the backing file, and image [B] as overlay. | ||
125 | + | ||
126 | + Throughout this document, we will use the relational terminology. | ||
127 | + | ||
128 | +.. important:: | ||
129 | + The overlay files can generally be any format that supports a | ||
130 | + backing file, although QCOW2 is the preferred format and the one | ||
131 | + used in this document. | ||
132 | + | ||
133 | + | ||
134 | +Brief overview of live block QMP primitives | ||
135 | +------------------------------------------- | ||
136 | + | ||
137 | +The following are the four different kinds of live block operations that | ||
138 | +QEMU block layer supports. | ||
139 | + | ||
140 | +(1) ``block-stream``: Live copy of data from backing files into overlay | ||
141 | + files. | ||
142 | + | ||
143 | + .. note:: Once the 'stream' operation has finished, three things to | ||
144 | + note: | ||
145 | + | ||
146 | + (a) QEMU rewrites the backing chain to remove | ||
147 | + reference to the now-streamed and redundant backing | ||
148 | + file; | ||
149 | + | ||
150 | + (b) the streamed file *itself* won't be removed by QEMU, | ||
151 | + and must be explicitly discarded by the user; | ||
152 | + | ||
153 | + (c) the streamed file remains valid -- i.e. further | ||
154 | + overlays can be created based on it. Refer the | ||
155 | + ``block-stream`` section further below for more | ||
156 | + details. | ||
157 | + | ||
158 | +(2) ``block-commit``: Live merge of data from overlay files into backing | ||
159 | + files (with the optional goal of removing the overlay file from the | ||
160 | + chain). Since QEMU 2.0, this includes "active ``block-commit``" | ||
161 | + (i.e. merge the current active layer into the base image). | ||
162 | + | ||
163 | + .. note:: Once the 'commit' operation has finished, there are three | ||
164 | + things to note here as well: | ||
165 | + | ||
166 | + (a) QEMU rewrites the backing chain to remove reference | ||
167 | + to now-redundant overlay images that have been | ||
168 | + committed into a backing file; | ||
169 | + | ||
170 | + (b) the committed file *itself* won't be removed by QEMU | ||
171 | + -- it ought to be manually removed; | ||
172 | + | ||
173 | + (c) however, unlike in the case of ``block-stream``, the | ||
174 | + intermediate images will be rendered invalid -- i.e. | ||
175 | + no more further overlays can be created based on | ||
176 | + them. Refer the ``block-commit`` section further | ||
177 | + below for more details. | ||
178 | + | ||
179 | +(3) ``drive-mirror`` (and ``blockdev-mirror``): Synchronize a running | ||
180 | + disk to another image. | ||
181 | + | ||
182 | +(4) ``drive-backup`` (and ``blockdev-backup``): Point-in-time (live) copy | ||
183 | + of a block device to a destination. | ||
184 | + | ||
185 | + | ||
186 | +.. _`Interacting with a QEMU instance`: | ||
187 | + | ||
188 | +Interacting with a QEMU instance | ||
189 | +-------------------------------- | ||
190 | + | ||
191 | +To show some example invocations of command-line, we will use the | ||
192 | +following invocation of QEMU, with a QMP server running over UNIX | ||
193 | +socket:: | ||
194 | + | ||
195 | + $ ./x86_64-softmmu/qemu-system-x86_64 -display none -nodefconfig \ | ||
196 | + -M q35 -nodefaults -m 512 \ | ||
197 | + -blockdev node-name=node-A,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./a.qcow2 \ | ||
198 | + -device virtio-blk,drive=node-A,id=virtio0 \ | ||
199 | + -monitor stdio -qmp unix:/tmp/qmp-sock,server,nowait | ||
200 | + | ||
201 | +The ``-blockdev`` command-line option, used above, is available from | ||
202 | +QEMU 2.9 onwards. In the above invocation, notice the ``node-name`` | ||
203 | +parameter that is used to refer to the disk image a.qcow2 ('node-A') -- | ||
204 | +this is a cleaner way to refer to a disk image (as opposed to referring | ||
205 | +to it by spelling out file paths). So, we will continue to designate a | ||
206 | +``node-name`` to each further disk image created (either via | ||
207 | +``blockdev-snapshot-sync``, or ``blockdev-add``) as part of the disk | ||
208 | +image chain, and continue to refer to the disks using their | ||
209 | +``node-name`` (where possible, because ``block-commit`` does not yet, as | ||
210 | +of QEMU 2.9, accept ``node-name`` parameter) when performing various | ||
211 | +block operations. | ||
212 | + | ||
213 | +To interact with the QEMU instance launched above, we will use the | ||
214 | +``qmp-shell`` utility (located at: ``qemu/scripts/qmp``, as part of the | ||
215 | +QEMU source directory), which takes key-value pairs for QMP commands. | ||
216 | +Invoke it as below (which will also print out the complete raw JSON | ||
217 | +syntax for reference -- examples in the following sections):: | ||
218 | + | ||
219 | + $ ./qmp-shell -v -p /tmp/qmp-sock | ||
220 | + (QEMU) | ||
221 | + | ||
222 | +.. note:: | ||
223 | + In the event we have to repeat a certain QMP command, we will: for | ||
224 | + the first occurrence of it, show the ``qmp-shell`` invocation, *and* | ||
225 | + the corresponding raw JSON QMP syntax; but for subsequent | ||
226 | + invocations, present just the ``qmp-shell`` syntax, and omit the | ||
227 | + equivalent JSON output. | ||
228 | + | ||
229 | + | ||
230 | +Example disk image chain | ||
231 | +------------------------ | ||
232 | + | ||
233 | +We will use the below disk image chain (and occasionally spelling it | ||
234 | +out where appropriate) when discussing various primitives:: | ||
235 | + | ||
236 | + [A] <-- [B] <-- [C] <-- [D] | ||
237 | + | ||
238 | +Where [A] is the original base image; [B] and [C] are intermediate | ||
239 | +overlay images; image [D] is the active layer -- i.e. live QEMU is | ||
240 | +writing to it. (The rule of thumb is: live QEMU will always be pointing | ||
241 | +to the rightmost image in a disk image chain.) | ||
242 | + | ||
243 | +The above image chain can be created by invoking | ||
244 | +``blockdev-snapshot-sync`` commands as following (which shows the | ||
245 | +creation of overlay image [B]) using the ``qmp-shell`` (our invocation | ||
246 | +also prints the raw JSON invocation of it):: | ||
247 | + | ||
248 | + (QEMU) blockdev-snapshot-sync node-name=node-A snapshot-file=b.qcow2 snapshot-node-name=node-B format=qcow2 | ||
249 | + { | ||
250 | + "execute": "blockdev-snapshot-sync", | ||
251 | + "arguments": { | ||
252 | + "node-name": "node-A", | ||
253 | + "snapshot-file": "b.qcow2", | ||
254 | + "format": "qcow2", | ||
255 | + "snapshot-node-name": "node-B" | ||
256 | + } | ||
257 | + } | ||
258 | + | ||
259 | +Here, "node-A" is the name QEMU internally uses to refer to the base | ||
260 | +image [A] -- it is the backing file, based on which the overlay image, | ||
261 | +[B], is created. | ||
262 | + | ||
263 | +To create the rest of the overlay images, [C], and [D] (omitting the raw | ||
264 | +JSON output for brevity):: | ||
265 | + | ||
266 | + (QEMU) blockdev-snapshot-sync node-name=node-B snapshot-file=c.qcow2 snapshot-node-name=node-C format=qcow2 | ||
267 | + (QEMU) blockdev-snapshot-sync node-name=node-C snapshot-file=d.qcow2 snapshot-node-name=node-D format=qcow2 | ||
268 | + | ||
269 | + | ||
270 | +A note on points-in-time vs file names | ||
271 | +-------------------------------------- | ||
272 | + | ||
273 | +In our disk image chain:: | ||
274 | + | ||
275 | + [A] <-- [B] <-- [C] <-- [D] | ||
276 | + | ||
277 | +We have *three* points in time and an active layer: | ||
278 | + | ||
279 | +- Point 1: Guest state when [B] was created is contained in file [A] | ||
280 | +- Point 2: Guest state when [C] was created is contained in [A] + [B] | ||
281 | +- Point 3: Guest state when [D] was created is contained in | ||
282 | + [A] + [B] + [C] | ||
283 | +- Active layer: Current guest state is contained in [A] + [B] + [C] + | ||
284 | + [D] | ||
285 | + | ||
286 | +Therefore, be aware with naming choices: | ||
287 | + | ||
288 | +- Naming a file after the time it is created is misleading -- the | ||
289 | + guest data for that point in time is *not* contained in that file | ||
290 | + (as explained earlier) | ||
291 | +- Rather, think of files as a *delta* from the backing file | ||
292 | + | ||
293 | + | ||
294 | +Live block streaming --- ``block-stream`` | ||
295 | +----------------------------------------- | ||
296 | + | ||
297 | +The ``block-stream`` command allows you to do live copy data from backing | ||
298 | +files into overlay images. | ||
299 | + | ||
300 | +Given our original example disk image chain from earlier:: | ||
301 | + | ||
302 | + [A] <-- [B] <-- [C] <-- [D] | ||
303 | + | ||
304 | +The disk image chain can be shortened in one of the following different | ||
305 | +ways (not an exhaustive list). | ||
306 | + | ||
307 | +.. _`Case-1`: | ||
308 | + | ||
309 | +(1) Merge everything into the active layer: I.e. copy all contents from | ||
310 | + the base image, [A], and overlay images, [B] and [C], into [D], | ||
311 | + *while* the guest is running. The resulting chain will be a | ||
312 | + standalone image, [D] -- with contents from [A], [B] and [C] merged | ||
313 | + into it (where live QEMU writes go to):: | ||
314 | + | ||
315 | + [D] | ||
316 | + | ||
317 | +.. _`Case-2`: | ||
318 | + | ||
319 | +(2) Taking the same example disk image chain mentioned earlier, merge | ||
320 | + only images [B] and [C] into [D], the active layer. The result will | ||
321 | + be contents of images [B] and [C] will be copied into [D], and the | ||
322 | + backing file pointer of image [D] will be adjusted to point to image | ||
323 | + [A]. The resulting chain will be:: | ||
324 | + | ||
325 | + [A] <-- [D] | ||
326 | + | ||
327 | +.. _`Case-3`: | ||
328 | + | ||
329 | +(3) Intermediate streaming (available since QEMU 2.8): Starting afresh | ||
330 | + with the original example disk image chain, with a total of four | ||
331 | + images, it is possible to copy contents from image [B] into image | ||
332 | + [C]. Once the copy is finished, image [B] can now be (optionally) | ||
333 | + discarded; and the backing file pointer of image [C] will be | ||
334 | + adjusted to point to [A]. I.e. after performing "intermediate | ||
335 | + streaming" of [B] into [C], the resulting image chain will be (where | ||
336 | + live QEMU is writing to [D]):: | ||
337 | + | ||
338 | + [A] <-- [C] <-- [D] | ||
339 | + | ||
340 | + | ||
341 | +QMP invocation for ``block-stream`` | ||
342 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
343 | + | ||
344 | +For `Case-1`_, to merge contents of all the backing files into the | ||
345 | +active layer, where 'node-D' is the current active image (by default | ||
346 | +``block-stream`` will flatten the entire chain); ``qmp-shell`` (and its | ||
347 | +corresponding JSON output):: | ||
348 | + | ||
349 | + (QEMU) block-stream device=node-D job-id=job0 | ||
350 | + { | ||
351 | + "execute": "block-stream", | ||
352 | + "arguments": { | ||
353 | + "device": "node-D", | ||
354 | + "job-id": "job0" | ||
355 | + } | ||
356 | + } | ||
357 | + | ||
358 | +For `Case-2`_, merge contents of the images [B] and [C] into [D], where | ||
359 | +image [D] ends up referring to image [A] as its backing file:: | ||
360 | + | ||
361 | + (QEMU) block-stream device=node-D base-node=node-A job-id=job0 | ||
362 | + | ||
363 | +And for `Case-3`_, of "intermediate" streaming", merge contents of | ||
364 | +images [B] into [C], where [C] ends up referring to [A] as its backing | ||
365 | +image:: | ||
366 | + | ||
367 | + (QEMU) block-stream device=node-C base-node=node-A job-id=job0 | ||
368 | + | ||
369 | +Progress of a ``block-stream`` operation can be monitored via the QMP | ||
370 | +command:: | ||
371 | + | ||
372 | + (QEMU) query-block-jobs | ||
373 | + { | ||
374 | + "execute": "query-block-jobs", | ||
375 | + "arguments": {} | ||
376 | + } | ||
377 | + | ||
378 | + | ||
379 | +Once the ``block-stream`` operation has completed, QEMU will emit an | ||
380 | +event, ``BLOCK_JOB_COMPLETED``. The intermediate overlays remain valid, | ||
381 | +and can now be (optionally) discarded, or retained to create further | ||
382 | +overlays based on them. Finally, the ``block-stream`` jobs can be | ||
383 | +restarted at anytime. | ||
384 | + | ||
385 | + | ||
386 | +Live block commit --- ``block-commit`` | ||
387 | +-------------------------------------- | ||
388 | + | ||
389 | +The ``block-commit`` command lets you merge live data from overlay | ||
390 | +images into backing file(s). Since QEMU 2.0, this includes "live active | ||
391 | +commit" (i.e. it is possible to merge the "active layer", the right-most | ||
392 | +image in a disk image chain where live QEMU will be writing to, into the | ||
393 | +base image). This is analogous to ``block-stream``, but in the opposite | ||
394 | +direction. | ||
395 | + | ||
396 | +Again, starting afresh with our example disk image chain, where live | ||
397 | +QEMU is writing to the right-most image in the chain, [D]:: | ||
398 | + | ||
399 | + [A] <-- [B] <-- [C] <-- [D] | ||
400 | + | ||
401 | +The disk image chain can be shortened in one of the following ways: | ||
402 | + | ||
403 | +.. _`block-commit_Case-1`: | ||
404 | + | ||
405 | +(1) Commit content from only image [B] into image [A]. The resulting | ||
406 | + chain is the following, where image [C] is adjusted to point at [A] | ||
407 | + as its new backing file:: | ||
408 | + | ||
409 | + [A] <-- [C] <-- [D] | ||
410 | + | ||
411 | +(2) Commit content from images [B] and [C] into image [A]. The | ||
412 | + resulting chain, where image [D] is adjusted to point to image [A] | ||
413 | + as its new backing file:: | ||
414 | + | ||
415 | + [A] <-- [D] | ||
416 | + | ||
417 | +.. _`block-commit_Case-3`: | ||
418 | + | ||
419 | +(3) Commit content from images [B], [C], and the active layer [D] into | ||
420 | + image [A]. The resulting chain (in this case, a consolidated single | ||
421 | + image):: | ||
422 | + | ||
423 | + [A] | ||
424 | + | ||
425 | +(4) Commit content from image only image [C] into image [B]. The | ||
426 | + resulting chain:: | ||
427 | + | ||
428 | + [A] <-- [B] <-- [D] | ||
429 | + | ||
430 | +(5) Commit content from image [C] and the active layer [D] into image | ||
431 | + [B]. The resulting chain:: | ||
432 | + | ||
433 | + [A] <-- [B] | ||
434 | + | ||
435 | + | ||
436 | +QMP invocation for ``block-commit`` | ||
437 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
438 | + | ||
439 | +For :ref:`Case-1 <block-commit_Case-1>`, to merge contents only from | ||
440 | +image [B] into image [A], the invocation is as follows:: | ||
441 | + | ||
442 | + (QEMU) block-commit device=node-D base=a.qcow2 top=b.qcow2 job-id=job0 | ||
443 | + { | ||
444 | + "execute": "block-commit", | ||
445 | + "arguments": { | ||
446 | + "device": "node-D", | ||
447 | + "job-id": "job0", | ||
448 | + "top": "b.qcow2", | ||
449 | + "base": "a.qcow2" | ||
450 | + } | ||
451 | + } | ||
452 | + | ||
453 | +Once the above ``block-commit`` operation has completed, a | ||
454 | +``BLOCK_JOB_COMPLETED`` event will be issued, and no further action is | ||
455 | +required. As the end result, the backing file of image [C] is adjusted | ||
456 | +to point to image [A], and the original 4-image chain will end up being | ||
457 | +transformed to:: | ||
458 | + | ||
459 | + [A] <-- [C] <-- [D] | ||
460 | + | ||
461 | +.. note:: | ||
462 | + The intermediate image [B] is invalid (as in: no more further | ||
463 | + overlays based on it can be created). | ||
464 | + | ||
465 | + Reasoning: An intermediate image after a 'stream' operation still | ||
466 | + represents that old point-in-time, and may be valid in that context. | ||
467 | + However, an intermediate image after a 'commit' operation no longer | ||
468 | + represents any point-in-time, and is invalid in any context. | ||
469 | + | ||
470 | + | ||
471 | +However, :ref:`Case-3 <block-commit_Case-3>` (also called: "active | ||
472 | +``block-commit``") is a *two-phase* operation: In the first phase, the | ||
473 | +content from the active overlay, along with the intermediate overlays, | ||
474 | +is copied into the backing file (also called the base image). In the | ||
475 | +second phase, adjust the said backing file as the current active image | ||
476 | +-- possible via issuing the command ``block-job-complete``. Optionally, | ||
477 | +the ``block-commit`` operation can be cancelled by issuing the command | ||
478 | +``block-job-cancel``, but be careful when doing this. | ||
479 | + | ||
480 | +Once the ``block-commit`` operation has completed, the event | ||
481 | +``BLOCK_JOB_READY`` will be emitted, signalling that the synchronization | ||
482 | +has finished. Now the job can be gracefully completed by issuing the | ||
483 | +command ``block-job-complete`` -- until such a command is issued, the | ||
484 | +'commit' operation remains active. | ||
485 | + | ||
486 | +The following is the flow for :ref:`Case-3 <block-commit_Case-3>` to | ||
487 | +convert a disk image chain such as this:: | ||
488 | + | ||
489 | + [A] <-- [B] <-- [C] <-- [D] | ||
490 | + | ||
491 | +Into:: | ||
492 | + | ||
493 | + [A] | ||
494 | + | ||
495 | +Where content from all the subsequent overlays, [B], and [C], including | ||
496 | +the active layer, [D], is committed back to [A] -- which is where live | ||
497 | +QEMU is performing all its current writes. | ||
498 | + | ||
499 | +Start the "active ``block-commit``" operation:: | ||
500 | + | ||
501 | + (QEMU) block-commit device=node-D base=a.qcow2 top=d.qcow2 job-id=job0 | ||
502 | + { | ||
503 | + "execute": "block-commit", | ||
504 | + "arguments": { | ||
505 | + "device": "node-D", | ||
506 | + "job-id": "job0", | ||
507 | + "top": "d.qcow2", | ||
508 | + "base": "a.qcow2" | ||
509 | + } | ||
510 | + } | ||
511 | + | ||
512 | + | ||
513 | +Once the synchronization has completed, the event ``BLOCK_JOB_READY`` will | ||
514 | +be emitted. | ||
515 | + | ||
516 | +Then, optionally query for the status of the active block operations. | ||
517 | +We can see the 'commit' job is now ready to be completed, as indicated | ||
518 | +by the line *"ready": true*:: | ||
519 | + | ||
520 | + (QEMU) query-block-jobs | ||
521 | + { | ||
522 | + "execute": "query-block-jobs", | ||
523 | + "arguments": {} | ||
524 | + } | ||
525 | + { | ||
526 | + "return": [ | ||
527 | + { | ||
528 | + "busy": false, | ||
529 | + "type": "commit", | ||
530 | + "len": 1376256, | ||
531 | + "paused": false, | ||
532 | + "ready": true, | ||
533 | + "io-status": "ok", | ||
534 | + "offset": 1376256, | ||
535 | + "device": "job0", | ||
536 | + "speed": 0 | ||
537 | + } | ||
538 | + ] | ||
539 | + } | ||
540 | + | ||
541 | +Gracefully complete the 'commit' block device job:: | ||
542 | + | ||
543 | + (QEMU) block-job-complete device=job0 | ||
544 | + { | ||
545 | + "execute": "block-job-complete", | ||
546 | + "arguments": { | ||
547 | + "device": "job0" | ||
548 | + } | ||
549 | + } | ||
550 | + { | ||
551 | + "return": {} | ||
552 | + } | ||
553 | + | ||
554 | +Finally, once the above job is completed, an event | ||
555 | +``BLOCK_JOB_COMPLETED`` will be emitted. | ||
556 | + | ||
557 | +.. note:: | ||
558 | + The invocation for rest of the cases (2, 4, and 5), discussed in the | ||
559 | + previous section, is omitted for brevity. | ||
560 | + | ||
561 | + | ||
562 | +Live disk synchronization --- ``drive-mirror`` and ``blockdev-mirror`` | ||
563 | +---------------------------------------------------------------------- | ||
564 | + | ||
565 | +Synchronize a running disk image chain (all or part of it) to a target | ||
566 | +image. | ||
567 | + | ||
568 | +Again, given our familiar disk image chain:: | ||
569 | + | ||
570 | + [A] <-- [B] <-- [C] <-- [D] | ||
571 | + | ||
572 | +The ``drive-mirror`` (and its newer equivalent ``blockdev-mirror``) allows | ||
573 | +you to copy data from the entire chain into a single target image (which | ||
574 | +can be located on a different host). | ||
575 | + | ||
576 | +Once a 'mirror' job has started, there are two possible actions while a | ||
577 | +``drive-mirror`` job is active: | ||
578 | + | ||
579 | +(1) Issuing the command ``block-job-cancel`` after it emits the event | ||
580 | + ``BLOCK_JOB_CANCELLED``: will (after completing synchronization of | ||
581 | + the content from the disk image chain to the target image, [E]) | ||
582 | + create a point-in-time (which is at the time of *triggering* the | ||
583 | + cancel command) copy, contained in image [E], of the entire disk | ||
584 | + image chain (or only the top-most image, depending on the ``sync`` | ||
585 | + mode). | ||
586 | + | ||
587 | +(2) Issuing the command ``block-job-complete`` after it emits the event | ||
588 | + ``BLOCK_JOB_COMPLETED``: will, after completing synchronization of | ||
589 | + the content, adjust the guest device (i.e. live QEMU) to point to | ||
590 | + the target image, and, causing all the new writes from this point on | ||
591 | + to happen there. One use case for this is live storage migration. | ||
592 | + | ||
593 | +About synchronization modes: The synchronization mode determines | ||
594 | +*which* part of the disk image chain will be copied to the target. | ||
595 | +Currently, there are four different kinds: | ||
596 | + | ||
597 | +(1) ``full`` -- Synchronize the content of entire disk image chain to | ||
598 | + the target | ||
599 | + | ||
600 | +(2) ``top`` -- Synchronize only the contents of the top-most disk image | ||
601 | + in the chain to the target | ||
602 | + | ||
603 | +(3) ``none`` -- Synchronize only the new writes from this point on. | ||
604 | + | ||
605 | + .. note:: In the case of ``drive-backup`` (or ``blockdev-backup``), | ||
606 | + the behavior of ``none`` synchronization mode is different. | ||
607 | + Normally, a ``backup`` job consists of two parts: Anything | ||
608 | + that is overwritten by the guest is first copied out to | ||
609 | + the backup, and in the background the whole image is | ||
610 | + copied from start to end. With ``sync=none``, it's only | ||
611 | + the first part. | ||
612 | + | ||
613 | +(4) ``incremental`` -- Synchronize content that is described by the | ||
614 | + dirty bitmap | ||
615 | + | ||
616 | +.. note:: | ||
617 | + Refer to the :doc:`bitmaps` document in the QEMU source | ||
618 | + tree to learn about the detailed workings of the ``incremental`` | ||
619 | + synchronization mode. | ||
620 | + | ||
621 | + | ||
622 | +QMP invocation for ``drive-mirror`` | ||
623 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
624 | + | ||
625 | +To copy the contents of the entire disk image chain, from [A] all the | ||
626 | +way to [D], to a new target (``drive-mirror`` will create the destination | ||
627 | +file, if it doesn't already exist), call it [E]:: | ||
628 | + | ||
629 | + (QEMU) drive-mirror device=node-D target=e.qcow2 sync=full job-id=job0 | ||
630 | + { | ||
631 | + "execute": "drive-mirror", | ||
632 | + "arguments": { | ||
633 | + "device": "node-D", | ||
634 | + "job-id": "job0", | ||
635 | + "target": "e.qcow2", | ||
636 | + "sync": "full" | ||
637 | + } | ||
638 | + } | ||
639 | + | ||
640 | +The ``"sync": "full"``, from the above, means: copy the *entire* chain | ||
641 | +to the destination. | ||
642 | + | ||
643 | +Following the above, querying for active block jobs will show that a | ||
644 | +'mirror' job is "ready" to be completed (and QEMU will also emit an | ||
645 | +event, ``BLOCK_JOB_READY``):: | ||
646 | + | ||
647 | + (QEMU) query-block-jobs | ||
648 | + { | ||
649 | + "execute": "query-block-jobs", | ||
650 | + "arguments": {} | ||
651 | + } | ||
652 | + { | ||
653 | + "return": [ | ||
654 | + { | ||
655 | + "busy": false, | ||
656 | + "type": "mirror", | ||
657 | + "len": 21757952, | ||
658 | + "paused": false, | ||
659 | + "ready": true, | ||
660 | + "io-status": "ok", | ||
661 | + "offset": 21757952, | ||
662 | + "device": "job0", | ||
663 | + "speed": 0 | ||
664 | + } | ||
665 | + ] | ||
666 | + } | ||
667 | + | ||
668 | +And, as noted in the previous section, there are two possible actions | ||
669 | +at this point: | ||
670 | + | ||
671 | +(a) Create a point-in-time snapshot by ending the synchronization. The | ||
672 | + point-in-time is at the time of *ending* the sync. (The result of | ||
673 | + the following being: the target image, [E], will be populated with | ||
674 | + content from the entire chain, [A] to [D]):: | ||
675 | + | ||
676 | + (QEMU) block-job-cancel device=job0 | ||
677 | + { | ||
678 | + "execute": "block-job-cancel", | ||
679 | + "arguments": { | ||
680 | + "device": "job0" | ||
681 | + } | 47 | + } |
682 | + } | 48 | + } |
683 | + | 49 | + if (!s->cluster_cache) { |
684 | +(b) Or, complete the operation and pivot the live QEMU to the target | 50 | + s->cluster_cache = g_malloc(s->cluster_size); |
685 | + copy:: | ||
686 | + | ||
687 | + (QEMU) block-job-complete device=job0 | ||
688 | + | ||
689 | +In either of the above cases, if you once again run the | ||
690 | +``query-block-jobs`` command, there should not be any active block | ||
691 | +operation. | ||
692 | + | ||
693 | +Comparing 'commit' and 'mirror': In both cases, the overlay images | ||
694 | +can be discarded. However, with 'commit', the *existing* base image | ||
695 | +will be modified (by updating it with contents from overlays); while in | ||
696 | +the case of 'mirror', a *new* target image is populated with the data | ||
697 | +from the disk image chain. | ||
698 | + | ||
699 | + | ||
700 | +QMP invocation for live storage migration with ``drive-mirror`` + NBD | ||
701 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
702 | + | ||
703 | +Live storage migration (without shared storage setup) is one of the most | ||
704 | +common use-cases that takes advantage of the ``drive-mirror`` primitive | ||
705 | +and QEMU's built-in Network Block Device (NBD) server. Here's a quick | ||
706 | +walk-through of this setup. | ||
707 | + | ||
708 | +Given the disk image chain:: | ||
709 | + | ||
710 | + [A] <-- [B] <-- [C] <-- [D] | ||
711 | + | ||
712 | +Instead of copying content from the entire chain, synchronize *only* the | ||
713 | +contents of the *top*-most disk image (i.e. the active layer), [D], to a | ||
714 | +target, say, [TargetDisk]. | ||
715 | + | ||
716 | +.. important:: | ||
717 | + The destination host must already have the contents of the backing | ||
718 | + chain, involving images [A], [B], and [C], visible via other means | ||
719 | + -- whether by ``cp``, ``rsync``, or by some storage array-specific | ||
720 | + command. | ||
721 | + | ||
722 | +Sometimes, this is also referred to as "shallow copy" -- because only | ||
723 | +the "active layer", and not the rest of the image chain, is copied to | ||
724 | +the destination. | ||
725 | + | ||
726 | +.. note:: | ||
727 | + In this example, for the sake of simplicity, we'll be using the same | ||
728 | + ``localhost`` as both source and destination. | ||
729 | + | ||
730 | +As noted earlier, on the destination host the contents of the backing | ||
731 | +chain -- from images [A] to [C] -- are already expected to exist in some | ||
732 | +form (e.g. in a file called, ``Contents-of-A-B-C.qcow2``). Now, on the | ||
733 | +destination host, let's create a target overlay image (with the image | ||
734 | +``Contents-of-A-B-C.qcow2`` as its backing file), to which the contents | ||
735 | +of image [D] (from the source QEMU) will be mirrored to:: | ||
736 | + | ||
737 | + $ qemu-img create -f qcow2 -b ./Contents-of-A-B-C.qcow2 \ | ||
738 | + -F qcow2 ./target-disk.qcow2 | ||
739 | + | ||
740 | +And start the destination QEMU (we already have the source QEMU running | ||
741 | +-- discussed in the section: `Interacting with a QEMU instance`_) | ||
742 | +instance, with the following invocation. (As noted earlier, for | ||
743 | +simplicity's sake, the destination QEMU is started on the same host, but | ||
744 | +it could be located elsewhere):: | ||
745 | + | ||
746 | + $ ./x86_64-softmmu/qemu-system-x86_64 -display none -nodefconfig \ | ||
747 | + -M q35 -nodefaults -m 512 \ | ||
748 | + -blockdev node-name=node-TargetDisk,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./target-disk.qcow2 \ | ||
749 | + -device virtio-blk,drive=node-TargetDisk,id=virtio0 \ | ||
750 | + -S -monitor stdio -qmp unix:./qmp-sock2,server,nowait \ | ||
751 | + -incoming tcp:localhost:6666 | ||
752 | + | ||
753 | +Given the disk image chain on source QEMU:: | ||
754 | + | ||
755 | + [A] <-- [B] <-- [C] <-- [D] | ||
756 | + | ||
757 | +On the destination host, it is expected that the contents of the chain | ||
758 | +``[A] <-- [B] <-- [C]`` are *already* present, and therefore copy *only* | ||
759 | +the content of image [D]. | ||
760 | + | ||
761 | +(1) [On *destination* QEMU] As part of the first step, start the | ||
762 | + built-in NBD server on a given host (local host, represented by | ||
763 | + ``::``) and port:: | ||
764 | + | ||
765 | + (QEMU) nbd-server-start addr={"type":"inet","data":{"host":"::","port":"49153"}} | ||
766 | + { | ||
767 | + "execute": "nbd-server-start", | ||
768 | + "arguments": { | ||
769 | + "addr": { | ||
770 | + "data": { | ||
771 | + "host": "::", | ||
772 | + "port": "49153" | ||
773 | + }, | ||
774 | + "type": "inet" | ||
775 | + } | ||
776 | + } | ||
777 | + } | 51 | + } |
778 | + | 52 | + |
779 | +(2) [On *destination* QEMU] And export the destination disk image using | 53 | BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); |
780 | + QEMU's built-in NBD server:: | 54 | ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, |
781 | + | 55 | nb_csectors); |
782 | + (QEMU) nbd-server-add device=node-TargetDisk writable=true | 56 | diff --git a/block/qcow2.c b/block/qcow2.c |
783 | + { | 57 | index XXXXXXX..XXXXXXX 100644 |
784 | + "execute": "nbd-server-add", | 58 | --- a/block/qcow2.c |
785 | + "arguments": { | 59 | +++ b/block/qcow2.c |
786 | + "device": "node-TargetDisk" | 60 | @@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, |
787 | + } | 61 | goto fail; |
788 | + } | 62 | } |
789 | + | 63 | |
790 | +(3) [On *source* QEMU] Then, invoke ``drive-mirror`` (NB: since we're | 64 | - s->cluster_cache = g_malloc(s->cluster_size); |
791 | + running ``drive-mirror`` with ``mode=existing`` (meaning: | 65 | - /* one more sector for decompressed data alignment */ |
792 | + synchronize to a pre-created file, therefore 'existing', file on the | 66 | - s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS |
793 | + target host), with the synchronization mode as 'top' (``"sync": | ||
794 | + "top"``):: | 68 | - if (s->cluster_data == NULL) { |
795 | + | 69 | - error_setg(errp, "Could not allocate temporary cluster buffer"); |
796 | + (QEMU) drive-mirror device=node-D target=nbd:localhost:49153:exportname=node-TargetDisk sync=top mode=existing job-id=job0 | 70 | - ret = -ENOMEM; |
797 | + { | 71 | - goto fail; |
798 | + "execute": "drive-mirror", | 72 | - } |
799 | + "arguments": { | ||
800 | + "device": "node-D", | ||
801 | + "mode": "existing", | ||
802 | + "job-id": "job0", | ||
803 | + "target": "nbd:localhost:49153:exportname=node-TargetDisk", | ||
804 | + "sync": "top" | ||
805 | + } | ||
806 | + } | ||
807 | + | ||
808 | +(4) [On *source* QEMU] Once ``drive-mirror`` copies the entire data, and the | ||
809 | + event ``BLOCK_JOB_READY`` is emitted, issue ``block-job-cancel`` to | ||
810 | + gracefully end the synchronization, from source QEMU:: | ||
811 | + | ||
812 | + (QEMU) block-job-cancel device=job0 | ||
813 | + { | ||
814 | + "execute": "block-job-cancel", | ||
815 | + "arguments": { | ||
816 | + "device": "job0" | ||
817 | + } | ||
818 | + } | ||
819 | + | ||
820 | +(5) [On *destination* QEMU] Then, stop the NBD server:: | ||
821 | + | ||
822 | + (QEMU) nbd-server-stop | ||
823 | + { | ||
824 | + "execute": "nbd-server-stop", | ||
825 | + "arguments": {} | ||
826 | + } | ||
827 | + | ||
828 | +(6) [On *destination* QEMU] Finally, resume the guest vCPUs by issuing the | ||
829 | + QMP command `cont`:: | ||
830 | + | ||
831 | + (QEMU) cont | ||
832 | + { | ||
833 | + "execute": "cont", | ||
834 | + "arguments": {} | ||
835 | + } | ||
836 | + | ||
837 | +.. note:: | ||
838 | + Higher-level libraries (e.g. libvirt) automate the entire above | ||
839 | + process (although note that libvirt does not allow same-host | ||
840 | + migrations to localhost for other reasons). | ||
841 | + | ||
842 | + | ||
843 | +Notes on ``blockdev-mirror`` | ||
844 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
845 | + | ||
846 | +The ``blockdev-mirror`` command is equivalent in core functionality to | ||
847 | +``drive-mirror``, except that it operates at node-level in a BDS graph. | ||
848 | + | ||
849 | +Also: for ``blockdev-mirror``, the 'target' image needs to be explicitly | ||
850 | +created (using ``qemu-img``) and attach it to live QEMU via | ||
851 | +``blockdev-add``, which assigns a name to the to-be created target node. | ||
852 | + | ||
853 | +E.g. the sequence of actions to create a point-in-time backup of an | ||
854 | +entire disk image chain, to a target, using ``blockdev-mirror`` would be: | ||
855 | + | ||
856 | +(0) Create the QCOW2 overlays, to arrive at a backing chain of desired | ||
857 | + depth | ||
858 | + | ||
859 | +(1) Create the target image (using ``qemu-img``), say, ``e.qcow2`` | ||
860 | + | ||
861 | +(2) Attach the above created file (``e.qcow2``), run-time, using | ||
862 | + ``blockdev-add`` to QEMU | ||
863 | + | ||
864 | +(3) Perform ``blockdev-mirror`` (use ``"sync": "full"`` to copy the | ||
865 | + entire chain to the target). And notice the event | ||
866 | + ``BLOCK_JOB_READY`` | ||
867 | + | ||
868 | +(4) Optionally, query for active block jobs, there should be a 'mirror' | ||
869 | + job ready to be completed | ||
870 | + | ||
871 | +(5) Gracefully complete the 'mirror' block device job, and notice the | ||
872 | + event ``BLOCK_JOB_COMPLETED`` | ||
873 | + | ||
874 | +(6) Shutdown the guest by issuing the QMP ``quit`` command so that | ||
875 | + caches are flushed | ||
876 | + | ||
877 | +(7) Then, finally, compare the contents of the disk image chain, and | ||
878 | + the target copy with ``qemu-img compare``. You should notice: | ||
879 | + "Images are identical" | ||
880 | + | ||
881 | + | ||
882 | +QMP invocation for ``blockdev-mirror`` | ||
883 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
884 | + | ||
885 | +Given the disk image chain:: | ||
886 | + | ||
887 | + [A] <-- [B] <-- [C] <-- [D] | ||
888 | + | ||
889 | +To copy the contents of the entire disk image chain, from [A] all the | ||
890 | +way to [D], to a new target, call it [E]. The following is the flow. | ||
891 | + | ||
892 | +Create the overlay images, [B], [C], and [D]:: | ||
893 | + | ||
894 | + (QEMU) blockdev-snapshot-sync node-name=node-A snapshot-file=b.qcow2 snapshot-node-name=node-B format=qcow2 | ||
895 | + (QEMU) blockdev-snapshot-sync node-name=node-B snapshot-file=c.qcow2 snapshot-node-name=node-C format=qcow2 | ||
896 | + (QEMU) blockdev-snapshot-sync node-name=node-C snapshot-file=d.qcow2 snapshot-node-name=node-D format=qcow2 | ||
897 | + | ||
898 | +Create the target image, [E]:: | ||
899 | + | ||
900 | + $ qemu-img create -f qcow2 e.qcow2 39M | ||
901 | + | ||
902 | +Add the above created target image to QEMU, via ``blockdev-add``:: | ||
903 | + | ||
904 | + (QEMU) blockdev-add driver=qcow2 node-name=node-E file={"driver":"file","filename":"e.qcow2"} | ||
905 | + { | ||
906 | + "execute": "blockdev-add", | ||
907 | + "arguments": { | ||
908 | + "node-name": "node-E", | ||
909 | + "driver": "qcow2", | ||
910 | + "file": { | ||
911 | + "driver": "file", | ||
912 | + "filename": "e.qcow2" | ||
913 | + } | ||
914 | + } | ||
915 | + } | ||
916 | + | ||
917 | +Perform ``blockdev-mirror``, and notice the event ``BLOCK_JOB_READY``:: | ||
918 | + | ||
919 | + (QEMU) blockdev-mirror device=node-D target=node-E sync=full job-id=job0 | ||
920 | + { | ||
921 | + "execute": "blockdev-mirror", | ||
922 | + "arguments": { | ||
923 | + "device": "node-D", | ||
924 | + "job-id": "job0", | ||
925 | + "target": "node-E", | ||
926 | + "sync": "full" | ||
927 | + } | ||
928 | + } | ||
929 | + | ||
930 | +Query for active block jobs, there should be a 'mirror' job ready:: | ||
931 | + | ||
932 | + (QEMU) query-block-jobs | ||
933 | + { | ||
934 | + "execute": "query-block-jobs", | ||
935 | + "arguments": {} | ||
936 | + } | ||
937 | + { | ||
938 | + "return": [ | ||
939 | + { | ||
940 | + "busy": false, | ||
941 | + "type": "mirror", | ||
942 | + "len": 21561344, | ||
943 | + "paused": false, | ||
944 | + "ready": true, | ||
945 | + "io-status": "ok", | ||
946 | + "offset": 21561344, | ||
947 | + "device": "job0", | ||
948 | + "speed": 0 | ||
949 | + } | ||
950 | + ] | ||
951 | + } | ||
952 | + | ||
953 | +Gracefully complete the block device job operation, and notice the | ||
954 | +event ``BLOCK_JOB_COMPLETED``:: | ||
955 | + | ||
956 | + (QEMU) block-job-complete device=job0 | ||
957 | + { | ||
958 | + "execute": "block-job-complete", | ||
959 | + "arguments": { | ||
960 | + "device": "job0" | ||
961 | + } | ||
962 | + } | ||
963 | + { | ||
964 | + "return": {} | ||
965 | + } | ||
966 | + | ||
967 | +Shutdown the guest, by issuing the ``quit`` QMP command:: | ||
968 | + | ||
969 | + (QEMU) quit | ||
970 | + { | ||
971 | + "execute": "quit", | ||
972 | + "arguments": {} | ||
973 | + } | ||
974 | + | ||
975 | + | ||
976 | +Live disk backup --- ``drive-backup`` and ``blockdev-backup`` | ||
977 | +------------------------------------------------------------- | ||
978 | + | ||
979 | +The ``drive-backup`` (and its newer equivalent ``blockdev-backup``) allows | ||
980 | +you to create a point-in-time snapshot. | ||
981 | + | ||
982 | +In this case, the point-in-time is when you *start* the ``drive-backup`` | ||
983 | +(or its newer equivalent ``blockdev-backup``) command. | ||
984 | + | ||
985 | + | ||
986 | +QMP invocation for ``drive-backup`` | ||
987 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
988 | + | ||
989 | +Yet again, starting afresh with our example disk image chain:: | ||
990 | + | ||
991 | + [A] <-- [B] <-- [C] <-- [D] | ||
992 | + | ||
993 | +To create a target image [E], with content populated from image [A] to | ||
994 | +[D], from the above chain, the following is the syntax. (If the target | ||
995 | +image does not exist, ``drive-backup`` will create it):: | ||
996 | + | ||
997 | + (QEMU) drive-backup device=node-D sync=full target=e.qcow2 job-id=job0 | ||
998 | + { | ||
999 | + "execute": "drive-backup", | ||
1000 | + "arguments": { | ||
1001 | + "device": "node-D", | ||
1002 | + "job-id": "job0", | ||
1003 | + "sync": "full", | ||
1004 | + "target": "e.qcow2" | ||
1005 | + } | ||
1006 | + } | ||
1007 | + | ||
1008 | +Once the above ``drive-backup`` has completed, a ``BLOCK_JOB_COMPLETED`` event | ||
1009 | +will be issued, indicating the live block device job operation has | ||
1010 | +completed, and no further action is required. | ||
1011 | + | ||
1012 | + | ||
1013 | +Notes on ``blockdev-backup`` | ||
1014 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
1015 | + | ||
1016 | +The ``blockdev-backup`` command is equivalent in functionality to | ||
1017 | +``drive-backup``, except that it operates at node-level in a Block Driver | ||
1018 | +State (BDS) graph. | ||
1019 | + | ||
1020 | +E.g. the sequence of actions to create a point-in-time backup | ||
1021 | +of an entire disk image chain, to a target, using ``blockdev-backup`` | ||
1022 | +would be: | ||
1023 | + | ||
1024 | +(0) Create the QCOW2 overlays, to arrive at a backing chain of desired | ||
1025 | + depth | ||
1026 | + | ||
1027 | +(1) Create the target image (using ``qemu-img``), say, ``e.qcow2`` | ||
1028 | + | ||
1029 | +(2) Attach the above created file (``e.qcow2``), run-time, using | ||
1030 | + ``blockdev-add`` to QEMU | ||
1031 | + | ||
1032 | +(3) Perform ``blockdev-backup`` (use ``"sync": "full"`` to copy the | ||
1033 | + entire chain to the target). And notice the event | ||
1034 | + ``BLOCK_JOB_COMPLETED`` | ||
1035 | + | ||
1036 | +(4) Shutdown the guest, by issuing the QMP ``quit`` command, so that | ||
1037 | + caches are flushed | ||
1038 | + | ||
1039 | +(5) Then, finally, compare the contents of the disk image chain, and | ||
1040 | + the target copy with ``qemu-img compare``. You should notice: | ||
1041 | + "Images are identical" | ||
1042 | + | ||
1043 | +The following section shows an example QMP invocation for | ||
1044 | +``blockdev-backup``. | ||
1045 | + | ||
1046 | +QMP invocation for ``blockdev-backup`` | ||
1047 | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
1048 | + | ||
1049 | +Given a disk image chain of depth 1 where image [B] is the active | ||
1050 | +overlay (live QEMU is writing to it):: | ||
1051 | + | ||
1052 | + [A] <-- [B] | ||
1053 | + | ||
1054 | +The following is the procedure to copy the content from the entire chain | ||
1055 | +to a target image (say, [E]), which has the full content from [A] and | ||
1056 | +[B]. | ||
1057 | + | ||
1058 | +Create the overlay [B]:: | ||
1059 | + | ||
1060 | + (QEMU) blockdev-snapshot-sync node-name=node-A snapshot-file=b.qcow2 snapshot-node-name=node-B format=qcow2 | ||
1061 | + { | ||
1062 | + "execute": "blockdev-snapshot-sync", | ||
1063 | + "arguments": { | ||
1064 | + "node-name": "node-A", | ||
1065 | + "snapshot-file": "b.qcow2", | ||
1066 | + "format": "qcow2", | ||
1067 | + "snapshot-node-name": "node-B" | ||
1068 | + } | ||
1069 | + } | ||
1070 | + | ||
1071 | + | ||
1072 | +Create a target image that will contain the copy:: | ||
1073 | + | ||
1074 | + $ qemu-img create -f qcow2 e.qcow2 39M | ||
1075 | + | ||
1076 | +Then add it to QEMU via ``blockdev-add``:: | ||
1077 | + | ||
1078 | + (QEMU) blockdev-add driver=qcow2 node-name=node-E file={"driver":"file","filename":"e.qcow2"} | ||
1079 | + { | ||
1080 | + "execute": "blockdev-add", | ||
1081 | + "arguments": { | ||
1082 | + "node-name": "node-E", | ||
1083 | + "driver": "qcow2", | ||
1084 | + "file": { | ||
1085 | + "driver": "file", | ||
1086 | + "filename": "e.qcow2" | ||
1087 | + } | ||
1088 | + } | ||
1089 | + } | ||
1090 | + | ||
1091 | +Then invoke ``blockdev-backup`` to copy the contents from the entire | ||
1092 | +image chain, consisting of images [A] and [B] to the target image | ||
1093 | +'e.qcow2':: | ||
1094 | + | ||
1095 | + (QEMU) blockdev-backup device=node-B target=node-E sync=full job-id=job0 | ||
1096 | + { | ||
1097 | + "execute": "blockdev-backup", | ||
1098 | + "arguments": { | ||
1099 | + "device": "node-B", | ||
1100 | + "job-id": "job0", | ||
1101 | + "target": "node-E", | ||
1102 | + "sync": "full" | ||
1103 | + } | ||
1104 | + } | ||
1105 | + | ||
1106 | +Once the above 'backup' operation has completed, the event, | ||
1107 | +``BLOCK_JOB_COMPLETED`` will be emitted, signalling successful | ||
1108 | +completion. | ||
1109 | + | ||
1110 | +Next, query for any active block device jobs (there should be none):: | ||
1111 | + | ||
1112 | + (QEMU) query-block-jobs | ||
1113 | + { | ||
1114 | + "execute": "query-block-jobs", | ||
1115 | + "arguments": {} | ||
1116 | + } | ||
1117 | + | ||
1118 | +Shutdown the guest:: | ||
1119 | + | ||
1120 | + (QEMU) quit | ||
1121 | + { | ||
1122 | + "execute": "quit", | ||
1123 | + "arguments": {} | ||
1124 | + } | ||
1125 | + { | ||
1126 | + "return": {} | ||
1127 | + } | ||
1127 | + | ||
1128 | +.. note:: | ||
1129 | + The above step is really important; if forgotten, an error, "Failed | ||
1130 | + to get shared "write" lock on e.qcow2", will be thrown when you do | ||
1131 | + ``qemu-img compare`` to verify the integrity of the disk image | ||
1132 | + with the backup content. | ||
1133 | + | ||
1134 | + | ||
1135 | +The end result will be the image 'e.qcow2' containing a | ||
1136 | +point-in-time backup of the disk image chain -- i.e. contents from | ||
1137 | +images [A] and [B] at the time the ``blockdev-backup`` command was | ||
1138 | +initiated. | ||
1139 | + | ||
1140 | +One way to confirm the backup disk image contains the identical content | ||
1141 | +with the disk image chain is to compare the backup and the contents of | ||
1142 | +the chain, you should see "Images are identical". (NB: this is assuming | ||
1143 | +QEMU was launched with ``-S`` option, which will not start the CPUs at | ||
1144 | +guest boot up):: | ||
1145 | + | ||
1146 | + $ qemu-img compare b.qcow2 e.qcow2 | ||
1147 | + Warning: Image size mismatch! | ||
1148 | + Images are identical. | ||
1149 | + | ||
1150 | +NOTE: The "Warning: Image size mismatch!" is expected, as we created the | ||
1151 | +target image (e.qcow2) with 39M size. | ||
1152 | diff --git a/docs/live-block-ops.txt b/docs/live-block-ops.txt | ||
1153 | deleted file mode 100644 | ||
1154 | index XXXXXXX..XXXXXXX | ||
1155 | --- a/docs/live-block-ops.txt | ||
1156 | +++ /dev/null | ||
1157 | @@ -XXX,XX +XXX,XX @@ | ||
1158 | -LIVE BLOCK OPERATIONS | ||
1159 | -===================== | ||
1160 | - | 73 | - |
1161 | -High level description of live block operations. Note these are not | 74 | s->cluster_cache_offset = -1; |
1162 | -supported for use with the raw format at the moment. | 75 | s->flags = flags; |
1163 | - | 76 | |
1164 | -Note also that this document is incomplete and it currently only | 77 | @@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, |
1165 | -covers the 'stream' operation. Other operations supported by QEMU such | 78 | if (s->refcount_block_cache) { |
1166 | -as 'commit', 'mirror' and 'backup' are not described here yet. Please | 79 | qcow2_cache_destroy(bs, s->refcount_block_cache); |
1167 | -refer to the qapi/block-core.json file for an overview of those. | 80 | } |
1168 | - | 81 | - g_free(s->cluster_cache); |
1169 | -Snapshot live merge | 82 | - qemu_vfree(s->cluster_data); |
1170 | -=================== | 83 | qcrypto_block_free(s->crypto); |
1171 | - | 84 | qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); |
1172 | -Given a snapshot chain, described in this document in the following | 85 | return ret; |
1173 | -format: | ||
1174 | - | ||
1175 | -[A] <- [B] <- [C] <- [D] <- [E] | ||
1176 | - | ||
1177 | -Where the rightmost object ([E] in the example) described is the current | ||
1178 | -image which the guest OS has write access to. To the left of it is its base | ||
1179 | -image, and so on accordingly until the leftmost image, which has no | ||
1180 | -base. | ||
1181 | - | ||
1182 | -The snapshot live merge operation transforms such a chain into a | ||
1183 | -smaller one with fewer elements, such as this transformation relative | ||
1184 | -to the first example: | ||
1185 | - | ||
1186 | -[A] <- [E] | ||
1187 | - | ||
1188 | -Data is copied in the right direction with destination being the | ||
1189 | -rightmost image, but any other intermediate image can be specified | ||
1190 | -instead. In this example data is copied from [C] into [D], so [D] can | ||
1191 | -be backed by [B]: | ||
1192 | - | ||
1193 | -[A] <- [B] <- [D] <- [E] | ||
1194 | - | ||
1195 | -The operation is implemented in QEMU through image streaming facilities. | ||
1196 | - | ||
1197 | -The basic idea is to execute 'block_stream virtio0' while the guest is | ||
1198 | -running. Progress can be monitored using 'info block-jobs'. When the | ||
1199 | -streaming operation completes it raises a QMP event. 'block_stream' | ||
1200 | -copies data from the backing file(s) into the active image. When finished, | ||
1201 | -it adjusts the backing file pointer. | ||
1202 | - | ||
1203 | -The 'base' parameter specifies an image from which data need not | ||
1204 | -be streamed. This image will be used as the backing file for the | ||
1205 | -destination image when the operation is finished. | ||
1206 | - | ||
1207 | -In the first example above, the command would be: | ||
1208 | - | ||
1209 | -(qemu) block_stream virtio0 file-A.img | ||
1210 | - | ||
1211 | -In order to specify a destination image different from the active | ||
1212 | -(rightmost) one we can use its node name instead. | ||
1213 | - | ||
1214 | -In the second example above, the command would be: | ||
1215 | - | ||
1216 | -(qemu) block_stream node-D file-B.img | ||
1217 | - | ||
1218 | -Live block copy | ||
1219 | -=============== | ||
1220 | - | ||
1221 | -To copy an in use image to another destination in the filesystem, one | ||
1222 | -should create a live snapshot in the desired destination, then stream | ||
1223 | -into that image. Example: | ||
1224 | - | ||
1225 | -(qemu) snapshot_blkdev ide0-hd0 /new-path/disk.img qcow2 | ||
1226 | - | ||
1227 | -(qemu) block_stream ide0-hd0 | ||
1228 | - | ||
1229 | - | ||
1230 | -- | 86 | -- |
1231 | 2.9.4 | 87 | 2.13.5 |
1232 | 88 | ||
1233 | 89 | diff view generated by jsdifflib |