The following changes since commit 248b23735645f7cbb503d9be6f5bf825f2a603ab:

  Update version for v2.10.0-rc4 release (2017-08-24 17:34:26 +0100)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 3e4c705212abfe8c9882a00beb2d1466a8a53cec:

  qcow2: allocate cluster_cache/cluster_data on demand (2017-08-30 18:02:10 +0100)

----------------------------------------------------------------

----------------------------------------------------------------

Alberto Garcia (8):
      throttle: Fix wrong variable name in the header documentation
      throttle: Update the throttle_fix_bucket() documentation
      throttle: Make throttle_is_valid() a bit less verbose
      throttle: Remove throttle_fix_bucket() / throttle_unfix_bucket()
      throttle: Make LeakyBucket.avg and LeakyBucket.max integer types
      throttle: Make burst_length 64bit and add range checks
      throttle: Test the valid range of config values
      misc: Remove unused Error variables

Dan Aloni (1):
      nvme: Fix get/set number of queues feature, again

Eduardo Habkost (1):
      oslib-posix: Print errors before aborting on qemu_alloc_stack()

Fred Rolland (1):
      qemu-doc: Add UUID support in initiator name

Stefan Hajnoczi (4):
      scripts: add argparse module for Python 2.6 compatibility
      docker.py: Python 2.6 argparse compatibility
      tests: migration/guestperf Python 2.6 argparse compatibility
      qcow2: allocate cluster_cache/cluster_data on demand

 include/qemu/throttle.h            |    8 +-
 block/qcow.c                       |   12 +-
 block/qcow2-cluster.c              |   17 +
 block/qcow2.c                      |   20 +-
 dump.c                             |    4 +-
 hw/block/nvme.c                    |    4 +-
 tests/test-throttle.c              |   80 +-
 util/oslib-posix.c                 |    2 +
 util/throttle.c                    |   86 +-
 COPYING.PYTHON                     |  270 ++++
 qemu-doc.texi                      |    5 +-
 scripts/argparse.py                | 2406 ++++++++++++++++++++++++++++++++++++
 tests/docker/docker.py             |    4 +-
 tests/migration/guestperf/shell.py |    8 +-
 14 files changed, 2831 insertions(+), 95 deletions(-)
 create mode 100644 COPYING.PYTHON
 create mode 100644 scripts/argparse.py

--
2.13.5

The following changes since commit 19b599f7664b2ebfd0f405fb79c14dd241557452:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-error-2018-08-27-v2' into staging (2018-08-27 16:44:20 +0100)

are available in the Git repository at:

  https://git.xanclic.moe/XanClic/qemu.git tags/pull-block-2018-08-31-v2

for you to fetch changes up to e21a1c9831fc80ae3f3c1affdfa43350035d8588:

  jobs: remove job_defer_to_main_loop (2018-08-31 16:28:33 +0200)

----------------------------------------------------------------
Block patches:
- (Block) job exit refactoring, part 1
  (removing job_defer_to_main_loop())
- test-bdrv-drain leak fix

----------------------------------------------------------------
John Snow (9):
      jobs: change start callback to run callback
      jobs: canonize Error object
      jobs: add exit shim
      block/commit: utilize job_exit shim
      block/mirror: utilize job_exit shim
      jobs: utilize job_exit shim
      block/backup: make function variables consistently named
      jobs: remove ret argument to job_completed; privatize it
      jobs: remove job_defer_to_main_loop

Marc-André Lureau (1):
      tests: fix bdrv-drain leak

 include/qemu/job.h        | 70 ++++++++++++++++-----------------
 block/backup.c            | 81 ++++++++++++++++-----------------------
 block/commit.c            | 29 +++++---------
 block/create.c            | 19 +++------
 block/mirror.c            | 39 ++++++-----------
 block/stream.c            | 29 ++++++--------
 job-qmp.c                 |  5 ++-
 job.c                     | 73 ++++++++++++-----------------------
 tests/test-bdrv-drain.c   | 14 +++----
 tests/test-blockjob-txn.c | 25 +++-------
 tests/test-blockjob.c     | 17 ++++----
 trace-events              |  2 +-
 12 files changed, 161 insertions(+), 242 deletions(-)

--
2.17.1

Deleted patch
From: Dan Aloni <dan@kernelim.com>

The number of queues returned by the admin command should:

  1) Only mention the number of non-admin queues.
  2) Be zero-based, meaning that '0 == one non-admin queue',
     '1 == two non-admin queues', and so forth.

Because our `num_queues` means the number of queues _plus_ the admin
queue, the right calculation for the number returned from the admin
command is `num_queues - 2`, combining the two requirements mentioned.

The issue was discovered by reducing num_queues from 64 to 8 and running
a Linux VM with an SMP parameter larger than that (e.g. 22). It tries to
utilize all queues, and therefore fails with an invalid queue number
when trying to queue I/Os on the last queue.

Signed-off-by: Dan Aloni <dan@kernelim.com>
CC: Alex Friedman <alex@e8storage.com>
CC: Keith Busch <keith.busch@intel.com>
CC: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/nvme.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         result = blk_enable_write_cache(n->conf.blk);
         break;
     case NVME_NUMBER_OF_QUEUES:
-        result = cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
+        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
         break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         break;
     case NVME_NUMBER_OF_QUEUES:
         req->cqe.result =
-            cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
+            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
         break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
--
2.13.5
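As an illustration of the zero-based encoding described in the message above, the following standalone sketch recomputes the value the patch builds. The helper name is invented for this example, and the cpu_to_le32() byte swap done in the real device model is omitted:

#include <stdint.h>
#include <stdio.h>

/* num_queues counts the admin queue as well, so the zero-based count of
 * non-admin queues reported to the guest is num_queues - 2. */
static uint32_t number_of_queues_result(uint32_t num_queues)
{
    uint32_t zero_based = num_queues - 2;

    return zero_based | (zero_based << 16);
}

int main(void)
{
    /* 8 queues total = 1 admin + 7 I/O queues -> zero-based value 6 */
    printf("0x%08x\n", (unsigned)number_of_queues_result(8));
    return 0;
}

For num_queues = 8 this prints 0x00060006, i.e. a zero-based count of 6 in both the submission-queue and completion-queue halves.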
Deleted patch
From: Alberto Garcia <berto@igalia.com>

The level of the burst bucket is stored in bkt.burst_level, not
bkt.burst_length.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Message-id: 49aab2711d02f285567f3b3b13a113847af33812.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/throttle.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
  * - The bkt.avg rate does not apply until the bucket is full,
  *   allowing the user to do bursts until then. The I/O limit during
  *   bursts is bkt.max. To enforce this limit we keep an additional
- *   bucket in bkt.burst_length that leaks at a rate of bkt.max units
+ *   bucket in bkt.burst_level that leaks at a rate of bkt.max units
  *   per second.
  *
  * - Because of all of the above, the user can perform I/O at a
--
2.13.5
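To make the two-bucket description in the comment above concrete, here is a small compile-and-run model: one level leaks at the average rate (avg) while a second burst level leaks at the burst rate (max). The struct and function names are illustrative stand-ins, not QEMU's LeakyBucket API:

#include <stdio.h>

typedef struct {
    double avg;         /* allowed average rate, in units per second */
    double max;         /* allowed burst rate, in units per second */
    double level;       /* main bucket level, in units */
    double burst_level; /* burst bucket level, in units */
} Bucket;

/* Both buckets leak over time: the main one at the average rate, the
 * burst one at the burst rate. */
static void bucket_leak(Bucket *bkt, double elapsed_seconds)
{
    bkt->level -= bkt->avg * elapsed_seconds;
    bkt->burst_level -= bkt->max * elapsed_seconds;
    if (bkt->level < 0) {
        bkt->level = 0;
    }
    if (bkt->burst_level < 0) {
        bkt->burst_level = 0;
    }
}

int main(void)
{
    Bucket bkt = { .avg = 100, .max = 1000, .level = 250, .burst_level = 250 };

    bucket_leak(&bkt, 0.1); /* 100 ms later */
    printf("level=%.1f burst_level=%.1f\n", bkt.level, bkt.burst_level);
    return 0;
}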
Deleted patch
From: Alberto Garcia <berto@igalia.com>

The way the throttling algorithm works is that requests start being
throttled once the bucket level exceeds the burst limit. When we get
there the bucket leaks at the rate set by the user (bkt->avg), and
that leak rate is what prevents guest I/O from exceeding the desired
limit.

If we don't allow bursts (i.e. bkt->max == 0) then we can start
throttling requests immediately. The problem with keeping the
threshold at 0 is that it only allows one request at a time, and as
soon as there's a bit of I/O from the guest every other request will
be throttled and performance will suffer considerably. That can even
make the guest unable to reach the throttle limit if that limit is
high enough, and that happens regardless of the block scheduler used
by the guest.

Increasing that threshold gives flexibility to the guest, allowing it
to perform short bursts of I/O before being throttled. Increasing the
threshold too much does not make a difference in the long run (because
it's the leak rate that defines the actual throughput) but it does
allow the guest to perform longer initial bursts and exceed the
throttle limit for a short while.

A burst value of bkt->avg / 10 allows the guest to perform 100ms'
worth of I/O at the target rate without being throttled.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 31aae6645f0d1fbf3860fb2b528b757236f0c0a7.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/throttle.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/util/throttle.c b/util/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -XXX,XX +XXX,XX @@ static void throttle_fix_bucket(LeakyBucket *bkt)
     /* zero bucket level */
     bkt->level = bkt->burst_level = 0;

-    /* The following is done to cope with the Linux CFQ block scheduler
-     * which regroup reads and writes by block of 100ms in the guest.
-     * When they are two process one making reads and one making writes cfq
-     * make a pattern looking like the following:
-     * WWWWWWWWWWWRRRRRRRRRRRRRRWWWWWWWWWWWWWwRRRRRRRRRRRRRRRRR
-     * Having a max burst value of 100ms of the average will help smooth the
-     * throttling
-     */
+    /* If bkt->max is 0 we still want to allow short bursts of I/O
+     * from the guest, otherwise every other request will be throttled
+     * and performance will suffer considerably. */
     min = bkt->avg / 10;
     if (bkt->avg && !bkt->max) {
         bkt->max = min;
--
2.13.5
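A minimal sketch of the default-burst rule discussed above, assuming a simplified config struct rather than QEMU's LeakyBucket: when no burst rate is configured, picking avg / 10 allows roughly 100 ms of I/O at the target rate before throttling starts.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t avg; /* target rate, units per second */
    uint64_t max; /* burst rate, units per second; 0 means "not configured" */
} BucketCfg;

static void apply_default_burst(BucketCfg *bkt)
{
    uint64_t min = bkt->avg / 10;

    if (bkt->avg && !bkt->max) {
        bkt->max = min; /* roughly 100 ms worth of I/O at the target rate */
    }
}

int main(void)
{
    BucketCfg bkt = { .avg = 4000, .max = 0 };

    apply_default_burst(&bkt);
    printf("max = %llu\n", (unsigned long long)bkt.max); /* prints 400 */
    return 0;
}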
From: Alberto Garcia <berto@igalia.com>

Use a pointer to the bucket instead of repeating cfg->buckets[i] all
the time. This makes the code more concise and will help us expand the
checks later and save a few line breaks.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 763ffc40a26b17d54cf93f5a999e4656049fcf0c.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/throttle.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

From: Marc-André Lureau <marcandre.lureau@redhat.com>

Spotted by ASAN:

=================================================================
==5378==ERROR: LeakSanitizer: detected memory leaks

Direct leak of 65536 byte(s) in 1 object(s) allocated from:
    #0 0x7f788f83bc48 in malloc (/lib64/libasan.so.5+0xeec48)
    #1 0x7f788c9923c5 in g_malloc (/lib64/libglib-2.0.so.0+0x523c5)
    #2 0x5622a1fe37bc in coroutine_trampoline /home/elmarco/src/qq/util/coroutine-ucontext.c:116
    #3 0x7f788a15d75f in __correctly_grouped_prefixwc (/lib64/libc.so.6+0x4c75f)

(Broken in commit 4c8158e359d.)

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20180809114417.28718-3-marcandre.lureau@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 tests/test-bdrv-drain.c | 1 +
 1 file changed, 1 insertion(+)
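The throttle_is_valid() cleanup described in the first message above boils down to taking a pointer to the array element once per loop iteration. A self-contained sketch of that pattern, with simplified stand-in types and an illustrative limit (not QEMU's THROTTLE_VALUE_MAX), looks like this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKETS_COUNT 6
#define VALUE_MAX 1000000000000LL /* illustrative limit for this sketch */

typedef struct {
    int64_t avg;
    int64_t max;
} Bucket;

typedef struct {
    Bucket buckets[BUCKETS_COUNT];
} Config;

static bool config_is_valid(const Config *cfg)
{
    for (int i = 0; i < BUCKETS_COUNT; i++) {
        const Bucket *bkt = &cfg->buckets[i]; /* alias the element once */

        if (bkt->avg < 0 || bkt->max < 0 ||
            bkt->avg > VALUE_MAX || bkt->max > VALUE_MAX) {
            return false;
        }
        if (bkt->max && bkt->max < bkt->avg) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    Config cfg = { 0 };

    printf("%s\n", config_is_valid(&cfg) ? "valid" : "invalid");
    return 0;
}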
14
diff --git a/util/throttle.c b/util/throttle.c
23
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/util/throttle.c
25
--- a/tests/test-bdrv-drain.c
17
+++ b/util/throttle.c
26
+++ b/tests/test-bdrv-drain.c
18
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
27
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
19
}
28
}
20
29
21
for (i = 0; i < BUCKETS_COUNT; i++) {
30
dbdd->done = true;
22
- if (cfg->buckets[i].avg < 0 ||
31
+ g_free(buffer);
23
- cfg->buckets[i].max < 0 ||
32
}
24
- cfg->buckets[i].avg > THROTTLE_VALUE_MAX ||
33
25
- cfg->buckets[i].max > THROTTLE_VALUE_MAX) {
34
/**
26
+ LeakyBucket *bkt = &cfg->buckets[i];
27
+ if (bkt->avg < 0 || bkt->max < 0 ||
28
+ bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
29
error_setg(errp, "bps/iops/max values must be within [0, %lld]",
30
THROTTLE_VALUE_MAX);
31
return false;
32
}
33
34
- if (!cfg->buckets[i].burst_length) {
35
+ if (!bkt->burst_length) {
36
error_setg(errp, "the burst length cannot be 0");
37
return false;
38
}
39
40
- if (cfg->buckets[i].burst_length > 1 && !cfg->buckets[i].max) {
41
+ if (bkt->burst_length > 1 && !bkt->max) {
42
error_setg(errp, "burst length set without burst rate");
43
return false;
44
}
45
46
- if (cfg->buckets[i].max && !cfg->buckets[i].avg) {
47
+ if (bkt->max && !bkt->avg) {
48
error_setg(errp, "bps_max/iops_max require corresponding"
49
" bps/iops values");
50
return false;
51
}
52
53
- if (cfg->buckets[i].max && cfg->buckets[i].max < cfg->buckets[i].avg) {
54
+ if (bkt->max && bkt->max < bkt->avg) {
55
error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops");
56
return false;
57
}
58
--
35
--
59
2.13.5
36
2.17.1
60
37
61
38
From: Alberto Garcia <berto@igalia.com>

LeakyBucket.burst_length is defined as an unsigned integer, but the
code never checks for overflows; it only makes sure that the value
is not 0.

In practice this means that the user can set something like
throttling.iops-total-max-length=4294967300 even though it is larger
than UINT_MAX, and the final value after casting to unsigned int will
be 4.

This patch changes the data type to uint64_t. This does not increase
the storage size of LeakyBucket, and allows us to assign the value
directly from qemu_opt_get_number() or BlockIOThrottle and then do the
checks directly in throttle_is_valid().

The value of burst_length does not have a specific upper limit,
but since the bucket size is defined by max * burst_length we have
to prevent overflows. Instead of going for UINT64_MAX or something
similar, this patch reuses THROTTLE_VALUE_MAX, which allows I/O bursts
of 1 GiB/s for 10 days in a row.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 1b2e3049803f71cafb2e1fa1be4fb47147a0d398.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---

From: John Snow <jsnow@redhat.com>

Presently we codify the entry point for a job as the "start" callback,
but a more apt name would be "run" to clarify the idea that when this
function returns we consider the job to have "finished," except for
any cleanup which occurs in separate callbacks later.

As part of this clarification, change the signature to include an error
object and a return code. The error pointer is not yet used, and the
return code, while captured, will be overwritten by actions in the
job_completed function.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-2-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
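As a sketch of the new entry-point shape described in the second message above, the following standalone program models a "run" callback that receives the base job pointer plus an error out-parameter, returns an int, and recovers its own state with container_of(). The Job, JobDriver and MyJob types and the const char ** error parameter are simplified assumptions, not the QEMU definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct Job Job;

typedef struct {
    /* was: void (*start)(void *opaque); now an int-returning "run" */
    int (*run)(Job *job, const char **errp);
} JobDriver;

struct Job {
    const JobDriver *driver;
    int ret;
};

typedef struct {
    Job common;
    int iterations;
} MyJob;

static int my_job_run(Job *job, const char **errp)
{
    MyJob *s = container_of(job, MyJob, common);

    (void)errp; /* not used yet, mirroring the commit message */
    return s->iterations > 0 ? 0 : -1;
}

static const JobDriver my_job_driver = {
    .run = my_job_run,
};

int main(void)
{
    MyJob s = { .common = { .driver = &my_job_driver }, .iterations = 3 };

    s.common.ret = s.common.driver->run(&s.common, NULL);
    printf("ret=%d\n", s.common.ret);
    return 0;
}

The converted jobs in the diff below use the same container_of() step in place of the old void *opaque argument.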
26
include/qemu/throttle.h | 2 +-
19
include/qemu/job.h | 2 +-
27
util/throttle.c | 5 +++++
20
block/backup.c | 7 ++++---
28
2 files changed, 6 insertions(+), 1 deletion(-)
21
block/commit.c | 7 ++++---
29
22
block/create.c | 8 +++++---
30
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
23
block/mirror.c | 10 ++++++----
31
index XXXXXXX..XXXXXXX 100644
24
block/stream.c | 7 ++++---
32
--- a/include/qemu/throttle.h
25
job.c | 6 +++---
33
+++ b/include/qemu/throttle.h
26
tests/test-bdrv-drain.c | 7 ++++---
34
@@ -XXX,XX +XXX,XX @@ typedef struct LeakyBucket {
27
tests/test-blockjob-txn.c | 16 ++++++++--------
35
uint64_t max; /* leaky bucket max burst in units */
28
tests/test-blockjob.c | 7 ++++---
36
double level; /* bucket level in units */
29
10 files changed, 43 insertions(+), 34 deletions(-)
37
double burst_level; /* bucket level in units (for computing bursts) */
30
38
- unsigned burst_length; /* max length of the burst period, in seconds */
31
diff --git a/include/qemu/job.h b/include/qemu/job.h
39
+ uint64_t burst_length; /* max length of the burst period, in seconds */
32
index XXXXXXX..XXXXXXX 100644
40
} LeakyBucket;
33
--- a/include/qemu/job.h
41
34
+++ b/include/qemu/job.h
42
/* The following structure is used to configure a ThrottleState
35
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
43
diff --git a/util/throttle.c b/util/throttle.c
36
JobType job_type;
44
index XXXXXXX..XXXXXXX 100644
37
45
--- a/util/throttle.c
38
/** Mandatory: Entrypoint for the Coroutine. */
46
+++ b/util/throttle.c
39
- CoroutineEntry *start;
47
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
40
+ int coroutine_fn (*run)(Job *job, Error **errp);
48
return false;
41
42
/**
43
* If the callback is not NULL, it will be invoked when the job transitions
44
diff --git a/block/backup.c b/block/backup.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/block/backup.c
47
+++ b/block/backup.c
48
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
49
bdrv_dirty_iter_free(dbi);
50
}
51
52
-static void coroutine_fn backup_run(void *opaque)
53
+static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
54
{
55
- BackupBlockJob *job = opaque;
56
+ BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
57
BackupCompleteData *data;
58
BlockDriverState *bs = blk_bs(job->common.blk);
59
int64_t offset, nb_clusters;
60
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)
61
data = g_malloc(sizeof(*data));
62
data->ret = ret;
63
job_defer_to_main_loop(&job->common.job, backup_complete, data);
64
+ return ret;
65
}
66
67
static const BlockJobDriver backup_job_driver = {
68
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver backup_job_driver = {
69
.free = block_job_free,
70
.user_resume = block_job_user_resume,
71
.drain = block_job_drain,
72
- .start = backup_run,
73
+ .run = backup_run,
74
.commit = backup_commit,
75
.abort = backup_abort,
76
.clean = backup_clean,
77
diff --git a/block/commit.c b/block/commit.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/block/commit.c
80
+++ b/block/commit.c
81
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
82
bdrv_unref(top);
83
}
84
85
-static void coroutine_fn commit_run(void *opaque)
86
+static int coroutine_fn commit_run(Job *job, Error **errp)
87
{
88
- CommitBlockJob *s = opaque;
89
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
90
CommitCompleteData *data;
91
int64_t offset;
92
uint64_t delay_ns = 0;
93
@@ -XXX,XX +XXX,XX @@ out:
94
data = g_malloc(sizeof(*data));
95
data->ret = ret;
96
job_defer_to_main_loop(&s->common.job, commit_complete, data);
97
+ return ret;
98
}
99
100
static const BlockJobDriver commit_job_driver = {
101
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
102
.free = block_job_free,
103
.user_resume = block_job_user_resume,
104
.drain = block_job_drain,
105
- .start = commit_run,
106
+ .run = commit_run,
107
},
108
};
109
110
diff --git a/block/create.c b/block/create.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/block/create.c
113
+++ b/block/create.c
114
@@ -XXX,XX +XXX,XX @@ static void blockdev_create_complete(Job *job, void *opaque)
115
job_completed(job, s->ret, s->err);
116
}
117
118
-static void coroutine_fn blockdev_create_run(void *opaque)
119
+static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
120
{
121
- BlockdevCreateJob *s = opaque;
122
+ BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
123
124
job_progress_set_remaining(&s->common, 1);
125
s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
126
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blockdev_create_run(void *opaque)
127
128
qapi_free_BlockdevCreateOptions(s->opts);
129
job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
130
+
131
+ return s->ret;
132
}
133
134
static const JobDriver blockdev_create_job_driver = {
135
.instance_size = sizeof(BlockdevCreateJob),
136
.job_type = JOB_TYPE_CREATE,
137
- .start = blockdev_create_run,
138
+ .run = blockdev_create_run,
139
};
140
141
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
142
diff --git a/block/mirror.c b/block/mirror.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/block/mirror.c
145
+++ b/block/mirror.c
146
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
147
return ret;
148
}
149
150
-static void coroutine_fn mirror_run(void *opaque)
151
+static int coroutine_fn mirror_run(Job *job, Error **errp)
152
{
153
- MirrorBlockJob *s = opaque;
154
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
155
MirrorExitData *data;
156
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
157
BlockDriverState *target_bs = blk_bs(s->target);
158
@@ -XXX,XX +XXX,XX @@ immediate_exit:
159
if (need_drain) {
160
bdrv_drained_begin(bs);
161
}
162
+
163
job_defer_to_main_loop(&s->common.job, mirror_exit, data);
164
+ return ret;
165
}
166
167
static void mirror_complete(Job *job, Error **errp)
168
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
169
.free = block_job_free,
170
.user_resume = block_job_user_resume,
171
.drain = block_job_drain,
172
- .start = mirror_run,
173
+ .run = mirror_run,
174
.pause = mirror_pause,
175
.complete = mirror_complete,
176
},
177
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
178
.free = block_job_free,
179
.user_resume = block_job_user_resume,
180
.drain = block_job_drain,
181
- .start = mirror_run,
182
+ .run = mirror_run,
183
.pause = mirror_pause,
184
.complete = mirror_complete,
185
},
186
diff --git a/block/stream.c b/block/stream.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/block/stream.c
189
+++ b/block/stream.c
190
@@ -XXX,XX +XXX,XX @@ out:
191
g_free(data);
192
}
193
194
-static void coroutine_fn stream_run(void *opaque)
195
+static int coroutine_fn stream_run(Job *job, Error **errp)
196
{
197
- StreamBlockJob *s = opaque;
198
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
199
StreamCompleteData *data;
200
BlockBackend *blk = s->common.blk;
201
BlockDriverState *bs = blk_bs(blk);
202
@@ -XXX,XX +XXX,XX @@ out:
203
data = g_malloc(sizeof(*data));
204
data->ret = ret;
205
job_defer_to_main_loop(&s->common.job, stream_complete, data);
206
+ return ret;
207
}
208
209
static const BlockJobDriver stream_job_driver = {
210
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
211
.instance_size = sizeof(StreamBlockJob),
212
.job_type = JOB_TYPE_STREAM,
213
.free = block_job_free,
214
- .start = stream_run,
215
+ .run = stream_run,
216
.user_resume = block_job_user_resume,
217
.drain = block_job_drain,
218
},
219
diff --git a/job.c b/job.c
220
index XXXXXXX..XXXXXXX 100644
221
--- a/job.c
222
+++ b/job.c
223
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
224
{
225
Job *job = opaque;
226
227
- assert(job && job->driver && job->driver->start);
228
+ assert(job && job->driver && job->driver->run);
229
job_pause_point(job);
230
- job->driver->start(job);
231
+ job->ret = job->driver->run(job, NULL);
232
}
233
234
235
void job_start(Job *job)
236
{
237
assert(job && !job_started(job) && job->paused &&
238
- job->driver && job->driver->start);
239
+ job->driver && job->driver->run);
240
job->co = qemu_coroutine_create(job_co_entry, job);
241
job->pause_count--;
242
job->busy = true;
243
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
244
index XXXXXXX..XXXXXXX 100644
245
--- a/tests/test-bdrv-drain.c
246
+++ b/tests/test-bdrv-drain.c
247
@@ -XXX,XX +XXX,XX @@ static void test_job_completed(Job *job, void *opaque)
248
job_completed(job, 0, NULL);
249
}
250
251
-static void coroutine_fn test_job_start(void *opaque)
252
+static int coroutine_fn test_job_run(Job *job, Error **errp)
253
{
254
- TestBlockJob *s = opaque;
255
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
256
257
job_transition_to_ready(&s->common.job);
258
while (!s->should_complete) {
259
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_job_start(void *opaque)
260
}
261
262
job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
263
+ return 0;
264
}
265
266
static void test_job_complete(Job *job, Error **errp)
267
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
268
.free = block_job_free,
269
.user_resume = block_job_user_resume,
270
.drain = block_job_drain,
271
- .start = test_job_start,
272
+ .run = test_job_run,
273
.complete = test_job_complete,
274
},
275
};
276
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
277
index XXXXXXX..XXXXXXX 100644
278
--- a/tests/test-blockjob-txn.c
279
+++ b/tests/test-blockjob-txn.c
280
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
281
bdrv_unref(bs);
282
}
283
284
-static void coroutine_fn test_block_job_run(void *opaque)
285
+static int coroutine_fn test_block_job_run(Job *job, Error **errp)
286
{
287
- TestBlockJob *s = opaque;
288
- BlockJob *job = &s->common;
289
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
290
291
while (s->iterations--) {
292
if (s->use_timer) {
293
- job_sleep_ns(&job->job, 0);
294
+ job_sleep_ns(job, 0);
295
} else {
296
- job_yield(&job->job);
297
+ job_yield(job);
49
}
298
}
50
299
51
+ if (bkt->max && bkt->burst_length > THROTTLE_VALUE_MAX / bkt->max) {
300
- if (job_is_cancelled(&job->job)) {
52
+ error_setg(errp, "burst length too high for this burst rate");
301
+ if (job_is_cancelled(job)) {
53
+ return false;
302
break;
54
+ }
303
}
55
+
304
}
56
if (bkt->max && !bkt->avg) {
305
57
error_setg(errp, "bps_max/iops_max require corresponding"
306
- job_defer_to_main_loop(&job->job, test_block_job_complete,
58
" bps/iops values");
307
+ job_defer_to_main_loop(job, test_block_job_complete,
308
(void *)(intptr_t)s->rc);
309
+ return s->rc;
310
}
311
312
typedef struct {
313
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
314
.free = block_job_free,
315
.user_resume = block_job_user_resume,
316
.drain = block_job_drain,
317
- .start = test_block_job_run,
318
+ .run = test_block_job_run,
319
},
320
};
321
322
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
323
index XXXXXXX..XXXXXXX 100644
324
--- a/tests/test-blockjob.c
325
+++ b/tests/test-blockjob.c
326
@@ -XXX,XX +XXX,XX @@ static void cancel_job_complete(Job *job, Error **errp)
327
s->should_complete = true;
328
}
329
330
-static void coroutine_fn cancel_job_start(void *opaque)
331
+static int coroutine_fn cancel_job_run(Job *job, Error **errp)
332
{
333
- CancelJob *s = opaque;
334
+ CancelJob *s = container_of(job, CancelJob, common.job);
335
336
while (!s->should_complete) {
337
if (job_is_cancelled(&s->common.job)) {
338
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn cancel_job_start(void *opaque)
339
340
defer:
341
job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
342
+ return 0;
343
}
344
345
static const BlockJobDriver test_cancel_driver = {
346
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
347
.free = block_job_free,
348
.user_resume = block_job_user_resume,
349
.drain = block_job_drain,
350
- .start = cancel_job_start,
351
+ .run = cancel_job_run,
352
.complete = cancel_job_complete,
353
},
354
};
59
--
355
--
60
2.13.5
356
2.17.1
61
357
62
358
From: Fred Rolland <rollandf@gmail.com>

Update doc with the usage of UUID for initiator name.

Related-To: https://bugzilla.redhat.com/1006468
Signed-off-by: Fred Rolland <frolland@redhat.com>
Message-id: 20170823084830.30500-1-frolland@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---

From: John Snow <jsnow@redhat.com>

Jobs presently use both an Error object in the case of the create job,
and char strings in the case of generic errors elsewhere.

Unify the two paths as just j->err, and remove the extra argument from
job_completed. The integer error code for job_completed is kept for now,
to be removed shortly in a separate patch.

Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-3-jsnow@redhat.com
[mreitz: Dropped a superfluous g_strdup()]
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
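A minimal model of the canonicalization described in the second message above, assuming a plain string in place of QEMU's Error object: if the job failed without recording an error, one is synthesized from the negative return code.

#include <errno.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    int ret;
    const char *err; /* stand-in for an Error object */
} Job;

/* If the job failed but no error was recorded, synthesize one from the
 * negative return code (the real code builds a proper error object). */
static void job_update_error(Job *job)
{
    if (job->ret && !job->err) {
        job->err = strerror(-job->ret);
    }
}

int main(void)
{
    Job job = { .ret = -ENOMEM, .err = NULL };

    job_update_error(&job);
    printf("ret=%d err=%s\n", job.ret, job.err);
    return 0;
}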
10
qemu-doc.texi | 5 +++--
16
include/qemu/job.h | 14 ++++++++------
11
1 file changed, 3 insertions(+), 2 deletions(-)
17
block/backup.c | 2 +-
12
18
block/commit.c | 2 +-
13
diff --git a/qemu-doc.texi b/qemu-doc.texi
19
block/create.c | 5 ++---
14
index XXXXXXX..XXXXXXX 100644
20
block/mirror.c | 2 +-
15
--- a/qemu-doc.texi
21
block/stream.c | 2 +-
16
+++ b/qemu-doc.texi
22
job-qmp.c | 5 +++--
17
@@ -XXX,XX +XXX,XX @@ in a configuration file provided via '-readconfig' or directly on the
23
job.c | 18 ++++++------------
18
command line.
24
tests/test-bdrv-drain.c | 2 +-
19
25
tests/test-blockjob-txn.c | 2 +-
20
If the initiator-name is not specified qemu will use a default name
26
tests/test-blockjob.c | 2 +-
21
-of 'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the
27
11 files changed, 26 insertions(+), 30 deletions(-)
22
+of 'iqn.2008-11.org.linux-kvm[:<uuid>'] where <uuid> is the UUID of the
28
23
+virtual machine. If the UUID is not specified qemu will use
29
diff --git a/include/qemu/job.h b/include/qemu/job.h
24
+'iqn.2008-11.org.linux-kvm[:<name>'] where <name> is the name of the
30
index XXXXXXX..XXXXXXX 100644
25
virtual machine.
31
--- a/include/qemu/job.h
26
32
+++ b/include/qemu/job.h
33
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
34
/** Estimated progress_current value at the completion of the job */
35
int64_t progress_total;
36
37
- /** Error string for a failed job (NULL if, and only if, job->ret == 0) */
38
- char *error;
27
-
39
-
28
@example
40
/** ret code passed to job_completed. */
29
Setting a specific initiator name to use when logging in to the target
41
int ret;
30
-iscsi initiator-name=iqn.qemu.test:my-initiator
42
43
+ /**
44
+ * Error object for a failed job.
45
+ * If job->ret is nonzero and an error object was not set, it will be set
46
+ * to strerror(-job->ret) during job_completed.
47
+ */
48
+ Error *err;
49
+
50
/** The completion function that will be called when the job completes. */
51
BlockCompletionFunc *cb;
52
53
@@ -XXX,XX +XXX,XX @@ void job_transition_to_ready(Job *job);
54
/**
55
* @job: The job being completed.
56
* @ret: The status code.
57
- * @error: The error message for a failing job (only with @ret < 0). If @ret is
58
- * negative, but NULL is given for @error, strerror() is used.
59
*
60
* Marks @job as completed. If @ret is non-zero, the job transaction it is part
61
* of is aborted. If @ret is zero, the job moves into the WAITING state. If it
62
* is the last job to complete in its transaction, all jobs in the transaction
63
* move from WAITING to PENDING.
64
*/
65
-void job_completed(Job *job, int ret, Error *error);
66
+void job_completed(Job *job, int ret);
67
68
/** Asynchronously complete the specified @job. */
69
void job_complete(Job *job, Error **errp);
70
diff --git a/block/backup.c b/block/backup.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/backup.c
73
+++ b/block/backup.c
74
@@ -XXX,XX +XXX,XX @@ static void backup_complete(Job *job, void *opaque)
75
{
76
BackupCompleteData *data = opaque;
77
78
- job_completed(job, data->ret, NULL);
79
+ job_completed(job, data->ret);
80
g_free(data);
81
}
82
83
diff --git a/block/commit.c b/block/commit.c
84
index XXXXXXX..XXXXXXX 100644
85
--- a/block/commit.c
86
+++ b/block/commit.c
87
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
88
* bdrv_set_backing_hd() to fail. */
89
block_job_remove_all_bdrv(bjob);
90
91
- job_completed(job, ret, NULL);
92
+ job_completed(job, ret);
93
g_free(data);
94
95
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
96
diff --git a/block/create.c b/block/create.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/block/create.c
99
+++ b/block/create.c
100
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevCreateJob {
101
BlockDriver *drv;
102
BlockdevCreateOptions *opts;
103
int ret;
104
- Error *err;
105
} BlockdevCreateJob;
106
107
static void blockdev_create_complete(Job *job, void *opaque)
108
{
109
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
110
111
- job_completed(job, s->ret, s->err);
112
+ job_completed(job, s->ret);
113
}
114
115
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
116
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
117
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
118
119
job_progress_set_remaining(&s->common, 1);
120
- s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
121
+ s->ret = s->drv->bdrv_co_create(s->opts, errp);
122
job_progress_update(&s->common, 1);
123
124
qapi_free_BlockdevCreateOptions(s->opts);
125
diff --git a/block/mirror.c b/block/mirror.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/block/mirror.c
128
+++ b/block/mirror.c
129
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
130
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
131
132
bs_opaque->job = NULL;
133
- job_completed(job, data->ret, NULL);
134
+ job_completed(job, data->ret);
135
136
g_free(data);
137
bdrv_drained_end(src);
138
diff --git a/block/stream.c b/block/stream.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/block/stream.c
141
+++ b/block/stream.c
142
@@ -XXX,XX +XXX,XX @@ out:
143
}
144
145
g_free(s->backing_file_str);
146
- job_completed(job, data->ret, NULL);
147
+ job_completed(job, data->ret);
148
g_free(data);
149
}
150
151
diff --git a/job-qmp.c b/job-qmp.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/job-qmp.c
154
+++ b/job-qmp.c
155
@@ -XXX,XX +XXX,XX @@ static JobInfo *job_query_single(Job *job, Error **errp)
156
.status = job->status,
157
.current_progress = job->progress_current,
158
.total_progress = job->progress_total,
159
- .has_error = !!job->error,
160
- .error = g_strdup(job->error),
161
+ .has_error = !!job->err,
162
+ .error = job->err ? \
163
+ g_strdup(error_get_pretty(job->err)) : NULL,
164
};
165
166
return info;
167
diff --git a/job.c b/job.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/job.c
170
+++ b/job.c
171
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
172
173
QLIST_REMOVE(job, job_list);
174
175
- g_free(job->error);
176
+ error_free(job->err);
177
g_free(job->id);
178
g_free(job);
179
}
180
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
181
182
assert(job && job->driver && job->driver->run);
183
job_pause_point(job);
184
- job->ret = job->driver->run(job, NULL);
185
+ job->ret = job->driver->run(job, &job->err);
186
}
187
188
189
@@ -XXX,XX +XXX,XX @@ static void job_update_rc(Job *job)
190
job->ret = -ECANCELED;
191
}
192
if (job->ret) {
193
- if (!job->error) {
194
- job->error = g_strdup(strerror(-job->ret));
195
+ if (!job->err) {
196
+ error_setg(&job->err, "%s", strerror(-job->ret));
197
}
198
job_state_transition(job, JOB_STATUS_ABORTING);
199
}
200
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
201
}
202
}
203
204
-void job_completed(Job *job, int ret, Error *error)
205
+void job_completed(Job *job, int ret)
206
{
207
assert(job && job->txn && !job_is_completed(job));
208
209
job->ret = ret;
210
- if (error) {
211
- assert(job->ret < 0);
212
- job->error = g_strdup(error_get_pretty(error));
213
- error_free(error);
214
- }
215
-
216
job_update_rc(job);
217
trace_job_completed(job, ret, job->ret);
218
if (job->ret) {
219
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
220
}
221
job_cancel_async(job, force);
222
if (!job_started(job)) {
223
- job_completed(job, -ECANCELED, NULL);
224
+ job_completed(job, -ECANCELED);
225
} else if (job->deferred_to_main_loop) {
226
job_completed_txn_abort(job);
227
} else {
228
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
229
index XXXXXXX..XXXXXXX 100644
230
--- a/tests/test-bdrv-drain.c
231
+++ b/tests/test-bdrv-drain.c
232
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
233
234
static void test_job_completed(Job *job, void *opaque)
235
{
236
- job_completed(job, 0, NULL);
237
+ job_completed(job, 0);
238
}
239
240
static int coroutine_fn test_job_run(Job *job, Error **errp)
241
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/tests/test-blockjob-txn.c
244
+++ b/tests/test-blockjob-txn.c
245
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
246
rc = -ECANCELED;
247
}
248
249
- job_completed(job, rc, NULL);
250
+ job_completed(job, rc);
251
bdrv_unref(bs);
252
}
253
254
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/tests/test-blockjob.c
257
+++ b/tests/test-blockjob.c
258
@@ -XXX,XX +XXX,XX @@ static void cancel_job_completed(Job *job, void *opaque)
259
{
260
CancelJob *s = opaque;
261
s->completed = true;
262
- job_completed(job, 0, NULL);
263
+ job_completed(job, 0);
264
}
265
266
static void cancel_job_complete(Job *job, Error **errp)
31
--
267
--
32
2.13.5
268
2.17.1
33
269
34
270
From: Alberto Garcia <berto@igalia.com>

There are a few cases where we're passing an Error pointer to a function
only to discard it immediately afterwards without checking it. In
these cases we can simply remove the variable and pass NULL instead.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20170829120836.16091-1-berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/qcow.c  | 12 +++---------
 block/qcow2.c |  8 ++------
 dump.c        |  4 +---
 3 files changed, 6 insertions(+), 18 deletions(-)

From: John Snow <jsnow@redhat.com>

All jobs do the same thing when they leave their running loop:
- Store the return code in a structure
- wait to receive this structure in the main thread
- signal job completion via job_completed

Few jobs do anything beyond exactly this. Consolidate this exit
logic for a net reduction in SLOC.

More seriously, when we utilize job_defer_to_main_loop_bh to call
a function that calls job_completed, job_finalize_single will run
in a context where it has recursively taken the aio_context lock,
which can cause hangs if it puts down a reference that causes a flush.

You can observe this in practice by looking at mirror_exit's careful
placement of job_completed and bdrv_unref calls.

If we centralize job exiting, we can signal job completion from outside
of the aio_context, which should allow for job cleanup code to run with
only one lock, which makes cleanup callbacks less tricky to write.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-4-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 include/qemu/job.h | 11 +++++++++++
 job.c              | 18 ++++++++++++++++++
 2 files changed, 29 insertions(+)
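The following toy program models the exit shim described in the second message above under heavy simplification: the job records its return code, schedules a single deferred callback, and that callback optionally invokes a driver-specific exit hook before signalling completion. The one-slot callback queue stands in for QEMU's main-loop bottom-half machinery and is purely an assumption of this sketch:

#include <stdbool.h>
#include <stdio.h>

typedef struct Job Job;

typedef struct {
    void (*exit)(Job *job); /* optional, runs from the deferred callback */
} JobDriver;

struct Job {
    const JobDriver *driver;
    int ret;
    bool deferred;
};

/* Toy one-slot "main loop": stands in for a real bottom-half queue. */
static void (*pending_cb)(void *opaque);
static void *pending_opaque;

static void schedule_oneshot(void (*cb)(void *opaque), void *opaque)
{
    pending_cb = cb;
    pending_opaque = opaque;
}

static void job_completed(Job *job)
{
    printf("completed, ret=%d\n", job->ret);
}

static void job_exit(void *opaque)
{
    Job *job = opaque;

    if (job->driver->exit) {
        job->driver->exit(job); /* per-job cleanup runs here, once */
    }
    job_completed(job);
}

/* Called once when the job's work function returns. */
static void job_work_done(Job *job, int ret)
{
    job->ret = ret;
    if (!job->deferred) {
        job->deferred = true;
        schedule_oneshot(job_exit, job);
    }
}

static void my_exit(Job *job)
{
    printf("exit hook, ret=%d\n", job->ret);
}

static const JobDriver my_driver = { .exit = my_exit };

int main(void)
{
    Job job = { .driver = &my_driver };

    job_work_done(&job, 0);
    pending_cb(pending_opaque); /* the "main loop" runs the deferred exit */
    return 0;
}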
18
diff --git a/block/qcow.c b/block/qcow.c
33
diff --git a/include/qemu/job.h b/include/qemu/job.h
19
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
20
--- a/block/qcow.c
35
--- a/include/qemu/job.h
21
+++ b/block/qcow.c
36
+++ b/include/qemu/job.h
22
@@ -XXX,XX +XXX,XX @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
37
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
23
start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
38
*/
24
for(i = 0; i < s->cluster_sectors; i++) {
39
void (*drain)(Job *job);
25
if (i < n_start || i >= n_end) {
40
26
- Error *err = NULL;
41
+ /**
27
memset(s->cluster_data, 0x00, 512);
42
+ * If the callback is not NULL, exit will be invoked from the main thread
28
if (qcrypto_block_encrypt(s->crypto, start_sect + i,
43
+ * when the job's coroutine has finished, but before transactional
29
s->cluster_data,
44
+ * convergence; before @prepare or @abort.
30
BDRV_SECTOR_SIZE,
45
+ *
31
- &err) < 0) {
46
+ * FIXME TODO: This callback is only temporary to transition remaining jobs
32
- error_free(err);
47
+ * to prepare/commit/abort/clean callbacks and will be removed before 3.1.
33
+ NULL) < 0) {
48
+ * is released.
34
errno = EIO;
49
+ */
35
return -1;
50
+ void (*exit)(Job *job);
36
}
51
+
37
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
52
/**
38
QEMUIOVector hd_qiov;
53
* If the callback is not NULL, prepare will be invoked when all the jobs
39
uint8_t *buf;
54
* belonging to the same transaction complete; or upon this job's completion
40
void *orig_buf;
55
diff --git a/job.c b/job.c
41
- Error *err = NULL;
56
index XXXXXXX..XXXXXXX 100644
42
57
--- a/job.c
43
if (qiov->niov > 1) {
58
+++ b/job.c
44
buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
59
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
45
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
60
}
46
if (bs->encrypted) {
47
assert(s->crypto);
48
if (qcrypto_block_decrypt(s->crypto, sector_num, buf,
49
- n * BDRV_SECTOR_SIZE, &err) < 0) {
50
+ n * BDRV_SECTOR_SIZE, NULL) < 0) {
51
goto fail;
52
}
53
}
54
@@ -XXX,XX +XXX,XX @@ done:
55
return ret;
56
57
fail:
58
- error_free(err);
59
ret = -EIO;
60
goto done;
61
}
61
}
62
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
62
63
break;
63
+static void job_exit(void *opaque)
64
}
64
+{
65
if (bs->encrypted) {
65
+ Job *job = (Job *)opaque;
66
- Error *err = NULL;
66
+ AioContext *aio_context = job->aio_context;
67
assert(s->crypto);
67
+
68
if (qcrypto_block_encrypt(s->crypto, sector_num, buf,
68
+ if (job->driver->exit) {
69
- n * BDRV_SECTOR_SIZE, &err) < 0) {
69
+ aio_context_acquire(aio_context);
70
- error_free(err);
70
+ job->driver->exit(job);
71
+ n * BDRV_SECTOR_SIZE, NULL) < 0) {
71
+ aio_context_release(aio_context);
72
ret = -EIO;
72
+ }
73
break;
73
+ job_completed(job, job->ret);
74
}
74
+}
75
diff --git a/block/qcow2.c b/block/qcow2.c
75
76
index XXXXXXX..XXXXXXX 100644
76
/**
77
--- a/block/qcow2.c
77
* All jobs must allow a pause point before entering their job proper. This
78
+++ b/block/qcow2.c
78
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
79
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
79
assert(job && job->driver && job->driver->run);
80
assert(s->crypto);
80
job_pause_point(job);
81
assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
81
job->ret = job->driver->run(job, &job->err);
82
assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
82
+ if (!job->deferred_to_main_loop) {
83
- Error *err = NULL;
83
+ job->deferred_to_main_loop = true;
84
if (qcrypto_block_decrypt(s->crypto,
84
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
85
(s->crypt_physical_offset ?
85
+ job_exit,
86
cluster_offset + offset_in_cluster :
86
+ job);
87
offset) >> BDRV_SECTOR_BITS,
87
+ }
88
cluster_data,
89
cur_bytes,
90
- &err) < 0) {
91
- error_free(err);
92
+ NULL) < 0) {
93
ret = -EIO;
94
goto fail;
95
}
96
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
97
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
98
99
if (bs->encrypted) {
100
- Error *err = NULL;
101
assert(s->crypto);
102
if (!cluster_data) {
103
cluster_data = qemu_try_blockalign(bs->file->bs,
104
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
105
cluster_offset + offset_in_cluster :
106
offset) >> BDRV_SECTOR_BITS,
107
cluster_data,
108
- cur_bytes, &err) < 0) {
109
- error_free(err);
110
+ cur_bytes, NULL) < 0) {
111
ret = -EIO;
112
goto fail;
113
}
114
diff --git a/dump.c b/dump.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/dump.c
117
+++ b/dump.c
118
@@ -XXX,XX +XXX,XX @@ static void dump_process(DumpState *s, Error **errp)
119
120
static void *dump_thread(void *data)
121
{
122
- Error *err = NULL;
123
DumpState *s = (DumpState *)data;
124
- dump_process(s, &err);
125
- error_free(err);
126
+ dump_process(s, NULL);
127
return NULL;
128
}
88
}
129
89
90
130
--
91
--
131
2.13.5
92
2.17.1
132
93
133
94
From: Alberto Garcia <berto@igalia.com>

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: a57dd6274e1b6dc9c28769fec4c7ea543be5c5e3.1503580370.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/test-throttle.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

From: John Snow <jsnow@redhat.com>

Change the manual deferment to commit_complete into the implicit
callback to job_exit, renaming commit_complete to commit_exit.

This conversion does change the timing of when job_completed is
called to after the bdrv_replace_node and bdrv_unref calls, which
could have implications for bjob->blk which will now be put down
after this cleanup.

Kevin highlights that we did not take any permissions for that backend
at job creation time, so it is safe to reorder these operations.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-5-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/commit.c | 22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)
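As a small illustration of the conversion described in the second message above, an exit-style callback can read and update job->ret directly instead of passing a heap-allocated result through an opaque pointer. The types and the drop_intermediate() helper below are hypothetical stand-ins, not the QEMU commit job:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int ret;
    bool cancelled;
} Job;

/* Stand-in for the final "drop intermediate images" step. */
static int drop_intermediate(void)
{
    return 0;
}

static void commit_exit(Job *job)
{
    if (!job->cancelled && job->ret == 0) {
        job->ret = drop_intermediate(); /* update job->ret in place */
    }
}

int main(void)
{
    Job job = { .ret = 0, .cancelled = false };

    commit_exit(&job);
    printf("ret=%d\n", job.ret);
    return 0;
}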
10
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
23
diff --git a/block/commit.c b/block/commit.c
11
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
12
--- a/tests/test-throttle.c
25
--- a/block/commit.c
13
+++ b/tests/test-throttle.c
26
+++ b/block/commit.c
14
@@ -XXX,XX +XXX,XX @@ static void test_is_valid(void)
27
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
15
test_is_valid_for_value(1, true);
28
return 0;
16
}
29
}
17
30
18
+static void test_ranges(void)
31
-typedef struct {
19
+{
32
- int ret;
20
+ int i;
33
-} CommitCompleteData;
21
+
34
-
22
+ for (i = 0; i < BUCKETS_COUNT; i++) {
35
-static void commit_complete(Job *job, void *opaque)
23
+ LeakyBucket *b = &cfg.buckets[i];
36
+static void commit_exit(Job *job)
24
+ throttle_config_init(&cfg);
25
+
26
+ /* avg = 0 means throttling is disabled, but the config is valid */
27
+ b->avg = 0;
28
+ g_assert(throttle_is_valid(&cfg, NULL));
29
+ g_assert(!throttle_enabled(&cfg));
30
+
31
+ /* These are valid configurations (values <= THROTTLE_VALUE_MAX) */
32
+ b->avg = 1;
33
+ g_assert(throttle_is_valid(&cfg, NULL));
34
+
35
+ b->avg = THROTTLE_VALUE_MAX;
36
+ g_assert(throttle_is_valid(&cfg, NULL));
37
+
38
+ b->avg = THROTTLE_VALUE_MAX;
39
+ b->max = THROTTLE_VALUE_MAX;
40
+ g_assert(throttle_is_valid(&cfg, NULL));
41
+
42
+ /* Values over THROTTLE_VALUE_MAX are not allowed */
43
+ b->avg = THROTTLE_VALUE_MAX + 1;
44
+ g_assert(!throttle_is_valid(&cfg, NULL));
45
+
46
+ b->avg = THROTTLE_VALUE_MAX;
47
+ b->max = THROTTLE_VALUE_MAX + 1;
48
+ g_assert(!throttle_is_valid(&cfg, NULL));
49
+
50
+ /* burst_length must be between 1 and THROTTLE_VALUE_MAX */
51
+ b->avg = 1;
52
+ b->max = 1;
53
+ b->burst_length = 0;
54
+ g_assert(!throttle_is_valid(&cfg, NULL));
55
+
56
+ b->avg = 1;
57
+ b->max = 1;
58
+ b->burst_length = 1;
59
+ g_assert(throttle_is_valid(&cfg, NULL));
60
+
61
+ b->avg = 1;
62
+ b->max = 1;
63
+ b->burst_length = THROTTLE_VALUE_MAX;
64
+ g_assert(throttle_is_valid(&cfg, NULL));
65
+
66
+ b->avg = 1;
67
+ b->max = 1;
68
+ b->burst_length = THROTTLE_VALUE_MAX + 1;
69
+ g_assert(!throttle_is_valid(&cfg, NULL));
70
+
71
+ /* burst_length * max cannot exceed THROTTLE_VALUE_MAX */
72
+ b->avg = 1;
73
+ b->max = 2;
74
+ b->burst_length = THROTTLE_VALUE_MAX / 2;
75
+ g_assert(throttle_is_valid(&cfg, NULL));
76
+
77
+ b->avg = 1;
78
+ b->max = 3;
79
+ b->burst_length = THROTTLE_VALUE_MAX / 2;
80
+ g_assert(!throttle_is_valid(&cfg, NULL));
81
+
82
+ b->avg = 1;
83
+ b->max = THROTTLE_VALUE_MAX;
84
+ b->burst_length = 1;
85
+ g_assert(throttle_is_valid(&cfg, NULL));
86
+
87
+ b->avg = 1;
88
+ b->max = THROTTLE_VALUE_MAX;
89
+ b->burst_length = 2;
90
+ g_assert(!throttle_is_valid(&cfg, NULL));
91
+ }
92
+}
93
+
94
static void test_max_is_missing_limit(void)
95
{
37
{
96
int i;
38
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
97
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
39
BlockJob *bjob = &s->common;
98
g_test_add_func("/throttle/config/enabled", test_enabled);
40
- CommitCompleteData *data = opaque;
99
g_test_add_func("/throttle/config/conflicting", test_conflicting_config);
41
BlockDriverState *top = blk_bs(s->top);
100
g_test_add_func("/throttle/config/is_valid", test_is_valid);
42
BlockDriverState *base = blk_bs(s->base);
101
+ g_test_add_func("/throttle/config/ranges", test_ranges);
43
BlockDriverState *commit_top_bs = s->commit_top_bs;
102
g_test_add_func("/throttle/config/max", test_max_is_missing_limit);
44
- int ret = data->ret;
103
g_test_add_func("/throttle/config/iops_size",
45
bool remove_commit_top_bs = false;
104
test_iops_size_is_missing_limit);
46
47
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
48
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
49
* the normal backing chain can be restored. */
50
blk_unref(s->base);
51
52
- if (!job_is_cancelled(job) && ret == 0) {
53
+ if (!job_is_cancelled(job) && job->ret == 0) {
54
/* success */
55
- ret = bdrv_drop_intermediate(s->commit_top_bs, base,
56
- s->backing_file_str);
57
+ job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
58
+ s->backing_file_str);
59
} else {
60
/* XXX Can (or should) we somehow keep 'consistent read' blocked even
61
* after the failed/cancelled commit job is gone? If we already wrote
62
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
63
* bdrv_set_backing_hd() to fail. */
64
block_job_remove_all_bdrv(bjob);
65
66
- job_completed(job, ret);
67
- g_free(data);
68
-
69
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
70
* filter driver from the backing chain. Do this as the final step so that
71
* the 'consistent read' permission can be granted. */
72
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
73
static int coroutine_fn commit_run(Job *job, Error **errp)
74
{
75
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
76
- CommitCompleteData *data;
77
int64_t offset;
78
uint64_t delay_ns = 0;
79
int ret = 0;
80
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_run(Job *job, Error **errp)
81
out:
82
qemu_vfree(buf);
83
84
- data = g_malloc(sizeof(*data));
85
- data->ret = ret;
86
- job_defer_to_main_loop(&s->common.job, commit_complete, data);
87
return ret;
88
}
89
90
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
91
.user_resume = block_job_user_resume,
92
.drain = block_job_drain,
93
.run = commit_run,
94
+ .exit = commit_exit,
95
},
96
};
97
105
--
98
--
106
2.13.5
99
2.17.1
107
100
108
101
Most qcow2 files are uncompressed so it is wasteful to allocate (32 + 1)
* cluster_size + 512 bytes upfront. Allocate s->cluster_cache and
s->cluster_data when the first read operation is performed on a
compressed cluster.

The buffers are freed in .bdrv_close(). .bdrv_open() no longer has any
code paths that can allocate these buffers, so remove the free functions
in the error code path.

This patch can result in significant memory savings when many qcow2
disks are attached or backing file chains are long:

  Before 12.81% (1,023,193,088B)
  After   5.36% (393,893,888B)

Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170821135530.32344-1-stefanha@redhat.com
Cc: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/qcow2-cluster.c | 17 +++++++++++++++++
 block/qcow2.c         | 12 ------------
 2 files changed, 17 insertions(+), 12 deletions(-)

From: John Snow <jsnow@redhat.com>

Change the manual deferment to mirror_exit into the implicit
callback to job_exit and the mirror_exit callback.

This does change the order of some bdrv_unref calls and job_completed,
but thanks to the new context in which we call .exit, it is safe to
defer the possible flushing of any nodes to the job_finalize_single
cleanup stage.

Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-6-jsnow@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/mirror.c | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
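A compile-and-run sketch of the on-demand allocation strategy described in the first message above, using plain malloc()/free() and invented names in place of the qcow2 internals:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    size_t cluster_size;
    unsigned char *cluster_cache;
    unsigned char *cluster_data;
} CompressState;

/* Buffers are created only on the first compressed-cluster read. */
static int decompress_cluster(CompressState *s)
{
    if (!s->cluster_data) {
        /* one extra sector for decompressed data alignment */
        s->cluster_data = malloc(32 * s->cluster_size + 512);
        if (!s->cluster_data) {
            return -1; /* -ENOMEM in real code */
        }
    }
    if (!s->cluster_cache) {
        s->cluster_cache = malloc(s->cluster_size);
        if (!s->cluster_cache) {
            return -1;
        }
    }
    /* ... read the compressed sectors and decompress into the buffers ... */
    memset(s->cluster_cache, 0, s->cluster_size);
    return 0;
}

/* Freed once, at close time. */
static void compress_state_close(CompressState *s)
{
    free(s->cluster_cache);
    free(s->cluster_data);
}

int main(void)
{
    CompressState s = { .cluster_size = 65536 };
    int ret = decompress_cluster(&s);

    printf("%s\n", ret == 0 ? "buffers allocated on demand" : "allocation failed");
    compress_state_close(&s);
    return ret ? 1 : 0;
}

Images that never touch a compressed cluster never pay for the buffers, which is where the memory savings quoted in the message come from.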
28
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
29
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
30
--- a/block/qcow2-cluster.c
22
--- a/block/mirror.c
31
+++ b/block/qcow2-cluster.c
23
+++ b/block/mirror.c
32
@@ -XXX,XX +XXX,XX @@ int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
24
@@ -XXX,XX +XXX,XX @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
33
nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
25
}
34
sector_offset = coffset & 511;
26
}
35
csize = nb_csectors * 512 - sector_offset;
27
28
-typedef struct {
29
- int ret;
30
-} MirrorExitData;
31
-
32
-static void mirror_exit(Job *job, void *opaque)
33
+static void mirror_exit(Job *job)
34
{
35
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
36
BlockJob *bjob = &s->common;
37
- MirrorExitData *data = opaque;
38
MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
39
AioContext *replace_aio_context = NULL;
40
BlockDriverState *src = s->mirror_top_bs->backing->bs;
41
BlockDriverState *target_bs = blk_bs(s->target);
42
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
43
Error *local_err = NULL;
44
+ int ret = job->ret;
45
46
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
47
48
- /* Make sure that the source BDS doesn't go away before we called
49
- * job_completed(). */
50
+ /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
51
+ * before we can call bdrv_drained_end */
52
bdrv_ref(src);
53
bdrv_ref(mirror_top_bs);
54
bdrv_ref(target_bs);
55
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
56
bdrv_set_backing_hd(target_bs, backing, &local_err);
57
if (local_err) {
58
error_report_err(local_err);
59
- data->ret = -EPERM;
60
+ ret = -EPERM;
61
}
62
}
63
}
64
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
65
aio_context_acquire(replace_aio_context);
66
}
67
68
- if (s->should_complete && data->ret == 0) {
69
+ if (s->should_complete && ret == 0) {
70
BlockDriverState *to_replace = src;
71
if (s->to_replace) {
72
to_replace = s->to_replace;
73
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
74
bdrv_drained_end(target_bs);
75
if (local_err) {
76
error_report_err(local_err);
77
- data->ret = -EPERM;
78
+ ret = -EPERM;
79
}
80
}
81
if (s->to_replace) {
82
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
83
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
84
85
bs_opaque->job = NULL;
86
- job_completed(job, data->ret);
87
88
- g_free(data);
89
bdrv_drained_end(src);
90
bdrv_unref(mirror_top_bs);
91
bdrv_unref(src);
36
+
92
+
37
+ /* Allocate buffers on first decompress operation, most images are
93
+ job->ret = ret;
38
+ * uncompressed and the memory overhead can be avoided. The buffers
94
}
39
+ * are freed in .bdrv_close().
95
40
+ */
96
static void mirror_throttle(MirrorBlockJob *s)
41
+ if (!s->cluster_data) {
97
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
42
+ /* one more sector for decompressed data alignment */
98
static int coroutine_fn mirror_run(Job *job, Error **errp)
43
+ s->cluster_data = qemu_try_blockalign(bs->file->bs,
99
{
44
+ QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
100
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
45
+ if (!s->cluster_data) {
101
- MirrorExitData *data;
46
+ return -ENOMEM;
102
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
47
+ }
103
BlockDriverState *target_bs = blk_bs(s->target);
48
+ }
104
bool need_drain = true;
49
+ if (!s->cluster_cache) {
105
@@ -XXX,XX +XXX,XX @@ immediate_exit:
50
+ s->cluster_cache = g_malloc(s->cluster_size);
106
g_free(s->in_flight_bitmap);
51
+ }
107
bdrv_dirty_iter_free(s->dbi);
52
+
108
53
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
109
- data = g_malloc(sizeof(*data));
54
ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
110
- data->ret = ret;
55
nb_csectors);
111
-
56
diff --git a/block/qcow2.c b/block/qcow2.c
112
if (need_drain) {
57
index XXXXXXX..XXXXXXX 100644
113
bdrv_drained_begin(bs);
58
--- a/block/qcow2.c
59
+++ b/block/qcow2.c
60
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
61
goto fail;
62
}
114
}
63
115
64
- s->cluster_cache = g_malloc(s->cluster_size);
116
- job_defer_to_main_loop(&s->common.job, mirror_exit, data);
65
- /* one more sector for decompressed data alignment */
66
- s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS
67
- * s->cluster_size + 512);
68
- if (s->cluster_data == NULL) {
69
- error_setg(errp, "Could not allocate temporary cluster buffer");
70
- ret = -ENOMEM;
71
- goto fail;
72
- }
73
-
74
s->cluster_cache_offset = -1;
75
s->flags = flags;
76
77
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
78
if (s->refcount_block_cache) {
79
qcow2_cache_destroy(bs, s->refcount_block_cache);
80
}
81
- g_free(s->cluster_cache);
82
- qemu_vfree(s->cluster_data);
83
qcrypto_block_free(s->crypto);
84
qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
85
return ret;
117
return ret;
118
}
119
120
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
121
.user_resume = block_job_user_resume,
122
.drain = block_job_drain,
123
.run = mirror_run,
124
+ .exit = mirror_exit,
125
.pause = mirror_pause,
126
.complete = mirror_complete,
127
},
128
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
129
.user_resume = block_job_user_resume,
130
.drain = block_job_drain,
131
.run = mirror_run,
132
+ .exit = mirror_exit,
133
.pause = mirror_pause,
134
.complete = mirror_complete,
135
},
86
--
136
--
87
2.13.5
137
2.17.1
88
138
89
139
1
Add the scripts/ directory to sys.path so Python 2.6 will be able to
1
From: John Snow <jsnow@redhat.com>
2
import argparse.
2
3
3
Utilize the job_exit shim by not calling job_defer_to_main_loop, and
4
Cc: Daniel P. Berrange <berrange@redhat.com>
4
where applicable, by converting the deferred callback into the job_exit
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
5
callback.
6
Acked-by: John Snow <jsnow@redhat.com>
6
7
Acked-by: Fam Zheng <famz@redhat.com>
7
This converts backup, stream, create, and the unit tests all at once.
8
Message-id: 20170825155732.15665-4-stefanha@redhat.com
8
Most of these jobs do not see any changes to the order in which they
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
clean up their resources, except the test-blockjob-txn test, which
10
now puts down its bs before job_completed is called.
11
12
This is safe for the same reason the reordering in the mirror job is
13
safe, because job_completed no longer runs under two locks, making
14
the unref safe even if it causes a flush.
15
16
Signed-off-by: John Snow <jsnow@redhat.com>
17
Reviewed-by: Max Reitz <mreitz@redhat.com>
18
Message-id: 20180830015734.19765-7-jsnow@redhat.com
19
Signed-off-by: Max Reitz <mreitz@redhat.com>
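A minimal standalone sketch of the pattern this series converts to, using made-up ToyJob/ToyJobDriver/toy_job_finish names rather than QEMU's real job API: the driver's run callback just returns its result, the generic code records it, and an optional exit callback performs the cleanup that previously lived in the deferred completion callback.

/* Toy model of the .exit shim described above -- illustration only,
 * not QEMU code. */
#include <stdio.h>

typedef struct ToyJob ToyJob;

typedef struct ToyJobDriver {
    int (*run)(ToyJob *job);    /* does the work, returns 0 or -errno */
    void (*exit)(ToyJob *job);  /* optional cleanup, may adjust job->ret */
} ToyJobDriver;

struct ToyJob {
    const ToyJobDriver *driver;
    int ret;                    /* recorded by the framework from run() */
};

/* The generic code owns completion: it stores run()'s return value and
 * then invokes the driver's exit callback, so drivers no longer schedule
 * their own deferred completion step. */
static void toy_job_finish(ToyJob *job)
{
    job->ret = job->driver->run(job);
    if (job->driver->exit) {
        job->driver->exit(job);
    }
    printf("job finished with ret=%d\n", job->ret);
}

static int example_run(ToyJob *job)
{
    (void)job;
    return 0;                   /* success; cleanup happens in exit() */
}

static void example_exit(ToyJob *job)
{
    /* drop references here; a cleanup failure could override job->ret */
    (void)job;
}

static const ToyJobDriver example_driver = { example_run, example_exit };

int main(void)
{
    ToyJob job = { &example_driver, 0 };
    toy_job_finish(&job);
    return 0;
}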
10
---
20
---
11
tests/migration/guestperf/shell.py | 8 +++++---
21
block/backup.c | 16 ----------------
12
1 file changed, 5 insertions(+), 3 deletions(-)
22
block/create.c | 14 +++-----------
13
23
block/stream.c | 22 +++++++---------------
14
diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py
24
tests/test-bdrv-drain.c | 6 ------
15
index XXXXXXX..XXXXXXX 100644
25
tests/test-blockjob-txn.c | 11 ++---------
16
--- a/tests/migration/guestperf/shell.py
26
tests/test-blockjob.c | 10 ++++------
17
+++ b/tests/migration/guestperf/shell.py
27
6 files changed, 16 insertions(+), 63 deletions(-)
18
@@ -XXX,XX +XXX,XX @@
28
19
#
29
diff --git a/block/backup.c b/block/backup.c
20
30
index XXXXXXX..XXXXXXX 100644
21
31
--- a/block/backup.c
22
-import argparse
32
+++ b/block/backup.c
23
-import fnmatch
33
@@ -XXX,XX +XXX,XX @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
24
import os
34
}
25
import os.path
35
}
26
-import platform
36
27
import sys
37
-typedef struct {
28
+sys.path.append(os.path.join(os.path.dirname(__file__),
38
- int ret;
29
+ '..', '..', '..', 'scripts'))
39
-} BackupCompleteData;
30
+import argparse
40
-
31
+import fnmatch
41
-static void backup_complete(Job *job, void *opaque)
32
+import platform
42
-{
33
43
- BackupCompleteData *data = opaque;
34
from guestperf.hardware import Hardware
44
-
35
from guestperf.engine import Engine
45
- job_completed(job, data->ret);
46
- g_free(data);
47
-}
48
-
49
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
50
{
51
uint64_t delay_ns;
52
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
53
static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
54
{
55
BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
56
- BackupCompleteData *data;
57
BlockDriverState *bs = blk_bs(job->common.blk);
58
int64_t offset, nb_clusters;
59
int ret = 0;
60
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
61
qemu_co_rwlock_unlock(&job->flush_rwlock);
62
hbitmap_free(job->copy_bitmap);
63
64
- data = g_malloc(sizeof(*data));
65
- data->ret = ret;
66
- job_defer_to_main_loop(&job->common.job, backup_complete, data);
67
return ret;
68
}
69
70
diff --git a/block/create.c b/block/create.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/create.c
73
+++ b/block/create.c
74
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevCreateJob {
75
Job common;
76
BlockDriver *drv;
77
BlockdevCreateOptions *opts;
78
- int ret;
79
} BlockdevCreateJob;
80
81
-static void blockdev_create_complete(Job *job, void *opaque)
82
-{
83
- BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
84
-
85
- job_completed(job, s->ret);
86
-}
87
-
88
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
89
{
90
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
91
+ int ret;
92
93
job_progress_set_remaining(&s->common, 1);
94
- s->ret = s->drv->bdrv_co_create(s->opts, errp);
95
+ ret = s->drv->bdrv_co_create(s->opts, errp);
96
job_progress_update(&s->common, 1);
97
98
qapi_free_BlockdevCreateOptions(s->opts);
99
- job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
100
101
- return s->ret;
102
+ return ret;
103
}
104
105
static const JobDriver blockdev_create_job_driver = {
106
diff --git a/block/stream.c b/block/stream.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/block/stream.c
109
+++ b/block/stream.c
110
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk,
111
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
112
}
113
114
-typedef struct {
115
- int ret;
116
-} StreamCompleteData;
117
-
118
-static void stream_complete(Job *job, void *opaque)
119
+static void stream_exit(Job *job)
120
{
121
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
122
BlockJob *bjob = &s->common;
123
- StreamCompleteData *data = opaque;
124
BlockDriverState *bs = blk_bs(bjob->blk);
125
BlockDriverState *base = s->base;
126
Error *local_err = NULL;
127
+ int ret = job->ret;
128
129
- if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
130
+ if (!job_is_cancelled(job) && bs->backing && ret == 0) {
131
const char *base_id = NULL, *base_fmt = NULL;
132
if (base) {
133
base_id = s->backing_file_str;
134
@@ -XXX,XX +XXX,XX @@ static void stream_complete(Job *job, void *opaque)
135
base_fmt = base->drv->format_name;
136
}
137
}
138
- data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
139
+ ret = bdrv_change_backing_file(bs, base_id, base_fmt);
140
bdrv_set_backing_hd(bs, base, &local_err);
141
if (local_err) {
142
error_report_err(local_err);
143
- data->ret = -EPERM;
144
+ ret = -EPERM;
145
goto out;
146
}
147
}
148
@@ -XXX,XX +XXX,XX @@ out:
149
}
150
151
g_free(s->backing_file_str);
152
- job_completed(job, data->ret);
153
- g_free(data);
154
+ job->ret = ret;
155
}
156
157
static int coroutine_fn stream_run(Job *job, Error **errp)
158
{
159
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
160
- StreamCompleteData *data;
161
BlockBackend *blk = s->common.blk;
162
BlockDriverState *bs = blk_bs(blk);
163
BlockDriverState *base = s->base;
164
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_run(Job *job, Error **errp)
165
166
out:
167
/* Modify backing chain and close BDSes in main loop */
168
- data = g_malloc(sizeof(*data));
169
- data->ret = ret;
170
- job_defer_to_main_loop(&s->common.job, stream_complete, data);
171
return ret;
172
}
173
174
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
175
.job_type = JOB_TYPE_STREAM,
176
.free = block_job_free,
177
.run = stream_run,
178
+ .exit = stream_exit,
179
.user_resume = block_job_user_resume,
180
.drain = block_job_drain,
181
},
182
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/tests/test-bdrv-drain.c
185
+++ b/tests/test-bdrv-drain.c
186
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
187
bool should_complete;
188
} TestBlockJob;
189
190
-static void test_job_completed(Job *job, void *opaque)
191
-{
192
- job_completed(job, 0);
193
-}
194
-
195
static int coroutine_fn test_job_run(Job *job, Error **errp)
196
{
197
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
198
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
199
job_pause_point(&s->common.job);
200
}
201
202
- job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
203
return 0;
204
}
205
206
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
207
index XXXXXXX..XXXXXXX 100644
208
--- a/tests/test-blockjob-txn.c
209
+++ b/tests/test-blockjob-txn.c
210
@@ -XXX,XX +XXX,XX @@ typedef struct {
211
int *result;
212
} TestBlockJob;
213
214
-static void test_block_job_complete(Job *job, void *opaque)
215
+static void test_block_job_exit(Job *job)
216
{
217
BlockJob *bjob = container_of(job, BlockJob, job);
218
BlockDriverState *bs = blk_bs(bjob->blk);
219
- int rc = (intptr_t)opaque;
220
221
- if (job_is_cancelled(job)) {
222
- rc = -ECANCELED;
223
- }
224
-
225
- job_completed(job, rc);
226
bdrv_unref(bs);
227
}
228
229
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_block_job_run(Job *job, Error **errp)
230
}
231
}
232
233
- job_defer_to_main_loop(job, test_block_job_complete,
234
- (void *)(intptr_t)s->rc);
235
return s->rc;
236
}
237
238
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
239
.user_resume = block_job_user_resume,
240
.drain = block_job_drain,
241
.run = test_block_job_run,
242
+ .exit = test_block_job_exit,
243
},
244
};
245
246
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/tests/test-blockjob.c
249
+++ b/tests/test-blockjob.c
250
@@ -XXX,XX +XXX,XX @@ typedef struct CancelJob {
251
bool completed;
252
} CancelJob;
253
254
-static void cancel_job_completed(Job *job, void *opaque)
255
+static void cancel_job_exit(Job *job)
256
{
257
- CancelJob *s = opaque;
258
+ CancelJob *s = container_of(job, CancelJob, common.job);
259
s->completed = true;
260
- job_completed(job, 0);
261
}
262
263
static void cancel_job_complete(Job *job, Error **errp)
264
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
265
266
while (!s->should_complete) {
267
if (job_is_cancelled(&s->common.job)) {
268
- goto defer;
269
+ return 0;
270
}
271
272
if (!job_is_ready(&s->common.job) && s->should_converge) {
273
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
274
job_sleep_ns(&s->common.job, 100000);
275
}
276
277
- defer:
278
- job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
279
return 0;
280
}
281
282
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
283
.user_resume = block_job_user_resume,
284
.drain = block_job_drain,
285
.run = cancel_job_run,
286
+ .exit = cancel_job_exit,
287
.complete = cancel_job_complete,
288
},
289
};
36
--
290
--
37
2.13.5
291
2.17.1
38
292
39
293
1
From: Eduardo Habkost <ehabkost@redhat.com>
1
From: John Snow <jsnow@redhat.com>
2
2
3
If QEMU is running on a system that's out of memory and mmap()
3
Rename opaque_job to job to be consistent with other job implementations.
4
fails, QEMU aborts with no error message at all, making it hard
4
Rename 'job', the BackupBlockJob object, to 's' to also be consistent.
5
to debug the reason for the failure.
6
5
7
Add perror() calls that will print error information before
6
Suggested-by: Eric Blake <eblake@redhat.com>
8
aborting.
7
Signed-off-by: John Snow <jsnow@redhat.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20180830015734.19765-8-jsnow@redhat.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/backup.c | 62 +++++++++++++++++++++++++-------------------------
13
1 file changed, 31 insertions(+), 31 deletions(-)
9
14
10
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
15
diff --git a/block/backup.c b/block/backup.c
11
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Message-id: 20170829212053.6003-1-ehabkost@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
---
16
util/oslib-posix.c | 2 ++
17
1 file changed, 2 insertions(+)
18
19
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
20
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
21
--- a/util/oslib-posix.c
17
--- a/block/backup.c
22
+++ b/util/oslib-posix.c
18
+++ b/block/backup.c
23
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
19
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
24
ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE,
20
bdrv_dirty_iter_free(dbi);
25
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
21
}
26
if (ptr == MAP_FAILED) {
22
27
+ perror("failed to allocate memory for stack");
23
-static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
28
abort();
24
+static int coroutine_fn backup_run(Job *job, Error **errp)
25
{
26
- BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
27
- BlockDriverState *bs = blk_bs(job->common.blk);
28
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
29
+ BlockDriverState *bs = blk_bs(s->common.blk);
30
int64_t offset, nb_clusters;
31
int ret = 0;
32
33
- QLIST_INIT(&job->inflight_reqs);
34
- qemu_co_rwlock_init(&job->flush_rwlock);
35
+ QLIST_INIT(&s->inflight_reqs);
36
+ qemu_co_rwlock_init(&s->flush_rwlock);
37
38
- nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
39
- job_progress_set_remaining(&job->common.job, job->len);
40
+ nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
41
+ job_progress_set_remaining(job, s->len);
42
43
- job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
44
- if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
45
- backup_incremental_init_copy_bitmap(job);
46
+ s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
47
+ if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
48
+ backup_incremental_init_copy_bitmap(s);
49
} else {
50
- hbitmap_set(job->copy_bitmap, 0, nb_clusters);
51
+ hbitmap_set(s->copy_bitmap, 0, nb_clusters);
29
}
52
}
30
53
31
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
54
32
guardpage = ptr;
55
- job->before_write.notify = backup_before_write_notify;
33
#endif
56
- bdrv_add_before_write_notifier(bs, &job->before_write);
34
if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
57
+ s->before_write.notify = backup_before_write_notify;
35
+ perror("failed to set up stack guard page");
58
+ bdrv_add_before_write_notifier(bs, &s->before_write);
36
abort();
59
60
- if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
61
+ if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
62
/* All bits are set in copy_bitmap to allow any cluster to be copied.
63
* This does not actually require them to be copied. */
64
- while (!job_is_cancelled(&job->common.job)) {
65
+ while (!job_is_cancelled(job)) {
66
/* Yield until the job is cancelled. We just let our before_write
67
* notify callback service CoW requests. */
68
- job_yield(&job->common.job);
69
+ job_yield(job);
70
}
71
- } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
72
- ret = backup_run_incremental(job);
73
+ } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
74
+ ret = backup_run_incremental(s);
75
} else {
76
/* Both FULL and TOP SYNC_MODE's require copying.. */
77
- for (offset = 0; offset < job->len;
78
- offset += job->cluster_size) {
79
+ for (offset = 0; offset < s->len;
80
+ offset += s->cluster_size) {
81
bool error_is_read;
82
int alloced = 0;
83
84
- if (yield_and_check(job)) {
85
+ if (yield_and_check(s)) {
86
break;
87
}
88
89
- if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
90
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
91
int i;
92
int64_t n;
93
94
/* Check to see if these blocks are already in the
95
* backing file. */
96
97
- for (i = 0; i < job->cluster_size;) {
98
+ for (i = 0; i < s->cluster_size;) {
99
/* bdrv_is_allocated() only returns true/false based
100
* on the first set of sectors it comes across that
101
* are are all in the same state.
102
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
103
* needed but at some point that is always the case. */
104
alloced =
105
bdrv_is_allocated(bs, offset + i,
106
- job->cluster_size - i, &n);
107
+ s->cluster_size - i, &n);
108
i += n;
109
110
if (alloced || n == 0) {
111
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
112
if (alloced < 0) {
113
ret = alloced;
114
} else {
115
- ret = backup_do_cow(job, offset, job->cluster_size,
116
+ ret = backup_do_cow(s, offset, s->cluster_size,
117
&error_is_read, false);
118
}
119
if (ret < 0) {
120
/* Depending on error action, fail now or retry cluster */
121
BlockErrorAction action =
122
- backup_error_action(job, error_is_read, -ret);
123
+ backup_error_action(s, error_is_read, -ret);
124
if (action == BLOCK_ERROR_ACTION_REPORT) {
125
break;
126
} else {
127
- offset -= job->cluster_size;
128
+ offset -= s->cluster_size;
129
continue;
130
}
131
}
132
}
37
}
133
}
38
134
135
- notifier_with_return_remove(&job->before_write);
136
+ notifier_with_return_remove(&s->before_write);
137
138
/* wait until pending backup_do_cow() calls have completed */
139
- qemu_co_rwlock_wrlock(&job->flush_rwlock);
140
- qemu_co_rwlock_unlock(&job->flush_rwlock);
141
- hbitmap_free(job->copy_bitmap);
142
+ qemu_co_rwlock_wrlock(&s->flush_rwlock);
143
+ qemu_co_rwlock_unlock(&s->flush_rwlock);
144
+ hbitmap_free(s->copy_bitmap);
145
146
return ret;
147
}
39
--
148
--
40
2.13.5
149
2.17.1
41
150
42
151
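The oslib-posix patch above boils down to reporting errno before aborting, so an out-of-memory mmap() failure leaves a hint in the logs. A minimal standalone illustration of that pattern (hypothetical helper name, not QEMU code):

/* Standalone sketch: print the errno reason via perror() before abort(). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static void *alloc_stack_or_die(size_t sz)
{
    void *ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("failed to allocate memory for stack");
        abort();
    }
    return ptr;
}

int main(void)
{
    void *stack = alloc_stack_or_die(1 << 20);
    munmap(stack, 1 << 20);
    return 0;
}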
1
From: Alberto Garcia <berto@igalia.com>
1
From: John Snow <jsnow@redhat.com>
2
2
3
Both the throttling limits set with the throttling.iops-* and
3
Jobs are now expected to return their retcode on the stack, from the
4
throttling.bps-* options and their QMP equivalents defined in the
4
.run callback, so we can remove that argument.
5
BlockIOThrottle struct are integer values.
6
5
7
Those limits are also reported in the BlockDeviceInfo struct and they
6
job_cancel does not need to set -ECANCELED because job_completed will
8
are integers there as well.
7
update the return code itself if the job was canceled.
9
8
10
Therefore there's no reason to store them internally as double and do
9
While we're here, make job_completed static to job.c and remove it from
11
the conversion every time we're setting or querying them, so this patch
10
job.h; move the documentation of return code to the .run() callback and
12
uses uint64_t for those types. Let's also use an unsigned type because
11
to the job->ret property, accordingly.
13
we don't allow negative values anyway.
14
12
15
LeakyBucket.level and LeakyBucket.burst_level do however remain double
13
Signed-off-by: John Snow <jsnow@redhat.com>
16
because their value changes depending on the fraction of time elapsed
14
Message-id: 20180830015734.19765-9-jsnow@redhat.com
17
since the previous I/O operation.
15
Reviewed-by: Max Reitz <mreitz@redhat.com>
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
17
---
18
include/qemu/job.h | 28 +++++++++++++++-------------
19
job.c | 11 ++++++-----
20
trace-events | 2 +-
21
3 files changed, 22 insertions(+), 19 deletions(-)
18
22
19
Signed-off-by: Alberto Garcia <berto@igalia.com>
23
diff --git a/include/qemu/job.h b/include/qemu/job.h
20
Message-id: f29b840422767b5be2c41c2dfdbbbf6c5f8fedf8.1503580370.git.berto@igalia.com
21
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
22
---
23
include/qemu/throttle.h | 4 ++--
24
tests/test-throttle.c | 3 ++-
25
util/throttle.c | 7 +++----
26
3 files changed, 7 insertions(+), 7 deletions(-)
27
28
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
29
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
30
--- a/include/qemu/throttle.h
25
--- a/include/qemu/job.h
31
+++ b/include/qemu/throttle.h
26
+++ b/include/qemu/job.h
32
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
33
*/
28
/** Estimated progress_current value at the completion of the job */
34
29
int64_t progress_total;
35
typedef struct LeakyBucket {
30
36
- double avg; /* average goal in units per second */
31
- /** ret code passed to job_completed. */
37
- double max; /* leaky bucket max burst in units */
32
+ /**
38
+ uint64_t avg; /* average goal in units per second */
33
+ * Return code from @run and/or @prepare callback(s).
39
+ uint64_t max; /* leaky bucket max burst in units */
34
+ * Not final until the job has reached the CONCLUDED status.
40
double level; /* bucket level in units */
35
+ * 0 on success, -errno on failure.
41
double burst_level; /* bucket level in units (for computing bursts) */
36
+ */
42
unsigned burst_length; /* max length of the burst period, in seconds */
37
int ret;
43
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
38
39
/**
40
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
41
/** Enum describing the operation */
42
JobType job_type;
43
44
- /** Mandatory: Entrypoint for the Coroutine. */
45
+ /**
46
+ * Mandatory: Entrypoint for the Coroutine.
47
+ *
48
+ * This callback will be invoked when moving from CREATED to RUNNING.
49
+ *
50
+ * If this callback returns nonzero, the job transaction it is part of is
51
+ * aborted. If it returns zero, the job moves into the WAITING state. If it
52
+ * is the last job to complete in its transaction, all jobs in the
53
+ * transaction move from WAITING to PENDING.
54
+ */
55
int coroutine_fn (*run)(Job *job, Error **errp);
56
57
/**
58
@@ -XXX,XX +XXX,XX @@ void job_early_fail(Job *job);
59
/** Moves the @job from RUNNING to READY */
60
void job_transition_to_ready(Job *job);
61
62
-/**
63
- * @job: The job being completed.
64
- * @ret: The status code.
65
- *
66
- * Marks @job as completed. If @ret is non-zero, the job transaction it is part
67
- * of is aborted. If @ret is zero, the job moves into the WAITING state. If it
68
- * is the last job to complete in its transaction, all jobs in the transaction
69
- * move from WAITING to PENDING.
70
- */
71
-void job_completed(Job *job, int ret);
72
-
73
/** Asynchronously complete the specified @job. */
74
void job_complete(Job *job, Error **errp);
75
76
diff --git a/job.c b/job.c
44
index XXXXXXX..XXXXXXX 100644
77
index XXXXXXX..XXXXXXX 100644
45
--- a/tests/test-throttle.c
78
--- a/job.c
46
+++ b/tests/test-throttle.c
79
+++ b/job.c
47
@@ -XXX,XX +XXX,XX @@ static void test_enabled(void)
80
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
48
for (i = 0; i < BUCKETS_COUNT; i++) {
49
throttle_config_init(&cfg);
50
set_cfg_value(false, i, 150);
51
+ g_assert(throttle_is_valid(&cfg, NULL));
52
g_assert(throttle_enabled(&cfg));
53
}
54
55
for (i = 0; i < BUCKETS_COUNT; i++) {
56
throttle_config_init(&cfg);
57
set_cfg_value(false, i, -150);
58
- g_assert(!throttle_enabled(&cfg));
59
+ g_assert(!throttle_is_valid(&cfg, NULL));
60
}
81
}
61
}
82
}
62
83
63
diff --git a/util/throttle.c b/util/throttle.c
84
+static void job_completed(Job *job);
85
+
86
static void job_exit(void *opaque)
87
{
88
Job *job = (Job *)opaque;
89
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
90
job->driver->exit(job);
91
aio_context_release(aio_context);
92
}
93
- job_completed(job, job->ret);
94
+ job_completed(job);
95
}
96
97
/**
98
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
99
}
100
}
101
102
-void job_completed(Job *job, int ret)
103
+static void job_completed(Job *job)
104
{
105
assert(job && job->txn && !job_is_completed(job));
106
107
- job->ret = ret;
108
job_update_rc(job);
109
- trace_job_completed(job, ret, job->ret);
110
+ trace_job_completed(job, job->ret);
111
if (job->ret) {
112
job_completed_txn_abort(job);
113
} else {
114
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
115
}
116
job_cancel_async(job, force);
117
if (!job_started(job)) {
118
- job_completed(job, -ECANCELED);
119
+ job_completed(job);
120
} else if (job->deferred_to_main_loop) {
121
job_completed_txn_abort(job);
122
} else {
123
diff --git a/trace-events b/trace-events
64
index XXXXXXX..XXXXXXX 100644
124
index XXXXXXX..XXXXXXX 100644
65
--- a/util/throttle.c
125
--- a/trace-events
66
+++ b/util/throttle.c
126
+++ b/trace-events
67
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
127
@@ -XXX,XX +XXX,XX @@ gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packe
68
/* If bkt->max is 0 we still want to allow short bursts of I/O
128
# job.c
69
* from the guest, otherwise every other request will be throttled
129
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
70
* and performance will suffer considerably. */
130
job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
71
- bucket_size = bkt->avg / 10;
131
-job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
72
+ bucket_size = (double) bkt->avg / 10;
132
+job_completed(void *job, int ret) "job %p ret %d"
73
burst_bucket_size = 0;
133
74
} else {
134
# job-qmp.c
75
/* If we have a burst limit then we have to wait until all I/O
135
qmp_job_cancel(void *job) "job %p"
76
* at burst rate has finished before throttling to bkt->avg */
77
bucket_size = bkt->max * bkt->burst_length;
78
- burst_bucket_size = bkt->max / 10;
79
+ burst_bucket_size = (double) bkt->max / 10;
80
}
81
82
/* If the main bucket is full then we have to wait */
83
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
84
85
for (i = 0; i < BUCKETS_COUNT; i++) {
86
LeakyBucket *bkt = &cfg->buckets[i];
87
- if (bkt->avg < 0 || bkt->max < 0 ||
88
- bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
89
+ if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
90
error_setg(errp, "bps/iops/max values must be within [0, %lld]",
91
THROTTLE_VALUE_MAX);
92
return false;
93
--
136
--
94
2.13.5
137
2.17.1
95
138
96
139
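A standalone sketch of the control flow introduced by the job patch above: the framework stores the .run() return value in job->ret and derives the final status from it, forcing -ECANCELED for cancelled jobs, instead of the job passing a ret argument to a completion call. Toy types and names, not QEMU's actual API:

/* Toy model of "run returns ret, framework consumes job->ret". */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyJob {
    int (*run)(struct ToyJob *job);
    bool cancelled;
    int ret;
} ToyJob;

/* Mirrors the idea of job_update_rc(): a cancelled job that otherwise
 * succeeded still ends up with -ECANCELED. */
static void toy_update_rc(ToyJob *job)
{
    if (job->ret == 0 && job->cancelled) {
        job->ret = -ECANCELED;
    }
}

static void toy_job_completed(ToyJob *job)
{
    toy_update_rc(job);
    if (job->ret) {
        printf("job failed or cancelled: ret=%d\n", job->ret);
    } else {
        printf("job succeeded\n");
    }
}

static int noop_run(ToyJob *job)
{
    (void)job;
    return 0;
}

int main(void)
{
    ToyJob job = { .run = noop_run, .cancelled = true };
    job.ret = job.run(&job);    /* the framework, not the job, records ret */
    toy_job_completed(&job);
    return 0;
}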
1
From: Alberto Garcia <berto@igalia.com>
1
From: John Snow <jsnow@redhat.com>
2
2
3
The throttling code can internally change the value of bkt->max if it
3
Now that the job infrastructure is handling the job_completed call for
4
hasn't been set by the user. The problem with this is that if we want
4
all implemented jobs, we can remove the interface that allowed jobs to
5
to retrieve the original value we have to undo this change first. This
5
schedule their own completion.
6
is ugly and unnecessary: this patch removes the throttle_fix_bucket()
7
and throttle_unfix_bucket() functions completely and moves the logic
8
to throttle_compute_wait().
9
6
10
Signed-off-by: Alberto Garcia <berto@igalia.com>
7
Signed-off-by: John Snow <jsnow@redhat.com>
11
Reviewed-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
12
Message-id: 5b0b9e1ac6eb208d709eddc7b09e7669a523bff3.1503580370.git.berto@igalia.com
9
Message-id: 20180830015734.19765-10-jsnow@redhat.com
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
14
---
11
---
15
util/throttle.c | 62 +++++++++++++++++++++------------------------------------
12
include/qemu/job.h | 17 -----------------
16
1 file changed, 23 insertions(+), 39 deletions(-)
13
job.c | 40 ++--------------------------------------
14
2 files changed, 2 insertions(+), 55 deletions(-)
17
15
18
diff --git a/util/throttle.c b/util/throttle.c
16
diff --git a/include/qemu/job.h b/include/qemu/job.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/util/throttle.c
18
--- a/include/qemu/job.h
21
+++ b/util/throttle.c
19
+++ b/include/qemu/job.h
22
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_do_compute_wait(double limit, double extra)
20
@@ -XXX,XX +XXX,XX @@ void job_finalize(Job *job, Error **errp);
23
int64_t throttle_compute_wait(LeakyBucket *bkt)
21
*/
24
{
22
void job_dismiss(Job **job, Error **errp);
25
double extra; /* the number of extra units blocking the io */
23
26
+ double bucket_size; /* I/O before throttling to bkt->avg */
24
-typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
27
+ double burst_bucket_size; /* Before throttling to bkt->max */
25
-
28
26
-/**
29
if (!bkt->avg) {
27
- * @job: The job
30
return 0;
28
- * @fn: The function to run in the main loop
31
}
29
- * @opaque: The opaque value that is passed to @fn
32
30
- *
33
- /* If the bucket is full then we have to wait */
31
- * This function must be called by the main job coroutine just before it
34
- extra = bkt->level - bkt->max * bkt->burst_length;
32
- * returns. @fn is executed in the main loop with the job AioContext acquired.
35
+ if (!bkt->max) {
33
- *
36
+ /* If bkt->max is 0 we still want to allow short bursts of I/O
34
- * Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
37
+ * from the guest, otherwise every other request will be throttled
35
- * bdrv_drain_all() in the main loop.
38
+ * and performance will suffer considerably. */
36
- *
39
+ bucket_size = bkt->avg / 10;
37
- * The @job AioContext is held while @fn executes.
40
+ burst_bucket_size = 0;
38
- */
41
+ } else {
39
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
42
+ /* If we have a burst limit then we have to wait until all I/O
40
-
43
+ * at burst rate has finished before throttling to bkt->avg */
41
/**
44
+ bucket_size = bkt->max * bkt->burst_length;
42
* Synchronously finishes the given @job. If @finish is given, it is called to
45
+ burst_bucket_size = bkt->max / 10;
43
* trigger completion or cancellation of the job.
46
+ }
44
diff --git a/job.c b/job.c
47
+
45
index XXXXXXX..XXXXXXX 100644
48
+ /* If the main bucket is full then we have to wait */
46
--- a/job.c
49
+ extra = bkt->level - bucket_size;
47
+++ b/job.c
50
if (extra > 0) {
48
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
51
return throttle_do_compute_wait(bkt->avg, extra);
49
assert(job && job->driver && job->driver->run);
52
}
50
job_pause_point(job);
53
51
job->ret = job->driver->run(job, &job->err);
54
- /* If the bucket is not full yet we have to make sure that we
52
- if (!job->deferred_to_main_loop) {
55
- * fulfill the goal of bkt->max units per second. */
53
- job->deferred_to_main_loop = true;
56
+ /* If the main bucket is not full yet we still have to check the
54
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
57
+ * burst bucket in order to enforce the burst limit */
55
- job_exit,
58
if (bkt->burst_length > 1) {
56
- job);
59
- /* We use 1/10 of the max value to smooth the throttling.
57
- }
60
- * See throttle_fix_bucket() for more details. */
58
+ job->deferred_to_main_loop = true;
61
- extra = bkt->burst_level - bkt->max / 10;
59
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
62
+ extra = bkt->burst_level - burst_bucket_size;
63
if (extra > 0) {
64
return throttle_do_compute_wait(bkt->max, extra);
65
}
66
@@ -XXX,XX +XXX,XX @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
67
return true;
68
}
60
}
69
61
70
-/* fix bucket parameters */
62
71
-static void throttle_fix_bucket(LeakyBucket *bkt)
63
@@ -XXX,XX +XXX,XX @@ void job_complete(Job *job, Error **errp)
64
job->driver->complete(job, errp);
65
}
66
67
-
68
-typedef struct {
69
- Job *job;
70
- JobDeferToMainLoopFn *fn;
71
- void *opaque;
72
-} JobDeferToMainLoopData;
73
-
74
-static void job_defer_to_main_loop_bh(void *opaque)
72
-{
75
-{
73
- double min;
76
- JobDeferToMainLoopData *data = opaque;
77
- Job *job = data->job;
78
- AioContext *aio_context = job->aio_context;
74
-
79
-
75
- /* zero bucket level */
80
- aio_context_acquire(aio_context);
76
- bkt->level = bkt->burst_level = 0;
81
- data->fn(data->job, data->opaque);
82
- aio_context_release(aio_context);
77
-
83
-
78
- /* If bkt->max is 0 we still want to allow short bursts of I/O
84
- g_free(data);
79
- * from the guest, otherwise every other request will be throttled
80
- * and performance will suffer considerably. */
81
- min = bkt->avg / 10;
82
- if (bkt->avg && !bkt->max) {
83
- bkt->max = min;
84
- }
85
-}
85
-}
86
-
86
-
87
-/* undo internal bucket parameter changes (see throttle_fix_bucket()) */
87
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
88
-static void throttle_unfix_bucket(LeakyBucket *bkt)
89
-{
88
-{
90
- if (bkt->max < bkt->avg) {
89
- JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
91
- bkt->max = 0;
90
- data->job = job;
92
- }
91
- data->fn = fn;
92
- data->opaque = opaque;
93
- job->deferred_to_main_loop = true;
94
-
95
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
96
- job_defer_to_main_loop_bh, data);
93
-}
97
-}
94
-
98
-
95
/* Used to configure the throttle
99
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
96
*
97
* @ts: the throttle state we are working on
98
@@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts,
99
100
ts->cfg = *cfg;
101
102
+ /* Zero bucket level */
103
for (i = 0; i < BUCKETS_COUNT; i++) {
104
- throttle_fix_bucket(&ts->cfg.buckets[i]);
105
+ ts->cfg.buckets[i].level = 0;
106
+ ts->cfg.buckets[i].burst_level = 0;
107
}
108
109
ts->previous_leak = qemu_clock_get_ns(clock_type);
110
@@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts,
111
*/
112
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
113
{
100
{
114
- int i;
101
Error *local_err = NULL;
115
-
116
*cfg = ts->cfg;
117
-
118
- for (i = 0; i < BUCKETS_COUNT; i++) {
119
- throttle_unfix_bucket(&cfg->buckets[i]);
120
- }
121
}
122
123
124
--
102
--
125
2.13.5
103
2.17.1
126
104
127
105
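A standalone sketch of the leaky-bucket shape after the throttle patches above: the configured limits avg and max are plain integers, the running level stays double because it is drained by fractions of a second's worth of budget, and the avg/10 burst default is computed on the fly instead of being written back into bkt->max. Toy code for illustration only, not the QEMU implementation:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t avg;          /* average goal in units per second */
    uint64_t max;          /* max burst in units (0 = no explicit burst) */
    unsigned burst_length; /* burst period length in seconds */
    double level;          /* current bucket level in units */
} ToyBucket;

/* Leak the bucket according to elapsed time; this is where fractional
 * values appear, so level must remain a double. */
static void toy_leak(ToyBucket *bkt, double elapsed_s)
{
    bkt->level -= (double)bkt->avg * elapsed_s;
    if (bkt->level < 0) {
        bkt->level = 0;
    }
}

/* If max is 0, still allow short bursts of one tenth of avg, computed
 * here rather than by rewriting bkt->max. */
static double toy_bucket_size(const ToyBucket *bkt)
{
    return bkt->max ? (double)bkt->max * bkt->burst_length
                    : (double)bkt->avg / 10;
}

int main(void)
{
    ToyBucket bkt = { .avg = 1000, .max = 0, .burst_length = 1, .level = 75 };
    toy_leak(&bkt, 0.05);   /* 50 ms elapsed drains 50 units */
    printf("level=%.1f allowed burst=%.1f\n", bkt.level, toy_bucket_size(&bkt));
    return 0;
}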
Deleted patch
1
The minimum Python version supported by QEMU is 2.6. The argparse
2
standard library module was only added in Python 2.7. Many scripts
3
would like to use argparse because it supports command-line
4
sub-commands.
5
1
6
This patch adds argparse. See the top of argparse.py for details.
7
8
Suggested-by: Daniel P. Berrange <berrange@redhat.com>
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Acked-by: John Snow <jsnow@redhat.com>
11
Message-id: 20170825155732.15665-2-stefanha@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
COPYING.PYTHON | 270 ++++++
15
scripts/argparse.py | 2406 +++++++++++++++++++++++++++++++++++++++++++++++++++
16
2 files changed, 2676 insertions(+)
17
create mode 100644 COPYING.PYTHON
18
create mode 100644 scripts/argparse.py
19
20
diff --git a/COPYING.PYTHON b/COPYING.PYTHON
21
new file mode 100644
22
index XXXXXXX..XXXXXXX
23
--- /dev/null
24
+++ b/COPYING.PYTHON
25
@@ -XXX,XX +XXX,XX @@
26
+A. HISTORY OF THE SOFTWARE
27
+==========================
28
+
29
+Python was created in the early 1990s by Guido van Rossum at Stichting
30
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
31
+as a successor of a language called ABC. Guido remains Python's
32
+principal author, although it includes many contributions from others.
33
+
34
+In 1995, Guido continued his work on Python at the Corporation for
35
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
36
+in Reston, Virginia where he released several versions of the
37
+software.
38
+
39
+In May 2000, Guido and the Python core development team moved to
40
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
41
+year, the PythonLabs team moved to Digital Creations (now Zope
42
+Corporation, see http://www.zope.com). In 2001, the Python Software
43
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
44
+non-profit organization created specifically to own Python-related
45
+Intellectual Property. Zope Corporation is a sponsoring member of
46
+the PSF.
47
+
48
+All Python releases are Open Source (see http://www.opensource.org for
49
+the Open Source Definition). Historically, most, but not all, Python
50
+releases have also been GPL-compatible; the table below summarizes
51
+the various releases.
52
+
53
+ Release Derived Year Owner GPL-
54
+ from compatible? (1)
55
+
56
+ 0.9.0 thru 1.2 1991-1995 CWI yes
57
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
58
+ 1.6 1.5.2 2000 CNRI no
59
+ 2.0 1.6 2000 BeOpen.com no
60
+ 1.6.1 1.6 2001 CNRI yes (2)
61
+ 2.1 2.0+1.6.1 2001 PSF no
62
+ 2.0.1 2.0+1.6.1 2001 PSF yes
63
+ 2.1.1 2.1+2.0.1 2001 PSF yes
64
+ 2.2 2.1.1 2001 PSF yes
65
+ 2.1.2 2.1.1 2002 PSF yes
66
+ 2.1.3 2.1.2 2002 PSF yes
67
+ 2.2.1 2.2 2002 PSF yes
68
+ 2.2.2 2.2.1 2002 PSF yes
69
+ 2.2.3 2.2.2 2003 PSF yes
70
+ 2.3 2.2.2 2002-2003 PSF yes
71
+ 2.3.1 2.3 2002-2003 PSF yes
72
+ 2.3.2 2.3.1 2002-2003 PSF yes
73
+ 2.3.3 2.3.2 2002-2003 PSF yes
74
+ 2.3.4 2.3.3 2004 PSF yes
75
+ 2.3.5 2.3.4 2005 PSF yes
76
+ 2.4 2.3 2004 PSF yes
77
+ 2.4.1 2.4 2005 PSF yes
78
+ 2.4.2 2.4.1 2005 PSF yes
79
+ 2.4.3 2.4.2 2006 PSF yes
80
+ 2.5 2.4 2006 PSF yes
81
+ 2.7 2.6 2010 PSF yes
82
+
83
+Footnotes:
84
+
85
+(1) GPL-compatible doesn't mean that we're distributing Python under
86
+ the GPL. All Python licenses, unlike the GPL, let you distribute
87
+ a modified version without making your changes open source. The
88
+ GPL-compatible licenses make it possible to combine Python with
89
+ other software that is released under the GPL; the others don't.
90
+
91
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
92
+ because its license has a choice of law clause. According to
93
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
94
+ is "not incompatible" with the GPL.
95
+
96
+Thanks to the many outside volunteers who have worked under Guido's
97
+direction to make these releases possible.
98
+
99
+
100
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
101
+===============================================================
102
+
103
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
104
+--------------------------------------------
105
+
106
+1. This LICENSE AGREEMENT is between the Python Software Foundation
107
+("PSF"), and the Individual or Organization ("Licensee") accessing and
108
+otherwise using this software ("Python") in source or binary form and
109
+its associated documentation.
110
+
111
+2. Subject to the terms and conditions of this License Agreement, PSF
112
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
113
+license to reproduce, analyze, test, perform and/or display publicly,
114
+prepare derivative works, distribute, and otherwise use Python
115
+alone or in any derivative version, provided, however, that PSF's
116
+License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
117
+2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
118
+Reserved" are retained in Python alone or in any derivative version
119
+prepared by Licensee.
120
+
121
+3. In the event Licensee prepares a derivative work that is based on
122
+or incorporates Python or any part thereof, and wants to make
123
+the derivative work available to others as provided herein, then
124
+Licensee hereby agrees to include in any such work a brief summary of
125
+the changes made to Python.
126
+
127
+4. PSF is making Python available to Licensee on an "AS IS"
128
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
129
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
130
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
131
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
132
+INFRINGE ANY THIRD PARTY RIGHTS.
133
+
134
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
135
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
136
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
137
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
138
+
139
+6. This License Agreement will automatically terminate upon a material
140
+breach of its terms and conditions.
141
+
142
+7. Nothing in this License Agreement shall be deemed to create any
143
+relationship of agency, partnership, or joint venture between PSF and
144
+Licensee. This License Agreement does not grant permission to use PSF
145
+trademarks or trade name in a trademark sense to endorse or promote
146
+products or services of Licensee, or any third party.
147
+
148
+8. By copying, installing or otherwise using Python, Licensee
149
+agrees to be bound by the terms and conditions of this License
150
+Agreement.
151
+
152
+
153
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
154
+-------------------------------------------
155
+
156
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
157
+
158
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
159
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
160
+Individual or Organization ("Licensee") accessing and otherwise using
161
+this software in source or binary form and its associated
162
+documentation ("the Software").
163
+
164
+2. Subject to the terms and conditions of this BeOpen Python License
165
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
166
+royalty-free, world-wide license to reproduce, analyze, test, perform
167
+and/or display publicly, prepare derivative works, distribute, and
168
+otherwise use the Software alone or in any derivative version,
169
+provided, however, that the BeOpen Python License is retained in the
170
+Software, alone or in any derivative version prepared by Licensee.
171
+
172
+3. BeOpen is making the Software available to Licensee on an "AS IS"
173
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
174
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
175
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
176
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
177
+INFRINGE ANY THIRD PARTY RIGHTS.
178
+
179
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
180
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
181
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
182
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
183
+
184
+5. This License Agreement will automatically terminate upon a material
185
+breach of its terms and conditions.
186
+
187
+6. This License Agreement shall be governed by and interpreted in all
188
+respects by the law of the State of California, excluding conflict of
189
+law provisions. Nothing in this License Agreement shall be deemed to
190
+create any relationship of agency, partnership, or joint venture
191
+between BeOpen and Licensee. This License Agreement does not grant
192
+permission to use BeOpen trademarks or trade names in a trademark
193
+sense to endorse or promote products or services of Licensee, or any
194
+third party. As an exception, the "BeOpen Python" logos available at
195
+http://www.pythonlabs.com/logos.html may be used according to the
196
+permissions granted on that web page.
197
+
198
+7. By copying, installing or otherwise using the software, Licensee
199
+agrees to be bound by the terms and conditions of this License
200
+Agreement.
201
+
202
+
203
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
204
+---------------------------------------
205
+
206
+1. This LICENSE AGREEMENT is between the Corporation for National
207
+Research Initiatives, having an office at 1895 Preston White Drive,
208
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
209
+("Licensee") accessing and otherwise using Python 1.6.1 software in
210
+source or binary form and its associated documentation.
211
+
212
+2. Subject to the terms and conditions of this License Agreement, CNRI
213
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
214
+license to reproduce, analyze, test, perform and/or display publicly,
215
+prepare derivative works, distribute, and otherwise use Python 1.6.1
216
+alone or in any derivative version, provided, however, that CNRI's
217
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
218
+1995-2001 Corporation for National Research Initiatives; All Rights
219
+Reserved" are retained in Python 1.6.1 alone or in any derivative
220
+version prepared by Licensee. Alternately, in lieu of CNRI's License
221
+Agreement, Licensee may substitute the following text (omitting the
222
+quotes): "Python 1.6.1 is made available subject to the terms and
223
+conditions in CNRI's License Agreement. This Agreement together with
224
+Python 1.6.1 may be located on the Internet using the following
225
+unique, persistent identifier (known as a handle): 1895.22/1013. This
226
+Agreement may also be obtained from a proxy server on the Internet
227
+using the following URL: http://hdl.handle.net/1895.22/1013".
228
+
229
+3. In the event Licensee prepares a derivative work that is based on
230
+or incorporates Python 1.6.1 or any part thereof, and wants to make
231
+the derivative work available to others as provided herein, then
232
+Licensee hereby agrees to include in any such work a brief summary of
233
+the changes made to Python 1.6.1.
234
+
235
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
236
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
237
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
238
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
239
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
240
+INFRINGE ANY THIRD PARTY RIGHTS.
241
+
242
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
243
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
244
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
245
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
246
+
247
+6. This License Agreement will automatically terminate upon a material
248
+breach of its terms and conditions.
249
+
250
+7. This License Agreement shall be governed by the federal
251
+intellectual property law of the United States, including without
252
+limitation the federal copyright law, and, to the extent such
253
+U.S. federal law does not apply, by the law of the Commonwealth of
254
+Virginia, excluding Virginia's conflict of law provisions.
255
+Notwithstanding the foregoing, with regard to derivative works based
256
+on Python 1.6.1 that incorporate non-separable material that was
257
+previously distributed under the GNU General Public License (GPL), the
258
+law of the Commonwealth of Virginia shall govern this License
259
+Agreement only as to issues arising under or with respect to
260
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
261
+License Agreement shall be deemed to create any relationship of
262
+agency, partnership, or joint venture between CNRI and Licensee. This
263
+License Agreement does not grant permission to use CNRI trademarks or
264
+trade name in a trademark sense to endorse or promote products or
265
+services of Licensee, or any third party.
266
+
267
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
268
+installing or otherwise using Python 1.6.1, Licensee agrees to be
269
+bound by the terms and conditions of this License Agreement.
270
+
271
+ ACCEPT
272
+
273
+
274
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
275
+--------------------------------------------------
276
+
277
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
278
+The Netherlands. All rights reserved.
279
+
280
+Permission to use, copy, modify, and distribute this software and its
281
+documentation for any purpose and without fee is hereby granted,
282
+provided that the above copyright notice appear in all copies and that
283
+both that copyright notice and this permission notice appear in
284
+supporting documentation, and that the name of Stichting Mathematisch
285
+Centrum or CWI not be used in advertising or publicity pertaining to
286
+distribution of the software without specific, written prior
287
+permission.
288
+
289
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
290
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
291
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
292
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
293
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
294
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
295
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
296
diff --git a/scripts/argparse.py b/scripts/argparse.py
297
new file mode 100644
298
index XXXXXXX..XXXXXXX
299
--- /dev/null
300
+++ b/scripts/argparse.py
301
@@ -XXX,XX +XXX,XX @@
302
+# This is a local copy of the standard library argparse module taken from PyPI.
303
+# It is licensed under the Python Software Foundation License. This is a
304
+# fallback for Python 2.6 which does not include this module. Python 2.7+ and
305
+# 3+ will never load this module because built-in modules are loaded before
306
+# anything in sys.path.
307
+#
308
+# If your script is not located in the same directory as this file, import it
309
+# like this:
310
+#
311
+# import os
312
+# import sys
313
+# sys.path.append(os.path.join(os.path.dirname(__file__), ..., 'scripts'))
314
+# import argparse
315
+
316
+# Author: Steven J. Bethard <steven.bethard@gmail.com>.
317
+# Maintainer: Thomas Waldmann <tw@waldmann-edv.de>
318
+
319
+"""Command-line parsing library
320
+
321
+This module is an optparse-inspired command-line parsing library that:
322
+
323
+ - handles both optional and positional arguments
324
+ - produces highly informative usage messages
325
+ - supports parsers that dispatch to sub-parsers
326
+
327
+The following is a simple usage example that sums integers from the
328
+command-line and writes the result to a file::
329
+
330
+ parser = argparse.ArgumentParser(
331
+ description='sum the integers at the command line')
332
+ parser.add_argument(
333
+ 'integers', metavar='int', nargs='+', type=int,
334
+ help='an integer to be summed')
335
+ parser.add_argument(
336
+ '--log', default=sys.stdout, type=argparse.FileType('w'),
337
+ help='the file where the sum should be written')
338
+ args = parser.parse_args()
339
+ args.log.write('%s' % sum(args.integers))
340
+ args.log.close()
341
+
342
+The module contains the following public classes:
343
+
344
+ - ArgumentParser -- The main entry point for command-line parsing. As the
345
+ example above shows, the add_argument() method is used to populate
346
+ the parser with actions for optional and positional arguments. Then
347
+ the parse_args() method is invoked to convert the args at the
348
+ command-line into an object with attributes.
349
+
350
+ - ArgumentError -- The exception raised by ArgumentParser objects when
351
+ there are errors with the parser's actions. Errors raised while
352
+ parsing the command-line are caught by ArgumentParser and emitted
353
+ as command-line messages.
354
+
355
+ - FileType -- A factory for defining types of files to be created. As the
356
+ example above shows, instances of FileType are typically passed as
357
+ the type= argument of add_argument() calls.
358
+
359
+ - Action -- The base class for parser actions. Typically actions are
360
+ selected by passing strings like 'store_true' or 'append_const' to
361
+ the action= argument of add_argument(). However, for greater
362
+ customization of ArgumentParser actions, subclasses of Action may
363
+ be defined and passed as the action= argument.
364
+
365
+ - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
366
+ ArgumentDefaultsHelpFormatter -- Formatter classes which
367
+ may be passed as the formatter_class= argument to the
368
+ ArgumentParser constructor. HelpFormatter is the default,
369
+ RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
370
+ not to change the formatting for help text, and
371
+ ArgumentDefaultsHelpFormatter adds information about argument defaults
372
+ to the help.
373
+
374
+All other classes in this module are considered implementation details.
375
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
376
+considered public as object names -- the API of the formatter objects is
377
+still considered an implementation detail.)
378
+"""
379
+
380
+__version__ = '1.4.0' # we use our own version number independant of the
381
+ # one in stdlib and we release this on pypi.
382
+
383
+__external_lib__ = True # to make sure the tests really test THIS lib,
384
+ # not the builtin one in Python stdlib
385
+
386
+__all__ = [
387
+ 'ArgumentParser',
388
+ 'ArgumentError',
389
+ 'ArgumentTypeError',
390
+ 'FileType',
391
+ 'HelpFormatter',
392
+ 'ArgumentDefaultsHelpFormatter',
393
+ 'RawDescriptionHelpFormatter',
394
+ 'RawTextHelpFormatter',
395
+ 'Namespace',
396
+ 'Action',
397
+ 'ONE_OR_MORE',
398
+ 'OPTIONAL',
399
+ 'PARSER',
400
+ 'REMAINDER',
401
+ 'SUPPRESS',
402
+ 'ZERO_OR_MORE',
403
+]
404
+
405
+
406
+import copy as _copy
407
+import os as _os
408
+import re as _re
409
+import sys as _sys
410
+import textwrap as _textwrap
411
+
412
+from gettext import gettext as _
413
+
414
+try:
415
+ set
416
+except NameError:
417
+ # for python < 2.4 compatibility (sets module is there since 2.3):
418
+ from sets import Set as set
419
+
420
+try:
421
+ basestring
422
+except NameError:
423
+ basestring = str
424
+
425
+try:
426
+ sorted
427
+except NameError:
428
+ # for python < 2.4 compatibility:
429
+ def sorted(iterable, reverse=False):
430
+ result = list(iterable)
431
+ result.sort()
432
+ if reverse:
433
+ result.reverse()
434
+ return result
435
+
436
+
437
+def _callable(obj):
438
+ return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
439
+
440
+
441
+SUPPRESS = '==SUPPRESS=='
442
+
443
+OPTIONAL = '?'
444
+ZERO_OR_MORE = '*'
445
+ONE_OR_MORE = '+'
446
+PARSER = 'A...'
447
+REMAINDER = '...'
448
+_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
449
+
450
+# =============================
451
+# Utility functions and classes
452
+# =============================
453
+
454
+class _AttributeHolder(object):
455
+ """Abstract base class that provides __repr__.
456
+
457
+ The __repr__ method returns a string in the format::
458
+ ClassName(attr=name, attr=name, ...)
459
+ The attributes are determined either by a class-level attribute,
460
+ '_kwarg_names', or by inspecting the instance __dict__.
461
+ """
462
+
463
+ def __repr__(self):
464
+ type_name = type(self).__name__
465
+ arg_strings = []
466
+ for arg in self._get_args():
467
+ arg_strings.append(repr(arg))
468
+ for name, value in self._get_kwargs():
469
+ arg_strings.append('%s=%r' % (name, value))
470
+ return '%s(%s)' % (type_name, ', '.join(arg_strings))
471
+
472
+ def _get_kwargs(self):
473
+ return sorted(self.__dict__.items())
474
+
475
+ def _get_args(self):
476
+ return []
477
+
478
+
479
+def _ensure_value(namespace, name, value):
480
+ if getattr(namespace, name, None) is None:
481
+ setattr(namespace, name, value)
482
+ return getattr(namespace, name)
483
+
484
+
485
+# ===============
486
+# Formatting Help
487
+# ===============
488
+
489
+class HelpFormatter(object):
490
+ """Formatter for generating usage messages and argument help strings.
491
+
492
+ Only the name of this class is considered a public API. All the methods
493
+ provided by the class are considered an implementation detail.
494
+ """
495
+
496
+ def __init__(self,
497
+ prog,
498
+ indent_increment=2,
499
+ max_help_position=24,
500
+ width=None):
501
+
502
+ # default setting for width
503
+ if width is None:
504
+ try:
505
+ width = int(_os.environ['COLUMNS'])
506
+ except (KeyError, ValueError):
507
+ width = 80
508
+ width -= 2
509
+
510
+ self._prog = prog
511
+ self._indent_increment = indent_increment
512
+ self._max_help_position = max_help_position
513
+ self._width = width
514
+
515
+ self._current_indent = 0
516
+ self._level = 0
517
+ self._action_max_length = 0
518
+
519
+ self._root_section = self._Section(self, None)
520
+ self._current_section = self._root_section
521
+
522
+ self._whitespace_matcher = _re.compile(r'\s+')
523
+ self._long_break_matcher = _re.compile(r'\n\n\n+')
524
+
525
+ # ===============================
526
+ # Section and indentation methods
527
+ # ===============================
528
+ def _indent(self):
529
+ self._current_indent += self._indent_increment
530
+ self._level += 1
531
+
532
+ def _dedent(self):
533
+ self._current_indent -= self._indent_increment
534
+ assert self._current_indent >= 0, 'Indent decreased below 0.'
535
+ self._level -= 1
536
+
537
+ class _Section(object):
538
+
539
+ def __init__(self, formatter, parent, heading=None):
540
+ self.formatter = formatter
541
+ self.parent = parent
542
+ self.heading = heading
543
+ self.items = []
544
+
545
+ def format_help(self):
546
+ # format the indented section
547
+ if self.parent is not None:
548
+ self.formatter._indent()
549
+ join = self.formatter._join_parts
550
+ for func, args in self.items:
551
+ func(*args)
552
+ item_help = join([func(*args) for func, args in self.items])
553
+ if self.parent is not None:
554
+ self.formatter._dedent()
555
+
556
+ # return nothing if the section was empty
557
+ if not item_help:
558
+ return ''
559
+
560
+ # add the heading if the section was non-empty
561
+ if self.heading is not SUPPRESS and self.heading is not None:
562
+ current_indent = self.formatter._current_indent
563
+ heading = '%*s%s:\n' % (current_indent, '', self.heading)
564
+ else:
565
+ heading = ''
566
+
567
+ # join the section-initial newline, the heading and the help
568
+ return join(['\n', heading, item_help, '\n'])
569
+
570
+ def _add_item(self, func, args):
571
+ self._current_section.items.append((func, args))
572
+
573
+ # ========================
574
+ # Message building methods
575
+ # ========================
576
+ def start_section(self, heading):
577
+ self._indent()
578
+ section = self._Section(self, self._current_section, heading)
579
+ self._add_item(section.format_help, [])
580
+ self._current_section = section
581
+
582
+ def end_section(self):
583
+ self._current_section = self._current_section.parent
584
+ self._dedent()
585
+
586
+ def add_text(self, text):
587
+ if text is not SUPPRESS and text is not None:
588
+ self._add_item(self._format_text, [text])
589
+
590
+ def add_usage(self, usage, actions, groups, prefix=None):
591
+ if usage is not SUPPRESS:
592
+ args = usage, actions, groups, prefix
593
+ self._add_item(self._format_usage, args)
594
+
595
+ def add_argument(self, action):
596
+ if action.help is not SUPPRESS:
597
+
598
+ # find all invocations
599
+ get_invocation = self._format_action_invocation
600
+ invocations = [get_invocation(action)]
601
+ for subaction in self._iter_indented_subactions(action):
602
+ invocations.append(get_invocation(subaction))
603
+
604
+ # update the maximum item length
605
+ invocation_length = max([len(s) for s in invocations])
606
+ action_length = invocation_length + self._current_indent
607
+ self._action_max_length = max(self._action_max_length,
608
+ action_length)
609
+
610
+ # add the item to the list
611
+ self._add_item(self._format_action, [action])
612
+
613
+ def add_arguments(self, actions):
614
+ for action in actions:
615
+ self.add_argument(action)
616
+
617
+ # =======================
618
+ # Help-formatting methods
619
+ # =======================
620
+ def format_help(self):
621
+ help = self._root_section.format_help()
622
+ if help:
623
+ help = self._long_break_matcher.sub('\n\n', help)
624
+ help = help.strip('\n') + '\n'
625
+ return help
626
+
627
+ def _join_parts(self, part_strings):
628
+ return ''.join([part
629
+ for part in part_strings
630
+ if part and part is not SUPPRESS])
631
+
632
+ def _format_usage(self, usage, actions, groups, prefix):
633
+ if prefix is None:
634
+ prefix = _('usage: ')
635
+
636
+ # if usage is specified, use that
637
+ if usage is not None:
638
+ usage = usage % dict(prog=self._prog)
639
+
640
+ # if no optionals or positionals are available, usage is just prog
641
+ elif usage is None and not actions:
642
+ usage = '%(prog)s' % dict(prog=self._prog)
643
+
644
+ # if optionals and positionals are available, calculate usage
645
+ elif usage is None:
646
+ prog = '%(prog)s' % dict(prog=self._prog)
647
+
648
+ # split optionals from positionals
649
+ optionals = []
650
+ positionals = []
651
+ for action in actions:
652
+ if action.option_strings:
653
+ optionals.append(action)
654
+ else:
655
+ positionals.append(action)
656
+
657
+ # build full usage string
658
+ format = self._format_actions_usage
659
+ action_usage = format(optionals + positionals, groups)
660
+ usage = ' '.join([s for s in [prog, action_usage] if s])
661
+
662
+ # wrap the usage parts if it's too long
663
+ text_width = self._width - self._current_indent
664
+ if len(prefix) + len(usage) > text_width:
665
+
666
+ # break usage into wrappable parts
667
+ part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
668
+ opt_usage = format(optionals, groups)
669
+ pos_usage = format(positionals, groups)
670
+ opt_parts = _re.findall(part_regexp, opt_usage)
671
+ pos_parts = _re.findall(part_regexp, pos_usage)
672
+ assert ' '.join(opt_parts) == opt_usage
673
+ assert ' '.join(pos_parts) == pos_usage
674
+
675
+ # helper for wrapping lines
676
+ def get_lines(parts, indent, prefix=None):
677
+ lines = []
678
+ line = []
679
+ if prefix is not None:
680
+ line_len = len(prefix) - 1
681
+ else:
682
+ line_len = len(indent) - 1
683
+ for part in parts:
684
+ if line_len + 1 + len(part) > text_width:
685
+ lines.append(indent + ' '.join(line))
686
+ line = []
687
+ line_len = len(indent) - 1
688
+ line.append(part)
689
+ line_len += len(part) + 1
690
+ if line:
691
+ lines.append(indent + ' '.join(line))
692
+ if prefix is not None:
693
+ lines[0] = lines[0][len(indent):]
694
+ return lines
695
+
696
+ # if prog is short, follow it with optionals or positionals
697
+ if len(prefix) + len(prog) <= 0.75 * text_width:
698
+ indent = ' ' * (len(prefix) + len(prog) + 1)
699
+ if opt_parts:
700
+ lines = get_lines([prog] + opt_parts, indent, prefix)
701
+ lines.extend(get_lines(pos_parts, indent))
702
+ elif pos_parts:
703
+ lines = get_lines([prog] + pos_parts, indent, prefix)
704
+ else:
705
+ lines = [prog]
706
+
707
+ # if prog is long, put it on its own line
708
+ else:
709
+ indent = ' ' * len(prefix)
710
+ parts = opt_parts + pos_parts
711
+ lines = get_lines(parts, indent)
712
+ if len(lines) > 1:
713
+ lines = []
714
+ lines.extend(get_lines(opt_parts, indent))
715
+ lines.extend(get_lines(pos_parts, indent))
716
+ lines = [prog] + lines
717
+
718
+ # join lines into usage
719
+ usage = '\n'.join(lines)
720
+
721
+ # prefix with 'usage:'
722
+ return '%s%s\n\n' % (prefix, usage)
723
+
724
+ def _format_actions_usage(self, actions, groups):
725
+ # find group indices and identify actions in groups
726
+ group_actions = set()
727
+ inserts = {}
728
+ for group in groups:
729
+ try:
730
+ start = actions.index(group._group_actions[0])
731
+ except ValueError:
732
+ continue
733
+ else:
734
+ end = start + len(group._group_actions)
735
+ if actions[start:end] == group._group_actions:
736
+ for action in group._group_actions:
737
+ group_actions.add(action)
738
+ if not group.required:
739
+ if start in inserts:
740
+ inserts[start] += ' ['
741
+ else:
742
+ inserts[start] = '['
743
+ inserts[end] = ']'
744
+ else:
745
+ if start in inserts:
746
+ inserts[start] += ' ('
747
+ else:
748
+ inserts[start] = '('
749
+ inserts[end] = ')'
750
+ for i in range(start + 1, end):
751
+ inserts[i] = '|'
752
+
753
+ # collect all actions format strings
754
+ parts = []
755
+ for i, action in enumerate(actions):
756
+
757
+ # suppressed arguments are marked with None
758
+ # remove | separators for suppressed arguments
759
+ if action.help is SUPPRESS:
760
+ parts.append(None)
761
+ if inserts.get(i) == '|':
762
+ inserts.pop(i)
763
+ elif inserts.get(i + 1) == '|':
764
+ inserts.pop(i + 1)
765
+
766
+ # produce all arg strings
767
+ elif not action.option_strings:
768
+ part = self._format_args(action, action.dest)
769
+
770
+ # if it's in a group, strip the outer []
771
+ if action in group_actions:
772
+ if part[0] == '[' and part[-1] == ']':
773
+ part = part[1:-1]
774
+
775
+ # add the action string to the list
776
+ parts.append(part)
777
+
778
+ # produce the first way to invoke the option in brackets
779
+ else:
780
+ option_string = action.option_strings[0]
781
+
782
+ # if the Optional doesn't take a value, format is:
783
+ # -s or --long
784
+ if action.nargs == 0:
785
+ part = '%s' % option_string
786
+
787
+ # if the Optional takes a value, format is:
788
+ # -s ARGS or --long ARGS
789
+ else:
790
+ default = action.dest.upper()
791
+ args_string = self._format_args(action, default)
792
+ part = '%s %s' % (option_string, args_string)
793
+
794
+ # make it look optional if it's not required or in a group
795
+ if not action.required and action not in group_actions:
796
+ part = '[%s]' % part
797
+
798
+ # add the action string to the list
799
+ parts.append(part)
800
+
801
+ # insert things at the necessary indices
802
+ for i in sorted(inserts, reverse=True):
803
+ parts[i:i] = [inserts[i]]
804
+
805
+ # join all the action items with spaces
806
+ text = ' '.join([item for item in parts if item is not None])
807
+
808
+ # clean up separators for mutually exclusive groups
809
+ open = r'[\[(]'
810
+ close = r'[\])]'
811
+ text = _re.sub(r'(%s) ' % open, r'\1', text)
812
+ text = _re.sub(r' (%s)' % close, r'\1', text)
813
+ text = _re.sub(r'%s *%s' % (open, close), r'', text)
814
+ text = _re.sub(r'\(([^|]*)\)', r'\1', text)
815
+ text = text.strip()
816
+
817
+ # return the text
818
+ return text
819
+
820
+ def _format_text(self, text):
821
+ if '%(prog)' in text:
822
+ text = text % dict(prog=self._prog)
823
+ text_width = self._width - self._current_indent
824
+ indent = ' ' * self._current_indent
825
+ return self._fill_text(text, text_width, indent) + '\n\n'
826
+
827
+ def _format_action(self, action):
828
+ # determine the required width and the entry label
829
+ help_position = min(self._action_max_length + 2,
830
+ self._max_help_position)
831
+ help_width = self._width - help_position
832
+ action_width = help_position - self._current_indent - 2
833
+ action_header = self._format_action_invocation(action)
834
+
835
+ # no help; start on same line and add a final newline
836
+ if not action.help:
837
+ tup = self._current_indent, '', action_header
838
+ action_header = '%*s%s\n' % tup
839
+
840
+ # short action name; start on the same line and pad two spaces
841
+ elif len(action_header) <= action_width:
842
+ tup = self._current_indent, '', action_width, action_header
843
+ action_header = '%*s%-*s ' % tup
844
+ indent_first = 0
845
+
846
+ # long action name; start on the next line
847
+ else:
848
+ tup = self._current_indent, '', action_header
849
+ action_header = '%*s%s\n' % tup
850
+ indent_first = help_position
851
+
852
+ # collect the pieces of the action help
853
+ parts = [action_header]
854
+
855
+ # if there was help for the action, add lines of help text
856
+ if action.help:
857
+ help_text = self._expand_help(action)
858
+ help_lines = self._split_lines(help_text, help_width)
859
+ parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
860
+ for line in help_lines[1:]:
861
+ parts.append('%*s%s\n' % (help_position, '', line))
862
+
863
+ # or add a newline if the description doesn't end with one
864
+ elif not action_header.endswith('\n'):
865
+ parts.append('\n')
866
+
867
+ # if there are any sub-actions, add their help as well
868
+ for subaction in self._iter_indented_subactions(action):
869
+ parts.append(self._format_action(subaction))
870
+
871
+ # return a single string
872
+ return self._join_parts(parts)
873
+
874
+ def _format_action_invocation(self, action):
875
+ if not action.option_strings:
876
+ metavar, = self._metavar_formatter(action, action.dest)(1)
877
+ return metavar
878
+
879
+ else:
880
+ parts = []
881
+
882
+ # if the Optional doesn't take a value, format is:
883
+ # -s, --long
884
+ if action.nargs == 0:
885
+ parts.extend(action.option_strings)
886
+
887
+ # if the Optional takes a value, format is:
888
+ # -s ARGS, --long ARGS
889
+ else:
890
+ default = action.dest.upper()
891
+ args_string = self._format_args(action, default)
892
+ for option_string in action.option_strings:
893
+ parts.append('%s %s' % (option_string, args_string))
894
+
895
+ return ', '.join(parts)
896
+
897
+ def _metavar_formatter(self, action, default_metavar):
898
+ if action.metavar is not None:
899
+ result = action.metavar
900
+ elif action.choices is not None:
901
+ choice_strs = [str(choice) for choice in action.choices]
902
+ result = '{%s}' % ','.join(choice_strs)
903
+ else:
904
+ result = default_metavar
905
+
906
+ def format(tuple_size):
907
+ if isinstance(result, tuple):
908
+ return result
909
+ else:
910
+ return (result, ) * tuple_size
911
+ return format
912
+
913
+ def _format_args(self, action, default_metavar):
914
+ get_metavar = self._metavar_formatter(action, default_metavar)
915
+ if action.nargs is None:
916
+ result = '%s' % get_metavar(1)
917
+ elif action.nargs == OPTIONAL:
918
+ result = '[%s]' % get_metavar(1)
919
+ elif action.nargs == ZERO_OR_MORE:
920
+ result = '[%s [%s ...]]' % get_metavar(2)
921
+ elif action.nargs == ONE_OR_MORE:
922
+ result = '%s [%s ...]' % get_metavar(2)
923
+ elif action.nargs == REMAINDER:
924
+ result = '...'
925
+ elif action.nargs == PARSER:
926
+ result = '%s ...' % get_metavar(1)
927
+ else:
928
+ formats = ['%s' for _ in range(action.nargs)]
929
+ result = ' '.join(formats) % get_metavar(action.nargs)
930
+ return result
931
+
932
+ def _expand_help(self, action):
933
+ params = dict(vars(action), prog=self._prog)
934
+ for name in list(params):
935
+ if params[name] is SUPPRESS:
936
+ del params[name]
937
+ for name in list(params):
938
+ if hasattr(params[name], '__name__'):
939
+ params[name] = params[name].__name__
940
+ if params.get('choices') is not None:
941
+ choices_str = ', '.join([str(c) for c in params['choices']])
942
+ params['choices'] = choices_str
943
+ return self._get_help_string(action) % params
944
+
945
+ def _iter_indented_subactions(self, action):
946
+ try:
947
+ get_subactions = action._get_subactions
948
+ except AttributeError:
949
+ pass
950
+ else:
951
+ self._indent()
952
+ for subaction in get_subactions():
953
+ yield subaction
954
+ self._dedent()
955
+
956
+ def _split_lines(self, text, width):
957
+ text = self._whitespace_matcher.sub(' ', text).strip()
958
+ return _textwrap.wrap(text, width)
959
+
960
+ def _fill_text(self, text, width, indent):
961
+ text = self._whitespace_matcher.sub(' ', text).strip()
962
+ return _textwrap.fill(text, width, initial_indent=indent,
963
+ subsequent_indent=indent)
964
+
965
+ def _get_help_string(self, action):
966
+ return action.help
967
+
968
+
969
+class RawDescriptionHelpFormatter(HelpFormatter):
970
+ """Help message formatter which retains any formatting in descriptions.
971
+
972
+ Only the name of this class is considered a public API. All the methods
973
+ provided by the class are considered an implementation detail.
974
+ """
975
+
976
+ def _fill_text(self, text, width, indent):
977
+ return ''.join([indent + line for line in text.splitlines(True)])
978
+
979
+
980
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
981
+ """Help message formatter which retains formatting of all help text.
982
+
983
+ Only the name of this class is considered a public API. All the methods
984
+ provided by the class are considered an implementation detail.
985
+ """
986
+
987
+ def _split_lines(self, text, width):
988
+ return text.splitlines()
989
+
990
+
991
+class ArgumentDefaultsHelpFormatter(HelpFormatter):
992
+ """Help message formatter which adds default values to argument help.
993
+
994
+ Only the name of this class is considered a public API. All the methods
995
+ provided by the class are considered an implementation detail.
996
+ """
997
+
998
+ def _get_help_string(self, action):
999
+ help = action.help
1000
+ if '%(default)' not in action.help:
1001
+ if action.default is not SUPPRESS:
1002
+ defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
1003
+ if action.option_strings or action.nargs in defaulting_nargs:
1004
+ help += ' (default: %(default)s)'
1005
+ return help
1006
+
1007
+
1008
+# =====================
1009
+# Options and Arguments
1010
+# =====================
1011
+
1012
+def _get_action_name(argument):
1013
+ if argument is None:
1014
+ return None
1015
+ elif argument.option_strings:
1016
+ return '/'.join(argument.option_strings)
1017
+ elif argument.metavar not in (None, SUPPRESS):
1018
+ return argument.metavar
1019
+ elif argument.dest not in (None, SUPPRESS):
1020
+ return argument.dest
1021
+ else:
1022
+ return None
1023
+
1024
+
1025
+class ArgumentError(Exception):
1026
+ """An error from creating or using an argument (optional or positional).
1027
+
1028
+ The string value of this exception is the message, augmented with
1029
+ information about the argument that caused it.
1030
+ """
1031
+
1032
+ def __init__(self, argument, message):
1033
+ self.argument_name = _get_action_name(argument)
1034
+ self.message = message
1035
+
1036
+ def __str__(self):
1037
+ if self.argument_name is None:
1038
+ format = '%(message)s'
1039
+ else:
1040
+ format = 'argument %(argument_name)s: %(message)s'
1041
+ return format % dict(message=self.message,
1042
+ argument_name=self.argument_name)
1043
+
1044
+
1045
+class ArgumentTypeError(Exception):
1046
+ """An error from trying to convert a command line string to a type."""
1047
+ pass
1048
+
1049
+
1050
+# ==============
1051
+# Action classes
1052
+# ==============
1053
+
1054
+class Action(_AttributeHolder):
1055
+ """Information about how to convert command line strings to Python objects.
1056
+
1057
+ Action objects are used by an ArgumentParser to represent the information
1058
+ needed to parse a single argument from one or more strings from the
1059
+ command line. The keyword arguments to the Action constructor are also
1060
+ all attributes of Action instances.
1061
+
1062
+ Keyword Arguments:
1063
+
1064
+ - option_strings -- A list of command-line option strings which
1065
+ should be associated with this action.
1066
+
1067
+ - dest -- The name of the attribute to hold the created object(s)
1068
+
1069
+ - nargs -- The number of command-line arguments that should be
1070
+ consumed. By default, one argument will be consumed and a single
1071
+ value will be produced. Other values include:
1072
+ - N (an integer) consumes N arguments (and produces a list)
1073
+ - '?' consumes zero or one arguments
1074
+ - '*' consumes zero or more arguments (and produces a list)
1075
+ - '+' consumes one or more arguments (and produces a list)
1076
+ Note that the difference between the default and nargs=1 is that
1077
+ with the default, a single value will be produced, while with
1078
+ nargs=1, a list containing a single value will be produced.
1079
+
1080
+ - const -- The value to be produced if the option is specified and the
1081
+ option uses an action that takes no values.
1082
+
1083
+ - default -- The value to be produced if the option is not specified.
1084
+
1085
+ - type -- The type which the command-line arguments should be converted
1086
+ to, should be one of 'string', 'int', 'float', 'complex' or a
1087
+ callable object that accepts a single string argument. If None,
1088
+ 'string' is assumed.
1089
+
1090
+ - choices -- A container of values that should be allowed. If not None,
1091
+ after a command-line argument has been converted to the appropriate
1092
+ type, an exception will be raised if it is not a member of this
1093
+ collection.
1094
+
1095
+ - required -- True if the action must always be specified at the
1096
+ command line. This is only meaningful for optional command-line
1097
+ arguments.
1098
+
1099
+ - help -- The help string describing the argument.
1100
+
1101
+ - metavar -- The name to be used for the option's argument with the
1102
+ help string. If None, the 'dest' value will be used as the name.
1103
+ """
1104
+
1105
+ def __init__(self,
1106
+ option_strings,
1107
+ dest,
1108
+ nargs=None,
1109
+ const=None,
1110
+ default=None,
1111
+ type=None,
1112
+ choices=None,
1113
+ required=False,
1114
+ help=None,
1115
+ metavar=None):
1116
+ self.option_strings = option_strings
1117
+ self.dest = dest
1118
+ self.nargs = nargs
1119
+ self.const = const
1120
+ self.default = default
1121
+ self.type = type
1122
+ self.choices = choices
1123
+ self.required = required
1124
+ self.help = help
1125
+ self.metavar = metavar
1126
+
1127
+ def _get_kwargs(self):
1128
+ names = [
1129
+ 'option_strings',
1130
+ 'dest',
1131
+ 'nargs',
1132
+ 'const',
1133
+ 'default',
1134
+ 'type',
1135
+ 'choices',
1136
+ 'help',
1137
+ 'metavar',
1138
+ ]
1139
+ return [(name, getattr(self, name)) for name in names]
1140
+
1141
+ def __call__(self, parser, namespace, values, option_string=None):
1142
+ raise NotImplementedError(_('.__call__() not defined'))
1143
+
1144
+
1145
+class _StoreAction(Action):
1146
+
1147
+ def __init__(self,
1148
+ option_strings,
1149
+ dest,
1150
+ nargs=None,
1151
+ const=None,
1152
+ default=None,
1153
+ type=None,
1154
+ choices=None,
1155
+ required=False,
1156
+ help=None,
1157
+ metavar=None):
1158
+ if nargs == 0:
1159
+ raise ValueError('nargs for store actions must be > 0; if you '
1160
+ 'have nothing to store, actions such as store '
1161
+ 'true or store const may be more appropriate')
1162
+ if const is not None and nargs != OPTIONAL:
1163
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
1164
+ super(_StoreAction, self).__init__(
1165
+ option_strings=option_strings,
1166
+ dest=dest,
1167
+ nargs=nargs,
1168
+ const=const,
1169
+ default=default,
1170
+ type=type,
1171
+ choices=choices,
1172
+ required=required,
1173
+ help=help,
1174
+ metavar=metavar)
1175
+
1176
+ def __call__(self, parser, namespace, values, option_string=None):
1177
+ setattr(namespace, self.dest, values)
1178
+
1179
+
1180
+class _StoreConstAction(Action):
1181
+
1182
+ def __init__(self,
1183
+ option_strings,
1184
+ dest,
1185
+ const,
1186
+ default=None,
1187
+ required=False,
1188
+ help=None,
1189
+ metavar=None):
1190
+ super(_StoreConstAction, self).__init__(
1191
+ option_strings=option_strings,
1192
+ dest=dest,
1193
+ nargs=0,
1194
+ const=const,
1195
+ default=default,
1196
+ required=required,
1197
+ help=help)
1198
+
1199
+ def __call__(self, parser, namespace, values, option_string=None):
1200
+ setattr(namespace, self.dest, self.const)
1201
+
1202
+
1203
+class _StoreTrueAction(_StoreConstAction):
1204
+
1205
+ def __init__(self,
1206
+ option_strings,
1207
+ dest,
1208
+ default=False,
1209
+ required=False,
1210
+ help=None):
1211
+ super(_StoreTrueAction, self).__init__(
1212
+ option_strings=option_strings,
1213
+ dest=dest,
1214
+ const=True,
1215
+ default=default,
1216
+ required=required,
1217
+ help=help)
1218
+
1219
+
1220
+class _StoreFalseAction(_StoreConstAction):
1221
+
1222
+ def __init__(self,
1223
+ option_strings,
1224
+ dest,
1225
+ default=True,
1226
+ required=False,
1227
+ help=None):
1228
+ super(_StoreFalseAction, self).__init__(
1229
+ option_strings=option_strings,
1230
+ dest=dest,
1231
+ const=False,
1232
+ default=default,
1233
+ required=required,
1234
+ help=help)
1235
+
1236
+
1237
+class _AppendAction(Action):
1238
+
1239
+ def __init__(self,
1240
+ option_strings,
1241
+ dest,
1242
+ nargs=None,
1243
+ const=None,
1244
+ default=None,
1245
+ type=None,
1246
+ choices=None,
1247
+ required=False,
1248
+ help=None,
1249
+ metavar=None):
1250
+ if nargs == 0:
1251
+ raise ValueError('nargs for append actions must be > 0; if arg '
1252
+ 'strings are not supplying the value to append, '
1253
+ 'the append const action may be more appropriate')
1254
+ if const is not None and nargs != OPTIONAL:
1255
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
1256
+ super(_AppendAction, self).__init__(
1257
+ option_strings=option_strings,
1258
+ dest=dest,
1259
+ nargs=nargs,
1260
+ const=const,
1261
+ default=default,
1262
+ type=type,
1263
+ choices=choices,
1264
+ required=required,
1265
+ help=help,
1266
+ metavar=metavar)
1267
+
1268
+ def __call__(self, parser, namespace, values, option_string=None):
1269
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
1270
+ items.append(values)
1271
+ setattr(namespace, self.dest, items)
1272
+
1273
+
1274
+class _AppendConstAction(Action):
1275
+
1276
+ def __init__(self,
1277
+ option_strings,
1278
+ dest,
1279
+ const,
1280
+ default=None,
1281
+ required=False,
1282
+ help=None,
1283
+ metavar=None):
1284
+ super(_AppendConstAction, self).__init__(
1285
+ option_strings=option_strings,
1286
+ dest=dest,
1287
+ nargs=0,
1288
+ const=const,
1289
+ default=default,
1290
+ required=required,
1291
+ help=help,
1292
+ metavar=metavar)
1293
+
1294
+ def __call__(self, parser, namespace, values, option_string=None):
1295
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
1296
+ items.append(self.const)
1297
+ setattr(namespace, self.dest, items)
1298
+
1299
+
1300
+class _CountAction(Action):
1301
+
1302
+ def __init__(self,
1303
+ option_strings,
1304
+ dest,
1305
+ default=None,
1306
+ required=False,
1307
+ help=None):
1308
+ super(_CountAction, self).__init__(
1309
+ option_strings=option_strings,
1310
+ dest=dest,
1311
+ nargs=0,
1312
+ default=default,
1313
+ required=required,
1314
+ help=help)
1315
+
1316
+ def __call__(self, parser, namespace, values, option_string=None):
1317
+ new_count = _ensure_value(namespace, self.dest, 0) + 1
1318
+ setattr(namespace, self.dest, new_count)
1319
+
1320
+
1321
+class _HelpAction(Action):
1322
+
1323
+ def __init__(self,
1324
+ option_strings,
1325
+ dest=SUPPRESS,
1326
+ default=SUPPRESS,
1327
+ help=None):
1328
+ super(_HelpAction, self).__init__(
1329
+ option_strings=option_strings,
1330
+ dest=dest,
1331
+ default=default,
1332
+ nargs=0,
1333
+ help=help)
1334
+
1335
+ def __call__(self, parser, namespace, values, option_string=None):
1336
+ parser.print_help()
1337
+ parser.exit()
1338
+
1339
+
1340
+class _VersionAction(Action):
1341
+
1342
+ def __init__(self,
1343
+ option_strings,
1344
+ version=None,
1345
+ dest=SUPPRESS,
1346
+ default=SUPPRESS,
1347
+ help="show program's version number and exit"):
1348
+ super(_VersionAction, self).__init__(
1349
+ option_strings=option_strings,
1350
+ dest=dest,
1351
+ default=default,
1352
+ nargs=0,
1353
+ help=help)
1354
+ self.version = version
1355
+
1356
+ def __call__(self, parser, namespace, values, option_string=None):
1357
+ version = self.version
1358
+ if version is None:
1359
+ version = parser.version
1360
+ formatter = parser._get_formatter()
1361
+ formatter.add_text(version)
1362
+ parser.exit(message=formatter.format_help())
1363
+
1364
+
1365
+class _SubParsersAction(Action):
1366
+
1367
+ class _ChoicesPseudoAction(Action):
1368
+
1369
+ def __init__(self, name, aliases, help):
1370
+ metavar = dest = name
1371
+ if aliases:
1372
+ metavar += ' (%s)' % ', '.join(aliases)
1373
+ sup = super(_SubParsersAction._ChoicesPseudoAction, self)
1374
+ sup.__init__(option_strings=[], dest=dest, help=help,
1375
+ metavar=metavar)
1376
+
1377
+ def __init__(self,
1378
+ option_strings,
1379
+ prog,
1380
+ parser_class,
1381
+ dest=SUPPRESS,
1382
+ help=None,
1383
+ metavar=None):
1384
+
1385
+ self._prog_prefix = prog
1386
+ self._parser_class = parser_class
1387
+ self._name_parser_map = {}
1388
+ self._choices_actions = []
1389
+
1390
+ super(_SubParsersAction, self).__init__(
1391
+ option_strings=option_strings,
1392
+ dest=dest,
1393
+ nargs=PARSER,
1394
+ choices=self._name_parser_map,
1395
+ help=help,
1396
+ metavar=metavar)
1397
+
1398
+ def add_parser(self, name, **kwargs):
1399
+ # set prog from the existing prefix
1400
+ if kwargs.get('prog') is None:
1401
+ kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
1402
+
1403
+ aliases = kwargs.pop('aliases', ())
1404
+
1405
+ # create a pseudo-action to hold the choice help
1406
+ if 'help' in kwargs:
1407
+ help = kwargs.pop('help')
1408
+ choice_action = self._ChoicesPseudoAction(name, aliases, help)
1409
+ self._choices_actions.append(choice_action)
1410
+
1411
+ # create the parser and add it to the map
1412
+ parser = self._parser_class(**kwargs)
1413
+ self._name_parser_map[name] = parser
1414
+
1415
+ # make parser available under aliases also
1416
+ for alias in aliases:
1417
+ self._name_parser_map[alias] = parser
1418
+
1419
+ return parser
1420
+
1421
+ def _get_subactions(self):
1422
+ return self._choices_actions
1423
+
1424
+ def __call__(self, parser, namespace, values, option_string=None):
1425
+ parser_name = values[0]
1426
+ arg_strings = values[1:]
1427
+
1428
+ # set the parser name if requested
1429
+ if self.dest is not SUPPRESS:
1430
+ setattr(namespace, self.dest, parser_name)
1431
+
1432
+ # select the parser
1433
+ try:
1434
+ parser = self._name_parser_map[parser_name]
1435
+ except KeyError:
1436
+ tup = parser_name, ', '.join(self._name_parser_map)
1437
+ msg = _('unknown parser %r (choices: %s)' % tup)
1438
+ raise ArgumentError(self, msg)
1439
+
1440
+ # parse all the remaining options into the namespace
1441
+ # store any unrecognized options on the object, so that the top
1442
+ # level parser can decide what to do with them
1443
+ namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
1444
+ if arg_strings:
1445
+ vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
1446
+ getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
1447
+
1448
+
1449
+# ==============
1450
+# Type classes
1451
+# ==============
1452
+
1453
+class FileType(object):
1454
+ """Factory for creating file object types
1455
+
1456
+ Instances of FileType are typically passed as type= arguments to the
1457
+ ArgumentParser add_argument() method.
1458
+
1459
+ Keyword Arguments:
1460
+ - mode -- A string indicating how the file is to be opened. Accepts the
1461
+ same values as the builtin open() function.
1462
+ - bufsize -- The file's desired buffer size. Accepts the same values as
1463
+ the builtin open() function.
1464
+ """
1465
+
1466
+ def __init__(self, mode='r', bufsize=None):
1467
+ self._mode = mode
1468
+ self._bufsize = bufsize
1469
+
1470
+ def __call__(self, string):
1471
+ # the special argument "-" means sys.std{in,out}
1472
+ if string == '-':
1473
+ if 'r' in self._mode:
1474
+ return _sys.stdin
1475
+ elif 'w' in self._mode:
1476
+ return _sys.stdout
1477
+ else:
1478
+ msg = _('argument "-" with mode %r' % self._mode)
1479
+ raise ValueError(msg)
1480
+
1481
+ try:
1482
+ # all other arguments are used as file names
1483
+ if self._bufsize:
1484
+ return open(string, self._mode, self._bufsize)
1485
+ else:
1486
+ return open(string, self._mode)
1487
+ except IOError:
1488
+ err = _sys.exc_info()[1]
1489
+ message = _("can't open '%s': %s")
1490
+ raise ArgumentTypeError(message % (string, err))
1491
+
1492
+ def __repr__(self):
1493
+ args = [self._mode, self._bufsize]
1494
+ args_str = ', '.join([repr(arg) for arg in args if arg is not None])
1495
+ return '%s(%s)' % (type(self).__name__, args_str)
1496
+
1497
+# ===============================
1498
+# Optional and Positional Parsing
1499
+# ===============================
1500
+
1501
+class Namespace(_AttributeHolder):
1502
+ """Simple object for storing attributes.
1503
+
1504
+ Implements equality by attribute names and values, and provides a simple
1505
+ string representation.
1506
+ """
1507
+
1508
+ def __init__(self, **kwargs):
1509
+ for name in kwargs:
1510
+ setattr(self, name, kwargs[name])
1511
+
1512
+ __hash__ = None
1513
+
1514
+ def __eq__(self, other):
1515
+ return vars(self) == vars(other)
1516
+
1517
+ def __ne__(self, other):
1518
+ return not (self == other)
1519
+
1520
+ def __contains__(self, key):
1521
+ return key in self.__dict__
1522
+
1523
+
1524
+class _ActionsContainer(object):
1525
+
1526
+ def __init__(self,
1527
+ description,
1528
+ prefix_chars,
1529
+ argument_default,
1530
+ conflict_handler):
1531
+ super(_ActionsContainer, self).__init__()
1532
+
1533
+ self.description = description
1534
+ self.argument_default = argument_default
1535
+ self.prefix_chars = prefix_chars
1536
+ self.conflict_handler = conflict_handler
1537
+
1538
+ # set up registries
1539
+ self._registries = {}
1540
+
1541
+ # register actions
1542
+ self.register('action', None, _StoreAction)
1543
+ self.register('action', 'store', _StoreAction)
1544
+ self.register('action', 'store_const', _StoreConstAction)
1545
+ self.register('action', 'store_true', _StoreTrueAction)
1546
+ self.register('action', 'store_false', _StoreFalseAction)
1547
+ self.register('action', 'append', _AppendAction)
1548
+ self.register('action', 'append_const', _AppendConstAction)
1549
+ self.register('action', 'count', _CountAction)
1550
+ self.register('action', 'help', _HelpAction)
1551
+ self.register('action', 'version', _VersionAction)
1552
+ self.register('action', 'parsers', _SubParsersAction)
1553
+
1554
+ # raise an exception if the conflict handler is invalid
1555
+ self._get_handler()
1556
+
1557
+ # action storage
1558
+ self._actions = []
1559
+ self._option_string_actions = {}
1560
+
1561
+ # groups
1562
+ self._action_groups = []
1563
+ self._mutually_exclusive_groups = []
1564
+
1565
+ # defaults storage
1566
+ self._defaults = {}
1567
+
1568
+ # determines whether an "option" looks like a negative number
1569
+ self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
1570
+
1571
+ # whether or not there are any optionals that look like negative
1572
+ # numbers -- uses a list so it can be shared and edited
1573
+ self._has_negative_number_optionals = []
1574
+
1575
+ # ====================
1576
+ # Registration methods
1577
+ # ====================
1578
+ def register(self, registry_name, value, object):
1579
+ registry = self._registries.setdefault(registry_name, {})
1580
+ registry[value] = object
1581
+
1582
+ def _registry_get(self, registry_name, value, default=None):
1583
+ return self._registries[registry_name].get(value, default)
1584
+
1585
+ # ==================================
1586
+ # Namespace default accessor methods
1587
+ # ==================================
1588
+ def set_defaults(self, **kwargs):
1589
+ self._defaults.update(kwargs)
1590
+
1591
+ # if these defaults match any existing arguments, replace
1592
+ # the previous default on the object with the new one
1593
+ for action in self._actions:
1594
+ if action.dest in kwargs:
1595
+ action.default = kwargs[action.dest]
1596
+
1597
+ def get_default(self, dest):
1598
+ for action in self._actions:
1599
+ if action.dest == dest and action.default is not None:
1600
+ return action.default
1601
+ return self._defaults.get(dest, None)
1602
+
1603
+
1604
+ # =======================
1605
+ # Adding argument actions
1606
+ # =======================
1607
+ def add_argument(self, *args, **kwargs):
1608
+ """
1609
+ add_argument(dest, ..., name=value, ...)
1610
+ add_argument(option_string, option_string, ..., name=value, ...)
1611
+ """
1612
+
1613
+ # if no positional args are supplied or only one is supplied and
1614
+ # it doesn't look like an option string, parse a positional
1615
+ # argument
1616
+ chars = self.prefix_chars
1617
+ if not args or len(args) == 1 and args[0][0] not in chars:
1618
+ if args and 'dest' in kwargs:
1619
+ raise ValueError('dest supplied twice for positional argument')
1620
+ kwargs = self._get_positional_kwargs(*args, **kwargs)
1621
+
1622
+ # otherwise, we're adding an optional argument
1623
+ else:
1624
+ kwargs = self._get_optional_kwargs(*args, **kwargs)
1625
+
1626
+ # if no default was supplied, use the parser-level default
1627
+ if 'default' not in kwargs:
1628
+ dest = kwargs['dest']
1629
+ if dest in self._defaults:
1630
+ kwargs['default'] = self._defaults[dest]
1631
+ elif self.argument_default is not None:
1632
+ kwargs['default'] = self.argument_default
1633
+
1634
+ # create the action object, and add it to the parser
1635
+ action_class = self._pop_action_class(kwargs)
1636
+ if not _callable(action_class):
1637
+ raise ValueError('unknown action "%s"' % action_class)
1638
+ action = action_class(**kwargs)
1639
+
1640
+ # raise an error if the action type is not callable
1641
+ type_func = self._registry_get('type', action.type, action.type)
1642
+ if not _callable(type_func):
1643
+ raise ValueError('%r is not callable' % type_func)
1644
+
1645
+ return self._add_action(action)
1646
+
1647
+ def add_argument_group(self, *args, **kwargs):
1648
+ group = _ArgumentGroup(self, *args, **kwargs)
1649
+ self._action_groups.append(group)
1650
+ return group
1651
+
1652
+ def add_mutually_exclusive_group(self, **kwargs):
1653
+ group = _MutuallyExclusiveGroup(self, **kwargs)
1654
+ self._mutually_exclusive_groups.append(group)
1655
+ return group
1656
+
1657
+ def _add_action(self, action):
1658
+ # resolve any conflicts
1659
+ self._check_conflict(action)
1660
+
1661
+ # add to actions list
1662
+ self._actions.append(action)
1663
+ action.container = self
1664
+
1665
+ # index the action by any option strings it has
1666
+ for option_string in action.option_strings:
1667
+ self._option_string_actions[option_string] = action
1668
+
1669
+ # set the flag if any option strings look like negative numbers
1670
+ for option_string in action.option_strings:
1671
+ if self._negative_number_matcher.match(option_string):
1672
+ if not self._has_negative_number_optionals:
1673
+ self._has_negative_number_optionals.append(True)
1674
+
1675
+ # return the created action
1676
+ return action
1677
+
1678
+ def _remove_action(self, action):
1679
+ self._actions.remove(action)
1680
+
1681
+ def _add_container_actions(self, container):
1682
+ # collect groups by titles
1683
+ title_group_map = {}
1684
+ for group in self._action_groups:
1685
+ if group.title in title_group_map:
1686
+ msg = _('cannot merge actions - two groups are named %r')
1687
+ raise ValueError(msg % (group.title))
1688
+ title_group_map[group.title] = group
1689
+
1690
+ # map each action to its group
1691
+ group_map = {}
1692
+ for group in container._action_groups:
1693
+
1694
+ # if a group with the title exists, use that, otherwise
1695
+ # create a new group matching the container's group
1696
+ if group.title not in title_group_map:
1697
+ title_group_map[group.title] = self.add_argument_group(
1698
+ title=group.title,
1699
+ description=group.description,
1700
+ conflict_handler=group.conflict_handler)
1701
+
1702
+ # map the actions to their new group
1703
+ for action in group._group_actions:
1704
+ group_map[action] = title_group_map[group.title]
1705
+
1706
+ # add container's mutually exclusive groups
1707
+ # NOTE: if add_mutually_exclusive_group ever gains title= and
1708
+ # description= then this code will need to be expanded as above
1709
+ for group in container._mutually_exclusive_groups:
1710
+ mutex_group = self.add_mutually_exclusive_group(
1711
+ required=group.required)
1712
+
1713
+ # map the actions to their new mutex group
1714
+ for action in group._group_actions:
1715
+ group_map[action] = mutex_group
1716
+
1717
+ # add all actions to this container or their group
1718
+ for action in container._actions:
1719
+ group_map.get(action, self)._add_action(action)
1720
+
1721
+ def _get_positional_kwargs(self, dest, **kwargs):
1722
+ # make sure required is not specified
1723
+ if 'required' in kwargs:
1724
+ msg = _("'required' is an invalid argument for positionals")
1725
+ raise TypeError(msg)
1726
+
1727
+ # mark positional arguments as required if at least one is
1728
+ # always required
1729
+ if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
1730
+ kwargs['required'] = True
1731
+ if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
1732
+ kwargs['required'] = True
1733
+
1734
+ # return the keyword arguments with no option strings
1735
+ return dict(kwargs, dest=dest, option_strings=[])
1736
+
1737
+ def _get_optional_kwargs(self, *args, **kwargs):
1738
+ # determine short and long option strings
1739
+ option_strings = []
1740
+ long_option_strings = []
1741
+ for option_string in args:
1742
+ # error on strings that don't start with an appropriate prefix
1743
+ if not option_string[0] in self.prefix_chars:
1744
+ msg = _('invalid option string %r: '
1745
+ 'must start with a character %r')
1746
+ tup = option_string, self.prefix_chars
1747
+ raise ValueError(msg % tup)
1748
+
1749
+ # strings starting with two prefix characters are long options
1750
+ option_strings.append(option_string)
1751
+ if option_string[0] in self.prefix_chars:
1752
+ if len(option_string) > 1:
1753
+ if option_string[1] in self.prefix_chars:
1754
+ long_option_strings.append(option_string)
1755
+
1756
+ # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
1757
+ dest = kwargs.pop('dest', None)
1758
+ if dest is None:
1759
+ if long_option_strings:
1760
+ dest_option_string = long_option_strings[0]
1761
+ else:
1762
+ dest_option_string = option_strings[0]
1763
+ dest = dest_option_string.lstrip(self.prefix_chars)
1764
+ if not dest:
1765
+ msg = _('dest= is required for options like %r')
1766
+ raise ValueError(msg % option_string)
1767
+ dest = dest.replace('-', '_')
1768
+
1769
+ # return the updated keyword arguments
1770
+ return dict(kwargs, dest=dest, option_strings=option_strings)
1771
+
1772
+ def _pop_action_class(self, kwargs, default=None):
1773
+ action = kwargs.pop('action', default)
1774
+ return self._registry_get('action', action, action)
1775
+
1776
+ def _get_handler(self):
1777
+ # determine function from conflict handler string
1778
+ handler_func_name = '_handle_conflict_%s' % self.conflict_handler
1779
+ try:
1780
+ return getattr(self, handler_func_name)
1781
+ except AttributeError:
1782
+ msg = _('invalid conflict_resolution value: %r')
1783
+ raise ValueError(msg % self.conflict_handler)
1784
+
1785
+ def _check_conflict(self, action):
1786
+
1787
+ # find all options that conflict with this option
1788
+ confl_optionals = []
1789
+ for option_string in action.option_strings:
1790
+ if option_string in self._option_string_actions:
1791
+ confl_optional = self._option_string_actions[option_string]
1792
+ confl_optionals.append((option_string, confl_optional))
1793
+
1794
+ # resolve any conflicts
1795
+ if confl_optionals:
1796
+ conflict_handler = self._get_handler()
1797
+ conflict_handler(action, confl_optionals)
1798
+
1799
+ def _handle_conflict_error(self, action, conflicting_actions):
1800
+ message = _('conflicting option string(s): %s')
1801
+ conflict_string = ', '.join([option_string
1802
+ for option_string, action
1803
+ in conflicting_actions])
1804
+ raise ArgumentError(action, message % conflict_string)
1805
+
1806
+ def _handle_conflict_resolve(self, action, conflicting_actions):
1807
+
1808
+ # remove all conflicting options
1809
+ for option_string, action in conflicting_actions:
1810
+
1811
+ # remove the conflicting option
1812
+ action.option_strings.remove(option_string)
1813
+ self._option_string_actions.pop(option_string, None)
1814
+
1815
+ # if the option now has no option string, remove it from the
1816
+ # container holding it
1817
+ if not action.option_strings:
1818
+ action.container._remove_action(action)
1819
+
1820
+
1821
+class _ArgumentGroup(_ActionsContainer):
1822
+
1823
+ def __init__(self, container, title=None, description=None, **kwargs):
1824
+ # add any missing keyword arguments by checking the container
1825
+ update = kwargs.setdefault
1826
+ update('conflict_handler', container.conflict_handler)
1827
+ update('prefix_chars', container.prefix_chars)
1828
+ update('argument_default', container.argument_default)
1829
+ super_init = super(_ArgumentGroup, self).__init__
1830
+ super_init(description=description, **kwargs)
1831
+
1832
+ # group attributes
1833
+ self.title = title
1834
+ self._group_actions = []
1835
+
1836
+ # share most attributes with the container
1837
+ self._registries = container._registries
1838
+ self._actions = container._actions
1839
+ self._option_string_actions = container._option_string_actions
1840
+ self._defaults = container._defaults
1841
+ self._has_negative_number_optionals = \
1842
+ container._has_negative_number_optionals
1843
+
1844
+ def _add_action(self, action):
1845
+ action = super(_ArgumentGroup, self)._add_action(action)
1846
+ self._group_actions.append(action)
1847
+ return action
1848
+
1849
+ def _remove_action(self, action):
1850
+ super(_ArgumentGroup, self)._remove_action(action)
1851
+ self._group_actions.remove(action)
1852
+
1853
+
1854
+class _MutuallyExclusiveGroup(_ArgumentGroup):
1855
+
1856
+ def __init__(self, container, required=False):
1857
+ super(_MutuallyExclusiveGroup, self).__init__(container)
1858
+ self.required = required
1859
+ self._container = container
1860
+
1861
+ def _add_action(self, action):
1862
+ if action.required:
1863
+ msg = _('mutually exclusive arguments must be optional')
1864
+ raise ValueError(msg)
1865
+ action = self._container._add_action(action)
1866
+ self._group_actions.append(action)
1867
+ return action
1868
+
1869
+ def _remove_action(self, action):
1870
+ self._container._remove_action(action)
1871
+ self._group_actions.remove(action)
1872
+
1873
+
1874
+class ArgumentParser(_AttributeHolder, _ActionsContainer):
1875
+ """Object for parsing command line strings into Python objects.
1876
+
1877
+ Keyword Arguments:
1878
+ - prog -- The name of the program (default: sys.argv[0])
1879
+ - usage -- A usage message (default: auto-generated from arguments)
1880
+ - description -- A description of what the program does
1881
+ - epilog -- Text following the argument descriptions
1882
+ - parents -- Parsers whose arguments should be copied into this one
1883
+ - formatter_class -- HelpFormatter class for printing help messages
1884
+ - prefix_chars -- Characters that prefix optional arguments
1885
+ - fromfile_prefix_chars -- Characters that prefix files containing
1886
+ additional arguments
1887
+ - argument_default -- The default value for all arguments
1888
+ - conflict_handler -- String indicating how to handle conflicts
1889
+ - add_help -- Add a -h/--help option
1890
+ """
1891
+
1892
+ def __init__(self,
1893
+ prog=None,
1894
+ usage=None,
1895
+ description=None,
1896
+ epilog=None,
1897
+ version=None,
1898
+ parents=[],
1899
+ formatter_class=HelpFormatter,
1900
+ prefix_chars='-',
1901
+ fromfile_prefix_chars=None,
1902
+ argument_default=None,
1903
+ conflict_handler='error',
1904
+ add_help=True):
1905
+
1906
+ if version is not None:
1907
+ import warnings
1908
+ warnings.warn(
1909
+ """The "version" argument to ArgumentParser is deprecated. """
1910
+ """Please use """
1911
+ """"add_argument(..., action='version', version="N", ...)" """
1912
+ """instead""", DeprecationWarning)
1913
+
1914
+ superinit = super(ArgumentParser, self).__init__
1915
+ superinit(description=description,
1916
+ prefix_chars=prefix_chars,
1917
+ argument_default=argument_default,
1918
+ conflict_handler=conflict_handler)
1919
+
1920
+ # default setting for prog
1921
+ if prog is None:
1922
+ prog = _os.path.basename(_sys.argv[0])
1923
+
1924
+ self.prog = prog
1925
+ self.usage = usage
1926
+ self.epilog = epilog
1927
+ self.version = version
1928
+ self.formatter_class = formatter_class
1929
+ self.fromfile_prefix_chars = fromfile_prefix_chars
1930
+ self.add_help = add_help
1931
+
1932
+ add_group = self.add_argument_group
1933
+ self._positionals = add_group(_('positional arguments'))
1934
+ self._optionals = add_group(_('optional arguments'))
1935
+ self._subparsers = None
1936
+
1937
+ # register types
1938
+ def identity(string):
1939
+ return string
1940
+ self.register('type', None, identity)
1941
+
1942
+ # add help and version arguments if necessary
1943
+ # (using explicit default to override global argument_default)
1944
+ if '-' in prefix_chars:
1945
+ default_prefix = '-'
1946
+ else:
1947
+ default_prefix = prefix_chars[0]
1948
+ if self.add_help:
1949
+ self.add_argument(
1950
+ default_prefix+'h', default_prefix*2+'help',
1951
+ action='help', default=SUPPRESS,
1952
+ help=_('show this help message and exit'))
1953
+ if self.version:
1954
+ self.add_argument(
1955
+ default_prefix+'v', default_prefix*2+'version',
1956
+ action='version', default=SUPPRESS,
1957
+ version=self.version,
1958
+ help=_("show program's version number and exit"))
1959
+
1960
+ # add parent arguments and defaults
1961
+ for parent in parents:
1962
+ self._add_container_actions(parent)
1963
+ try:
1964
+ defaults = parent._defaults
1965
+ except AttributeError:
1966
+ pass
1967
+ else:
1968
+ self._defaults.update(defaults)
1969
+
1970
+ # =======================
1971
+ # Pretty __repr__ methods
1972
+ # =======================
1973
+ def _get_kwargs(self):
1974
+ names = [
1975
+ 'prog',
1976
+ 'usage',
1977
+ 'description',
1978
+ 'version',
1979
+ 'formatter_class',
1980
+ 'conflict_handler',
1981
+ 'add_help',
1982
+ ]
1983
+ return [(name, getattr(self, name)) for name in names]
1984
+
1985
+ # ==================================
1986
+ # Optional/Positional adding methods
1987
+ # ==================================
1988
+ def add_subparsers(self, **kwargs):
1989
+ if self._subparsers is not None:
1990
+ self.error(_('cannot have multiple subparser arguments'))
1991
+
1992
+ # add the parser class to the arguments if it's not present
1993
+ kwargs.setdefault('parser_class', type(self))
1994
+
1995
+ if 'title' in kwargs or 'description' in kwargs:
1996
+ title = _(kwargs.pop('title', 'subcommands'))
1997
+ description = _(kwargs.pop('description', None))
1998
+ self._subparsers = self.add_argument_group(title, description)
1999
+ else:
2000
+ self._subparsers = self._positionals
2001
+
2002
+ # prog defaults to the usage message of this parser, skipping
2003
+ # optional arguments and with no "usage:" prefix
2004
+ if kwargs.get('prog') is None:
2005
+ formatter = self._get_formatter()
2006
+ positionals = self._get_positional_actions()
2007
+ groups = self._mutually_exclusive_groups
2008
+ formatter.add_usage(self.usage, positionals, groups, '')
2009
+ kwargs['prog'] = formatter.format_help().strip()
2010
+
2011
+ # create the parsers action and add it to the positionals list
2012
+ parsers_class = self._pop_action_class(kwargs, 'parsers')
2013
+ action = parsers_class(option_strings=[], **kwargs)
2014
+ self._subparsers._add_action(action)
2015
+
2016
+ # return the created parsers action
2017
+ return action
2018
+
2019
+ def _add_action(self, action):
2020
+ if action.option_strings:
2021
+ self._optionals._add_action(action)
2022
+ else:
2023
+ self._positionals._add_action(action)
2024
+ return action
2025
+
2026
+ def _get_optional_actions(self):
2027
+ return [action
2028
+ for action in self._actions
2029
+ if action.option_strings]
2030
+
2031
+ def _get_positional_actions(self):
2032
+ return [action
2033
+ for action in self._actions
2034
+ if not action.option_strings]
2035
+
2036
+ # =====================================
2037
+ # Command line argument parsing methods
2038
+ # =====================================
2039
+ def parse_args(self, args=None, namespace=None):
2040
+ args, argv = self.parse_known_args(args, namespace)
2041
+ if argv:
2042
+ msg = _('unrecognized arguments: %s')
2043
+ self.error(msg % ' '.join(argv))
2044
+ return args
2045
+
2046
+ def parse_known_args(self, args=None, namespace=None):
2047
+ # args default to the system args
2048
+ if args is None:
2049
+ args = _sys.argv[1:]
2050
+
2051
+ # default Namespace built from parser defaults
2052
+ if namespace is None:
2053
+ namespace = Namespace()
2054
+
2055
+ # add any action defaults that aren't present
2056
+ for action in self._actions:
2057
+ if action.dest is not SUPPRESS:
2058
+ if not hasattr(namespace, action.dest):
2059
+ if action.default is not SUPPRESS:
2060
+ setattr(namespace, action.dest, action.default)
2061
+
2062
+ # add any parser defaults that aren't present
2063
+ for dest in self._defaults:
2064
+ if not hasattr(namespace, dest):
2065
+ setattr(namespace, dest, self._defaults[dest])
2066
+
2067
+ # parse the arguments and exit if there are any errors
2068
+ try:
2069
+ namespace, args = self._parse_known_args(args, namespace)
2070
+ if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
2071
+ args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
2072
+ delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
2073
+ return namespace, args
2074
+ except ArgumentError:
2075
+ err = _sys.exc_info()[1]
2076
+ self.error(str(err))
2077
+
2078
+ def _parse_known_args(self, arg_strings, namespace):
2079
+ # replace arg strings that are file references
2080
+ if self.fromfile_prefix_chars is not None:
2081
+ arg_strings = self._read_args_from_files(arg_strings)
2082
+
2083
+ # map all mutually exclusive arguments to the other arguments
2084
+ # they can't occur with
2085
+ action_conflicts = {}
2086
+ for mutex_group in self._mutually_exclusive_groups:
2087
+ group_actions = mutex_group._group_actions
2088
+ for i, mutex_action in enumerate(mutex_group._group_actions):
2089
+ conflicts = action_conflicts.setdefault(mutex_action, [])
2090
+ conflicts.extend(group_actions[:i])
2091
+ conflicts.extend(group_actions[i + 1:])
2092
+
2093
+ # find all option indices, and determine the arg_string_pattern
2094
+ # which has an 'O' if there is an option at an index,
2095
+ # an 'A' if there is an argument, or a '-' if there is a '--'
2096
+ option_string_indices = {}
2097
+ arg_string_pattern_parts = []
2098
+ arg_strings_iter = iter(arg_strings)
2099
+ for i, arg_string in enumerate(arg_strings_iter):
2100
+
2101
+ # all args after -- are non-options
2102
+ if arg_string == '--':
2103
+ arg_string_pattern_parts.append('-')
2104
+ for arg_string in arg_strings_iter:
2105
+ arg_string_pattern_parts.append('A')
2106
+
2107
+ # otherwise, add the arg to the arg strings
2108
+ # and note the index if it was an option
2109
+ else:
2110
+ option_tuple = self._parse_optional(arg_string)
2111
+ if option_tuple is None:
2112
+ pattern = 'A'
2113
+ else:
2114
+ option_string_indices[i] = option_tuple
2115
+ pattern = 'O'
2116
+ arg_string_pattern_parts.append(pattern)
2117
+
2118
+ # join the pieces together to form the pattern
2119
+ arg_strings_pattern = ''.join(arg_string_pattern_parts)
2120
+
2121
+ # converts arg strings to the appropriate type and then takes the action
2122
+ seen_actions = set()
2123
+ seen_non_default_actions = set()
2124
+
2125
+ def take_action(action, argument_strings, option_string=None):
2126
+ seen_actions.add(action)
2127
+ argument_values = self._get_values(action, argument_strings)
2128
+
2129
+ # error if this argument is not allowed with other previously
2130
+ # seen arguments, assuming that actions that use the default
2131
+ # value don't really count as "present"
2132
+ if argument_values is not action.default:
2133
+ seen_non_default_actions.add(action)
2134
+ for conflict_action in action_conflicts.get(action, []):
2135
+ if conflict_action in seen_non_default_actions:
2136
+ msg = _('not allowed with argument %s')
2137
+ action_name = _get_action_name(conflict_action)
2138
+ raise ArgumentError(action, msg % action_name)
2139
+
2140
+ # take the action if we didn't receive a SUPPRESS value
2141
+ # (e.g. from a default)
2142
+ if argument_values is not SUPPRESS:
2143
+ action(self, namespace, argument_values, option_string)
2144
+
2145
+ # function to convert arg_strings into an optional action
2146
+ def consume_optional(start_index):
2147
+
2148
+ # get the optional identified at this index
2149
+ option_tuple = option_string_indices[start_index]
2150
+ action, option_string, explicit_arg = option_tuple
2151
+
2152
+ # identify additional optionals in the same arg string
2153
+ # (e.g. -xyz is the same as -x -y -z if no args are required)
2154
+ match_argument = self._match_argument
2155
+ action_tuples = []
2156
+ while True:
2157
+
2158
+ # if we found no optional action, skip it
2159
+ if action is None:
2160
+ extras.append(arg_strings[start_index])
2161
+ return start_index + 1
2162
+
2163
+ # if there is an explicit argument, try to match the
2164
+ # optional's string arguments to only this
2165
+ if explicit_arg is not None:
2166
+ arg_count = match_argument(action, 'A')
2167
+
2168
+ # if the action is a single-dash option and takes no
2169
+ # arguments, try to parse more single-dash options out
2170
+ # of the tail of the option string
2171
+ chars = self.prefix_chars
2172
+ if arg_count == 0 and option_string[1] not in chars:
2173
+ action_tuples.append((action, [], option_string))
2174
+ char = option_string[0]
2175
+ option_string = char + explicit_arg[0]
2176
+ new_explicit_arg = explicit_arg[1:] or None
2177
+ optionals_map = self._option_string_actions
2178
+ if option_string in optionals_map:
2179
+ action = optionals_map[option_string]
2180
+ explicit_arg = new_explicit_arg
2181
+ else:
2182
+ msg = _('ignored explicit argument %r')
2183
+ raise ArgumentError(action, msg % explicit_arg)
2184
+
2185
+ # if the action expect exactly one argument, we've
2186
+ # successfully matched the option; exit the loop
2187
+ elif arg_count == 1:
2188
+ stop = start_index + 1
2189
+ args = [explicit_arg]
2190
+ action_tuples.append((action, args, option_string))
2191
+ break
2192
+
2193
+ # error if a double-dash option did not use the
2194
+ # explicit argument
2195
+ else:
2196
+ msg = _('ignored explicit argument %r')
2197
+ raise ArgumentError(action, msg % explicit_arg)
2198
+
2199
+ # if there is no explicit argument, try to match the
2200
+ # optional's string arguments with the following strings
2201
+ # if successful, exit the loop
2202
+ else:
2203
+ start = start_index + 1
2204
+ selected_patterns = arg_strings_pattern[start:]
2205
+ arg_count = match_argument(action, selected_patterns)
2206
+ stop = start + arg_count
2207
+ args = arg_strings[start:stop]
2208
+ action_tuples.append((action, args, option_string))
2209
+ break
2210
+
2211
+ # add the Optional to the list and return the index at which
2212
+ # the Optional's string args stopped
2213
+ assert action_tuples
2214
+ for action, args, option_string in action_tuples:
2215
+ take_action(action, args, option_string)
2216
+ return stop
2217
+
2218
+ # the list of Positionals left to be parsed; this is modified
2219
+ # by consume_positionals()
2220
+ positionals = self._get_positional_actions()
2221
+
2222
+ # function to convert arg_strings into positional actions
2223
+ def consume_positionals(start_index):
2224
+ # match as many Positionals as possible
2225
+ match_partial = self._match_arguments_partial
2226
+ selected_pattern = arg_strings_pattern[start_index:]
2227
+ arg_counts = match_partial(positionals, selected_pattern)
2228
+
2229
+ # slice off the appropriate arg strings for each Positional
2230
+ # and add the Positional and its args to the list
2231
+ for action, arg_count in zip(positionals, arg_counts):
2232
+ args = arg_strings[start_index: start_index + arg_count]
2233
+ start_index += arg_count
2234
+ take_action(action, args)
2235
+
2236
+ # slice off the Positionals that we just parsed and return the
2237
+ # index at which the Positionals' string args stopped
2238
+ positionals[:] = positionals[len(arg_counts):]
2239
+ return start_index
2240
+
2241
+ # consume Positionals and Optionals alternately, until we have
2242
+ # passed the last option string
2243
+ extras = []
2244
+ start_index = 0
2245
+ if option_string_indices:
2246
+ max_option_string_index = max(option_string_indices)
2247
+ else:
2248
+ max_option_string_index = -1
2249
+ while start_index <= max_option_string_index:
2250
+
2251
+ # consume any Positionals preceding the next option
2252
+ next_option_string_index = min([
2253
+ index
2254
+ for index in option_string_indices
2255
+ if index >= start_index])
2256
+ if start_index != next_option_string_index:
2257
+ positionals_end_index = consume_positionals(start_index)
2258
+
2259
+ # only try to parse the next optional if we didn't consume
2260
+ # the option string during the positionals parsing
2261
+ if positionals_end_index > start_index:
2262
+ start_index = positionals_end_index
2263
+ continue
2264
+ else:
2265
+ start_index = positionals_end_index
2266
+
2267
+ # if we consumed all the positionals we could and we're not
2268
+ # at the index of an option string, there were extra arguments
2269
+ if start_index not in option_string_indices:
2270
+ strings = arg_strings[start_index:next_option_string_index]
2271
+ extras.extend(strings)
2272
+ start_index = next_option_string_index
2273
+
2274
+ # consume the next optional and any arguments for it
2275
+ start_index = consume_optional(start_index)
2276
+
2277
+ # consume any positionals following the last Optional
2278
+ stop_index = consume_positionals(start_index)
2279
+
2280
+ # if we didn't consume all the argument strings, there were extras
2281
+ extras.extend(arg_strings[stop_index:])
2282
+
2283
+ # if we didn't use all the Positional objects, there were too few
2284
+ # arg strings supplied.
2285
+ if positionals:
2286
+ self.error(_('too few arguments'))
2287
+
2288
+ # make sure all required actions were present, and convert defaults.
2289
+ for action in self._actions:
2290
+ if action not in seen_actions:
2291
+ if action.required:
2292
+ name = _get_action_name(action)
2293
+ self.error(_('argument %s is required') % name)
2294
+ else:
2295
+ # Convert action default now instead of doing it before
2296
+ # parsing arguments to avoid calling convert functions
2297
+ # twice (which may fail) if the argument was given, but
2298
+ # only if it was defined already in the namespace
2299
+ if (action.default is not None and
2300
+ isinstance(action.default, basestring) and
2301
+ hasattr(namespace, action.dest) and
2302
+ action.default is getattr(namespace, action.dest)):
2303
+ setattr(namespace, action.dest,
2304
+ self._get_value(action, action.default))
2305
+
2306
+ # make sure all required groups had one option present
2307
+ for group in self._mutually_exclusive_groups:
2308
+ if group.required:
2309
+ for action in group._group_actions:
2310
+ if action in seen_non_default_actions:
2311
+ break
2312
+
2313
+ # if no actions were used, report the error
2314
+ else:
2315
+ names = [_get_action_name(action)
2316
+ for action in group._group_actions
2317
+ if action.help is not SUPPRESS]
2318
+ msg = _('one of the arguments %s is required')
2319
+ self.error(msg % ' '.join(names))
2320
+
2321
+ # return the updated namespace and the extra arguments
2322
+ return namespace, extras
2323
+
+    def _read_args_from_files(self, arg_strings):
+        # expand arguments referencing files
+        new_arg_strings = []
+        for arg_string in arg_strings:
+
+            # for regular arguments, just add them back into the list
+            if arg_string[0] not in self.fromfile_prefix_chars:
+                new_arg_strings.append(arg_string)
+
+            # replace arguments referencing files with the file content
+            else:
+                try:
+                    args_file = open(arg_string[1:])
+                    try:
+                        arg_strings = []
+                        for arg_line in args_file.read().splitlines():
+                            for arg in self.convert_arg_line_to_args(arg_line):
+                                arg_strings.append(arg)
+                        arg_strings = self._read_args_from_files(arg_strings)
+                        new_arg_strings.extend(arg_strings)
+                    finally:
+                        args_file.close()
+                except IOError:
+                    err = _sys.exc_info()[1]
+                    self.error(str(err))
+
+        # return the modified argument list
+        return new_arg_strings
+
+    def convert_arg_line_to_args(self, arg_line):
+        return [arg_line]
+
+    def _match_argument(self, action, arg_strings_pattern):
+        # match the pattern for this action to the arg strings
+        nargs_pattern = self._get_nargs_pattern(action)
+        match = _re.match(nargs_pattern, arg_strings_pattern)
+
+        # raise an exception if we weren't able to find a match
+        if match is None:
+            nargs_errors = {
+                None: _('expected one argument'),
+                OPTIONAL: _('expected at most one argument'),
+                ONE_OR_MORE: _('expected at least one argument'),
+            }
+            default = _('expected %s argument(s)') % action.nargs
+            msg = nargs_errors.get(action.nargs, default)
+            raise ArgumentError(action, msg)
+
+        # return the number of arguments matched
+        return len(match.group(1))
+
+    def _match_arguments_partial(self, actions, arg_strings_pattern):
+        # progressively shorten the actions list by slicing off the
+        # final actions until we find a match
+        result = []
+        for i in range(len(actions), 0, -1):
+            actions_slice = actions[:i]
+            pattern = ''.join([self._get_nargs_pattern(action)
+                               for action in actions_slice])
+            match = _re.match(pattern, arg_strings_pattern)
+            if match is not None:
+                result.extend([len(string) for string in match.groups()])
+                break
+
+        # return the list of arg string counts
+        return result
+
+    def _parse_optional(self, arg_string):
+        # if it's an empty string, it was meant to be a positional
+        if not arg_string:
+            return None
+
+        # if it doesn't start with a prefix, it was meant to be positional
+        if not arg_string[0] in self.prefix_chars:
+            return None
+
+        # if the option string is present in the parser, return the action
+        if arg_string in self._option_string_actions:
+            action = self._option_string_actions[arg_string]
+            return action, arg_string, None
+
+        # if it's just a single character, it was meant to be positional
+        if len(arg_string) == 1:
+            return None
+
+        # if the option string before the "=" is present, return the action
+        if '=' in arg_string:
+            option_string, explicit_arg = arg_string.split('=', 1)
+            if option_string in self._option_string_actions:
+                action = self._option_string_actions[option_string]
+                return action, option_string, explicit_arg
+
+        # search through all possible prefixes of the option string
+        # and all actions in the parser for possible interpretations
+        option_tuples = self._get_option_tuples(arg_string)
+
+        # if multiple actions match, the option string was ambiguous
+        if len(option_tuples) > 1:
+            options = ', '.join([option_string
+                for action, option_string, explicit_arg in option_tuples])
+            tup = arg_string, options
+            self.error(_('ambiguous option: %s could match %s') % tup)
+
+        # if exactly one action matched, this segmentation is good,
+        # so return the parsed action
+        elif len(option_tuples) == 1:
+            option_tuple, = option_tuples
+            return option_tuple
+
+        # if it was not found as an option, but it looks like a negative
+        # number, it was meant to be positional
+        # unless there are negative-number-like options
+        if self._negative_number_matcher.match(arg_string):
+            if not self._has_negative_number_optionals:
+                return None
+
+        # if it contains a space, it was meant to be a positional
+        if ' ' in arg_string:
+            return None
+
+        # it was meant to be an optional but there is no such option
+        # in this parser (though it might be a valid option in a subparser)
+        return None, arg_string, None
+
+    def _get_option_tuples(self, option_string):
+        result = []
+
+        # option strings starting with two prefix characters are only
+        # split at the '='
+        chars = self.prefix_chars
+        if option_string[0] in chars and option_string[1] in chars:
+            if '=' in option_string:
+                option_prefix, explicit_arg = option_string.split('=', 1)
+            else:
+                option_prefix = option_string
+                explicit_arg = None
+            for option_string in self._option_string_actions:
+                if option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # single character options can be concatenated with their arguments
+        # but multiple character options always have to have their argument
+        # separate
+        elif option_string[0] in chars and option_string[1] not in chars:
+            option_prefix = option_string
+            explicit_arg = None
+            short_option_prefix = option_string[:2]
+            short_explicit_arg = option_string[2:]
+
+            for option_string in self._option_string_actions:
+                if option_string == short_option_prefix:
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, short_explicit_arg
+                    result.append(tup)
+                elif option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # shouldn't ever get here
+        else:
+            self.error(_('unexpected option string: %s') % option_string)
+
+        # return the collected option tuples
+        return result
+
+    def _get_nargs_pattern(self, action):
+        # in all examples below, we have to allow for '--' args
+        # which are represented as '-' in the pattern
+        nargs = action.nargs
+
+        # the default (None) is assumed to be a single argument
+        if nargs is None:
+            nargs_pattern = '(-*A-*)'
+
+        # allow zero or one arguments
+        elif nargs == OPTIONAL:
+            nargs_pattern = '(-*A?-*)'
+
+        # allow zero or more arguments
+        elif nargs == ZERO_OR_MORE:
+            nargs_pattern = '(-*[A-]*)'
+
+        # allow one or more arguments
+        elif nargs == ONE_OR_MORE:
+            nargs_pattern = '(-*A[A-]*)'
+
+        # allow any number of options or arguments
+        elif nargs == REMAINDER:
+            nargs_pattern = '([-AO]*)'
+
+        # allow one argument followed by any number of options or arguments
+        elif nargs == PARSER:
+            nargs_pattern = '(-*A[-AO]*)'
+
+        # all others should be integers
+        else:
+            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+
+        # if this is an optional action, -- is not allowed
+        if action.option_strings:
+            nargs_pattern = nargs_pattern.replace('-*', '')
+            nargs_pattern = nargs_pattern.replace('-', '')
+
+        # return the pattern
+        return nargs_pattern
+
+    # ========================
+    # Value conversion methods
+    # ========================
+    def _get_values(self, action, arg_strings):
+        # for everything but PARSER args, strip out '--'
+        if action.nargs not in [PARSER, REMAINDER]:
+            arg_strings = [s for s in arg_strings if s != '--']
+
+        # optional argument produces a default when not present
+        if not arg_strings and action.nargs == OPTIONAL:
+            if action.option_strings:
+                value = action.const
+            else:
+                value = action.default
+            if isinstance(value, basestring):
+                value = self._get_value(action, value)
+                self._check_value(action, value)
+
+        # when nargs='*' on a positional, if there were no command-line
+        # args, use the default if it is anything other than None
+        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
+              not action.option_strings):
+            if action.default is not None:
+                value = action.default
+            else:
+                value = arg_strings
+            self._check_value(action, value)
+
+        # single argument or optional argument produces a single value
+        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
+            arg_string, = arg_strings
+            value = self._get_value(action, arg_string)
+            self._check_value(action, value)
+
+        # REMAINDER arguments convert all values, checking none
+        elif action.nargs == REMAINDER:
+            value = [self._get_value(action, v) for v in arg_strings]
+
+        # PARSER arguments convert all values, but check only the first
+        elif action.nargs == PARSER:
+            value = [self._get_value(action, v) for v in arg_strings]
+            self._check_value(action, value[0])
+
+        # all other types of nargs produce a list
+        else:
+            value = [self._get_value(action, v) for v in arg_strings]
+            for v in value:
+                self._check_value(action, v)
+
+        # return the converted value
+        return value
+
+    def _get_value(self, action, arg_string):
+        type_func = self._registry_get('type', action.type, action.type)
+        if not _callable(type_func):
+            msg = _('%r is not callable')
+            raise ArgumentError(action, msg % type_func)
+
+        # convert the value to the appropriate type
+        try:
+            result = type_func(arg_string)
+
+        # ArgumentTypeErrors indicate errors
+        except ArgumentTypeError:
+            name = getattr(action.type, '__name__', repr(action.type))
+            msg = str(_sys.exc_info()[1])
+            raise ArgumentError(action, msg)
+
+        # TypeErrors or ValueErrors also indicate errors
+        except (TypeError, ValueError):
+            name = getattr(action.type, '__name__', repr(action.type))
+            msg = _('invalid %s value: %r')
+            raise ArgumentError(action, msg % (name, arg_string))
+
+        # return the converted value
+        return result
+
+    def _check_value(self, action, value):
+        # converted value must be one of the choices (if specified)
+        if action.choices is not None and value not in action.choices:
+            tup = value, ', '.join(map(repr, action.choices))
+            msg = _('invalid choice: %r (choose from %s)') % tup
+            raise ArgumentError(action, msg)
+
+    # =======================
+    # Help-formatting methods
+    # =======================
+    def format_usage(self):
+        formatter = self._get_formatter()
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+        return formatter.format_help()
+
+    def format_help(self):
+        formatter = self._get_formatter()
+
+        # usage
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+
+        # description
+        formatter.add_text(self.description)
+
+        # positionals, optionals and user-defined groups
+        for action_group in self._action_groups:
+            formatter.start_section(action_group.title)
+            formatter.add_text(action_group.description)
+            formatter.add_arguments(action_group._group_actions)
+            formatter.end_section()
+
+        # epilog
+        formatter.add_text(self.epilog)
+
+        # determine help from format above
+        return formatter.format_help()
+
+    def format_version(self):
+        import warnings
+        warnings.warn(
+            'The format_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        formatter = self._get_formatter()
+        formatter.add_text(self.version)
+        return formatter.format_help()
+
+    def _get_formatter(self):
+        return self.formatter_class(prog=self.prog)
+
+    # =====================
+    # Help-printing methods
+    # =====================
+    def print_usage(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_usage(), file)
+
+    def print_help(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_help(), file)
+
+    def print_version(self, file=None):
+        import warnings
+        warnings.warn(
+            'The print_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        self._print_message(self.format_version(), file)
+
+    def _print_message(self, message, file=None):
+        if message:
+            if file is None:
+                file = _sys.stderr
+            file.write(message)
+
+    # ===============
+    # Exiting methods
+    # ===============
+    def exit(self, status=0, message=None):
+        if message:
+            self._print_message(message, _sys.stderr)
+        _sys.exit(status)
+
+    def error(self, message):
+        """error(message: string)
+
+        Prints a usage message incorporating the message to stderr and
+        exits.
+
+        If you override this in a subclass, it should not return -- it
+        should either exit or raise an exception.
+        """
+        self.print_usage(_sys.stderr)
+        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
--
2.13.5
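For illustration only (not part of the patch above): the parse_known_args()
behaviour implemented by the code above fills the namespace and hands back
any arguments it did not recognize instead of erroring out on them. A
minimal sketch, with a made-up --image option:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--image')
    # parse_known_args() returns (namespace, leftover arguments)
    namespace, extras = parser.parse_known_args(['--image', 'fedora',
                                                 '--verbose'])
    print(namespace.image)   # fedora
    print(extras)            # ['--verbose']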
Deleted patch
Add the scripts/ directory to sys.path so Python 2.6 will be able to
import argparse.

Cc: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: John Snow <jsnow@redhat.com>
Acked-by: Fam Zheng <famz@redhat.com>
Message-id: 20170825155732.15665-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/docker/docker.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/docker/docker.py b/tests/docker/docker.py
index XXXXXXX..XXXXXXX 100755
--- a/tests/docker/docker.py
+++ b/tests/docker/docker.py
@@ -XXX,XX +XXX,XX @@
 
 import os
 import sys
+sys.path.append(os.path.join(os.path.dirname(__file__),
+                             '..', '..', 'scripts'))
+import argparse
 import subprocess
 import json
 import hashlib
 import atexit
 import uuid
-import argparse
 import tempfile
 import re
 import signal
--
2.13.5
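For illustration only (not from this series): the same bundled-argparse
fallback can also be written so that sys.path is only extended when the
stdlib module is missing; the relative path below merely mirrors the
tests/docker/ layout used in the patch above.

    import os
    import sys

    try:
        import argparse                  # stdlib copy (Python 2.7+ / 3.x)
    except ImportError:
        # Python 2.6: fall back to the copy bundled under scripts/
        sys.path.append(os.path.join(os.path.dirname(__file__),
                                     '..', '..', 'scripts'))
        import argparse

    parser = argparse.ArgumentParser(prog='docker.py')
    parser.add_argument('--verbose', action='store_true')
    args, remaining = parser.parse_known_args()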