author    Kevin Wolf <kwolf@redhat.com>            2009-10-08 15:02:08 +0200
committer Anthony Liguori <aliguori@us.ibm.com>    2009-10-15 09:32:04 -0500
commit    ef845c3bf421290153154635dc18eaa677cecb43 (patch)
tree      ba19ca2a96584e32e0fa08bcbea93ea46232507e /block/qcow2.c
parent    be8b28a94020fd564c9bb9f0094e8c3ff45c0771 (diff)
qcow2: Bring synchronous read/write back to life
When the synchronous read and write functions were dropped, they were replaced by generic emulation functions. Unfortunately, these emulation functions don't provide the same semantics as the original functions did.

The original bdrv_read guaranteed that data was read synchronously and that the caller would not be interrupted during the read. The latter assumption no longer holds with the emulation function, which needs to use qemu_aio_poll and therefore allows the callback of any other concurrent AIO request to run during the read. This in turn means that (meta)data read earlier could have changed and be invalid now. qcow2 is not prepared to work this way, and it's just scary how many places there are where other requests could run.

I'm not sure yet where exactly it breaks, but you'll see breakage with virtio on qcow2 with a backing file. Providing synchronous functions again fixes the problem for me.

Patchworks-ID: 35437
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
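For illustration, the pattern behind such an emulation function looks roughly like the sketch below. This is a hypothetical rendering, not the actual block.c code of the time: the names SyncRWState, sync_rw_cb and emulated_read are made up, while bdrv_aio_readv(), qemu_iovec_init_external() and qemu_aio_wait() are the QEMU primitives of that era. The request is submitted as AIO and the caller spins in the event loop until its own callback has fired:

/* Illustrative sketch only -- not the actual block.c emulation code. */
typedef struct SyncRWState {
    int done;
    int ret;
} SyncRWState;

static void sync_rw_cb(void *opaque, int ret)
{
    SyncRWState *s = opaque;
    s->ret = ret;
    s->done = 1;
}

static int emulated_read(BlockDriverState *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors)
{
    SyncRWState s = { .done = 0, .ret = 0 };
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * 512,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);
    if (!bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
                        sync_rw_cb, &s)) {
        return -1;
    }

    /* This is the problematic part: while we wait for our own
     * completion, the callbacks of any other in-flight AIO requests
     * may run and modify qcow2 metadata under our feet. */
    while (!s.done) {
        qemu_aio_wait();
    }
    return s.ret;
}

The while (!s.done) loop is exactly where the semantics diverge: each iteration of the event loop may run the completion callback of an unrelated request, so metadata cached before the "synchronous" read can be stale afterwards. A true .bdrv_read implementation returns only after its own I/O has finished and never re-enters the event loop in between.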
Diffstat (limited to 'block/qcow2.c')
 block/qcow2.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/block/qcow2.c b/block/qcow2.c
index a9e768268..52584ed23 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -934,6 +934,51 @@ static int qcow_make_empty(BlockDriverState *bs)
     return 0;
 }
 
+static int qcow2_write(BlockDriverState *bs, int64_t sector_num,
+                       const uint8_t *buf, int nb_sectors)
+{
+    BDRVQcowState *s = bs->opaque;
+    int ret, index_in_cluster, n;
+    uint64_t cluster_offset;
+    int n_end;
+    QCowL2Meta l2meta;
+
+    while (nb_sectors > 0) {
+        memset(&l2meta, 0, sizeof(l2meta));
+
+        index_in_cluster = sector_num & (s->cluster_sectors - 1);
+        n_end = index_in_cluster + nb_sectors;
+        if (s->crypt_method &&
+            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
+            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
+        cluster_offset = qcow2_alloc_cluster_offset(bs, sector_num << 9,
+                                                    index_in_cluster,
+                                                    n_end, &n, &l2meta);
+        if (!cluster_offset)
+            return -1;
+        if (s->crypt_method) {
+            qcow2_encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
+                                  &s->aes_encrypt_key);
+            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
+                              s->cluster_data, n * 512);
+        } else {
+            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
+        }
+        if (ret != n * 512 || qcow2_alloc_cluster_link_l2(bs, cluster_offset, &l2meta) < 0) {
+            qcow2_free_any_clusters(bs, cluster_offset, l2meta.nb_clusters);
+            return -1;
+        }
+        nb_sectors -= n;
+        sector_num += n;
+        buf += n * 512;
+        if (l2meta.nb_clusters != 0) {
+            QLIST_REMOVE(&l2meta, next_in_flight);
+        }
+    }
+    s->cluster_cache_offset = -1; /* disable compressed cache */
+    return 0;
+}
+
 /* XXX: put compressed sectors first, then all the cluster aligned
    tables to avoid losing bytes in alignment */
 static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
@@ -1121,8 +1166,10 @@ static BlockDriver bdrv_qcow2 = {
     .bdrv_set_key = qcow_set_key,
     .bdrv_make_empty = qcow_make_empty,
-    .bdrv_aio_readv = qcow_aio_readv,
-    .bdrv_aio_writev = qcow_aio_writev,
+    .bdrv_read = qcow2_read,
+    .bdrv_write = qcow2_write,
+    .bdrv_aio_readv = qcow_aio_readv,
+    .bdrv_aio_writev = qcow_aio_writev,
     .bdrv_write_compressed = qcow_write_compressed,
     .bdrv_snapshot_create = qcow2_snapshot_create,
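With .bdrv_read and .bdrv_write filled in again, the generic block layer uses the driver's own synchronous functions instead of installing the AIO-based emulation for them. In simplified, hypothetical form (the real block.c of the time wired this up in bdrv_register() by filling in missing function pointers rather than branching per call), the effective dispatch is:

/* Simplified sketch of the effective dispatch, not literal block.c code. */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_read) {
        /* Driver provides a true synchronous read -- for qcow2 this
         * is qcow2_read(), restored by this patch. */
        return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
    }
    /* Otherwise fall back to the AIO-based emulation, with the
     * reentrancy problem described in the commit message. */
    return bdrv_read_em(bs, sector_num, buf, nb_sectors);
}

For qcow2 this means a request arriving through the synchronous path runs qcow2_read() or qcow2_write() from start to finish, with no chance for a concurrent request's callback to invalidate cached L2 table data in between.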