Commit 869a5c6df19aad2685bd7bc61db89a9735474105 (1 parent: ea8a5d7f)

Stop VM on error in virtio-blk. (Gleb Natapov)

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6410 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 2 changed files with 125 additions and 47 deletions
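
What the change does, as read from the diff below: a failed virtio-blk write no longer goes straight back to the guest as VIRTIO_BLK_S_IOERR. Instead virtio_blk_handle_write_error() consults drive_get_onerror(); under the "stop" policies the request is parked on the new s->rq list and vm_stop(0) pauses the VM, and virtio_blk_dma_restart_cb() resubmits the parked writes once the VM runs again. Parked requests are also written out by virtio_blk_save() and rebuilt by virtio_blk_load(), which is why the savevm version moves from 1 to 2. The vl.c hunk simply lets -drive accept werror= for virtio disks as well.

A sketch of how this would be exercised, not taken from the commit itself (the binary and disk image names are placeholders, and the werror values ignore/report/enospc/stop are assumed to be the ones drive_init already parses for IDE and SCSI):

    qemu-system-x86_64 -drive file=disk.img,if=virtio,werror=stop [...]

With werror=stop any write error pauses the VM; werror=enospc pauses only on ENOSPC and reports other errors to the guest; werror=ignore completes the request as if it succeeded; werror=report (the default, BLOCK_ERR_REPORT) keeps the old behaviour.
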
hw/virtio-blk.c
@@ -11,6 +11,8 @@
  *
  */
 
+#include <qemu-common.h>
+#include <sysemu.h>
 #include "virtio-blk.h"
 #include "block_int.h"
 
@@ -19,6 +21,7 @@ typedef struct VirtIOBlock
     VirtIODevice vdev;
     BlockDriverState *bs;
     VirtQueue *vq;
+    void *rq;
 } VirtIOBlock;
 
 static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
@@ -34,12 +37,44 @@ typedef struct VirtIOBlockReq
     struct virtio_blk_outhdr *out;
     size_t size;
     uint8_t *buffer;
+    struct VirtIOBlockReq *next;
 } VirtIOBlockReq;
 
+static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
+{
+    VirtIOBlock *s = req->dev;
+
+    req->in->status = status;
+    virtqueue_push(s->vq, &req->elem, req->size + sizeof(*req->in));
+    virtio_notify(&s->vdev, s->vq);
+
+    qemu_free(req->buffer);
+    qemu_free(req);
+}
+
+static int virtio_blk_handle_write_error(VirtIOBlockReq *req, int error)
+{
+    BlockInterfaceErrorAction action = drive_get_onerror(req->dev->bs);
+    VirtIOBlock *s = req->dev;
+
+    if (action == BLOCK_ERR_IGNORE)
+        return 0;
+
+    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
+        || action == BLOCK_ERR_STOP_ANY) {
+        req->next = s->rq;
+        s->rq = req;
+        vm_stop(0);
+    } else {
+        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+    }
+
+    return 1;
+}
+
 static void virtio_blk_rw_complete(void *opaque, int ret)
 {
     VirtIOBlockReq *req = opaque;
-    VirtIOBlock *s = req->dev;
 
     /* Copy read data to the guest */
     if (!ret && !(req->out->type & VIRTIO_BLK_T_OUT)) {
@@ -58,33 +93,71 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
                    len);
             offset += len;
         }
+    } else if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
+        if (virtio_blk_handle_write_error(req, -ret))
+            return;
     }
 
-    req->in->status = ret ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK;
-    virtqueue_push(s->vq, &req->elem, req->size + sizeof(*req->in));
-    virtio_notify(&s->vdev, s->vq);
+    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+}
 
-    qemu_free(req->buffer);
-    qemu_free(req);
+static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
+{
+    VirtIOBlockReq *req = qemu_mallocz(sizeof(*req));
+    if (req != NULL)
+        req->dev = s;
+    return req;
 }
 
 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
 {
-    VirtIOBlockReq *req;
+    VirtIOBlockReq *req = virtio_blk_alloc_request(s);
 
-    req = qemu_mallocz(sizeof(*req));
-    if (req == NULL)
-        return NULL;
-
-    req->dev = s;
-    if (!virtqueue_pop(s->vq, &req->elem)) {
-        qemu_free(req);
-        return NULL;
+    if (req != NULL) {
+        if (!virtqueue_pop(s->vq, &req->elem)) {
+            qemu_free(req);
+            return NULL;
+        }
     }
 
     return req;
 }
 
+static int virtio_blk_handle_write(VirtIOBlockReq *req)
+{
+    if (!req->buffer) {
+        size_t offset = 0;
+        int i;
+
+        for (i = 1; i < req->elem.out_num; i++)
+            req->size += req->elem.out_sg[i].iov_len;
+
+        req->buffer = qemu_memalign(512, req->size);
+        if (req->buffer == NULL) {
+            qemu_free(req);
+            return -1;
+        }
+
+        /* We copy the data from the SG list to avoid splitting up the request.
+           This helps performance a lot until we can pass full sg lists as AIO
+           operations */
+        for (i = 1; i < req->elem.out_num; i++) {
+            size_t len;
+
+            len = MIN(req->elem.out_sg[i].iov_len,
+                      req->size - offset);
+            memcpy(req->buffer + offset,
+                   req->elem.out_sg[i].iov_base,
+                   len);
+            offset += len;
+        }
+    }
+
+    bdrv_aio_write(req->dev->bs, req->out->sector, req->buffer, req->size / 512,
+                   virtio_blk_rw_complete, req);
+    return 0;
+}
+
 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOBlock *s = to_virtio_blk(vdev);
@@ -115,36 +188,8 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             virtio_notify(vdev, vq);
             qemu_free(req);
         } else if (req->out->type & VIRTIO_BLK_T_OUT) {
-            size_t offset;
-
-            for (i = 1; i < req->elem.out_num; i++)
-                req->size += req->elem.out_sg[i].iov_len;
-
-            req->buffer = qemu_memalign(512, req->size);
-            if (req->buffer == NULL) {
-                qemu_free(req);
+            if (virtio_blk_handle_write(req) < 0)
                 break;
-            }
-
-            /* We copy the data from the SG list to avoid splitting up the request. This helps
-               performance a lot until we can pass full sg lists as AIO operations */
-            offset = 0;
-            for (i = 1; i < req->elem.out_num; i++) {
-                size_t len;
-
-                len = MIN(req->elem.out_sg[i].iov_len,
-                          req->size - offset);
-                memcpy(req->buffer + offset,
-                       req->elem.out_sg[i].iov_base,
-                       len);
-                offset += len;
-            }
-
-            bdrv_aio_write(s->bs, req->out->sector,
-                           req->buffer,
-                           req->size / 512,
-                           virtio_blk_rw_complete,
-                           req);
         } else {
             for (i = 0; i < req->elem.in_num - 1; i++)
                 req->size += req->elem.in_sg[i].iov_len;
@@ -169,6 +214,22 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
      */
 }
 
+static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
+{
+    VirtIOBlock *s = opaque;
+    VirtIOBlockReq *req = s->rq;
+
+    if (!running)
+        return;
+
+    s->rq = NULL;
+
+    while (req) {
+        virtio_blk_handle_write(req);
+        req = req->next;
+    }
+}
+
 static void virtio_blk_reset(VirtIODevice *vdev)
 {
     /*
@@ -203,17 +264,32 @@ static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
 static void virtio_blk_save(QEMUFile *f, void *opaque)
 {
     VirtIOBlock *s = opaque;
+    VirtIOBlockReq *req = s->rq;
+
     virtio_save(&s->vdev, f);
+
+    while (req) {
+        qemu_put_sbyte(f, 1);
+        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
+        req = req->next;
+    }
+    qemu_put_sbyte(f, 0);
 }
 
 static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
 {
     VirtIOBlock *s = opaque;
 
-    if (version_id != 1)
+    if (version_id != 2)
         return -EINVAL;
 
     virtio_load(&s->vdev, f);
+    while (qemu_get_sbyte(f)) {
+        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
+        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
+        req->next = s->rq;
+        s->rq = req;
+    }
 
     return 0;
 }
@@ -237,12 +313,14 @@ void *virtio_blk_init(PCIBus *bus, BlockDriverState *bs)
     s->vdev.get_features = virtio_blk_get_features;
     s->vdev.reset = virtio_blk_reset;
     s->bs = bs;
+    s->rq = NULL;
     bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
     bdrv_set_geometry_hint(s->bs, cylinders, heads, secs);
 
     s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
 
-    register_savevm("virtio-blk", virtio_blk_id++, 1,
+    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+    register_savevm("virtio-blk", virtio_blk_id++, 2,
                     virtio_blk_save, virtio_blk_load, s);
 
     return s;
vl.c
@@ -2432,7 +2432,7 @@ static int drive_init(struct drive_opt *arg, int snapshot,
 
     onerror = BLOCK_ERR_REPORT;
     if (get_param_value(buf, sizeof(serial), "werror", str)) {
-        if (type != IF_IDE && type != IF_SCSI) {
+        if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO) {
             fprintf(stderr, "werror is no supported by this format\n");
             return -1;
         }
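
A note on the resume path, only as far as the diff above shows it: after vm_stop(0) the queued request keeps the req->buffer filled by the first attempt, so when the VM runs again the change-state handler virtio_blk_dma_restart_cb() reissues each queued write through virtio_blk_handle_write(), whose if (!req->buffer) guard skips re-copying the scatter-gather data. Resuming would typically be done from the monitor once the underlying problem (for example a full host filesystem) has been fixed, assuming the standard monitor command:

    (qemu) cont

Because pending requests also travel through virtio_blk_save()/virtio_blk_load(), a guest stopped on a write error can be snapshotted or migrated and the queued writes are retried once it starts running on the other side; the cost is that version 1 savevm images are now rejected by virtio_blk_load().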