Commit 26b258e13860a885b337cf7564162b93894863e4
1 parent d28a1b6e

Use the DMA api to map virtio elements.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6904 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 1 changed file with 17 additions and 82 deletions
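For context on the change below: the DMA API replaces bounce-buffer copies through cpu_physical_memory_read()/write() with a map / access / unmap pattern. The sketch that follows is illustrative only, not part of the commit; the cpu_physical_memory_map()/unmap() signatures are inferred from the calls visible in the diff, and it assumes qemu's internal headers are in scope.

    /* Illustrative sketch (not from this commit) of the map/access/unmap
     * pattern the patch adopts. */
    static void dma_pattern_sketch(target_phys_addr_t addr,
                                   target_phys_addr_t size, int is_write)
    {
        target_phys_addr_t len = size;   /* in: bytes wanted; out: bytes mapped */
        uint8_t *buf;

        buf = cpu_physical_memory_map(addr, &len, is_write);
        if (buf == NULL || len != size) {
            /* range is MMIO or could not be mapped contiguously */
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        if (is_write)
            memset(buf, 0, len);         /* device writes straight into guest RAM */

        /* access_len (last argument) tells qemu how many bytes were really
         * modified, so only those pages are marked dirty at unmap time. */
        cpu_physical_memory_unmap(buf, len, is_write, is_write ? len : 0);
    }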
hw/virtio.c
@@ -16,8 +16,6 @@
 #include "virtio.h"
 #include "sysemu.h"
 
-//#define VIRTIO_ZERO_COPY
-
 /* from Linux's linux/virtio_pci.h */
 
 /* A 32-bit r/o bitmask of the features supported by the host */
@@ -113,43 +111,6 @@ struct VirtQueue
 #define VIRTIO_PCI_QUEUE_MAX 16
 
 /* virt queue functions */
-#ifdef VIRTIO_ZERO_COPY
-static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
-{
-    ram_addr_t off;
-    target_phys_addr_t addr1;
-
-    off = cpu_get_physical_page_desc(addr);
-    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-        fprintf(stderr, "virtio DMA to IO ram\n");
-        exit(1);
-    }
-
-    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
-
-    for (addr1 = addr + TARGET_PAGE_SIZE;
-         addr1 < TARGET_PAGE_ALIGN(addr + size);
-         addr1 += TARGET_PAGE_SIZE) {
-        ram_addr_t off1;
-
-        off1 = cpu_get_physical_page_desc(addr1);
-        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-            fprintf(stderr, "virtio DMA to IO ram\n");
-            exit(1);
-        }
-
-        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);
-
-        if (off1 != (off + (addr1 - addr))) {
-            fprintf(stderr, "discontigous virtio memory\n");
-            exit(1);
-        }
-    }
-
-    return phys_ram_base + off;
-}
-#endif
-
 static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
 {
     vq->vring.desc = pa;
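The 37 lines removed above implemented the zero-copy path by walking the range page by page with cpu_get_physical_page_desc(), rejecting MMIO and discontiguous memory by hand. The hypothetical helper below, not part of the patch, shows how the same invariant (whole element in directly-mappable, contiguous guest RAM) is expressed with the new API:

    /* Hypothetical helper (assumption, not in the patch): with the DMA API,
     * both old failure modes ("virtio DMA to IO ram" and "discontigous
     * virtio memory") collapse into a single check on the mapping result. */
    static void *map_whole_range_or_die(target_phys_addr_t gpa,
                                        target_phys_addr_t size, int is_write)
    {
        target_phys_addr_t len = size;
        void *p = cpu_physical_memory_map(gpa, &len, is_write);

        if (p == NULL || len != size) {  /* MMIO, or mapping ended early */
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }
        return p;
    }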
@@ -274,35 +235,22 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
     unsigned int offset;
     int i;
 
-#ifndef VIRTIO_ZERO_COPY
-    for (i = 0; i < elem->out_num; i++)
-        qemu_free(elem->out_sg[i].iov_base);
-#endif
-
     offset = 0;
     for (i = 0; i < elem->in_num; i++) {
         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
 
-#ifdef VIRTIO_ZERO_COPY
-        if (size) {
-            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
-            ram_addr_t off;
+        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
+                                  elem->in_sg[i].iov_len,
+                                  1, size);
 
-            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
-                cpu_physical_memory_set_dirty(addr + off);
-        }
-#else
-        if (size)
-            cpu_physical_memory_write(elem->in_addr[i],
-                                      elem->in_sg[i].iov_base,
-                                      size);
-
-        qemu_free(elem->in_sg[i].iov_base);
-#endif
-
-        offset += size;
+        offset += elem->in_sg[i].iov_len;
     }
 
+    for (i = 0; i < elem->out_num; i++)
+        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
+                                  elem->out_sg[i].iov_len,
+                                  0, elem->out_sg[i].iov_len);
+
     idx = (idx + vring_used_idx(vq)) % vq->vring.num;
 
     /* Get a pointer to the next entry in the used ring. */
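The completion side above relies on a convention worth spelling out: for buffers the device wrote into (in_sg, is_write = 1), access_len is the number of bytes actually produced, so qemu dirties only those pages; for buffers the device merely read (out_sg, is_write = 0), access_len has no dirtying effect. A minimal sketch of that convention, with a hypothetical helper name and an explicit clamp added for illustration:

    /* Sketch (assumption, not in the patch): unmap device-written buffers,
     * dirtying only the bytes the device actually produced. */
    static void unmap_written_sg_sketch(struct iovec *in_sg,
                                        unsigned int in_num, size_t written)
    {
        size_t offset = 0;
        unsigned int i;

        for (i = 0; i < in_num; i++) {
            /* clamp explicitly so 'written - offset' cannot wrap */
            size_t size = offset < written
                          ? MIN(written - offset, in_sg[i].iov_len) : 0;

            cpu_physical_memory_unmap(in_sg[i].iov_base, in_sg[i].iov_len,
                                      1, size);    /* dirty 'size' bytes */
            offset += in_sg[i].iov_len;
        }
    }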
@@ -414,6 +362,7 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
+    target_phys_addr_t len;
 
     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
         return 0;
@@ -424,37 +373,23 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
     do {
         struct iovec *sg;
+        int is_write = 0;
 
         if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
             elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
             sg = &elem->in_sg[elem->in_num++];
+            is_write = 1;
         } else
             sg = &elem->out_sg[elem->out_num++];
 
         /* Grab the first descriptor, and check it's OK. */
         sg->iov_len = vring_desc_len(vq, i);
+        len = sg->iov_len;
 
-#ifdef VIRTIO_ZERO_COPY
-        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
-#else
-        /* cap individual scatter element size to prevent unbounded allocations
-           of memory from the guest.  Practically speaking, no virtio driver
-           will ever pass more than a page in each element.  We set the cap to
-           be 2MB in case for some reason a large page makes it way into the
-           sg list.  When we implement a zero copy API, this limitation will
-           disappear */
-        if (sg->iov_len > (2 << 20))
-            sg->iov_len = 2 << 20;
-
-        sg->iov_base = qemu_malloc(sg->iov_len);
-        if (!(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
-            cpu_physical_memory_read(vring_desc_addr(vq, i),
-                                     sg->iov_base,
-                                     sg->iov_len);
-        }
-#endif
-        if (sg->iov_base == NULL) {
-            fprintf(stderr, "Invalid mapping\n");
+        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(vq, i), &len, is_write);
+
+        if (sg->iov_base == NULL || len != sg->iov_len) {
+            fprintf(stderr, "virtio: trying to map MMIO memory\n");
             exit(1);
         }
 
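Taken together, the virtqueue_pop() and virtqueue_fill() hunks give device models a zero-copy request lifecycle. A hedged end-to-end sketch, where handle_request() is a hypothetical request processor and the virtqueue_*/virtio_notify() calls are virtio.c's existing API:

    /* Illustrative device-side flow after this commit. virtqueue_pop() maps
     * the element's buffers into host memory; virtqueue_push() (fill + flush)
     * unmaps them and publishes the element on the used ring. */

    /* hypothetical request processor (assumption, not in virtio.c) */
    static unsigned int handle_request(struct iovec *out_sg, unsigned int out_num,
                                       struct iovec *in_sg, unsigned int in_num);

    static void example_handle_queue(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement elem;

        while (virtqueue_pop(vq, &elem)) {
            /* out_sg[]: guest->device data, read through the mapping;
             * in_sg[]:  device->guest space, written through the mapping */
            unsigned int written = handle_request(elem.out_sg, elem.out_num,
                                                  elem.in_sg, elem.in_num);

            virtqueue_push(vq, &elem, written);  /* unmap + used-ring update */
            virtio_notify(vdev, vq);             /* raise the guest interrupt */
        }
    }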