Commit 26b258e13860a885b337cf7564162b93894863e4

Authored by aliguori
1 parent d28a1b6e

Use the DMA API to map virtio elements.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6904 c046a42c-6fe2-441c-8c8c-71466251a162
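
The change replaces two ad hoc schemes with QEMU's DMA API: the default path bounce-buffered every scatter/gather element through qemu_malloc() plus cpu_physical_memory_read()/cpu_physical_memory_write(), while the experimental VIRTIO_ZERO_COPY path mapped guest pages by hand. Both give way to cpu_physical_memory_map()/cpu_physical_memory_unmap(). A minimal sketch of the pattern, assuming the declarations already included by hw/virtio.c; the helper name and its body are illustrative, not part of the patch:

    /* Illustrative only: map 'size' bytes of guest memory at 'gpa', let
       the device write into it, then release the mapping.  On unmap,
       access_len tells the core how many bytes were actually modified
       so dirty tracking stays precise. */
    static void dma_pattern_example(target_phys_addr_t gpa, size_t size)
    {
        target_phys_addr_t len = size;
        void *host = cpu_physical_memory_map(gpa, &len, 1 /* device writes */);

        if (host == NULL || len != size)
            return; /* range is MMIO or not host-contiguous */

        memset(host, 0, size);                  /* device fills the buffer */
        cpu_physical_memory_unmap(host, len, 1, size);
    }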
Showing 1 changed file with 17 additions and 82 deletions
hw/virtio.c
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -16,8 +16,6 @@
 #include "virtio.h"
 #include "sysemu.h"
 
-//#define VIRTIO_ZERO_COPY
-
 /* from Linux's linux/virtio_pci.h */
 
 /* A 32-bit r/o bitmask of the features supported by the host */
@@ -113,43 +111,6 @@ struct VirtQueue
 #define VIRTIO_PCI_QUEUE_MAX 16
 
 /* virt queue functions */
-#ifdef VIRTIO_ZERO_COPY
-static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
-{
-    ram_addr_t off;
-    target_phys_addr_t addr1;
-
-    off = cpu_get_physical_page_desc(addr);
-    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-        fprintf(stderr, "virtio DMA to IO ram\n");
-        exit(1);
-    }
-
-    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
-
-    for (addr1 = addr + TARGET_PAGE_SIZE;
-         addr1 < TARGET_PAGE_ALIGN(addr + size);
-         addr1 += TARGET_PAGE_SIZE) {
-        ram_addr_t off1;
-
-        off1 = cpu_get_physical_page_desc(addr1);
-        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-            fprintf(stderr, "virtio DMA to IO ram\n");
-            exit(1);
-        }
-
-        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);
-
-        if (off1 != (off + (addr1 - addr))) {
-            fprintf(stderr, "discontigous virtio memory\n");
-            exit(1);
-        }
-    }
-
-    return phys_ram_base + off;
-}
-#endif
-
 static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
 {
     vq->vring.desc = pa;
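
The deleted virtio_map_gpa() validated, page by page, that the whole range was ordinary RAM and physically contiguous in the host before returning a pointer into phys_ram_base. cpu_physical_memory_map() subsumes those checks: it returns NULL, or shortens the length it could map, when the range leaves contiguous RAM. A caller wanting the old all-or-nothing behaviour could wrap it like this (hypothetical helper, shown only for comparison):

    static void *map_gpa_or_die(target_phys_addr_t addr, size_t size, int is_write)
    {
        target_phys_addr_t len = size;
        void *p = cpu_physical_memory_map(addr, &len, is_write);

        /* same two failure modes the old helper treated as fatal:
           MMIO in the range, or RAM that is not host-contiguous */
        if (p == NULL || len != size) {
            fprintf(stderr, "virtio: bad DMA mapping\n");
            exit(1);
        }
        return p;
    }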
@@ -274,35 +235,22 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
     unsigned int offset;
     int i;
 
-#ifndef VIRTIO_ZERO_COPY
-    for (i = 0; i < elem->out_num; i++)
-        qemu_free(elem->out_sg[i].iov_base);
-#endif
-
     offset = 0;
     for (i = 0; i < elem->in_num; i++) {
         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
 
-#ifdef VIRTIO_ZERO_COPY
-        if (size) {
-            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
-            ram_addr_t off;
+        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
+                                  elem->in_sg[i].iov_len,
+                                  1, size);
 
-            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
-                cpu_physical_memory_set_dirty(addr + off);
-        }
-#else
-        if (size)
-            cpu_physical_memory_write(elem->in_addr[i],
-                                      elem->in_sg[i].iov_base,
-                                      size);
-
-        qemu_free(elem->in_sg[i].iov_base);
-#endif
-
-        offset += size;
+        offset += elem->in_sg[i].iov_len;
     }
 
+    for (i = 0; i < elem->out_num; i++)
+        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
+                                  elem->out_sg[i].iov_len,
+                                  0, elem->out_sg[i].iov_len);
+
     idx = (idx + vring_used_idx(vq)) % vq->vring.num;
 
     /* Get a pointer to the next entry in the used ring. */
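
Note the asymmetry between the two unmap calls above. For device-writable ("in") buffers, is_write is 1 and access_len is the number of bytes the device actually produced, so only pages that were really written get marked dirty; for device-read-only ("out") buffers, is_write is 0 and nothing is dirtied, but the mapping still has to be released. Schematically (buf, mapped_len, and written are placeholders):

    /* in buffer: device wrote 'written' of 'mapped_len' bytes */
    cpu_physical_memory_unmap(buf, mapped_len, 1, written);

    /* out buffer: device only read it; just drop the mapping */
    cpu_physical_memory_unmap(buf, mapped_len, 0, mapped_len);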
@@ -414,6 +362,7 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
+    target_phys_addr_t len;
 
     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
         return 0;
@@ -424,37 +373,23 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
     do {
         struct iovec *sg;
+        int is_write = 0;
 
         if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
             elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
             sg = &elem->in_sg[elem->in_num++];
+            is_write = 1;
         } else
             sg = &elem->out_sg[elem->out_num++];
 
         /* Grab the first descriptor, and check it's OK. */
         sg->iov_len = vring_desc_len(vq, i);
+        len = sg->iov_len;
 
-#ifdef VIRTIO_ZERO_COPY
-        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
-#else
-        /* cap individual scatter element size to prevent unbounded allocations
-           of memory from the guest. Practically speaking, no virtio driver
-           will ever pass more than a page in each element. We set the cap to
-           be 2MB in case for some reason a large page makes it way into the
-           sg list. When we implement a zero copy API, this limitation will
-           disappear */
-        if (sg->iov_len > (2 << 20))
-            sg->iov_len = 2 << 20;
-
-        sg->iov_base = qemu_malloc(sg->iov_len);
-        if (!(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
-            cpu_physical_memory_read(vring_desc_addr(vq, i),
-                                     sg->iov_base,
-                                     sg->iov_len);
-        }
-#endif
-        if (sg->iov_base == NULL) {
-            fprintf(stderr, "Invalid mapping\n");
+        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(vq, i), &len, is_write);
+
+        if (sg->iov_base == NULL || len != sg->iov_len) {
+            fprintf(stderr, "virtio: trying to map MMIO memory\n");
             exit(1);
         }
 
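
Taken together, the two halves give each VirtQueueElement a clear mapping lifecycle: virtqueue_pop() maps every descriptor, the device works on the host pointers in in_sg[]/out_sg[], and virtqueue_fill() unmaps them when the element is returned to the guest. A device-side sketch, assuming the usual virtqueue_push()/virtio_notify() helpers from this file and a hypothetical process() doing the actual I/O:

    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {        /* maps in_sg[] / out_sg[] */
        size_t written = process(&elem);      /* hypothetical device work */
        virtqueue_push(vq, &elem, written);   /* unmaps via virtqueue_fill() */
    }
    virtio_notify(vdev, vq);                  /* tell the guest */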