@@ -34,6 +34,7 @@
#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM 0x1110
+#define IVSHMEM_MAX_PEERS G_MAXUINT16
#define IVSHMEM_IOEVENTFD 0
#define IVSHMEM_MSI 1
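The new cap is not arbitrary: the ivshmem DOORBELL register packs the destination peer ID into its upper 16 bits (the MSI vector goes in the lower 16), so a peer numbered above G_MAXUINT16 could never be signalled anyway. A minimal guest-side sketch of that encoding, assuming bar0 points at the mapped register BAR; ring_doorbell is an illustrative helper, not part of this patch:

    #include <stdint.h>

    #define IVSHMEM_DOORBELL 12                /* register offset in BAR0 */

    /* Ring peer `peer_id`'s doorbell for MSI vector `vector`. */
    static void ring_doorbell(volatile uint32_t *bar0,
                              uint16_t peer_id, uint16_t vector)
    {
        bar0[IVSHMEM_DOORBELL / 4] = ((uint32_t)peer_id << 16) | vector;
    }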
@@ -416,31 +417,28 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
-/* this function increase the dynamic storage need to store data about other
- * guests */
-static int increase_dynamic_storage(IVShmemState *s, int new_min_size)
+/* grow the peers array so it can hold at least new_min_size entries */
+static int resize_peers(IVShmemState *s, int new_min_size)
{
- int j, old_nb_alloc;
+ int j, old_size;
- /* check for integer overflow */
- if (new_min_size >= INT_MAX / sizeof(Peer) - 1 || new_min_size <= 0) {
+    /* limit the maximum number of peers */
+ if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
return -1;
}
-
- old_nb_alloc = s->nb_peers;
-
- if (new_min_size >= s->nb_peers) {
- /* +1 because #new_min_size is used as last array index */
- s->nb_peers = new_min_size + 1;
- } else {
+ if (new_min_size <= s->nb_peers) {
return 0;
}
+ old_size = s->nb_peers;
+ s->nb_peers = new_min_size;
+
IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
+
s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
- /* zero out new pointers */
- for (j = old_nb_alloc; j < s->nb_peers; j++) {
- s->peers[j].eventfds = NULL;
+ for (j = old_size; j < s->nb_peers; j++) {
+ s->peers[j].eventfds = g_new(EventNotifier, s->vectors);
s->peers[j].nb_eventfds = 0;
}
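Taken together, the helper after this hunk reads roughly as follows (reconstructed from the diff; Peer, IVShmemState and IVSHMEM_DPRINTF are the existing definitions in hw/misc/ivshmem.c, and the final return 0 sits just below the hunk):

    static int resize_peers(IVShmemState *s, int new_min_size)
    {
        int j, old_size;

        /* limit the maximum number of peers */
        if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
            return -1;
        }
        if (new_min_size <= s->nb_peers) {
            return 0;                      /* already large enough */
        }

        old_size = s->nb_peers;
        s->nb_peers = new_min_size;

        IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);

        s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));

        /* every new slot gets one eventfd per MSI vector up front */
        for (j = old_size; j < s->nb_peers; j++) {
            s->peers[j].eventfds = g_new(EventNotifier, s->vectors);
            s->peers[j].nb_eventfds = 0;
        }

        return 0;
    }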
@@ -507,8 +505,8 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
/* make sure we have enough space for this guest */
if (incoming_posn >= s->nb_peers) {
- if (increase_dynamic_storage(s, incoming_posn) < 0) {
- error_report("increase_dynamic_storage() failed");
+ if (resize_peers(s, incoming_posn + 1) < 0) {
+ error_report("failed to resize peers array");
if (incoming_fd != -1) {
close(incoming_fd);
}
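Note the off-by-one shift at the call site: increase_dynamic_storage() took the largest index it had to accommodate and added one internally, while resize_peers() takes the required array size, so the caller now spells the size out:

    /* old: grow to hold index N      -> helper computed size N + 1
     * new: grow to hold SIZE entries -> caller passes N + 1 itself */
    resize_peers(s, incoming_posn + 1);

Since incoming_posn arrives from the ivshmem server over a socket, this call is also what bounds untrusted input to at most IVSHMEM_MAX_PEERS; on failure the message is dropped and any fd that rode along with it is closed, as the hunk shows.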
@@ -518,8 +516,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
if (incoming_fd == -1) {
-        /* if posn is positive and unseen before then this is our posn*/
-        if ((incoming_posn >= 0) &&
-            (s->peers[incoming_posn].eventfds == NULL)) {
+        /* if posn is positive and we have no ID yet, this is our posn */
+        if (incoming_posn >= 0 && s->vm_id == -1) {
/* receive our posn */
s->vm_id = incoming_posn;
return;
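The old test for "unseen before" keyed off s->peers[posn].eventfds still being NULL. With resize_peers() now allocating every slot's eventfds eagerly, that test could never fire again, so the rewritten condition keys off s->vm_id instead, which realize initializes to -1 and which stays there until the server sends our own ID. After the change the fd-less branch reads roughly (a reconstruction; the disconnect path is the existing close_guest_eventfds() named in the hunk header above):

    if (incoming_fd == -1) {
        /* if posn is positive and we have no ID yet, this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            s->vm_id = incoming_posn;      /* first ID-only message is ours */
            return;
        }

        /* otherwise a peer went away: tear down its eventfds */
        close_guest_eventfds(s, incoming_posn);
        return;
    }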
@@ -570,11 +567,6 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
* guests for each VM */
guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;
- if (guest_max_eventfd == 0) {
- /* one eventfd per MSI vector */
- s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
- }
-
/* this is an eventfd for a particular guest VM */
IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
guest_max_eventfd, incoming_fd);
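With the allocation hoisted into resize_peers(), the read handler can assume s->peers[incoming_posn].eventfds already exists and go straight to adopting the received fd into the next free vector slot, along the lines of (a sketch; event_notifier_init_fd() is QEMU's helper for wrapping an existing fd in an EventNotifier):

    /* wire the incoming fd up as this peer's next MSI-vector notifier */
    event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
                           incoming_fd);
    s->peers[incoming_posn].nb_eventfds++;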
@@ -811,12 +803,9 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
}
/* we allocate enough space for 16 guests and grow as needed */
- s->nb_peers = 16;
+ resize_peers(s, 16);
s->vm_id = -1;
- /* allocate/initialize space for interrupt handling */
- s->peers = g_malloc0(s->nb_peers * sizeof(Peer));
-
pci_register_bar(dev, 2, attr, &s->bar);
s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));
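realize now funnels its initial allocation through the same helper instead of open-coding a g_malloc0(); with the constant 16 the call cannot fail, so ignoring its return value is safe. Nothing is lost by dropping the zeroing either: at this point Peer carries only the two fields the resize loop initializes, roughly:

    typedef struct Peer {
        int nb_eventfds;             /* vectors wired up so far */
        EventNotifier *eventfds;     /* one per MSI vector, allocated in
                                        resize_peers() */
    } Peer;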