
[v4,1/3] memory: introduce IOMMUNotifier and its caps

Message ID 1473389864-19694-2-git-send-email-peterx@redhat.com
State New

Commit Message

Peter Xu Sept. 9, 2016, 2:57 a.m. UTC
The IOMMU notifier list is used for notifying IO address mapping changes.
Currently VFIO is the only user.

However, it is possible that future consumers like vhost would like to
listen to only part of these notifications (e.g., cache invalidations).

This patch introduces IOMMUNotifier and IOMMUNotifierFlag bits for
finer-grained control of them.

IOMMUNotifier contains a bitfield in which the notification consumer
describes what kinds of notifications it is interested in. Currently two
kinds of notifications are defined:

- IOMMU_NOTIFIER_MAP:    for newly mapped entries (additions)
- IOMMU_NOTIFIER_UNMAP:  for entries to be removed (cache invalidates)

When registering the IOMMU notifier, we need to specify one or multiple
types of messages to listen to.

When a notification is triggered, its type is checked against each
notifier's registered bits, and only notifiers with matching bits are
notified.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 hw/vfio/common.c              |  3 ++-
 include/exec/memory.h         | 38 +++++++++++++++++++++++++++++++-------
 include/hw/vfio/vfio-common.h |  2 +-
 memory.c                      | 37 ++++++++++++++++++++++++++++---------
 4 files changed, 62 insertions(+), 18 deletions(-)
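
For illustration only (not part of this patch): a minimal sketch of how a
hypothetical consumer that only cares about cache invalidations (e.g. a
vhost-like user) could use the API introduced here. The ExampleUnmapListener
type and the function names are made up for this example.

#include "exec/memory.h"

/* Hypothetical consumer registering for UNMAP notifications only. */
typedef struct ExampleUnmapListener {
    IOMMUNotifier n;
    /* consumer-specific state would live here */
} ExampleUnmapListener;

static void example_unmap_notify(IOMMUNotifier *n, void *data)
{
    /* The opaque value is a pointer to an IOMMUTLBEntry; it is only
     * valid for the duration of this callback. */
    IOMMUTLBEntry *entry = data;

    /* Only UNMAP events arrive here, thanks to notifier_flags below;
     * a real consumer would invalidate cached translations covering
     * [entry->iova, entry->iova + entry->addr_mask]. */
    (void)entry;
}

static void example_register(ExampleUnmapListener *l, MemoryRegion *iommu_mr)
{
    l->n.notify = example_unmap_notify;
    /* Listen for UNMAP only, rather than IOMMU_NOTIFIER_ALL as VFIO does. */
    l->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
    memory_region_register_iommu_notifier(iommu_mr, &l->n);
}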

Comments

David Gibson Sept. 14, 2016, 5:48 a.m. UTC | #1
On Fri, Sep 09, 2016 at 10:57:42AM +0800, Peter Xu wrote:
> IOMMU Notifier list is used for notifying IO address mapping changes.
> Currently VFIO is the only user.
> 
> However it is possible that future consumer like vhost would like to
> only listen to part of its notifications (e.g., cache invalidations).
> 
> This patch introduced IOMMUNotifier and IOMMUNotfierFlag bits for a
> finer grained control of it.
> 
> IOMMUNotifier contains a bitfield for the notify consumer describing
> what kind of notification it is interested in. Currently two kinds of
> notifications are defined:
> 
> - IOMMU_NOTIFIER_MAP:    for newly mapped entries (additions)
> - IOMMU_NOTIFIER_UNMAP:  for entries to be removed (cache invalidates)
> 
> When registering the IOMMU notifier, we need to specify one or multiple
> types of messages to listen to.
> 
> When notifications are triggered, its type will be checked against the
> notifier's type bits, and only notifiers with registered bits will be
> notified.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  hw/vfio/common.c              |  3 ++-
>  include/exec/memory.h         | 38 +++++++++++++++++++++++++++++++-------
>  include/hw/vfio/vfio-common.h |  2 +-
>  memory.c                      | 37 ++++++++++++++++++++++++++++---------
>  4 files changed, 62 insertions(+), 18 deletions(-)
> 
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index b313e7c..41b6a13 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -293,7 +293,7 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>             section->offset_within_address_space & (1ULL << 63);
>  }
>  
> -static void vfio_iommu_map_notify(Notifier *n, void *data)
> +static void vfio_iommu_map_notify(IOMMUNotifier *n, void *data)
>  {
>      VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
>      VFIOContainer *container = giommu->container;
> @@ -454,6 +454,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
>                                 section->offset_within_region;
>          giommu->container = container;
>          giommu->n.notify = vfio_iommu_map_notify;
> +        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
>          QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
>  
>          memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
> diff --git a/include/exec/memory.h b/include/exec/memory.h
> index 3e4d416..e69e984 100644
> --- a/include/exec/memory.h
> +++ b/include/exec/memory.h
> @@ -67,6 +67,27 @@ struct IOMMUTLBEntry {
>      IOMMUAccessFlags perm;
>  };
>  
> +/*
> + * Bitmap for differnet IOMMUNotifier capabilities. Each notifier can
> + * register with one or multiple IOMMU Notifier capability bit(s).
> + */
> +typedef enum {
> +    IOMMU_NOTIFIER_NONE = 0,
> +    /* Notify cache invalidations */
> +    IOMMU_NOTIFIER_UNMAP = 0x1,
> +    /* Notify entry changes (newly created entries) */
> +    IOMMU_NOTIFIER_MAP = 0x2,
> +} IOMMUNotifierFlag;
> +
> +#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
> +
> +struct IOMMUNotifier {
> +    void (*notify)(struct IOMMUNotifier *notifier, void *data);
> +    IOMMUNotifierFlag notifier_flags;
> +    QLIST_ENTRY(IOMMUNotifier) node;
> +};
> +typedef struct IOMMUNotifier IOMMUNotifier;
> +
>  /* New-style MMIO accessors can indicate that the transaction failed.
>   * A zero (MEMTX_OK) response means success; anything else is a failure
>   * of some kind. The memory subsystem will bitwise-OR together results
> @@ -201,7 +222,7 @@ struct MemoryRegion {
>      const char *name;
>      unsigned ioeventfd_nb;
>      MemoryRegionIoeventfd *ioeventfds;
> -    NotifierList iommu_notify;
> +    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
>  };
>  
>  /**
> @@ -620,11 +641,12 @@ void memory_region_notify_iommu(MemoryRegion *mr,
>   * IOMMU translation entries.
>   *
>   * @mr: the memory region to observe
> - * @n: the notifier to be added; the notifier receives a pointer to an
> - *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
> - *     valid on exit from the notifier.
> + * @n: the IOMMUNotifier to be added; the notify callback receives a
> + *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
> + *     ceases to be valid on exit from the notifier.
>   */
> -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
> +void memory_region_register_iommu_notifier(MemoryRegion *mr,
> +                                           IOMMUNotifier *n);
>  
>  /**
>   * memory_region_iommu_replay: replay existing IOMMU translations to
> @@ -636,7 +658,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
>   * @is_write: Whether to treat the replay as a translate "write"
>   *     through the iommu
>   */
> -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
> +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
> +                                bool is_write);
>  
>  /**
>   * memory_region_unregister_iommu_notifier: unregister a notifier for
> @@ -646,7 +669,8 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
>   *      needs to be called
>   * @n: the notifier to be removed.
>   */
> -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n);
> +void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
> +                                             IOMMUNotifier *n);
>  
>  /**
>   * memory_region_name: get a memory region's name
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 94dfae3..c17602e 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -93,7 +93,7 @@ typedef struct VFIOGuestIOMMU {
>      VFIOContainer *container;
>      MemoryRegion *iommu;
>      hwaddr iommu_offset;
> -    Notifier n;
> +    IOMMUNotifier n;
>      QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
>  } VFIOGuestIOMMU;
>  
> diff --git a/memory.c b/memory.c
> index 0eb6895..f65c600 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -1418,7 +1418,7 @@ void memory_region_init_iommu(MemoryRegion *mr,
>      memory_region_init(mr, owner, name, size);
>      mr->iommu_ops = ops,
>      mr->terminates = true;  /* then re-forwards */
> -    notifier_list_init(&mr->iommu_notify);
> +    QLIST_INIT(&mr->iommu_notify);
>  }
>  
>  static void memory_region_finalize(Object *obj)
> @@ -1513,13 +1513,16 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
>      return memory_region_get_dirty_log_mask(mr) & (1 << client);
>  }
>  
> -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
> +void memory_region_register_iommu_notifier(MemoryRegion *mr,
> +                                           IOMMUNotifier *n)
>  {
> +    /* We need to register for at least one bitfield */
> +    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
>      if (mr->iommu_ops->notify_started &&
> -        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
> +        QLIST_EMPTY(&mr->iommu_notify)) {
>          mr->iommu_ops->notify_started(mr);
>      }
> -    notifier_list_add(&mr->iommu_notify, n);
> +    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
>  }
>  
>  uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
> @@ -1531,7 +1534,8 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
>      return TARGET_PAGE_SIZE;
>  }
>  
> -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
> +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
> +                                bool is_write)
>  {
>      hwaddr addr, granularity;
>      IOMMUTLBEntry iotlb;
> @@ -1552,11 +1556,12 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
>      }
>  }
>  
> -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
> +void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
> +                                             IOMMUNotifier *n)
>  {
> -    notifier_remove(n);
> +    QLIST_REMOVE(n, node);
>      if (mr->iommu_ops->notify_stopped &&
> -        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
> +        QLIST_EMPTY(&mr->iommu_notify)) {
>          mr->iommu_ops->notify_stopped(mr);
>      }
>  }
> @@ -1564,8 +1569,22 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
>  void memory_region_notify_iommu(MemoryRegion *mr,
>                                  IOMMUTLBEntry entry)
>  {
> +    IOMMUNotifier *iommu_notifier;
> +    IOMMUNotifierFlag request_flags;
> +
>      assert(memory_region_is_iommu(mr));
> -    notifier_list_notify(&mr->iommu_notify, &entry);
> +
> +    if (entry.perm & IOMMU_RW) {
> +        request_flags = IOMMU_NOTIFIER_MAP;
> +    } else {
> +        request_flags = IOMMU_NOTIFIER_UNMAP;
> +    }

This is still wrong.  UNMAP depends on the *previous* state of the
mapping, not the new state.

> +
> +    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
> +        if (iommu_notifier->notifier_flags & request_flags) {
> +            iommu_notifier->notify(iommu_notifier, &entry);
> +        }
> +    }
>  }
>  
>  void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
David Gibson Sept. 14, 2016, 7:15 a.m. UTC | #2
On Wed, Sep 14, 2016 at 03:48:32PM +1000, David Gibson wrote:
> On Fri, Sep 09, 2016 at 10:57:42AM +0800, Peter Xu wrote:
> > IOMMU Notifier list is used for notifying IO address mapping changes.
> > Currently VFIO is the only user.
> > 
> > However it is possible that future consumer like vhost would like to
> > only listen to part of its notifications (e.g., cache invalidations).
> > 
> > This patch introduced IOMMUNotifier and IOMMUNotfierFlag bits for a
> > finer grained control of it.
> > 
> > IOMMUNotifier contains a bitfield for the notify consumer describing
> > what kind of notification it is interested in. Currently two kinds of
> > notifications are defined:
> > 
> > - IOMMU_NOTIFIER_MAP:    for newly mapped entries (additions)
> > - IOMMU_NOTIFIER_UNMAP:  for entries to be removed (cache invalidates)
> > 
> > When registering the IOMMU notifier, we need to specify one or multiple
> > types of messages to listen to.
> > 
> > When notifications are triggered, its type will be checked against the
> > notifier's type bits, and only notifiers with registered bits will be
> > notified.
> > 
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> > ---
> >  hw/vfio/common.c              |  3 ++-
> >  include/exec/memory.h         | 38 +++++++++++++++++++++++++++++++-------
> >  include/hw/vfio/vfio-common.h |  2 +-
> >  memory.c                      | 37 ++++++++++++++++++++++++++++---------
> >  4 files changed, 62 insertions(+), 18 deletions(-)
> > 
> > diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> > index b313e7c..41b6a13 100644
> > --- a/hw/vfio/common.c
> > +++ b/hw/vfio/common.c
> > @@ -293,7 +293,7 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
> >             section->offset_within_address_space & (1ULL << 63);
> >  }
> >  
> > -static void vfio_iommu_map_notify(Notifier *n, void *data)
> > +static void vfio_iommu_map_notify(IOMMUNotifier *n, void *data)
> >  {
> >      VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
> >      VFIOContainer *container = giommu->container;
> > @@ -454,6 +454,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
> >                                 section->offset_within_region;
> >          giommu->container = container;
> >          giommu->n.notify = vfio_iommu_map_notify;
> > +        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
> >          QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
> >  
> >          memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
> > diff --git a/include/exec/memory.h b/include/exec/memory.h
> > index 3e4d416..e69e984 100644
> > --- a/include/exec/memory.h
> > +++ b/include/exec/memory.h
> > @@ -67,6 +67,27 @@ struct IOMMUTLBEntry {
> >      IOMMUAccessFlags perm;
> >  };
> >  
> > +/*
> > + * Bitmap for differnet IOMMUNotifier capabilities. Each notifier can
> > + * register with one or multiple IOMMU Notifier capability bit(s).
> > + */
> > +typedef enum {
> > +    IOMMU_NOTIFIER_NONE = 0,
> > +    /* Notify cache invalidations */
> > +    IOMMU_NOTIFIER_UNMAP = 0x1,
> > +    /* Notify entry changes (newly created entries) */
> > +    IOMMU_NOTIFIER_MAP = 0x2,
> > +} IOMMUNotifierFlag;
> > +
> > +#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
> > +
> > +struct IOMMUNotifier {
> > +    void (*notify)(struct IOMMUNotifier *notifier, void *data);
> > +    IOMMUNotifierFlag notifier_flags;
> > +    QLIST_ENTRY(IOMMUNotifier) node;
> > +};
> > +typedef struct IOMMUNotifier IOMMUNotifier;
> > +
> >  /* New-style MMIO accessors can indicate that the transaction failed.
> >   * A zero (MEMTX_OK) response means success; anything else is a failure
> >   * of some kind. The memory subsystem will bitwise-OR together results
> > @@ -201,7 +222,7 @@ struct MemoryRegion {
> >      const char *name;
> >      unsigned ioeventfd_nb;
> >      MemoryRegionIoeventfd *ioeventfds;
> > -    NotifierList iommu_notify;
> > +    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
> >  };
> >  
> >  /**
> > @@ -620,11 +641,12 @@ void memory_region_notify_iommu(MemoryRegion *mr,
> >   * IOMMU translation entries.
> >   *
> >   * @mr: the memory region to observe
> > - * @n: the notifier to be added; the notifier receives a pointer to an
> > - *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
> > - *     valid on exit from the notifier.
> > + * @n: the IOMMUNotifier to be added; the notify callback receives a
> > + *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
> > + *     ceases to be valid on exit from the notifier.
> >   */
> > -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
> > +void memory_region_register_iommu_notifier(MemoryRegion *mr,
> > +                                           IOMMUNotifier *n);
> >  
> >  /**
> >   * memory_region_iommu_replay: replay existing IOMMU translations to
> > @@ -636,7 +658,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
> >   * @is_write: Whether to treat the replay as a translate "write"
> >   *     through the iommu
> >   */
> > -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
> > +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
> > +                                bool is_write);
> >  
> >  /**
> >   * memory_region_unregister_iommu_notifier: unregister a notifier for
> > @@ -646,7 +669,8 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
> >   *      needs to be called
> >   * @n: the notifier to be removed.
> >   */
> > -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n);
> > +void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
> > +                                             IOMMUNotifier *n);
> >  
> >  /**
> >   * memory_region_name: get a memory region's name
> > diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> > index 94dfae3..c17602e 100644
> > --- a/include/hw/vfio/vfio-common.h
> > +++ b/include/hw/vfio/vfio-common.h
> > @@ -93,7 +93,7 @@ typedef struct VFIOGuestIOMMU {
> >      VFIOContainer *container;
> >      MemoryRegion *iommu;
> >      hwaddr iommu_offset;
> > -    Notifier n;
> > +    IOMMUNotifier n;
> >      QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
> >  } VFIOGuestIOMMU;
> >  
> > diff --git a/memory.c b/memory.c
> > index 0eb6895..f65c600 100644
> > --- a/memory.c
> > +++ b/memory.c
> > @@ -1418,7 +1418,7 @@ void memory_region_init_iommu(MemoryRegion *mr,
> >      memory_region_init(mr, owner, name, size);
> >      mr->iommu_ops = ops,
> >      mr->terminates = true;  /* then re-forwards */
> > -    notifier_list_init(&mr->iommu_notify);
> > +    QLIST_INIT(&mr->iommu_notify);
> >  }
> >  
> >  static void memory_region_finalize(Object *obj)
> > @@ -1513,13 +1513,16 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
> >      return memory_region_get_dirty_log_mask(mr) & (1 << client);
> >  }
> >  
> > -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
> > +void memory_region_register_iommu_notifier(MemoryRegion *mr,
> > +                                           IOMMUNotifier *n)
> >  {
> > +    /* We need to register for at least one bitfield */
> > +    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
> >      if (mr->iommu_ops->notify_started &&
> > -        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
> > +        QLIST_EMPTY(&mr->iommu_notify)) {
> >          mr->iommu_ops->notify_started(mr);
> >      }
> > -    notifier_list_add(&mr->iommu_notify, n);
> > +    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
> >  }
> >  
> >  uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
> > @@ -1531,7 +1534,8 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
> >      return TARGET_PAGE_SIZE;
> >  }
> >  
> > -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
> > +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
> > +                                bool is_write)
> >  {
> >      hwaddr addr, granularity;
> >      IOMMUTLBEntry iotlb;
> > @@ -1552,11 +1556,12 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
> >      }
> >  }
> >  
> > -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
> > +void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
> > +                                             IOMMUNotifier *n)
> >  {
> > -    notifier_remove(n);
> > +    QLIST_REMOVE(n, node);
> >      if (mr->iommu_ops->notify_stopped &&
> > -        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
> > +        QLIST_EMPTY(&mr->iommu_notify)) {
> >          mr->iommu_ops->notify_stopped(mr);
> >      }
> >  }
> > @@ -1564,8 +1569,22 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
> >  void memory_region_notify_iommu(MemoryRegion *mr,
> >                                  IOMMUTLBEntry entry)
> >  {
> > +    IOMMUNotifier *iommu_notifier;
> > +    IOMMUNotifierFlag request_flags;
> > +
> >      assert(memory_region_is_iommu(mr));
> > -    notifier_list_notify(&mr->iommu_notify, &entry);
> > +
> > +    if (entry.perm & IOMMU_RW) {
> > +        request_flags = IOMMU_NOTIFIER_MAP;
> > +    } else {
> > +        request_flags = IOMMU_NOTIFIER_UNMAP;
> > +    }
> 
> This is still wrong.  UNMAP depends on the *previous* state of the
> mapping, not the new state.

Peter pointed out to me on IRC that VFIO already assumes that it's
only an unmap if the new permissions are NONE.  So one can argue that
it's an existing constraint of the IOMMUTLBEntry interface that a
mapping can only ever transition from valid->invalid or
invalid->valid.  Changing one valid entry to another would require two
notifications: one switching it to a blank entry with NONE permissions,
then another notifying the new valid mapping.

Assuming that constraint, Peter's patch is correct.

I'm pretty uneasy about that constraint, because it's not necessarily
obvious to someone implementing a new vIOMMU device, which is
responsible for triggering the notifies.  From just the callback, it
looks like it should be fine to just fire the notify with the new
mapping which replaced the old.

Peter suggested commenting this next to the IOMMUTLBEntry definition, and
I think that's probably ok for now.  I do think we should consider
changing the notify interface to make this more obvious.  I can see
one of two ways to do that:

    * Fully allow in-place changes to be notified - the callback would
      need to be passed both the new entry and at least the old
      permissions, if not the old entry.

    * Instead have separate map and unmap notifier chains with
      separate callbacks.  That should make it obvious to a vIOMMU
      author that an in-place change would need first an unmap
      notify, then a map notify.
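
To make that constraint concrete, here is a sketch (not from the patch;
viommu_change_mapping() is a made-up name) of how a vIOMMU implementation
would have to notify an in-place change under the current rules, i.e. as an
unmap followed by a map:

static void viommu_change_mapping(MemoryRegion *iommu_mr,
                                  IOMMUTLBEntry old_entry,
                                  IOMMUTLBEntry new_entry)
{
    IOMMUTLBEntry unmap = old_entry;

    /* First notify removal of the old mapping for this IOVA range... */
    unmap.perm = IOMMU_NONE;
    unmap.translated_addr = 0;
    memory_region_notify_iommu(iommu_mr, unmap);

    /* ...then notify the new mapping.  With this patch, the first call
     * reaches UNMAP listeners and the second reaches MAP listeners. */
    memory_region_notify_iommu(iommu_mr, new_entry);
}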

> 
> > +
> > +    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
> > +        if (iommu_notifier->notifier_flags & request_flags) {
> > +            iommu_notifier->notify(iommu_notifier, &entry);
> > +        }
> > +    }
> >  }
> >  
> >  void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
>
Peter Xu Sept. 14, 2016, 7:40 a.m. UTC | #3
On Wed, Sep 14, 2016 at 05:15:03PM +1000, David Gibson wrote:

[...]

> > > @@ -1564,8 +1569,22 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
> > >  void memory_region_notify_iommu(MemoryRegion *mr,
> > >                                  IOMMUTLBEntry entry)
> > >  {
> > > +    IOMMUNotifier *iommu_notifier;
> > > +    IOMMUNotifierFlag request_flags;
> > > +
> > >      assert(memory_region_is_iommu(mr));
> > > -    notifier_list_notify(&mr->iommu_notify, &entry);
> > > +
> > > +    if (entry.perm & IOMMU_RW) {
> > > +        request_flags = IOMMU_NOTIFIER_MAP;
> > > +    } else {
> > > +        request_flags = IOMMU_NOTIFIER_UNMAP;
> > > +    }
> > 
> > This is still wrong.  UNMAP depends on the *previous* state of the
> > mapping, not the new state.
> 
> Peter pointed out to be on IRC that VFIO already assumes that it's
> only an unmap if the new permissions are NONE.  So one can argue that
> it's an existing constraint of the IOMMUTLBEntry interface that a
> mapping can only ever transition from valid->invalid or
> invalid->valid.  Changing one valid entry to another would require two
> notifications one switching it to a blank entry with NONE permissions,
> then another notifying the new valid mapping.
> 
> Assuming that constraint, Peter's patch is correct.
> 
> I'm pretty uneasy about that constraint, because it's not necessarily
> obvious to someone implementing a new vIOMMU device, which is
> responsible for triggering the notifies.  From just the callback, it
> looks like it should be fine to just fire the notify with the new
> mapping which replaced the old.
> 
> Peter suggested commenting this next to the IOTLBEntry definition, and
> I think that's probably ok for now.  I do think we should consider
> changing the notify interface to make this more obvious.  I can see
> one of two ways to do that:
> 
>     * Fully allow in-place changes to be notified - the callback would
>       need to be passed both the new entry and at least the old
>       permissions, if not the old entry.
> 
>     * Instead have separate map and unmap notifier chains with
>       separate callbacks.  That should make it obvious to a vIOMMU
>       author that an in-place change would need first an unmap
>       notify, then a map notify.

Thanks for the summary!

While we are at it... I am still curious about when we will need
this CHANGE interface.

Setting the Linux kernel aside, yes, we can have other guest OSes
running. However, for any guest, IMHO changing IOMMU PTEs in place is
extremely dangerous. For example, suppose we have mapped an area of memory,
e.g. three DMA pages, each 4K (the size really doesn't matter):

    page1 (0-4k)
    page2 (4k-8k)
    page3 (8k-12k)

If we want to modify the 12K mapping in place (e.g., changing page1
through page3 in order), the result can be undefined, since the IOMMU
might still be using these page mappings during the modification. The
problem is that we cannot change the three pages in one atomic
operation, so if the IOMMU uses these pages during the modification
(e.g., the CPU has just changed page1, but not yet page2 and page3),
the IOMMU will see an inconsistent view of memory. That's trouble.

I guess this is why Linux uses unmap_page() and map_page() for it?
Or, put another way, we simply do not allow changing the content of a
mapping directly. Not sure. Also, I assume we may need something like
a "quiesce" IOMMU operation (or not a new operation, but existing
procedures?) to finally make sure that the pages we are removing will
never be touched by the IOMMU any more before freeing them.

All of the above is wild guessing on my part; I just want to know when
and why we will need this CHANGE stuff.

And until we find we really need it, I would still suggest keeping the
old interface (as long as it works for us, no extra effort needed),
and, as David has mentioned, we can add a comment for IOMMUTLBEntry so
that people can understand its meaning more easily (before having to
read the vfio_iommu_map_notify() code).

Thanks,

-- peterx
Peter Xu Sept. 14, 2016, 8:17 a.m. UTC | #4
On Wed, Sep 14, 2016 at 05:15:03PM +1000, David Gibson wrote:

[...]

> Peter suggested commenting this next to the IOTLBEntry definition, and
> I think that's probably ok for now.

Looks like we have something already (just not that obvious):

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

Though it's quite brief, it does explain that perm == 0 means a deleted
entry.
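
One possible way to extend that comment so the constraint is explicit (a
sketch only, not something in this patch or in the tree):

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.  An entry with .@perm != 0
 *         must not overlap an existing valid mapping; an in-place change
 *         has to be notified as a deletion (perm == 0) followed by a
 *         second notification carrying the new mapping.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);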

Thanks,

-- peterx
David Gibson Sept. 14, 2016, 10:47 a.m. UTC | #5
On Wed, Sep 14, 2016 at 03:40:08PM +0800, Peter Xu wrote:
> On Wed, Sep 14, 2016 at 05:15:03PM +1000, David Gibson wrote:
> 
> [...]
> 
> > > > @@ -1564,8 +1569,22 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
> > > >  void memory_region_notify_iommu(MemoryRegion *mr,
> > > >                                  IOMMUTLBEntry entry)
> > > >  {
> > > > +    IOMMUNotifier *iommu_notifier;
> > > > +    IOMMUNotifierFlag request_flags;
> > > > +
> > > >      assert(memory_region_is_iommu(mr));
> > > > -    notifier_list_notify(&mr->iommu_notify, &entry);
> > > > +
> > > > +    if (entry.perm & IOMMU_RW) {
> > > > +        request_flags = IOMMU_NOTIFIER_MAP;
> > > > +    } else {
> > > > +        request_flags = IOMMU_NOTIFIER_UNMAP;
> > > > +    }
> > > 
> > > This is still wrong.  UNMAP depends on the *previous* state of the
> > > mapping, not the new state.
> > 
> > Peter pointed out to be on IRC that VFIO already assumes that it's
> > only an unmap if the new permissions are NONE.  So one can argue that
> > it's an existing constraint of the IOMMUTLBEntry interface that a
> > mapping can only ever transition from valid->invalid or
> > invalid->valid.  Changing one valid entry to another would require two
> > notifications one switching it to a blank entry with NONE permissions,
> > then another notifying the new valid mapping.
> > 
> > Assuming that constraint, Peter's patch is correct.
> > 
> > I'm pretty uneasy about that constraint, because it's not necessarily
> > obvious to someone implementing a new vIOMMU device, which is
> > responsible for triggering the notifies.  From just the callback, it
> > looks like it should be fine to just fire the notify with the new
> > mapping which replaced the old.
> > 
> > Peter suggested commenting this next to the IOTLBEntry definition, and
> > I think that's probably ok for now.  I do think we should consider
> > changing the notify interface to make this more obvious.  I can see
> > one of two ways to do that:
> > 
> >     * Fully allow in-place changes to be notified - the callback would
> >       need to be passed both the new entry and at least the old
> >       permissions, if not the old entry.
> > 
> >     * Instead have separate map and unmap notifier chains with
> >       separate callbacks.  That should make it obvious to a vIOMMU
> >       author that an in-place change would need first an unmap
> >       notify, then a map notify.
> 
> Thanks for the summary!
> 
> Since we are at this... I am still curious about when we will need
> this CHANGE interface.

It doesn't require any new interface, just removal of a subtle
constraint on the current one.

And we don't, strictly speaking, need it.  However, leaving subtle
constraints on how you can use an interface that aren't obvious from
the interface itself and can't be verified by it is just leaving booby
traps for future developers.

> Not to talk about Linux kernel, yes we can have other guest OS
> running. However for any guests, IMHO changing IOMMU PTE is extremely
> dangerous. For example, if we have mapped an area of memory, e.g.
> three DMA pages, each 4K (which really doesn't matter):
> 
>     page1 (0-4k)
>     page2 (4k-8k)
>     page3 (8k-12k)
> 
> If we want to modify the 12K mapping (e.g., change in-place from page1
> to page3 in order), the result can be undefined. Since IOMMU might
> still be using these page mappings during the modification. The
> problem is that, we cannot do this change for the three pages in an
> atomic operation. So if IOMMU uses these pages during the modification
> (e.g., CPU just changed page1, but not yet for page2 and page3), IOMMU
> will see an inconsistent view of memory. That's trouble.

Yes, changing the IOMMU mappings will require synchronization with
device drivers and hardware, but what that involves is between the
guest and the vIOMMU implementation.  Having "simultaneous" unmap/map
is somewhat unlikely in practice, I'll grant, but it's absolutely
possible that a vIOMMU could do this safely.

I could even imagine it happening if the guest always maps and unmaps
separately, if the vIOMMU did some sort of operation batching.

> I guess this is why Linux is using unmap_page() and map_page() for it?
> Or say, we just do not allow to change the content of it directly. Not
> sure.

I'm not sure what you mean by that.  Sounds kind of like my option 2
above - only allow separate maps and unmaps, but change the notify
interface to make that more obvious to the vIOMMU implementor.

> Also, I assume we may need something like "quiesce" IOMMU
> operation (or not operation, but existing procedures?) to finally make
> sure that the pages we are removing will never be touched by IOMMU any
> more before freeing them.

Yes.. I'm not sure if you're thinking of the guest side or host side
here.

> All above is wild guess of me, just want to know when and why we will
> need this CHANGE stuff.
> 
> And before we finally realize we need this, I would still suggest to
> keep the old interface (as long as it can work for us, no extra effort
> needed), and as David has mentioned, we can add comment for
> IOMMUTLBEntry to make sure people can know its meaning easier (before
> starting to read vfio_iommu_map_notify() codes).
> 
> Thanks,
> 
> -- peterx
>
David Gibson Sept. 14, 2016, 10:50 a.m. UTC | #6
On Wed, Sep 14, 2016 at 04:17:26PM +0800, Peter Xu wrote:
> On Wed, Sep 14, 2016 at 05:15:03PM +1000, David Gibson wrote:
> 
> [...]
> 
> > Peter suggested commenting this next to the IOTLBEntry definition, and
> > I think that's probably ok for now.
> 
> Looks like we have something already (just not that obvious):
> 
> /**
>  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
>  *
>  * @mr: the memory region that was changed
>  * @entry: the new entry in the IOMMU translation table.  The entry
>  *         replaces all old entries for the same virtual I/O address range.
>  *         Deleted entries have .@perm == 0.
>  */
> void memory_region_notify_iommu(MemoryRegion *mr,
>                                 IOMMUTLBEntry entry);
> 
> Though it's quite simple, it did explain that perm==0 is for deleted
> entries.

That is definitely not sufficient.  It misses the crucial point that
@perm != 0 is NOT ALLOWED if there was an existing mapping at that
address.

It's not that we really _need_ to support in-place change.  The point
is that we want to keep our interface contracts simple and clear.

Patch

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b313e7c..41b6a13 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -293,7 +293,7 @@  static bool vfio_listener_skipped_section(MemoryRegionSection *section)
            section->offset_within_address_space & (1ULL << 63);
 }
 
-static void vfio_iommu_map_notify(Notifier *n, void *data)
+static void vfio_iommu_map_notify(IOMMUNotifier *n, void *data)
 {
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
     VFIOContainer *container = giommu->container;
@@ -454,6 +454,7 @@  static void vfio_listener_region_add(MemoryListener *listener,
                                section->offset_within_region;
         giommu->container = container;
         giommu->n.notify = vfio_iommu_map_notify;
+        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 3e4d416..e69e984 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -67,6 +67,27 @@  struct IOMMUTLBEntry {
     IOMMUAccessFlags perm;
 };
 
+/*
+ * Bitmap for differnet IOMMUNotifier capabilities. Each notifier can
+ * register with one or multiple IOMMU Notifier capability bit(s).
+ */
+typedef enum {
+    IOMMU_NOTIFIER_NONE = 0,
+    /* Notify cache invalidations */
+    IOMMU_NOTIFIER_UNMAP = 0x1,
+    /* Notify entry changes (newly created entries) */
+    IOMMU_NOTIFIER_MAP = 0x2,
+} IOMMUNotifierFlag;
+
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+
+struct IOMMUNotifier {
+    void (*notify)(struct IOMMUNotifier *notifier, void *data);
+    IOMMUNotifierFlag notifier_flags;
+    QLIST_ENTRY(IOMMUNotifier) node;
+};
+typedef struct IOMMUNotifier IOMMUNotifier;
+
 /* New-style MMIO accessors can indicate that the transaction failed.
  * A zero (MEMTX_OK) response means success; anything else is a failure
  * of some kind. The memory subsystem will bitwise-OR together results
@@ -201,7 +222,7 @@  struct MemoryRegion {
     const char *name;
     unsigned ioeventfd_nb;
     MemoryRegionIoeventfd *ioeventfds;
-    NotifierList iommu_notify;
+    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
 };
 
 /**
@@ -620,11 +641,12 @@  void memory_region_notify_iommu(MemoryRegion *mr,
  * IOMMU translation entries.
  *
  * @mr: the memory region to observe
- * @n: the notifier to be added; the notifier receives a pointer to an
- *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
- *     valid on exit from the notifier.
+ * @n: the IOMMUNotifier to be added; the notify callback receives a
+ *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
+ *     ceases to be valid on exit from the notifier.
  */
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                           IOMMUNotifier *n);
 
 /**
  * memory_region_iommu_replay: replay existing IOMMU translations to
@@ -636,7 +658,8 @@  void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
  * @is_write: Whether to treat the replay as a translate "write"
  *     through the iommu
  */
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+                                bool is_write);
 
 /**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
@@ -646,7 +669,8 @@  void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
  *      needs to be called
  * @n: the notifier to be removed.
  */
-void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n);
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+                                             IOMMUNotifier *n);
 
 /**
  * memory_region_name: get a memory region's name
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 94dfae3..c17602e 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -93,7 +93,7 @@  typedef struct VFIOGuestIOMMU {
     VFIOContainer *container;
     MemoryRegion *iommu;
     hwaddr iommu_offset;
-    Notifier n;
+    IOMMUNotifier n;
     QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
 } VFIOGuestIOMMU;
 
diff --git a/memory.c b/memory.c
index 0eb6895..f65c600 100644
--- a/memory.c
+++ b/memory.c
@@ -1418,7 +1418,7 @@  void memory_region_init_iommu(MemoryRegion *mr,
     memory_region_init(mr, owner, name, size);
     mr->iommu_ops = ops,
     mr->terminates = true;  /* then re-forwards */
-    notifier_list_init(&mr->iommu_notify);
+    QLIST_INIT(&mr->iommu_notify);
 }
 
 static void memory_region_finalize(Object *obj)
@@ -1513,13 +1513,16 @@  bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
     return memory_region_get_dirty_log_mask(mr) & (1 << client);
 }
 
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                           IOMMUNotifier *n)
 {
+    /* We need to register for at least one bitfield */
+    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
     if (mr->iommu_ops->notify_started &&
-        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
+        QLIST_EMPTY(&mr->iommu_notify)) {
         mr->iommu_ops->notify_started(mr);
     }
-    notifier_list_add(&mr->iommu_notify, n);
+    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
 }
 
 uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
@@ -1531,7 +1534,8 @@  uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
     return TARGET_PAGE_SIZE;
 }
 
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+                                bool is_write)
 {
     hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
@@ -1552,11 +1556,12 @@  void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
     }
 }
 
-void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+                                             IOMMUNotifier *n)
 {
-    notifier_remove(n);
+    QLIST_REMOVE(n, node);
     if (mr->iommu_ops->notify_stopped &&
-        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
+        QLIST_EMPTY(&mr->iommu_notify)) {
         mr->iommu_ops->notify_stopped(mr);
     }
 }
@@ -1564,8 +1569,22 @@  void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
 void memory_region_notify_iommu(MemoryRegion *mr,
                                 IOMMUTLBEntry entry)
 {
+    IOMMUNotifier *iommu_notifier;
+    IOMMUNotifierFlag request_flags;
+
     assert(memory_region_is_iommu(mr));
-    notifier_list_notify(&mr->iommu_notify, &entry);
+
+    if (entry.perm & IOMMU_RW) {
+        request_flags = IOMMU_NOTIFIER_MAP;
+    } else {
+        request_flags = IOMMU_NOTIFIER_UNMAP;
+    }
+
+    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+        if (iommu_notifier->notifier_flags & request_flags) {
+            iommu_notifier->notify(iommu_notifier, &entry);
+        }
+    }
 }
 
 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)