@@ -48,12 +48,77 @@ struct Xen9pfsDev {
     struct Xen9pfsRing *rings;
 };
 
+static void xen_9pfs_in_sg(struct Xen9pfsRing *ring,
+                           struct iovec *in_sg,
+                           int *num,
+                           uint32_t idx,
+                           uint32_t size)
+{
+    RING_IDX cons, prod, masked_prod, masked_cons;
+
+    cons = ring->intf->in_cons;
+    prod = ring->intf->in_prod;
+    xen_rmb();
+    masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+    masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
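+    /*
+     * The "in" ring carries the reply to the frontend: return the free
+     * area between producer and consumer, split into two iovecs when it
+     * wraps past the end of the ring.
+     */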
+    if (masked_prod < masked_cons) {
+        in_sg[0].iov_base = ring->ring.in + masked_prod;
+        in_sg[0].iov_len = masked_cons - masked_prod;
+        *num = 1;
+    } else {
+        in_sg[0].iov_base = ring->ring.in + masked_prod;
+        in_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_prod;
+        in_sg[1].iov_base = ring->ring.in;
+        in_sg[1].iov_len = masked_cons;
+        *num = 2;
+    }
+}
+
+static void xen_9pfs_out_sg(struct Xen9pfsRing *ring,
+                            struct iovec *out_sg,
+                            int *num,
+                            uint32_t idx)
+{
+    RING_IDX cons, prod, masked_prod, masked_cons;
+
+    cons = ring->intf->out_cons;
+    prod = ring->intf->out_prod;
+    xen_rmb();
+    masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+    masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
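+    /*
+     * The "out" ring carries the request: the data starts at the consumer
+     * index and is ring->out_size bytes long, possibly wrapping past the
+     * end of the ring.
+     */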
+    if (masked_cons < masked_prod) {
+        out_sg[0].iov_base = ring->ring.out + masked_cons;
+        out_sg[0].iov_len = ring->out_size;
+        *num = 1;
+    } else {
+        if (ring->out_size > (XEN_9PFS_RING_SIZE - masked_cons)) {
+            out_sg[0].iov_base = ring->ring.out + masked_cons;
+            out_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_cons;
+            out_sg[1].iov_base = ring->ring.out;
+            out_sg[1].iov_len = ring->out_size -
+                                (XEN_9PFS_RING_SIZE - masked_cons);
+            *num = 2;
+        } else {
+            out_sg[0].iov_base = ring->ring.out + masked_cons;
+            out_sg[0].iov_len = ring->out_size;
+            *num = 1;
+        }
+    }
+}
+
 static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                      size_t offset,
                                      const char *fmt,
                                      va_list ap)
 {
-    return 0;
+    struct Xen9pfsDev *xen_9pfs = container_of(pdu->s,
+                                               struct Xen9pfsDev, state);
+    struct iovec in_sg[2];
+    int num;
+
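+    /*
+     * Ask for a conservative upper bound of space for the marshalled
+     * reply: 128 bytes of headroom past the current offset, rounded up
+     * to a 512-byte multiple.
+     */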
+    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
+    return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
 }
 
 static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
@@ -61,13 +126,27 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                        size_t offset,
                                        const char *fmt,
                                        va_list ap)
 {
-    return 0;
+    struct Xen9pfsDev *xen_9pfs = container_of(pdu->s,
+                                               struct Xen9pfsDev, state);
+    struct iovec out_sg[2];
+    int num;
+
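+    /* Unmarshal the request fields directly from the "out" ring. */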
+    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+                    out_sg, &num, pdu->idx);
+    return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
 }
 
 static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                            struct iovec **piov,
                                            unsigned int *pniov)
 {
+    struct Xen9pfsDev *xen_9pfs = container_of(pdu->s,
+                                               struct Xen9pfsDev, state);
+    struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+    struct iovec *sg = g_malloc0(sizeof(*sg) * 2);
+    int num;
+
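+    /*
+     * Point the new sg directly at the request data on the ring. This is
+     * safe because only one request per ring is handled at a time.
+     */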
+    xen_9pfs_out_sg(ring, sg, &num, pdu->idx);
+    *piov = sg;
+    *pniov = num;
 }
 
 static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
@@ -75,6 +154,14 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
 {
+    struct Xen9pfsDev *xen_9pfs = container_of(pdu->s,
+                                               struct Xen9pfsDev, state);
+    struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+    struct iovec *sg = g_malloc0(sizeof(*sg) * 2);
+    int num;
+
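+    /*
+     * Point the new sg at the free ring space where the reply will be
+     * written; again safe with only one request per ring in flight.
+     */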
+    xen_9pfs_in_sg(ring, sg, &num, pdu->idx, size);
+    *piov = sg;
+    *pniov = num;
 }
 
 static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
Implement xen_9pfs_init_in/out_iov_from_pdu and
xen_9pfs_pdu_vmarshal/vunmarshal by creating new scatter-gather lists
pointing to the data on the ring. This is safe because we only handle
one request per ring at any given time.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
 hw/9pfs/xen-9p-backend.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 89 insertions(+), 2 deletions(-)
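Note for readers of this hunk in isolation: xen_9pfs_mask() and
XEN_9PFS_RING_SIZE come from an earlier patch in this series and are
not shown above. Assuming the ring size is a power of two, the mask
helper can be sketched roughly as follows; this is an illustration,
not the exact definition from hw/9pfs/xen-9p-backend.c:

    #include <stdint.h>

    /* Reduce a free-running ring index to an offset inside the ring;
     * assumes ring_size is a power of two. */
    static inline uint32_t xen_9pfs_mask(uint32_t idx, uint32_t ring_size)
    {
        return idx & (ring_size - 1);
    }

The cons/prod indices increase monotonically and are only reduced
modulo the ring size when used; comparing the masked values is what
drives the wrap-around handling in xen_9pfs_in_sg()/xen_9pfs_out_sg().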