@@ -101,6 +101,7 @@
#define TM_QW3W2_LP PPC_BIT32(6)
#define TM_QW3W2_LE PPC_BIT32(7)
#define TM_QW3W2_T PPC_BIT32(31)
+#define TM_QW3B8_VT PPC_BIT8(0)
/*
* In addition to normal loads to "peek" and writes (only when invalid)
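For context on the new mask: the PPC_BITn helpers number bits from the most significant end, so TM_QW3B8_VT selects the top bit of the byte. A minimal sketch of that convention, with the macro body written out here as an assumption rather than copied from the headers:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed definition for the sketch: MSB-first (big-endian) bit numbering. */
    #define PPC_BIT8(bit)  ((uint8_t)(0x80 >> (bit)))
    #define TM_QW3B8_VT    PPC_BIT8(0)

    int main(void)
    {
        assert(TM_QW3B8_VT == 0x80);  /* bit 0 is the most significant bit of the byte */
        assert(PPC_BIT8(7) == 0x01);  /* bit 7 is the least significant */
        return 0;
    }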
@@ -128,6 +129,7 @@
#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool */
/* context to reg */
#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_PHYS_CTX 0x838 /* Pull phys ctx to reg */
#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd */
/* line */
#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
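The new 0x838 entry extends the TIMA special-use offsets: pulling the physical (hypervisor) thread context is requested with a plain 1-byte load at that offset of the HV page, not with a store. The sketch below only illustrates the access shape; tima_hv_page is a hypothetical stand-in for the mapped TIMA page so the example compiles and runs:

    #include <stdint.h>
    #include <stdio.h>

    #define TM_SPC_PULL_PHYS_CTX 0x838

    /* Hypothetical stand-in for the mapped HV TIMA page; a real pull would be
     * an MMIO load against the hardware (or QEMU-emulated) page. */
    static uint8_t tima_hv_page[0x1000];

    int main(void)
    {
        tima_hv_page[TM_SPC_PULL_PHYS_CTX] = 0x80;        /* pretend VT is currently set */
        uint8_t w2 = tima_hv_page[TM_SPC_PULL_PHYS_CTX];  /* the pull is just this 1-byte load */
        printf("pulled word2 byte: 0x%02x\n", w2);
        return 0;
    }

In QEMU, that same load is what gets dispatched to the handler added in the next hunk.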
@@ -179,6 +179,17 @@ static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
return qw2w2;
}
+static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                      hwaddr offset, unsigned size)
+{
+    uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
+    uint8_t qw3b8;
+
+    qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
+    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
+    return qw3b8;
+}
+
static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
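The new handler mirrors xive_tm_pull_pool_ctx above: it clears the valid (VT) bit in byte 8 of the QW3/physical ring and returns the resulting byte, so the load that triggered the pull observes VT already cleared, just as the pool pull returns qw2w2 after clearing VP. Below is a self-contained sketch of that effect, using a plain byte array in place of tctx->regs; the ring and word offsets are assumptions made for the sketch:

    #include <assert.h>
    #include <stdint.h>

    #define PPC_BIT8(bit)   ((uint8_t)(0x80 >> (bit)))
    #define TM_QW3B8_VT     PPC_BIT8(0)
    #define TM_QW3_HV_PHYS  0x30   /* assumed ring offset, for the sketch only */
    #define TM_WORD2        0x8    /* assumed word offset, for the sketch only */

    static uint8_t regs[0x40];     /* stand-in for tctx->regs */

    /* Same shape as the handler added by the patch: clear VT, store the byte
     * back, and return it to the caller of the load. */
    static uint8_t pull_phys_ctx(void)
    {
        uint8_t qw3b8 = regs[TM_QW3_HV_PHYS + TM_WORD2] & ~TM_QW3B8_VT;

        regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
        return qw3b8;
    }

    int main(void)
    {
        regs[TM_QW3_HV_PHYS + TM_WORD2] = TM_QW3B8_VT | 0x05;  /* VT set */
        uint8_t ret = pull_phys_ctx();
        assert(ret == 0x05);                                   /* returned byte has VT cleared */
        assert(regs[TM_QW3_HV_PHYS + TM_WORD2] == 0x05);       /* and the ring is updated */
        return 0;
    }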
@@ -527,6 +538,8 @@ static const XiveTmOp xive_tm_operations[] = {
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+ xive_tm_pull_phys_ctx },
};
static const XiveTmOp xive2_tm_operations[] = {
@@ -566,6 +579,8 @@ static const XiveTmOp xive2_tm_operations[] = {
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
NULL },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
+ xive_tm_pull_phys_ctx },
};
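Both tables (the XIVE1 and XIVE2 variants) register the new offset with a NULL write handler and a size of 1, so only a 1-byte load at 0x838 on the HV page is dispatched to xive_tm_pull_phys_ctx; xive_tm_find_op, whose signature starts just below, performs that (page, offset, size, direction) match. A simplified approximation of the lookup, with placeholder constants and a trimmed-down struct rather than the file's real types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int       page;            /* which TIMA view the op is allowed on */
        uint32_t  offset;          /* special-use offset, e.g. 0x838 */
        unsigned  size;            /* required access size in bytes */
        void    (*write_handler)(uint64_t value);
        uint64_t (*read_handler)(void);
    } TmOp;

    #define XIVE_TM_HV_PAGE       3       /* placeholder value for the sketch */
    #define TM_SPC_PULL_PHYS_CTX  0x838

    static uint64_t pull_phys_ctx(void) { return 0; }   /* placeholder handler */

    static const TmOp ops[] = {
        { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, pull_phys_ctx },
    };

    /* Return the matching op for a load (write == 0) or store (write == 1). */
    static const TmOp *find_op(int page, uint32_t offset, unsigned size, int write)
    {
        for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
            if (ops[i].page == page && ops[i].offset == offset &&
                ops[i].size == size) {
                return write ? (ops[i].write_handler ? &ops[i] : NULL)
                             : (ops[i].read_handler  ? &ops[i] : NULL);
            }
        }
        return NULL;
    }

    int main(void)
    {
        /* A 1-byte load at 0x838 on the HV page matches ... */
        printf("%s\n", find_op(XIVE_TM_HV_PAGE, 0x838, 1, 0) ? "hit" : "miss");
        /* ... a store at the same offset does not, since the write handler is NULL. */
        printf("%s\n", find_op(XIVE_TM_HV_PAGE, 0x838, 1, 1) ? "hit" : "miss");
        return 0;
    }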
static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,