/*
* XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
+ *
+ * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
+ *
+ * - if a context is enabled with the H bit set, the VP context
+ * information is retrieved from the NVP structure ("check out")
+ * and stored back on a context pull ("check in"); the SW receives
+ * the same context pull information as on P9
+ *
+ * - the H bit cannot be changed while the V bit is set, i.e. a
+ * context cannot be set up in the TIMA and then be "pushed" into
+ * the NVP by changing the H bit while the context is enabled
*/
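As a minimal sketch of what "enabled with the H bit set" means for the CAM
line, a caller could compose the Gen2 OS word2 with both bits set before
pushing the context. This assumes the usual xive2_nvp_cam_line() helper from
xive2_regs.h; it is an illustration only, not part of the patch:

    /* Sketch: CAM word for an OS context with automatic save & restore */
    static uint32_t xive2_os_cam_sr(uint8_t nvp_blk, uint32_t nvp_idx)
    {
        /* H (TM2_QW1W2_HO) sits next to V (TM2_QW1W2_VO) in OS word2 */
        return xive2_nvp_cam_line(nvp_blk, nvp_idx) |
               TM2_QW1W2_VO | TM2_QW1W2_HO;
    }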
+static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx)
+{
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ Xive2Nvp nvp;
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_hw(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_co(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
+ xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: NVP %x/%x invalid checkout Thread %x\n",
+ nvp_blk, nvp_idx, pir);
+ return;
+ }
+
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
+ nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
+ nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+
+ nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
+ /* NVP2_W1_CO_THRID_VALID is only set once; invalidate the thread id instead */
+ nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
+}
+
static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
- uint32_t *nvp_idx, bool *vo)
+ uint32_t *nvp_idx, bool *vo, bool *ho)
{
*nvp_blk = xive2_nvp_blk(cam);
*nvp_idx = xive2_nvp_idx(cam);
*vo = !!(cam & TM2_QW1W2_VO);
+ *ho = !!(cam & TM2_QW1W2_HO);
}
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size)
{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
uint32_t qw1w2_new;
uint32_t cam = be32_to_cpu(qw1w2);
uint8_t nvp_blk;
uint32_t nvp_idx;
bool vo;
+ bool do_save;
- xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);
+ xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);
if (!vo) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);
+ if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
+ xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
+ }
+
return qw1w2;
}
+static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp)
+{
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ uint8_t cppr;
+
+ if (!xive2_nvp_is_hw(nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
+ nvp_blk, nvp_idx);
+ return 0;
+ }
+
+ cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
+ nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
+
+ tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
+ /* we don't model LSMFB */
+
+ nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);
+
+ /*
+ * Checkout privilege: 0:OS, 1:Pool, 2:Hard
+ *
+ * TODO: we only support OS push/pull
+ */
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);
+
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);
+
+ /* return restored CPPR to generate a CPU exception if needed */
+ return cppr;
+}
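To make the checkout privilege encoding above concrete (0:OS, 1:Pool,
2:Hard), a minimal decode sketch could look as follows; xive2_co_priv_name()
is hypothetical and not part of the patch:

    /* Sketch: decode the NVP2_W1_CO_PRIV field of a checked-out NVP */
    static const char *xive2_co_priv_name(uint32_t w1)
    {
        switch (xive_get_field32(NVP2_W1_CO_PRIV, w1)) {
        case 0:
            return "OS";
        case 1:
            return "Pool";
        case 2:
            return "Hard";
        default:
            return "invalid";
        }
    }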
+
static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
- uint8_t nvp_blk, uint32_t nvp_idx)
+ uint8_t nvp_blk, uint32_t nvp_idx,
+ bool do_restore)
{
Xive2Nvp nvp;
uint8_t ipb;
return;
}
+ /* Automatically restore thread context registers */
+ if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
+ do_restore) {
+ xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
+ }
+
ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
if (ipb) {
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
uint8_t nvp_blk;
uint32_t nvp_idx;
bool vo;
+ bool do_restore;
- xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);
+ xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
/* First update the thread context */
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
/* Check the interrupt pending bits */
if (vo) {
- xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx);
+ xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
+ do_restore);
}
}
#define TM2_QW0W2_VU PPC_BIT32(0)
#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
#define TM2_QW1W2_VO PPC_BIT32(0)
+#define TM2_QW1W2_HO PPC_BIT32(1)
#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
#define TM2_QW2W2_VP PPC_BIT32(0)
+#define TM2_QW2W2_HP PPC_BIT32(1)
#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
#define TM2_QW3W2_VT PPC_BIT32(0)
+#define TM2_QW3W2_HT PPC_BIT32(1)
#define TM2_QW3W2_LP PPC_BIT32(6)
#define TM2_QW3W2_LE PPC_BIT32(7)
typedef struct Xive2Nvp {
uint32_t w0;
#define NVP2_W0_VALID PPC_BIT32(0)
+#define NVP2_W0_HW PPC_BIT32(7)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
uint32_t w1;
+#define NVP2_W1_CO PPC_BIT32(13)
+#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
+#define NVP2_W1_CO_THRID_VALID PPC_BIT32(16)
+#define NVP2_W1_CO_THRID PPC_BITMASK32(17, 31)
uint32_t w2;
+#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
+#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
uint32_t w3;
uint32_t w4;
#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
} Xive2Nvp;
#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
+#define xive2_nvp_is_hw(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_HW)
+#define xive2_nvp_is_co(nvp) (be32_to_cpu((nvp)->w1) & NVP2_W1_CO)
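Taken together, the NVP2_W1_CO* fields record which thread holds a context
checked out of the NVP. A hypothetical predicate mirroring the validation
done in xive2_tctx_save_os_ctx() could be written as (illustration only,
not part of the patch):

    /* Sketch: is the NVP checked out by the thread with this PIR? */
    static inline bool xive2_nvp_co_matches(Xive2Nvp *nvp, uint32_t pir)
    {
        return xive2_nvp_is_co(nvp) &&
               (!xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp->w1) ||
                xive_get_field32(NVP2_W1_CO_THRID, nvp->w1) == pir);
    }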
/*
* The VP number space in a block is defined by the END2_W6_VP_OFFSET