amdgpu_ring_emit_cntxcntl(ring, status);
        }
 
+       /* Set up the initial frame-control (TMZ) state and send it off. */
        secure = false;
+       if (job && ring->funcs->emit_frame_cntl) {
+               secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+               amdgpu_ring_emit_frame_cntl(ring, true, secure);
+       }
+
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
                    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
-               /* If this IB is TMZ, add frame TMZ start packet,
-                * else, turn off TMZ.
-                */
-               if (ib->flags & AMDGPU_IB_FLAGS_SECURE && ring->funcs->emit_tmz) {
-                       if (!secure) {
-                               secure = true;
-                               amdgpu_ring_emit_tmz(ring, true);
+               if (job && ring->funcs->emit_frame_cntl) {
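+                       /* This IB's TMZ state differs from the frame that
+                        * is currently open: close it and begin a new frame
+                        * with the matching state.
+                        */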
+                       if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+                               amdgpu_ring_emit_frame_cntl(ring, false, secure);
+                               secure = !secure;
+                               amdgpu_ring_emit_frame_cntl(ring, true, secure);
                        }
-               } else if (secure) {
-                       secure = false;
-                       amdgpu_ring_emit_tmz(ring, false);
                }
 
                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }
 
-       if (secure) {
-               secure = false;
-               amdgpu_ring_emit_tmz(ring, false);
-       }
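+       /* Close the frame that is still open after the last IB. */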
+       if (job && ring->funcs->emit_frame_cntl)
+               amdgpu_ring_emit_frame_cntl(ring, false, secure);
 
 #ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
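
For reference, a minimal standalone model of the begin/toggle/end sequence
above; plain C, compilable in userspace, with a hypothetical mix of secure
and non-secure IBs (the kernel helper is stubbed out to print instead):

        #include <stdbool.h>
        #include <stdio.h>

        /* stand-in for amdgpu_ring_emit_frame_cntl(): print the packet */
        static void emit_frame_cntl(bool start, bool secure)
        {
                printf("FRAME_CONTROL %s %s\n", start ? "begin" : "end",
                       secure ? "TMZ" : "non-TMZ");
        }

        int main(void)
        {
                /* hypothetical job: only the middle IB is secure */
                bool ib_secure[] = { false, true, false };
                bool secure = ib_secure[0];
                int i;

                emit_frame_cntl(true, secure);
                for (i = 0; i < 3; ++i) {
                        if (secure != ib_secure[i]) {
                                /* close the open frame, reopen with the
                                 * new TMZ state */
                                emit_frame_cntl(false, secure);
                                secure = !secure;
                                emit_frame_cntl(true, secure);
                        }
                        /* the IB itself would be emitted here */
                }
                emit_frame_cntl(false, secure);
                return 0;
        }

This prints three matched begin/end pairs, so every IB executes inside a
frame whose TMZ state matches its own flag.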
 
        void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask);
-       void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+                               bool secure);
        /* Try to soft recover the ring to make the fence signal */
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
        int (*preempt_ib)(struct amdgpu_ring *ring);
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
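
As with the other optional hooks wrapped here, callers are expected to
NULL-check the function pointer before using the wrapper, mirroring the
guards in amdgpu_ib_schedule() above:

        if (ring->funcs->emit_frame_cntl)
                amdgpu_ring_emit_frame_cntl(ring, true, secure);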
 
 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
+                                           bool start, bool secure);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
                                           sizeof(de_payload) >> 2);
 }
 
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                   bool secure)
 {
-       if (amdgpu_is_tmz(ring->adev)) {
-               amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-               amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
-       }
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
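
Assuming the encodings in nvd.h (PACKET3 type-3 header, opcode
PACKET3_FRAME_CONTROL = 0x90, FRAME_TMZ = bit 0, FRAME_CMD() shifted to
bit 28, start = FRAME_CMD(0), end = FRAME_CMD(1)), the four packets this
helper can emit work out to:

        start, secure  -> 0xC0009000 0x00000001
        start, !secure -> 0xC0009000 0x00000000
        end,   secure  -> 0xC0009000 0x10000001
        end,   !secure -> 0xC0009000 0x10000000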
 
 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
        .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
        .preempt_ib = gfx_v10_0_ring_preempt_ib,
-       .emit_tmz = gfx_v10_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 
        amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
 }
 
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                  bool secure)
 {
-       if (amdgpu_is_tmz(ring->adev)) {
-               amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-               amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
-       }
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
 
 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
        .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
        .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
-       .emit_tmz = gfx_v9_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,