*/
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
-       /* we're using static channel owners */
-       ct->host_channel.owner = CTB_OWNER_HOST;
-
+       /* Owner is now fixed to the host; see guc_ct_buffer_desc_init(). */
        spin_lock_init(&ct->lock);
        INIT_LIST_HEAD(&ct->pending_requests);
        INIT_LIST_HEAD(&ct->incoming_requests);
 }
 
 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
-                                   u32 cmds_addr, u32 size, u32 owner)
+                                   u32 cmds_addr, u32 size)
 {
-       CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
-                       desc, cmds_addr, size, owner);
+       CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
        memset(desc, 0, sizeof(*desc));
        desc->addr = cmds_addr;
        desc->size = size;
-       desc->owner = owner;
+       /* Only one (host-owned) CT channel remains, so owner is constant. */
+       desc->owner = CTB_OWNER_HOST;
 }
 
 static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
 }
 
 static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
-                                          u32 owner,
                                           u32 type)
 {
        u32 action[] = {
                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
-               owner,
+               /* owner no longer varies: only the host channel exists */
+               CTB_OWNER_HOST,
                type
        };
        int err;
        /* Can't use generic send(), CT deregistration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
-               DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
-                         guc_ct_buffer_type_to_str(type), owner, err);
+               DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
+                         guc_ct_buffer_type_to_str(type), err);
        return err;
 }
 
-static int ctch_init(struct intel_guc *guc,
-                    struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
 {
+       struct intel_guc *guc = ct_to_guc(ct);
        void *blob;
        int err;
        int i;
 
-       GEM_BUG_ON(ctch->vma);
+       GEM_BUG_ON(ct->vma);
 
        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         * other code will need updating as well.
         */
 
-       err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ctch->vma, &blob);
+       err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
        if (err) {
-               CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
-                               ctch->owner, err);
+               DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
                return err;
        }
 
        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
-                       intel_guc_ggtt_offset(guc, ctch->vma));
+                       intel_guc_ggtt_offset(guc, ct->vma));
 
        /* store pointers to desc and cmds */
-       for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
-               GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-               ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
-               ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+       for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
+               GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+               ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+               ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }
 
        return 0;
 }
 
-static void ctch_fini(struct intel_guc *guc,
-                     struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
 {
-       GEM_BUG_ON(ctch->enabled);
+       /* Caller must have disabled CT before freeing its backing storage. */
+       GEM_BUG_ON(ct->enabled);
 
-       i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
+       i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
 }
 
-static int ctch_enable(struct intel_guc *guc,
-                      struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
+       struct intel_guc *guc = ct_to_guc(ct);
        u32 base;
        int err;
        int i;
 
-       GEM_BUG_ON(!ctch->vma);
-
-       GEM_BUG_ON(ctch->enabled);
+       GEM_BUG_ON(ct->enabled);
 
        /* vma should be already allocated and map'ed */
-       base = intel_guc_ggtt_offset(guc, ctch->vma);
+       GEM_BUG_ON(!ct->vma);
+       base = intel_guc_ggtt_offset(guc, ct->vma);
 
        /* (re)initialize descriptors
         * cmds buffers are in the second half of the blob page
         */
-       for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+       for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-               guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+               guc_ct_buffer_desc_init(ct->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
-                                       PAGE_SIZE/4,
-                                       ctch->owner);
+                                       PAGE_SIZE/4);
        }
 
-       /* register buffers, starting wirh RECV buffer
+       /* register buffers, starting with RECV buffer
        if (unlikely(err))
                goto err_deregister;
 
-       ctch->enabled = true;
+       ct->enabled = true;
 
        return 0;
 
 err_deregister:
        guc_action_deregister_ct_buffer(guc,
-                                       ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
 err_out:
-       DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+       DRM_ERROR("CT: can't open channel; err=%d\n", err);
        return err;
 }
 
-static void ctch_disable(struct intel_guc *guc,
-                        struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
 {
-       GEM_BUG_ON(!ctch->enabled);
+       struct intel_guc *guc = ct_to_guc(ct);
 
-       ctch->enabled = false;
+       GEM_BUG_ON(!ct->enabled);
+
+       ct->enabled = false;
 
+       /* Deregistration goes over MMIO; only attempt it while GuC runs. */
        if (intel_guc_is_running(guc)) {
                guc_action_deregister_ct_buffer(guc,
-                                               ctch->owner,
                                                INTEL_GUC_CT_BUFFER_TYPE_SEND);
                guc_action_deregister_ct_buffer(guc,
-                                               ctch->owner,
                                                INTEL_GUC_CT_BUFFER_TYPE_RECV);
        }
 }
 
-static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+/* Fences tag outgoing requests so responses can be matched back to them. */
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
 {
        /* For now it's trivial */
-       return ++ctch->next_fence;
+       return ++ct->next_fence;
 }
 
 /**
        return err;
 }
 
-static int ctch_send(struct intel_guc_ct *ct,
-                    struct intel_guc_ct_channel *ctch,
-                    const u32 *action,
-                    u32 len,
-                    u32 *response_buf,
-                    u32 response_buf_size,
-                    u32 *status)
+static int ct_send(struct intel_guc_ct *ct,
+                  const u32 *action,
+                  u32 len,
+                  u32 *response_buf,
+                  u32 response_buf_size,
+                  u32 *status)
 {
-       struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;
 
-       GEM_BUG_ON(!ctch->enabled);
+       GEM_BUG_ON(!ct->enabled);
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);
 
-       fence = ctch_get_next_fence(ctch);
+       fence = ct_get_next_fence(ct);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
                      u32 *response_buf, u32 response_buf_size)
 {
        struct intel_guc_ct *ct = &guc->ct;
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
        u32 status = ~0; /* undefined */
        int ret;
 
        mutex_lock(&guc->send_mutex);
 
-       ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
-                       &status);
+       ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        return 0;
 }
 
-static void ct_process_host_channel(struct intel_guc_ct *ct)
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
 {
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
-       struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+       struct intel_guc_ct *ct = &guc->ct;
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;
 
-       if (!ctch->enabled)
+       if (!ct->enabled)
                return;
 
        do {
        }
 }
 
-/*
- * When we're communicating with the GuC over CT, GuC uses events
- * to notify us about new messages being posted on the RECV buffer.
- */
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
-{
-       struct intel_guc_ct *ct = &guc->ct;
-
-       ct_process_host_channel(ct);
-}
-
-/**
- * intel_guc_ct_init - Init CT communication
- * @ct: pointer to CT struct
- *
- * Allocate memory required for communication via
- * the CT channel.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_init(struct intel_guc_ct *ct)
-{
-       struct intel_guc *guc = ct_to_guc(ct);
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
-       int err;
-
-       err = ctch_init(guc, ctch);
-       if (unlikely(err)) {
-               DRM_ERROR("CT: can't open channel %d; err=%d\n",
-                         ctch->owner, err);
-               return err;
-       }
-
-       GEM_BUG_ON(!ctch->vma);
-       return 0;
-}
-
-/**
- * intel_guc_ct_fini - Fini CT communication
- * @ct: pointer to CT struct
- *
- * Deallocate memory required for communication via
- * the CT channel.
- */
-void intel_guc_ct_fini(struct intel_guc_ct *ct)
-{
-       struct intel_guc *guc = ct_to_guc(ct);
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-       ctch_fini(guc, ctch);
-}
-
-/**
- * intel_guc_ct_enable - Enable buffer based command transport.
- * @ct: pointer to CT struct
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_enable(struct intel_guc_ct *ct)
-{
-       struct intel_guc *guc = ct_to_guc(ct);
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-       return ctch_enable(guc, ctch);
-}
-
-/**
- * intel_guc_ct_disable - Disable buffer based command transport.
- * @ct: pointer to CT struct
- */
-void intel_guc_ct_disable(struct intel_guc_ct *ct)
-{
-       struct intel_guc *guc = ct_to_guc(ct);
-       struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-       ctch_disable(guc, ctch);
-}