drm/xe: Invert access counter queue head / tail
author: Matthew Brost <matthew.brost@intel.com>
Wed, 10 Jan 2024 01:24:39 +0000 (17:24 -0800)
committer: Matthew Brost <matthew.brost@intel.com>
Wed, 10 Jan 2024 23:11:23 +0000 (15:11 -0800)
Convention for queues in Linux is the producer moves the head and
consumer moves the tail. Fix the access counter queue to conform to
this convention.

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_gt_types.h

index 13183088401f65d34b2ba6c43dd8f2ab5884c7c6..5c2603075af9630066d22eb7095cfef7bcdd221e 100644 (file)
@@ -564,9 +564,9 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
        bool ret = false;
 
        spin_lock(&acc_queue->lock);
-       if (acc_queue->head != acc_queue->tail) {
+       if (acc_queue->tail != acc_queue->head) {
                desc = (const struct xe_guc_acc_desc *)
-                       (acc_queue->data + acc_queue->head);
+                       (acc_queue->data + acc_queue->tail);
 
                acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
                acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
@@ -579,7 +579,7 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
                acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
                                              desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);
 
-               acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) %
+               acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
                                  ACC_QUEUE_NUM_DW;
                ret = true;
        }
@@ -607,7 +607,7 @@ static void acc_queue_work_func(struct work_struct *w)
                }
 
                if (time_after(jiffies, threshold) &&
-                   acc_queue->head != acc_queue->tail) {
+                   acc_queue->tail != acc_queue->head) {
                        queue_work(gt->usm.acc_wq, w);
                        break;
                }
@@ -618,7 +618,7 @@ static bool acc_queue_full(struct acc_queue *acc_queue)
 {
        lockdep_assert_held(&acc_queue->lock);
 
-       return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <=
+       return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
                ACC_MSG_LEN_DW;
 }
 
@@ -643,9 +643,9 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
        spin_lock(&acc_queue->lock);
        full = acc_queue_full(acc_queue);
        if (!full) {
-               memcpy(acc_queue->data + acc_queue->tail, msg,
+               memcpy(acc_queue->data + acc_queue->head, msg,
                       len * sizeof(u32));
-               acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW;
+               acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
                queue_work(gt->usm.acc_wq, &acc_queue->worker);
        } else {
                drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
index b15503dabba4b4888d4138035fec6c8f70079530..047cde6cda107edf7b86844ad9f151aa2af3d3c5 100644 (file)
@@ -252,15 +252,16 @@ struct xe_gt {
                        /** @data: data in the page fault queue */
                        u32 data[ACC_QUEUE_NUM_DW];
                        /**
-                        * @head: head pointer in DWs for page fault queue,
-                        * moved by worker which processes faults.
+                        * @tail: tail pointer in DWs for access counter queue,
+                        * moved by worker which processes counters
+                        * (consumer).
                         */
-                       u16 head;
+                       u16 tail;
                        /**
-                        * @tail: tail pointer in DWs for page fault queue,
-                        * moved by G2H handler.
+                        * @head: head pointer in DWs for access counter queue,
+                        * moved by G2H handler (producer).
                         */
-                       u16 tail;
+                       u16 head;
                        /** @lock: protects page fault queue */
                        spinlock_t lock;
                        /** @worker: to process access counters */