bool ret = false;
spin_lock_irq(&pf_queue->lock);
- if (pf_queue->head != pf_queue->tail) {
+ if (pf_queue->tail != pf_queue->head) {
desc = (const struct xe_guc_pagefault_desc *)
- (pf_queue->data + pf_queue->head);
+ (pf_queue->data + pf_queue->tail);
pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
PFD_VIRTUAL_ADDR_LO_SHIFT;
- pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
+ pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
PF_QUEUE_NUM_DW;
ret = true;
}
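
For reference, the consumer step above reduces to the following standalone sketch. It assumes the convention this patch adopts (tail is the read index, advanced only by the consumer); the pop_msg() name and the MSG_LEN_DW value are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define QUEUE_NUM_DW	128
#define MSG_LEN_DW	4	/* illustrative message size in DWs */

struct ring {
	uint32_t data[QUEUE_NUM_DW];
	uint16_t tail;	/* consumer (read) index */
	uint16_t head;	/* producer (write) index */
};

/* Copy one message out and advance the read index, as the worker does
 * above; returns false when the ring is empty (tail caught up to head).
 */
static bool pop_msg(struct ring *r, uint32_t *msg)
{
	if (r->tail == r->head)
		return false;
	memcpy(msg, r->data + r->tail, MSG_LEN_DW * sizeof(uint32_t));
	r->tail = (r->tail + MSG_LEN_DW) % QUEUE_NUM_DW;
	return true;
}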
static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
PF_MSG_LEN_DW;
}
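
pf_queue_full() now follows the stock linux/circ_buf.h convention: head is the producer index, tail the consumer index, and CIRC_SPACE(head, tail, size) is the room left for the producer (one slot is always kept free, and size must be a power of two, which PF_QUEUE_NUM_DW = 128 satisfies). The queue counts as full once there is no longer space for a whole PF_MSG_LEN_DW message. A standalone userspace sketch using the stock macro definitions:

#include <stdio.h>

/* Stock definitions from include/linux/circ_buf.h */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int head = 0, tail = 0, size = 128;

	head = (head + 8) % size;		/* producer wrote 8 DWs */
	printf("cnt=%u space=%u\n",
	       CIRC_CNT(head, tail, size),	/* 8 DWs readable */
	       CIRC_SPACE(head, tail, size));	/* 119 DWs writable */

	tail = (tail + 8) % size;		/* consumer drained them */
	printf("cnt=%u space=%u\n",
	       CIRC_CNT(head, tail, size),	/* 0 */
	       CIRC_SPACE(head, tail, size));	/* 127 */
	return 0;
}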
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
- memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
- pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
+ memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
+ pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
send_pagefault_reply(&gt->uc.guc, &reply);
if (time_after(jiffies, threshold) &&
- pf_queue->head != pf_queue->tail) {
+ pf_queue->tail != pf_queue->head) {
queue_work(gt->usm.pf_wq, w);
break;
}
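
The requeue above gives the worker a cooperative time budget: it drains faults until the budget expires and, if messages are still pending, reschedules itself instead of monopolizing the workqueue. A minimal sketch of the pattern; the 20 ms budget, sizes, and all identifiers here are illustrative, not the driver's:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_QUEUE_NUM_DW	128
#define DEMO_MSG_LEN_DW		4

struct demo_queue {
	struct workqueue_struct *wq;
	struct work_struct worker;
	u16 tail, head;		/* consumer / producer indices */
};

/* Consume one message and advance the read index; false when empty. */
static bool demo_pop(struct demo_queue *q)
{
	if (q->tail == q->head)
		return false;
	q->tail = (q->tail + DEMO_MSG_LEN_DW) % DEMO_QUEUE_NUM_DW;
	return true;
}

static void demo_worker(struct work_struct *w)
{
	struct demo_queue *q = container_of(w, struct demo_queue, worker);
	unsigned long threshold = jiffies + msecs_to_jiffies(20);

	while (demo_pop(q)) {
		/* Budget spent with work left over: hand the remainder
		 * to a fresh work item and return.
		 */
		if (time_after(jiffies, threshold) && q->tail != q->head) {
			queue_work(q->wq, w);
			break;
		}
	}
}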
#define PF_QUEUE_NUM_DW 128
/** @data: data in the page fault queue */
u32 data[PF_QUEUE_NUM_DW];
- /**
- * @head: head pointer in DWs for page fault queue,
- * moved by worker which processes faults.
- */
- u16 head;
/**
* @tail: tail pointer in DWs for page fault queue,
- * moved by G2H handler.
+ * moved by worker which processes faults (consumer).
*/
u16 tail;
+ /**
+ * @head: head pointer in DWs for page fault queue,
+ * moved by G2H handler (producer).
+ */
+ u16 head;
/** @lock: protects page fault queue */
spinlock_t lock;
/** @worker: to process page faults */