        /* check if sh_mem_config register already configured */
        if (qpd->sh_mem_config == 0) {
-               qpd->sh_mem_config =
-                               SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+               qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
 
-               if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
-                       /* Aldebaran can safely support different XNACK modes
-                        * per process
-                        */
-                       if (!pdd->process->xnack_enabled)
-                               qpd->sh_mem_config |=
-                                       1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
-               } else if (dqm->dev->noretry &&
-                          !dqm->dev->use_iommu_v2) {
-                       qpd->sh_mem_config |=
-                               1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
-               }
+               if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
+                       qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
 
                qpd->sh_mem_ape1_limit = 0;
                qpd->sh_mem_ape1_base = 0;
        }
 
+       if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
+               if (!pdd->process->xnack_enabled)
+                       qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+               else
+                       qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
+       }
+
        qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
 
-       pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+       pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+                qpd->sh_mem_config);
 
        return 0;
 }
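
Illustration for reviewers, not part of the patch: with this change the RETRY_DISABLE bit in sh_mem_config follows pdd->process->xnack_enabled on every call to this function instead of only on first initialization, so it only matters once user space has actually chosen an XNACK mode. A minimal sketch of that step, assuming the existing AMDKFD_IOC_SET_XNACK_MODE ioctl and struct kfd_ioctl_set_xnack_mode_args from include/uapi/linux/kfd_ioctl.h; error handling is kept minimal.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

/* Sketch: select the XNACK mode for the calling process. This has to
 * happen before any queues are created; enabling XNACK is refused on
 * GPUs that cannot support it for this process.
 */
static int kfd_set_xnack(int enable)
{
	struct kfd_ioctl_set_xnack_mode_args args = {
		.xnack_enabled = enable ? 1 : 0,
	};
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, AMDKFD_IOC_SET_XNACK_MODE, &args);
	close(fd);
	return ret;
}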
 
 
 #define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
 #define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
+#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
+               (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
 
 struct kfd_event_interrupt_class {
        bool (*interrupt_isr)(struct kfd_dev *dev,
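
Side note, illustration only: the new helper keeps the per-ASIC capability check in one place, so both call sites in this patch stop hard-coding IP_VERSION(9, 4, 2). Purely as a hypothetical sketch, not part of the patch, another GC IP version with the same MEC firmware capability would only require extending the macro itself:

/* Hypothetical extension, for illustration only. */
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)			\
		((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||	\
		 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))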
 
                 * per-process XNACK mode selection. But let the dev->noretry
                 * setting still influence the default XNACK mode.
                 */
-               if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
+               if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
                        continue;
 
                /* GFXv10 and later GPUs do not support shader preemption