We'll need to check the initial value given to spapr->gpu_numa_id when
building the rtas DT, so put it in a helper for easier access and to
avoid repetition.
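
For illustration, this is the kind of check the rtas DT build code can
now perform through the helper. A minimal sketch only: the function
spapr_dt_rtas_nvgpu_check() and its body are hypothetical, and only
spapr_numa_initial_nvgpu_numa_id() is introduced by this patch.

    static void spapr_dt_rtas_nvgpu_check(SpaprMachineState *spapr)
    {
        MachineState *ms = MACHINE(spapr);
        unsigned int initial = spapr_numa_initial_nvgpu_numa_id(ms);

        /*
         * GPU NUMA ids are handed out starting at the initial value,
         * so a gpu_numa_id below it would indicate a bug.
         */
        assert(spapr->gpu_numa_id >= initial);
    }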
Tested-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20210128174213.1349181-3-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
}
- /*
- * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
- * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
- * called from vPHB reset handler so we initialize the counter here.
- * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
- * must be equally distant from any other node.
- * The final value of spapr->gpu_numa_id is going to be written to
- * max-associativity-domains in spapr_build_fdt().
- */
- spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes);
+ spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine);
/* Init numa_assoc_array */
spapr_numa_associativity_init(spapr, machine);
return true;
}
+/*
+ * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+ * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu(), which is
+ * called from the vPHB reset handler, so we initialize the counter here.
+ * If no NUMA is configured from the QEMU side, we start from 1, as GPU RAM
+ * must be equally distant from any other node.
+ * The final value of spapr->gpu_numa_id is going to be written to
+ * max-associativity-domains in spapr_build_fdt().
+ */
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine)
+{
+ return MAX(1, machine->numa_state->num_nodes);
+}
+
/*
* This function will translate the user distances into
* what the kernel understands as possible values: 10
int offset, PowerPCCPU *cpu);
int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
int offset);
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine);
#endif /* HW_SPAPR_NUMA_H */