 static void iommu_set_device_table(struct amd_iommu *iommu)
 {
        u64 entry;
+       u32 dev_table_size = iommu->pci_seg->dev_table_size;
 
        BUG_ON(iommu->mmio_base == NULL);
 
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
        pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-                                                     get_order(dev_table_size));
+                                                     get_order(pci_seg->dev_table_size));
        if (!pci_seg->dev_table)
                return -ENOMEM;
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
        free_pages((unsigned long)pci_seg->dev_table,
-                   get_order(dev_table_size));
+                   get_order(pci_seg->dev_table_size));
        pci_seg->dev_table = NULL;
 }
 
        entry = (((u64) hi) << 32) + lo;
 
        old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
-       if (old_devtb_size != dev_table_size) {
+       if (old_devtb_size != pci_seg->dev_table_size) {
                pr_err("The device table size of IOMMU:%d is not expected!\n",
                        iommu->index);
                return false;
        }
        old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
                    ? (__force void *)ioremap_encrypted(old_devtb_phys,
-                                                       dev_table_size)
-                   : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+                                                       pci_seg->dev_table_size)
+                   : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
 
        if (!old_devtb)
                return false;
 
        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
        pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-                                                   get_order(dev_table_size));
+                                                   get_order(pci_seg->dev_table_size));
        if (pci_seg->old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                memunmap(old_devtb);
 
        pci_seg->last_bdf = last_bdf;
        DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+       pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
 
        pci_seg->id = id;
        init_llist_head(&pci_seg->dev_data_list);
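
For context (not part of the patch): pci_seg->dev_table_size comes from tbl_size(), which rounds the device table up to a power-of-two number of pages based on the largest BDF found in that segment. Below is a minimal user-space sketch of that sizing, with get_order() reimplemented and last_bdf passed explicitly for illustration (in the driver it comes from the parsed IVRS table); the 32-byte DEV_TABLE_ENTRY_SIZE is assumed.

/* Illustrative user-space model of the sizing behind tbl_size();
 * not part of the patch. Assumes 4K pages and 32-byte device table
 * entries (DEV_TABLE_ENTRY_SIZE).
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define DEV_TABLE_ENTRY_SIZE	32

/* Same contract as the kernel's get_order(): smallest order of 4K
 * pages that covers 'size' bytes.
 */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/* Model of the driver's table size calculation, with last_bdf made
 * an explicit parameter for this sketch.
 */
static unsigned long tbl_size(int entry_size, int last_bdf)
{
	unsigned int shift = PAGE_SHIFT +
			     get_order((unsigned long)(last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int main(void)
{
	/* A segment whose largest BDF is 0xffff needs a 2MB device
	 * table: 65536 entries * 32 bytes.
	 */
	printf("dev_table_size = %lu bytes\n",
	       tbl_size(DEV_TABLE_ENTRY_SIZE, 0xffff));
	return 0;
}

Storing the result in the per-segment structure is what lets every allocation and free site above use get_order(pci_seg->dev_table_size) instead of a single global size.
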
                for_each_pci_segment(pci_seg) {
                        if (pci_seg->old_dev_tbl_cpy != NULL) {
                                free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-                                               get_order(dev_table_size));
+                                               get_order(pci_seg->dev_table_size));
                                pci_seg->old_dev_tbl_cpy = NULL;
                        }
                }
 
                for_each_pci_segment(pci_seg) {
                        free_pages((unsigned long)pci_seg->dev_table,
-                                  get_order(dev_table_size));
+                                  get_order(pci_seg->dev_table_size));
                        pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
                }