From: Joerg Roedel
Date: Sun, 31 Oct 2021 21:26:53 +0000 (+0100)
Subject: Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/smmu', 'arm/tegra...
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=52d96919d6a846aace5841cd23055927c6e6ec2c;p=linux.git

Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/smmu', 'arm/tegra', 'iommu/fixes', 'x86/amd', 'x86/vt-d' and 'core' into next
---

52d96919d6a846aace5841cd23055927c6e6ec2c
diff --cc drivers/iommu/apple-dart.c
index fdfa39ec2a4d4,f0f4d1f74f922,559db9259e65c,cdc2e83b21864,559db9259e65c,559db9259e65c,fdfa39ec2a4d4,559db9259e65c,fdfa39ec2a4d4,559db9259e65c..96d4a1f8de797
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@@@@@@@@@@ -698,20 -703,19 -698,6 -696,6 -698,6 -698,6 -698,20 -698,6 -698,20 -698,6 +701,19 @@@@@@@@@@@@ static struct iommu_group *apple_dart_d
  #endif
  	group = generic_device_group(dev);
++++ + +  	res = ERR_PTR(-ENOMEM);
++++ + +  	if (!group)
++++ + +  		goto out;
++++ + +  
- - -     	group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+ ++++++++	group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
++++ + +  	if (!group_master_cfg) {
++++ + +  		iommu_group_put(group);
++++ + +  		goto out;
++++ + +  	}
++++ + +  
- - -     	memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
++++ + +  	iommu_group_set_iommudata(group, group_master_cfg,
++++ + +  				   apple_dart_release_group);
++++ + +  
  	for_each_stream_map(i, cfg, stream_map)
  		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
  			stream_map->dart->sid2group[sid] = group;
diff --cc drivers/iommu/dma-iommu.c
index 896bea04c347e,896bea04c347e,896bea04c347e,26cb95d3830a3,896bea04c347e,896bea04c347e,3e5a21b0bb24e,896bea04c347e,896bea04c347e,fffa8721a8f01..b42e38a0dbe26
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@@@@@@@@@@ -867,14 -867,14 -867,14 -860,14 -867,14 -867,14 -867,14 -867,14 -867,14 -802,47 +795,47 @@@@@@@@@@@@ static dma_addr_t iommu_dma_map_page(st
  {
  	phys_addr_t phys = page_to_phys(page) + offset;
  	bool coherent = dev_is_dma_coherent(dev);
--------- 	dma_addr_t dma_handle;
+++++++++ 	int prot = dma_info_to_prot(dir, coherent, attrs);
+++++++++ 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+++++++++ 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+++++++++ 	struct iova_domain *iovad = &cookie->iovad;
+++++++++ 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
+++++++++ 
+++++++++ 	/*
+++++++++ 	 * If both the physical buffer start address and size are
+++++++++ 	 * page aligned, we don't need to use a bounce page.
+++++++++ 	 */
+++++++++ 	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+++++++++ 		void *padding_start;
+++++++++ 		size_t padding_size, aligned_size;
+++++++++ 
+++++++++ 		aligned_size = iova_align(iovad, size);
+++++++++ 		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+++++++++ 					      iova_mask(iovad), dir, attrs);
+++++++++ 
+++++++++ 		if (phys == DMA_MAPPING_ERROR)
+++++++++ 			return DMA_MAPPING_ERROR;
+
------ -- 	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
------ -- 			coherent, dir, attrs);
------ -- 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
------ -- 	    dma_handle != DMA_MAPPING_ERROR)
+++++++++ 		/* Cleanup the padding area. */
+++++++++ 		padding_start = phys_to_virt(phys);
+++++++++ 		padding_size = aligned_size;
+++++++++ 
+++++++++ 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+++++++++ 		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+++++++++ 			padding_start += size;
+++++++++ 			padding_size -= size;
+++++++++ 		}
+++++++++ 
+++++++++ 		memset(padding_start, 0, padding_size);
+++++++++ 	}
++++++ ++ 
- 	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
- 			coherent, dir, attrs);
- 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- 	    dma_handle != DMA_MAPPING_ERROR)
+++++++++ 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
  		arch_sync_dma_for_device(phys, size, dir);
--------- 	return dma_handle;
+++++++++ 
+++++++++ 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+++++++++ 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+++++++++ 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+++++++++ 	return iova;
  }
  
  static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
@@@@@@@@@@@ -1016,9 -1016,9 -1016,9 -1009,9 -1016,9 -1016,9 -1016,10 -1016,9 -1016,9 -994,12 +987,13 @@@@@@@@@@@@ static int iommu_dma_map_sg(struct devi
  	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
  		ret = iommu_deferred_attach(dev, domain);
------ --- 		goto out;
++++++ +++ 		if (ret)
++++++ +++ 			goto out;
  	}
  
+++++++++ 	if (dev_use_swiotlb(dev))
+++++++++ 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+++++++++ 
  	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
  		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
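
Note on the apple-dart hunk above: the merged result collapses a two-step kzalloc() plus memcpy() into a single kmemdup(), which allocates and copies in one call. A minimal userspace sketch of the same pattern, using malloc()/calloc() as stand-ins for GFP_KERNEL allocation and a hypothetical struct master_cfg (not the driver's real type):

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the driver's per-group master config. */
struct master_cfg {
	int num_streams;
	unsigned long sidmap;
};

/* Before the cleanup: zero-allocate, then copy over the zeroes. */
static struct master_cfg *dup_cfg_two_step(const struct master_cfg *cfg)
{
	struct master_cfg *copy = calloc(1, sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, cfg, sizeof(*copy));
	return copy;
}

/* After: allocate and copy in one call, mirroring
 * kmemdup(cfg, sizeof(*cfg), GFP_KERNEL) in the hunk. */
static struct master_cfg *dup_cfg(const struct master_cfg *cfg)
{
	struct master_cfg *copy = malloc(sizeof(*copy));

	if (copy)
		memcpy(copy, cfg, sizeof(*copy));
	return copy;
}

Besides dropping a redundant zeroing pass, the one-call form leaves no window where the group holds a half-initialized config.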
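
The bounce-buffer test added in the first dma-iommu.c hunk, iova_offset(iovad, phys | size), folds two alignment checks into one: OR-ing the start address with the size leaves low bits set if and only if at least one of them is not aligned to the IOVA granule, and only then is a swiotlb bounce page needed. A small self-contained sketch of that check (needs_bounce and granule are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdint.h>

/* granule must be a power of two, e.g. the IOVA page size. */
static bool needs_bounce(uint64_t phys, uint64_t size, uint64_t granule)
{
	/* (phys | size) has bits set below the granule iff either the
	 * start address or the length is not granule-aligned. */
	return ((phys | size) & (granule - 1)) != 0;
}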