From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Mon, 8 Aug 2022 00:52:35 +0000 (-0700)
Subject: Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=4e23eeebb2e57f5a28b36221aa776b5a1122dde5;p=linux.git

Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander
   Lobakin)

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side'
   (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
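Several commits in this pull change bitmap_weight() and __bitmap_weight() to return an unsigned type, which is why callers such as the mlx4 hunk below add an explicit cast when feeding the result into a min()-style comparison against a signed field. The following is a minimal userspace sketch of that caller-side pattern, not kernel code: the bitmap_weight() stand-in (built on the compiler's __builtin_popcountl()), the port values, and main() are illustrative only.

/* illustrative stand-in, mirroring the unsigned return type after this series */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
	unsigned int k, w = 0, lim = nbits / BITS_PER_LONG;

	/* whole words, then the partial tail word, as __bitmap_weight() does */
	for (k = 0; k < lim; k++)
		w += (unsigned int)__builtin_popcountl(src[k]);
	if (nbits % BITS_PER_LONG)
		w += (unsigned int)__builtin_popcountl(src[k] &
				((1UL << (nbits % BITS_PER_LONG)) - 1));
	return w;
}

int main(void)
{
	unsigned long ports[1] = { 0xffUL };	/* 8 "active ports" set */
	int num_ports = 4;			/* signed field, as in the mlx4 caps struct */

	unsigned int field = bitmap_weight(ports, 32);

	/* the cast keeps the comparison unsigned-vs-unsigned, as in the fw.c fix */
	if (field > (unsigned int)num_ports)
		field = (unsigned int)num_ports;

	printf("%u\n", field);	/* prints 4 */
	return 0;
}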
--- 4e23eeebb2e57f5a28b36221aa776b5a1122dde5
diff --cc MAINTAINERS
index b7deb6e92acdb,19c8d0ef1177f..868bbf31603d3
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -13133,9 -12845,11 +13132,10 @@@ M:	Andrew Morton
diff --cc arch/powerpc/include/asm/archrandom.h
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
 -#ifdef CONFIG_ARCH_RANDOM
 -
 -bool __must_check arch_get_random_seed_long(unsigned long *v);
 -
 -static inline bool __must_check arch_get_random_long(unsigned long *v)
 -{
 -	return false;
 -}
--
 -static inline bool __must_check arch_get_random_int(unsigned int *v)
 +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
  {
 -	return false;
 +	return 0;
  }
  
- static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
 -
 -static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
--{
- 	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
- 		return 1;
- 	return 0;
 -	unsigned long val;
 -	bool rc;
 -
 -	rc = arch_get_random_seed_long(&val);
 -	if (rc)
 -		*v = val;
 -
 -	return rc;
--}
 -#endif /* CONFIG_ARCH_RANDOM */
++size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);
  
  #ifdef CONFIG_PPC_POWERNV
 -int powernv_hwrng_present(void);
 -int powernv_get_random_long(unsigned long *v);
 -int powernv_get_random_real_mode(unsigned long *v);
 -#else
 -static inline int powernv_hwrng_present(void) { return 0; }
 -static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; }
 +int pnv_get_random_long(unsigned long *v);
  #endif
  
  #endif /* _ASM_POWERPC_ARCHRANDOM_H */
diff --cc arch/powerpc/kernel/setup-common.c
index 1a02629ec70b0,5175726c703d7..dd98f43bd685c
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@@ -171,6 -171,18 +171,14 @@@ EXPORT_SYMBOL_GPL(machine_power_off)
  void (*pm_power_off)(void);
  EXPORT_SYMBOL_GPL(pm_power_off);
  
 -#ifdef CONFIG_ARCH_RANDOM
 -bool __must_check arch_get_random_seed_long(unsigned long *v)
++size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+ {
 -	if (ppc_md.get_random_seed)
 -		return ppc_md.get_random_seed(v);
 -
 -	return false;
++	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
++		return 1;
++	return 0;
+ }
 -EXPORT_SYMBOL(arch_get_random_seed_long);
 -
 -#endif
++EXPORT_SYMBOL(arch_get_random_seed_longs);
+ 
  void machine_halt(void)
  {
  	machine_shutdown();
diff --cc drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb16,af054a3808cae..dcb9eb1899ce2
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@@ -463,7 -463,7 +463,7 @@@ int mlx4_QUERY_FUNC_CAP_wrapper(struct 
  		field = min(
  			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
 -			dev->caps.num_ports);
 -			(unsigned long)dev->caps.num_ports);
++			(unsigned int) dev->caps.num_ports);
  		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
  
  		size = dev->caps.function_caps; /* set PF behaviours */
diff --cc include/linux/bitmap.h
index 2e6cd56810409,035d4ac666419..f65410a49fdac
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@@ -163,7 -163,7 +163,7 @@@ bool __bitmap_intersects(const unsigne
  		const unsigned long *bitmap2, unsigned int nbits);
  bool __bitmap_subset(const unsigned long *bitmap1,
  		const unsigned long *bitmap2, unsigned int nbits);
- int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
 -unsigned long __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
++unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
  void __bitmap_set(unsigned long *map, unsigned int start, int len);
  void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@@ -419,7 -431,8 +431,8 @@@ static inline bool bitmap_full(const un
  	return find_first_zero_bit(src, nbits) == nbits;
  }
  
- static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+ static __always_inline
 -unsigned long bitmap_weight(const unsigned long *src, unsigned int nbits)
++unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
  {
  	if (small_const_nbits(nbits))
  		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --cc include/linux/cpumask.h
index 7073873238621,80627362c7746..0d435d0edbcb4
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@@ -179,32 -274,30 +195,48 @@@ static inline unsigned int cpumask_next
  	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
  }
  
- int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
- int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
- 
 +#if NR_CPUS == 1
 +/* Uniprocessor: there is only one valid CPU */
 +static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 +{
 +	return 0;
 +}
 +
 +static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
 +					     const struct cpumask *src2p) {
 +	return cpumask_first_and(src1p, src2p);
 +}
 +
 +static inline int cpumask_any_distribute(const struct cpumask *srcp)
 +{
 +	return cpumask_first(srcp);
 +}
 +#else
 +unsigned int cpumask_local_spread(unsigned int i, int node);
- int cpumask_any_and_distribute(const struct cpumask *src1p,
++unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ 			const struct cpumask *src2p);
- int cpumask_any_distribute(const struct cpumask *srcp);
++unsigned int cpumask_any_distribute(const struct cpumask *srcp);
 +#endif /* NR_CPUS */
+ 
+ /**
+  * cpumask_next_and - get the next cpu in *src1p & *src2p
+  * @n: the cpu prior to the place to search (ie. return will be > @n)
+  * @src1p: the first cpumask pointer
+  * @src2p: the second cpumask pointer
+  *
+  * Returns >= nr_cpu_ids if no further cpus set in both.
+  */
+ static inline
+ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ 			      const struct cpumask *src2p)
+ {
+ 	/* -1 is a legal arg here. */
+ 	if (n != -1)
+ 		cpumask_check(n);
+ 	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ 		nr_cpumask_bits, n + 1);
+ }
+ 
 -unsigned int cpumask_local_spread(unsigned int i, int node);
 -unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
 -					const struct cpumask *src2p);
 -unsigned int cpumask_any_distribute(const struct cpumask *srcp);
 -
  /**
   * for_each_cpu - iterate over every cpu in a mask
   * @cpu: the (optionally unsigned) integer iterator
@@@ -229,7 -322,7 +261,7 @@@
  		(cpu) = cpumask_next_zero((cpu), (mask)),	\
  		(cpu) < nr_cpu_ids;)
  
- int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
 -unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
++unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
  
  /**
   * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@@ -265,6 -358,27 +297,26 @@@
  		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
  		(cpu) < nr_cpu_ids;)
  
+ /**
+  * cpumask_any_but - return a "random" in a cpumask, but not this one.
+  * @mask: the cpumask to search
+  * @cpu: the cpu to ignore.
+  *
+  * Often used to find any cpu but smp_processor_id() in a mask.
+  * Returns >= nr_cpu_ids if no cpus set.
+  */
+ static inline
+ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+ {
+ 	unsigned int i;
+ 
+ 	cpumask_check(cpu);
+ 	for_each_cpu(i, mask)
+ 		if (i != cpu)
+ 			break;
+ 	return i;
+ }
 -#endif /* SMP */
+ 
  #define CPU_BITS_NONE						\
  {								\
  	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
diff --cc include/linux/gfp_types.h
index 0000000000000,06fc85cee23fe..d88c46ca82e17
mode 000000,100644..100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@@ -1,0 -1,348 +1,348 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #ifndef __LINUX_GFP_TYPES_H
+ #define __LINUX_GFP_TYPES_H
+ 
+ /* The typedef is in types.h but we want the documentation here */
+ #if 0
+ /**
+  * typedef gfp_t - Memory allocation flags.
+  *
+  * GFP flags are commonly used throughout Linux to indicate how memory
+  * should be allocated.  The GFP acronym stands for get_free_pages(),
+  * the underlying memory allocation function.  Not every GFP flag is
+  * supported by every function which may allocate memory.  Most users
+  * will want to use a plain ``GFP_KERNEL``.
+  */
+ typedef unsigned int __bitwise gfp_t;
+ #endif
+ 
+ /*
+  * In case of changes, please don't forget to update
+  * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+  */
+ 
+ /* Plain integer GFP bitmasks. Do not use this directly. */
+ #define ___GFP_DMA		0x01u
+ #define ___GFP_HIGHMEM		0x02u
+ #define ___GFP_DMA32		0x04u
+ #define ___GFP_MOVABLE		0x08u
+ #define ___GFP_RECLAIMABLE	0x10u
+ #define ___GFP_HIGH		0x20u
+ #define ___GFP_IO		0x40u
+ #define ___GFP_FS		0x80u
+ #define ___GFP_ZERO		0x100u
+ #define ___GFP_ATOMIC		0x200u
+ #define ___GFP_DIRECT_RECLAIM	0x400u
+ #define ___GFP_KSWAPD_RECLAIM	0x800u
+ #define ___GFP_WRITE		0x1000u
+ #define ___GFP_NOWARN		0x2000u
+ #define ___GFP_RETRY_MAYFAIL	0x4000u
+ #define ___GFP_NOFAIL		0x8000u
+ #define ___GFP_NORETRY		0x10000u
+ #define ___GFP_MEMALLOC		0x20000u
+ #define ___GFP_COMP		0x40000u
+ #define ___GFP_NOMEMALLOC	0x80000u
+ #define ___GFP_HARDWALL		0x100000u
+ #define ___GFP_THISNODE		0x200000u
+ #define ___GFP_ACCOUNT		0x400000u
+ #define ___GFP_ZEROTAGS		0x800000u
+ #ifdef CONFIG_KASAN_HW_TAGS
+ #define ___GFP_SKIP_ZERO		0x1000000u
+ #define ___GFP_SKIP_KASAN_UNPOISON	0x2000000u
+ #define ___GFP_SKIP_KASAN_POISON	0x4000000u
+ #else
+ #define ___GFP_SKIP_ZERO		0
+ #define ___GFP_SKIP_KASAN_UNPOISON	0
+ #define ___GFP_SKIP_KASAN_POISON	0
+ #endif
+ #ifdef CONFIG_LOCKDEP
+ #define ___GFP_NOLOCKDEP	0x8000000u
+ #else
+ #define ___GFP_NOLOCKDEP	0
+ #endif
+ /* If the above are modified, __GFP_BITS_SHIFT may need updating */
+ 
+ /*
+  * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+  *
+  * Do not put any conditional on these. If necessary modify the definitions
+  * without the underscores and use them consistently. The definitions here may
+  * be used in bit comparisons.
+  */
+ #define __GFP_DMA	((__force gfp_t)___GFP_DMA)
+ #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
+ #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
+ #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+ #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+ 
+ /**
+  * DOC: Page mobility and placement hints
+  *
+  * Page mobility and placement hints
+  * ---------------------------------
+  *
+  * These flags provide hints about how mobile the page is. Pages with similar
+  * mobility are placed within the same pageblocks to minimise problems due
+  * to external fragmentation.
+  *
+  * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+  * moved by page migration during memory compaction or can be reclaimed.
+  *
+  * %__GFP_RECLAIMABLE is used for slab allocations that specify
+  * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+  *
+  * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+  * these pages will be spread between local zones to avoid all the dirty
+  * pages being in one zone (fair zone allocation policy).
+  *
+  * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+  *
+  * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+  * node with no fallbacks or placement policy enforcements.
+  *
+  * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+  */
+ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+ #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
+ #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL)
+ #define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
+ #define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
+ 
+ /**
+  * DOC: Watermark modifiers
+  *
+  * Watermark modifiers -- controls access to emergency reserves
+  * ------------------------------------------------------------
+  *
+  * %__GFP_HIGH indicates that the caller is high-priority and that granting
+  * the request is necessary before the system can make forward progress.
+  * For example, creating an IO context to clean pages.
+  *
+  * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+  * high priority. Users are typically interrupt handlers. This may be
+  * used in conjunction with %__GFP_HIGH
+  *
+  * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+  * the caller guarantees the allocation will allow more memory to be freed
+  * very shortly e.g. process exiting or swapping. Users either should
+  * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+  * Users of this flag have to be extremely careful to not deplete the reserve
+  * completely and implement a throttling mechanism which controls the
+  * consumption of the reserve based on the amount of freed memory.
+  * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+  * before using this flag.
+  *
+  * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+  * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+  */
+ #define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
+ #define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
+ #define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
+ #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+ 
+ /**
+  * DOC: Reclaim modifiers
+  *
+  * Reclaim modifiers
+  * -----------------
+  * Please note that all the following flags are only applicable to sleepable
+  * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+  *
+  * %__GFP_IO can start physical IO.
+  *
+  * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+  * allocator recursing into the filesystem which might already be holding
+  * locks.
+  *
+  * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+  * This flag can be cleared to avoid unnecessary delays when a fallback
+  * option is available.
+  *
+  * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+  * the low watermark is reached and have it reclaim pages until the high
+  * watermark is reached. A caller may wish to clear this flag when fallback
+  * options are available and the reclaim is likely to disrupt the system. The
+  * canonical example is THP allocation where a fallback is cheap but
+  * reclaim/compaction may cause indirect stalls.
+  *
+  * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+  *
+  * The default allocator behavior depends on the request size. We have a concept
+  * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+  * !costly allocations are too essential to fail so they are implicitly
+  * non-failing by default (with some exceptions like OOM victims might fail so
+  * the caller still has to check for failures) while costly requests try to be
+  * not disruptive and back off even without invoking the OOM killer.
+  * The following three modifiers might be used to override some of these
+  * implicit rules
+  *
+  * %__GFP_NORETRY: The VM implementation will try only very lightweight
+  * memory direct reclaim to get some memory under memory pressure (thus
+  * it can sleep). It will avoid disruptive actions like OOM killer. The
+  * caller must handle the failure which is quite likely to happen under
+  * heavy memory pressure. The flag is suitable when failure can easily be
+  * handled at small cost, such as reduced throughput
+  *
+  * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+  * procedures that have previously failed if there is some indication
+  * that progress has been made else where.  It can wait for other
+  * tasks to attempt high level approaches to freeing memory such as
+  * compaction (which removes fragmentation) and page-out.
+  * There is still a definite limit to the number of retries, but it is
+  * a larger limit than with %__GFP_NORETRY.
+  * Allocations with this flag may fail, but only when there is
+  * genuinely little unused memory. While these allocations do not
+  * directly trigger the OOM killer, their failure indicates that
+  * the system is likely to need to use the OOM killer soon.  The
+  * caller must handle failure, but can reasonably do so by failing
+  * a higher-level request, or completing it only in a much less
+  * efficient manner.
+  * If the allocation does fail, and the caller is in a position to
+  * free some non-essential memory, doing so could benefit the system
+  * as a whole.
+  *
+  * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+  * cannot handle allocation failures. The allocation could block
+  * indefinitely but will never return with failure. Testing for
+  * failure is pointless.
+  * New users should be evaluated carefully (and the flag should be
+  * used only when there is no reasonable failure policy) but it is
+  * definitely preferable to use the flag rather than opencode endless
+  * loop around allocator.
+  * Using this flag for costly allocations is _highly_ discouraged.
+  */
+ #define __GFP_IO	((__force gfp_t)___GFP_IO)
+ #define __GFP_FS	((__force gfp_t)___GFP_FS)
+ #define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+ #define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+ #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+ #define __GFP_RETRY_MAYFAIL	((__force gfp_t)___GFP_RETRY_MAYFAIL)
+ #define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
+ #define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
+ 
+ /**
+  * DOC: Action modifiers
+  *
+  * Action modifiers
+  * ----------------
+  *
+  * %__GFP_NOWARN suppresses allocation failure reports.
+  *
+  * %__GFP_COMP address compound page metadata.
+  *
+  * %__GFP_ZERO returns a zeroed page on success.
+  *
+  * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+  * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+  * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+  * memory tags at the same time as zeroing memory has minimal additional
+  * performace impact.
+  *
+  * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
+  * Only effective in HW_TAGS mode.
+  *
+  * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
+  * Typically, used for userspace pages. Only effective in HW_TAGS mode.
+  */
+ #define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
+ #define __GFP_COMP	((__force gfp_t)___GFP_COMP)
+ #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
+ #define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
+ #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+ #define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
+ #define __GFP_SKIP_KASAN_POISON   ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+ 
+ /* Disable lockdep for GFP context tracking */
+ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+ 
+ /* Room for N __GFP_FOO bits */
+ #define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+ 
+ /**
+  * DOC: Useful GFP flag combinations
+  *
+  * Useful GFP flag combinations
+  * ----------------------------
+  *
+  * Useful GFP flag combinations that are commonly used. It is recommended
+  * that subsystems start with one of these combinations and then set/clear
+  * %__GFP_FOO flags as necessary.
+  *
+  * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
+  * watermark is applied to allow access to "atomic reserves".
+  * The current implementation doesn't support NMI and few other strict
+  * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+  *
+  * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+  * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+  *
+  * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+  * accounted to kmemcg.
+  *
+  * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+  * reclaim, start physical IO or use any filesystem callback.
+  *
+  * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+  * that do not require the starting of any physical IO.
+  * Please try to avoid using this flag directly and instead use
+  * memalloc_noio_{save,restore} to mark the whole scope which cannot
+  * perform any IO with a short explanation why. All allocation requests
+  * will inherit GFP_NOIO implicitly.
+  *
+  * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+  * Please try to avoid using this flag directly and instead use
+  * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+  * recurse into the FS layer with a short explanation why. All allocation
+  * requests will inherit GFP_NOFS implicitly.
+  *
+  * %GFP_USER is for userspace allocations that also need to be directly
+  * accessibly by the kernel or hardware. It is typically used by hardware
+  * for buffers that are mapped to userspace (e.g. graphics) that hardware
+  * still must DMA to. cpuset limits are enforced for these allocations.
+  *
+  * %GFP_DMA exists for historical reasons and should be avoided where possible.
+  * The flags indicates that the caller requires that the lowest zone be
+  * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+  * it would require careful auditing as some users really require it and
+  * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+  * lowest zone as a type of emergency reserve.
+  *
+  * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+  * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+  * because the DMA32 kmalloc cache array is not implemented.
+  * (Reason: there is no such user in kernel).
+  *
+  * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+  * do not need to be directly accessible by the kernel but that cannot
+  * move once in use. An example may be a hardware allocation that maps
+  * data directly into userspace but has no addressing limitations.
+  *
+  * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+  * need direct access to but can use kmap() when access is required. They
+  * are expected to be movable via page reclaim or page migration. Typically,
+  * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+  *
+  * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+  * are compound allocations that will generally fail quickly if memory is not
+  * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+  * version does not attempt reclaim/compaction at all and is by default used
+  * in page fault path, while the non-light is used by khugepaged.
+  */
+ #define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+ #define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+ #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+ #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
+ #define GFP_NOIO	(__GFP_RECLAIM)
+ #define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
+ #define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+ #define GFP_DMA		__GFP_DMA
+ #define GFP_DMA32	__GFP_DMA32
+ #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
+ #define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
 -			 __GFP_SKIP_KASAN_POISON)
++			 __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
+ #define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ 			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+ #define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+ 
+ #endif /* __LINUX_GFP_TYPES_H */
diff --cc lib/Makefile
index 17e48da223e28,731cea0342d1e..c952121419282
--- a/lib/Makefile
+++ b/lib/Makefile
@@@ -33,10 -33,11 +33,10 @@@ lib-y := ctype.o string.o vsprintf.o cm
  	 flex_proportions.o ratelimit.o show_mem.o \
  	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
  	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- 	 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
+ 	 nmi_backtrace.o win_minmax.o memcat_p.o \
- 	 buildid.o
+ 	 buildid.o cpumask.o
  
  lib-$(CONFIG_PRINTK) += dump_stack.o
 -lib-$(CONFIG_SMP) += cpumask.o
  
  lib-y += kobject.o klist.o
  obj-y += lockref.o
diff --cc lib/bitmap.c
index b18e31ea6e664,2b67cd6576924..488e6c3e5acc8
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@@ -333,10 -333,9 +333,9 @@@ bool __bitmap_subset(const unsigned lon
  }
  EXPORT_SYMBOL(__bitmap_subset);
  
- int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 -unsigned long __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
++unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
  {
- 	unsigned int k, lim = bits/BITS_PER_LONG;
- 	int w = 0;
 -	unsigned long k, w = 0, lim = bits/BITS_PER_LONG;
++	unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
  
  	for (k = 0; k < lim; k++)
  		w += hweight_long(bitmap[k]);
diff --cc tools/include/linux/bitmap.h
index afdf93bebaaf1,ae1852e391423..65d0747c5205c
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@@ -11,10 -11,10 +11,10 @@@
  #define DECLARE_BITMAP(name,bits) \
  	unsigned long name[BITS_TO_LONGS(bits)]
  
- int __bitmap_weight(const unsigned long *bitmap, int bits);
 -unsigned long __bitmap_weight(const unsigned long *bitmap, unsigned int bits);
++unsigned int __bitmap_weight(const unsigned long *bitmap, int bits);
  void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
  		 const unsigned long *bitmap2, int bits);
- int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
  		 const unsigned long *bitmap2, unsigned int bits);
  bool __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2,
  		    unsigned int bits);
@@@ -61,7 -61,7 +61,7 @@@ static inline bool bitmap_full(const un
  	return find_first_zero_bit(src, nbits) == nbits;
  }
  
- static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
 -static inline unsigned long bitmap_weight(const unsigned long *src, unsigned int nbits)
++static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
  {
  	if (small_const_nbits(nbits))
  		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --cc tools/lib/bitmap.c
index 354f8cdc08803,e1fafc131a49c..c3e4871967bcf
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@@ -5,9 -5,9 +5,9 @@@
   */
  #include <linux/bitmap.h>
  
- int __bitmap_weight(const unsigned long *bitmap, int bits)
 -unsigned long __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
++unsigned int __bitmap_weight(const unsigned long *bitmap, int bits)
  {
- 	int k, w = 0, lim = bits/BITS_PER_LONG;
 -	unsigned long k, w = 0, lim = bits/BITS_PER_LONG;
++	unsigned int k, w = 0, lim = bits/BITS_PER_LONG;
  
  	for (k = 0; k < lim; k++)
  		w += hweight_long(bitmap[k]);
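The powerpc hunks earlier in this diff replace the old one-value arch_get_random_seed_long() hook with the batched arch_get_random_seed_longs(v, max_longs) form, which returns how many longs were actually filled (here 0 or 1, via ppc_md.get_random_seed). The following is a minimal userspace sketch of that calling convention only, not kernel code: get_seed(), the seed value, and main() are hypothetical stand-ins.

/* illustrative stand-in for the batched seed interface shown in the patch */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* hypothetical platform hook, standing in for ppc_md.get_random_seed */
static bool get_seed(unsigned long *v)
{
	*v = 0x12345678UL;	/* a real backend would read a hardware RNG */
	return true;
}

static size_t arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	/* mirror the patch: at most one long per call, only if a backend exists */
	if (max_longs && get_seed(v))
		return 1;
	return 0;
}

int main(void)
{
	unsigned long buf[4];
	size_t got = arch_get_random_seed_longs(buf, 4);

	printf("filled %zu long(s), first = %#lx\n", got, got ? buf[0] : 0UL);
	return 0;
}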