From: Ingo Molnar <mingo@kernel.org>
Date: Fri, 11 Aug 2017 11:51:59 +0000 (+0200)
Subject: Merge branch 'linus' into locking/core, to resolve conflicts
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=040cca3ab2f6;p=linux.git

Merge branch 'linus' into locking/core, to resolve conflicts

 Conflicts:
	include/linux/mm_types.h
	mm/huge_memory.c

I removed the smp_mb__before_spinlock(), as the following commit does:

  8b1b436dd1cc ("mm, locking: Rework {set,clear,mm}_tlb_flush_pending()")

and fixed up the affected commits.
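
For context, the sketch below models that rework in ordinary user-space C:
the single tlb_flush_pending flag, which needed smp_mb__before_spinlock(),
becomes a counter that is incremented and decremented around each batched
flush. This is only an illustration: C11 atomics stand in for the kernel's
atomic_t, and the struct and *_model function names are invented here, they
are not kernel interfaces.

  /* Illustrative user-space model of the set/clear -> inc/dec rework. */
  #include <stdatomic.h>
  #include <stdbool.h>

  struct mm_model {
          atomic_int tlb_flush_pending;   /* was: bool tlb_flush_pending */
  };

  /* A counter instead of a flag: two concurrent batched flushers can no
   * longer clear each other's pending indication. */
  static inline void inc_tlb_flush_pending_model(struct mm_model *mm)
  {
          atomic_fetch_add(&mm->tlb_flush_pending, 1);
  }

  static inline void dec_tlb_flush_pending_model(struct mm_model *mm)
  {
          atomic_fetch_sub(&mm->tlb_flush_pending, 1);
  }

  static inline bool mm_tlb_flush_pending_model(struct mm_model *mm)
  {
          return atomic_load(&mm->tlb_flush_pending) > 0;
  }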

Signed-off-by: Ingo Molnar <mingo@kernel.org>
---

040cca3ab2f6f8b8d26e0e4965abea2b9aa14818
diff --cc include/linux/mm_types.h
index 36ea3cf7d85e6,3cadee0a35088..dc1edec05a3fa
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@@ -531,58 -535,45 +535,68 @@@ extern void tlb_finish_mmu(struct mmu_g
   */
  static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
  {
 +	/*
 +	 * Must be called with PTL held, such that our PTL acquire will have
 +	 * observed the store from inc_tlb_flush_pending().
 +	 */
- 	return mm->tlb_flush_pending;
+ 	return atomic_read(&mm->tlb_flush_pending) > 0;
+ }
+ 
+ /*
+  * Returns true if at least two TLB flush batching threads run in parallel.
+  */
+ static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+ {
+ 	return atomic_read(&mm->tlb_flush_pending) > 1;
+ }
+ 
+ static inline void init_tlb_flush_pending(struct mm_struct *mm)
+ {
+ 	atomic_set(&mm->tlb_flush_pending, 0);
  }
- static inline void set_tlb_flush_pending(struct mm_struct *mm)
+ 
+ static inline void inc_tlb_flush_pending(struct mm_struct *mm)
  {
- 	mm->tlb_flush_pending = true;
+ 	atomic_inc(&mm->tlb_flush_pending);
+ 
  	/*
 -	 * Guarantee that the tlb_flush_pending increase does not leak into the
 -	 * critical section updating the page tables
 +	 * The only time this value is relevant is when there are indeed pages
 +	 * to flush. And we'll only flush pages after changing them, which
 +	 * requires the PTL.
 +	 *
 +	 * So the ordering here is:
 +	 *
- 	 *	mm->tlb_flush_pending = true;
++	 *	atomic_inc(&mm->tlb_flush_pending);
 +	 *	spin_lock(&ptl);
 +	 *	...
 +	 *	set_pte_at();
 +	 *	spin_unlock(&ptl);
 +	 *
 +	 *				spin_lock(&ptl)
 +	 *				mm_tlb_flush_pending();
 +	 *				....
 +	 *				spin_unlock(&ptl);
 +	 *
 +	 *	flush_tlb_range();
- 	 *	mm->tlb_flush_pending = false;
++	 *	atomic_dec(&mm->tlb_flush_pending);
 +	 *
 +	 * So the atomic_inc() is constrained by the PTL unlock, and the
 +	 * atomic_dec() is constrained by the TLB invalidate.
  	 */
 -	smp_mb__before_spinlock();
  }
+ 
  /* Clearing is done after a TLB flush, which also provides a barrier. */
- static inline void clear_tlb_flush_pending(struct mm_struct *mm)
- {
- 	/* see set_tlb_flush_pending */
- 	mm->tlb_flush_pending = false;
- }
- #else
- static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
- {
- 	return false;
- }
- static inline void set_tlb_flush_pending(struct mm_struct *mm)
- {
- }
- static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
  {
+ 	/*
+ 	 * Guarantee that the tlb_flush_pending does not leak into the
+ 	 * critical section, since we must order the PTE change and changes to
+ 	 * the pending TLB flush indication. We could have relied on TLB flush
+ 	 * as a memory barrier, but this behavior is not clearly documented.
+ 	 */
+ 	smp_mb__before_atomic();
+ 	atomic_dec(&mm->tlb_flush_pending);
  }
- #endif
  
  struct vm_fault;
  
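
The ordering that the hunk above documents amounts to a small protocol: a
flusher makes its increment visible before it takes the PTL to change PTEs,
and only decrements after the TLB invalidate; a reader that holds the PTL
and sees a non-zero count knows a flush is in flight. The sketch below is a
hedged user-space model of that protocol, assuming a pthread mutex stands in
for the PTL and C11 seq_cst atomics for atomic_t plus smp_mb__before_atomic();
the mm_model, batched_pte_update() and fault_sees_pending() names are
invented for illustration only.

  /* User-space model of the pending-TLB-flush protocol (not kernel code). */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct mm_model {
          pthread_mutex_t ptl;            /* stands in for the page-table lock */
          atomic_int tlb_flush_pending;   /* the counter from the hunk above */
          int pte;                        /* a single "page table entry" */
  };

  /* Batched-flush side: publish the pending flush, change the PTE under
   * the lock, invalidate, then retract the pending indication. */
  static void batched_pte_update(struct mm_model *mm, int newpte)
  {
          atomic_fetch_add(&mm->tlb_flush_pending, 1);    /* inc_tlb_flush_pending() */

          pthread_mutex_lock(&mm->ptl);
          mm->pte = newpte;                               /* set_pte_at() */
          pthread_mutex_unlock(&mm->ptl);

          /* flush_tlb_range() would run here; the seq_cst fetch_sub below
           * orders it before the decrement, playing the role of the
           * smp_mb__before_atomic() + atomic_dec() pair in the hunk. */
          atomic_fetch_sub(&mm->tlb_flush_pending, 1);    /* dec_tlb_flush_pending() */
  }

  /* Reader side: only meaningful while the "PTL" is held, as the comment
   * in mm_tlb_flush_pending() requires. */
  static bool fault_sees_pending(struct mm_model *mm)
  {
          bool pending;

          pthread_mutex_lock(&mm->ptl);
          pending = atomic_load(&mm->tlb_flush_pending) > 0;
          pthread_mutex_unlock(&mm->ptl);
          return pending;
  }

  int main(void)
  {
          struct mm_model mm = { .ptl = PTHREAD_MUTEX_INITIALIZER };

          atomic_fetch_add(&mm.tlb_flush_pending, 1);     /* a flush in flight */
          printf("pending during batching: %d\n", fault_sees_pending(&mm));
          atomic_fetch_sub(&mm.tlb_flush_pending, 1);

          batched_pte_update(&mm, 42);
          printf("pending after the flush: %d\n", fault_sees_pending(&mm));
          return 0;
  }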
diff --cc mm/huge_memory.c
index c76a720b936b8,216114f6ef0b7..ce883459e2466
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@@ -1496,17 -1495,13 +1496,24 @@@ int do_huge_pmd_numa_page(struct vm_fau
  		goto clear_pmdnuma;
  	}
  
+ 	/*
+ 	 * The page_table_lock above provides a memory barrier
+ 	 * with change_protection_range.
+ 	 */
+ 	if (mm_tlb_flush_pending(vma->vm_mm))
+ 		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+ 
 +	/*
 +	 * Since we took the NUMA fault, we must have observed the !accessible
 +	 * bit. Make sure all other CPUs agree with that, to avoid them
 +	 * modifying the page we're about to migrate.
 +	 *
 +	 * Must be done under PTL such that we'll observe the relevant
 +	 * inc_tlb_flush_pending().
 +	 */
 +	if (mm_tlb_flush_pending(vma->vm_mm))
 +		need_flush = true;
 +
  	/*
  	 * Migrate the THP to the requested node, returns with page unlocked
  	 * and access rights restored.
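
The two checks added above in do_huge_pmd_numa_page() share one shape: test
the pending counter while the PTL is held, and make sure stale TLB entries
are flushed before the THP is migrated. The condensed sketch below is a
hypothetical user-space model of that shape only; it is not the kernel's
do_huge_pmd_numa_page(), and numa_fault_model() plus the bare
tlb_flush_pending variable are invented names.

  /* Condensed model of the check-under-PTL-then-flush shape (not kernel code). */
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_int tlb_flush_pending;    /* models mm->tlb_flush_pending */

  /* In the real code this runs with the PTL held, so the counter read is
   * ordered against the flusher's PTL release. */
  static void numa_fault_model(void)
  {
          bool need_flush = false;

          /* One side of the merge flushes the huge-page range right away,
           * the other records that a flush is needed before migration;
           * both start from the same mm_tlb_flush_pending() test. */
          if (atomic_load(&tlb_flush_pending) > 0)
                  need_flush = true;

          if (need_flush)
                  puts("flush_tlb_range() before migrating the THP");
  }

  int main(void)
  {
          atomic_store(&tlb_flush_pending, 1);    /* a batched flush in flight */
          numa_fault_model();
          return 0;
  }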