#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
+#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
+#define pmd_populate_kernel pmd_populate
pgtable_t pte_alloc_one(struct mm_struct *mm);
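
With pgtable_t now being a pte_t * on sparc32 (the typedef change itself lives outside this excerpt), pmd_pgtable() hands back the table's nocache virtual address rather than its struct page, so the populate and teardown paths traffic in the same pointer type. As a hedged illustration only, the sketch below is loosely modelled on the generic free_pte_range() in mm/memory.c; example_free_pte_table() is a hypothetical helper, not part of this patch.

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/* Hypothetical helper (illustration only): tear down one PTE table. */
static void example_free_pte_table(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t table = pmd_pgtable(*pmd);	/* now a pte_t *, via __pmd_page() */

	pmd_clear(pmd);
	pte_free_tlb(tlb, table, addr);		/* frees the table via the arch __pte_free_tlb() */
	mm_dec_nr_ptes(tlb->mm);
}
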
static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
+static inline unsigned long __pmd_page(pmd_t pmd)
+{
+	unsigned long v;
+
+	if (srmmu_device_memory(pmd_val(pmd)))
+		BUG();
+
+	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
+	return (unsigned long)__nocache_va(v << 4);
+}
+
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
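
To make the arithmetic in pmd_set()/__pmd_page() concrete: the descriptor stores the table's physical address shifted right by 4 alongside the SRMMU_ET_PTD type bits, so recovering the table means masking the pointer field, shifting back up, and converting physical to nocache-virtual. The stand-alone program below mirrors that round trip in user space; the DEMO_* constants and the linear phys/virt window are made-up stand-ins for the real pgtsrmmu.h values and __nocache_pa()/__nocache_va(), chosen only so the example runs.

#include <assert.h>
#include <stdio.h>

#define DEMO_ET_PTD        0x1UL         /* stand-in for SRMMU_ET_PTD       */
#define DEMO_PTD_PMASK     0xfffffff0UL  /* stand-in for SRMMU_PTD_PMASK    */
#define DEMO_NOCACHE_VBASE 0xf0000000UL  /* pretend nocache virtual window  */
#define DEMO_NOCACHE_PBASE 0x10000000UL  /* pretend physical base of window */

static unsigned long demo_nocache_pa(unsigned long va)
{
	return va - DEMO_NOCACHE_VBASE + DEMO_NOCACHE_PBASE;
}

static unsigned long demo_nocache_va(unsigned long pa)
{
	return pa - DEMO_NOCACHE_PBASE + DEMO_NOCACHE_VBASE;
}

/* What pmd_set() stores: physical address >> 4, plus the PTD type bits. */
static unsigned long demo_pmd_set(unsigned long pte_table_va)
{
	return DEMO_ET_PTD | (demo_nocache_pa(pte_table_va) >> 4);
}

/* What __pmd_page() recovers: mask, shift back, convert to virtual. */
static unsigned long demo_pmd_page(unsigned long pmd_val)
{
	return demo_nocache_va((pmd_val & DEMO_PTD_PMASK) << 4);
}

int main(void)
{
	unsigned long pte_table = DEMO_NOCACHE_VBASE + 0x4000;	/* pretend allocation */
	unsigned long pmd = demo_pmd_set(pte_table);

	assert(demo_pmd_page(pmd) == pte_table);
	printf("pmd=%#lx -> pte table at %#lx\n", pmd, demo_pmd_page(pmd));
	return 0;
}
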
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
-{
-	unsigned long ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
-	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
-}
-
/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
*/
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
-	unsigned long pte;
+	pte_t *ptep;
	struct page *page;

-	if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
+	if ((ptep = pte_alloc_one_kernel(mm)) == 0)
		return NULL;
-	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
+	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
-	return page;
+	return ptep;
}
-void pte_free(struct mm_struct *mm, pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
-	unsigned long p;
-
-	pgtable_pte_page_dtor(pte);
-	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
-	if (p == 0)
-		BUG();
-	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
+	struct page *page;

-	/* free non cached virtual address*/
-	srmmu_free_nocache(__nocache_va(p), SRMMU_PTE_TABLE_SIZE);
+	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+	pgtable_pte_page_dtor(page);
+	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}
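
For the allocation direction, here is a hedged sketch of how a caller now consumes the pte_t *-typed pgtable_t; it is simplified from mm/memory.c's __pte_alloc() (the accounting and memory-barrier details are omitted), and example_pte_alloc() is hypothetical, not part of the patch.

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Hypothetical caller (illustration only): allocate a PTE table and hook
 * it into the PMD, dropping it again if another thread won the race. */
static int example_pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);	/* now a pte_t *, not a struct page * */

	if (!new)
		return -ENOMEM;

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd)))
		pmd_populate(mm, pmd, new);	/* expands to pmd_set(pmd, new) */
	else
		pte_free(mm, new);
	spin_unlock(ptl);
	return 0;
}
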
/* context handling - a dynamically sized pool is used */