#include <asm/tlb.h>
#include <asm/tlbflush.h>
+extern void mmu_page_ctor(void *page);
+extern void mmu_page_dtor(void *page);
+
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (pte) {
- __flush_page_to_ram(pte);
- flush_tlb_kernel_page(pte);
- nocache_page(pte);
- }
+ if (pte)
+ mmu_page_ctor(pte);
return pte;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- cache_page(pte);
+ mmu_page_dtor(pte);
free_page((unsigned long) pte);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
- pte_t *pte;
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
- pte = kmap(page);
- __flush_page_to_ram(pte);
- flush_tlb_kernel_page(pte);
- nocache_page(pte);
+ mmu_page_ctor(kmap(page));
kunmap(page);
+
return page;
}
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
pgtable_pte_page_dtor(page);
- cache_page(kmap(page));
+ mmu_page_dtor(kmap(page));
kunmap(page);
__free_page(page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
pgtable_pte_page_dtor(page);
- cache_page(kmap(page));
+ mmu_page_dtor(kmap(page));
kunmap(page);
__free_page(page);
}
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;
- flush_tlb_kernel_page(page);
- nocache_page(page);
+ mmu_page_ctor(page);
new = PD_PTABLE(page);
PD_MARKBITS(new) = 0xfe;
if (PD_MARKBITS(dp) == 0xff) {
/* all tables in page are free, free page */
list_del(dp);
- cache_page((void *)page);
+ mmu_page_dtor((void *)page);
free_page (page);
return 1;
} else if (ptable_list.next != dp) {
EXPORT_SYMBOL(mm_cachebits);
#endif
+
+/*
+ * The Motorola 680x0 user's manual recommends using uncached memory for address
+ * translation tables.
+ *
+ * Seeing how the MMU can be external on (some of) these chips, that seems like
+ * a very important recommendation to follow. Provide some helpers to combat
+ * 'variation' amongst the users of this.
+ */
+
+void mmu_page_ctor(void *page)
+{
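+ /* push any cached data for this page out to RAM */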
+ __flush_page_to_ram(page);
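+ /* invalidate the now-stale TLB entry for the kernel mapping */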
+ flush_tlb_kernel_page(page);
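+ /* and mark the kernel mapping of the page non-cacheable */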
+ nocache_page(page);
+}
+
+void mmu_page_dtor(void *page)
+{
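+ /* switch the kernel mapping back to the default cache mode */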
+ cache_page(page);
+}
+
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
clear_page(ptablep);
- __flush_page_to_ram(ptablep);
- flush_tlb_kernel_page(ptablep);
- nocache_page(ptablep);
+ mmu_page_ctor(ptablep);
return ptablep;
}
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
clear_page(last_pgtable);
- __flush_page_to_ram(last_pgtable);
- flush_tlb_kernel_page(last_pgtable);
- nocache_page(last_pgtable);
+ mmu_page_ctor(last_pgtable);
}
return last_pgtable;