Making __page_to_pfn() take a const struct page * allows the cast in
lowmem_page_address() (introduced as a warning fixup for commit
33dd4e0ec911 "mm: make some struct page's const") to be removed.
Propagate the const qualifier to page_to_section() as well, since it is
called by the sparsemem variant of __page_to_pfn().
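
To illustrate the warning being avoided, here is a minimal, self-contained
userspace sketch (not kernel code): the struct page, page_to_pfn_* and
lowmem_pfn names below are simplified stand-ins for the real helpers, and
the pfn arithmetic is replaced with a trivial placeholder.

#include <stdio.h>

struct page {
	unsigned long flags;
};

/* Old-style prototype: non-const, even though the page is only read. */
static unsigned long page_to_pfn_nonconst(struct page *page)
{
	return page->flags;		/* stand-in for the real pfn math */
}

/* New-style prototype: const, matching how the argument is actually used. */
static unsigned long page_to_pfn_const(const struct page *page)
{
	return page->flags;
}

static unsigned long lowmem_pfn(const struct page *page)
{
	/*
	 * Against the non-const prototype this call only compiles cleanly
	 * with the cast; against the const one the cast can be dropped,
	 * which is the cleanup made to lowmem_page_address() below.
	 */
	unsigned long before = page_to_pfn_nonconst((struct page *)page);
	unsigned long after  = page_to_pfn_const(page);

	return before == after ? after : before;
}

int main(void)
{
	const struct page pg = { .flags = 42 };

	printf("pfn = %lu\n", lowmem_pfn(&pg));
	return 0;
}

Without the cast in the first call, gcc warns that passing the argument
discards the 'const' qualifier; making the callee take a const pointer is
the cleaner fix, and that is what the patch does for __page_to_pfn() and
page_to_section().
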
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 })
 
 #define __page_to_pfn(pg)                                              \
-({     struct page *__pg = (pg);                                       \
+({     const struct page *__pg = (pg);                                 \
        struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));     \
        (unsigned long)(__pg - __pgdat->node_mem_map) +                 \
         __pgdat->node_start_pfn;                                       \
  * section[i].section_mem_map == mem_map's address - start_pfn;
  */
 #define __page_to_pfn(pg)                                      \
-({     struct page *__pg = (pg);                               \
+({     const struct page *__pg = (pg);                         \
        int __sec = page_to_section(__pg);                      \
        (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
 })
 
        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline unsigned long page_to_section(struct page *page)
+static inline unsigned long page_to_section(const struct page *page)
 {
        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
 static __always_inline void *lowmem_page_address(const struct page *page)
 {
-       return __va(PFN_PHYS(page_to_pfn((struct page *)page)));
+       return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)