diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 82c15d96c0fad8..3c885471e3536a 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -31,12 +31,14 @@ /* * TODO: Only one kernel-user split for each MMU currently supported. */ +#define PAGE_OFFSET_SHIFT (CONFIG_LINUX_LINK_BASE - CONFIG_LINUX_RAM_BASE) + #if defined(CONFIG_ARC_MMU_V6_48) -#define PAGE_OFFSET _AC(0xffff000000000000, UL) +#define PAGE_OFFSET (_AC(0xffff000000000000, UL) + PAGE_OFFSET_SHIFT) #elif defined(CONFIG_ARC_MMU_V6_52) -#define PAGE_OFFSET _AC(0xfff0000000000000, UL) +#define PAGE_OFFSET (_AC(0xfff0000000000000, UL) + PAGE_OFFSET_SHIFT) #else -#define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwrds */ +#define PAGE_OFFSET (_AC(0x80000000, UL) + PAGE_OFFSET_SHIFT) /* Kernel starts at 2G onwards */ #endif #define PAGE_MASK (~(PAGE_SIZE-1)) diff --git a/arch/arc/mm/tlb-arcv3.c b/arch/arc/mm/tlb-arcv3.c index cdfeb27f1bdf45..625ede13e5567a 100644 --- a/arch/arc/mm/tlb-arcv3.c +++ b/arch/arc/mm/tlb-arcv3.c @@ -338,10 +338,11 @@ int __init arc_map_memory_in_mm(struct mm_struct *mm) /* * Kernel (__pa(PAGE_OFFSET) to __pa(_end) is already mapped by - * arc_map_kernel_in_mm(), so map only >= __pa(_end). + * arc_map_kernel_in_mm(), so let's also map 'addr < __pa(PAGE_OFFSET)' + * and 'addr >= __pa(_end)'. * - * We expect that kernel is mapped to the start of physical memory, - * so start >= __pa(PAGE_OFFSET). + * Since the kernel is not always mapped to the start of physical + * memory, we also need to map the hole between start and CONFIG_LINUX_LINK_BASE.
*/ for_each_mem_range(i, &start, &end) { if (start >= end) @@ -350,6 +351,13 @@ int __init arc_map_memory_in_mm(struct mm_struct *mm) if (end <= __pa(_end)) continue; + if (start < CONFIG_LINUX_LINK_BASE) { + arc_map_segment_in_mm(mm, + (unsigned long)__va(start), + PAGE_OFFSET, + PAGE_KERNEL_RW); + } + if (start < __pa(_end)) start = __pa(_end); @@ -380,8 +388,6 @@ void __init arc_mmu_init(void) * Make sure that early mapping does not need more then one struct * per level (pgd/pud/pmd). */ - /* It is always true when PAGE_OFFSET is aligned to pmd. */ - BUILD_BUG_ON(pmd_index(PAGE_OFFSET) != 0); if (mmuinfo.pg_sz_k != TO_KB(PAGE_SIZE)) panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));