x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h

jira LE-1907
cve CVE-2024-26906
Rebuild_History Non-Buildable kernel-4.18.0-553.8.1.el8_10
commit-author Hou Tao <[email protected]>
commit ee0e39a63b78849f8abbef268b13e4838569f646
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-4.18.0-553.8.1.el8_10/ee0e39a6.failed

Move is_vsyscall_vaddr() into asm/vsyscall.h to make it available for
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c.

 Reviewed-by: Sohil Mehta <[email protected]>
 Signed-off-by: Hou Tao <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
 Signed-off-by: Alexei Starovoitov <[email protected]>
(cherry picked from commit ee0e39a63b78849f8abbef268b13e4838569f646)
 Signed-off-by: Jonathan Maple <[email protected]>
# Conflicts:
#	arch/x86/mm/fault.c
diff --cc arch/x86/mm/fault.c
index 929bfb03e31a,d6375b3c633b..000000000000
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -823,15 -798,6 +823,18 @@@ show_signal_msg(struct pt_regs *regs, u
  	show_opcodes(regs, loglvl);
  }
  
++<<<<<<< HEAD
 +/*
 + * The (legacy) vsyscall page is the long page in the kernel portion
 + * of the address space that has user-accessible permissions.
 + */
 +static bool is_vsyscall_vaddr(unsigned long vaddr)
 +{
 +	return (vaddr & PAGE_MASK) == VSYSCALL_ADDR;
 +}
 +
++=======
++>>>>>>> ee0e39a63b78 (x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h)
  static void
  __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
  		       unsigned long address, u32 pkey, int si_code)
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index b986b2ca688a..8154b25cb975 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,6 +4,7 @@
 
 #include <linux/seqlock.h>
 #include <uapi/asm/vsyscall.h>
+#include <asm/page_types.h>
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
@@ -22,4 +23,13 @@ static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 }
 #endif
 
+/*
+ * The (legacy) vsyscall page is the long page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
 #endif /* _ASM_X86_VSYSCALL_H */
* Unmerged path arch/x86/mm/fault.c
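
For context on why the helper is being moved: below is a minimal sketch of how
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c could use
is_vsyscall_vaddr() once it lives in asm/vsyscall.h (this is the follow-up
CVE-2024-26906 change this commit prepares for). The body shown is illustrative
only and is not part of this commit or of the conflict above; in particular the
TASK_SIZE_MAX guard-page check and the trailing return are assumptions about the
surrounding logic.

	/* Illustrative sketch -- not the actual upstream/RHEL hunk. */
	#include <linux/uaccess.h>
	#include <asm/vsyscall.h>	/* now provides is_vsyscall_vaddr() */

	bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
	{
		unsigned long vaddr = (unsigned long)unsafe_src;

		/* Reject ordinary userspace addresses and the guard page. */
		if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
			return false;

		/*
		 * The vsyscall page sits in the kernel half of the address
		 * space but is treated as a user address; reading it through
		 * copy_from_kernel_nofault() can fault, so refuse it here.
		 */
		if (is_vsyscall_vaddr(vaddr))
			return false;

		return true;
	}

The only point being illustrated is that, with is_vsyscall_vaddr() exported via
asm/vsyscall.h, maccess.c can reuse it instead of duplicating the PAGE_MASK
comparison that previously lived only in arch/x86/mm/fault.c.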