Commit 9fbd18c

Chenghao Duan authored and chenhuacai committed
LoongArch: BPF: Add dynamic code modification support
This commit adds support for BPF dynamic code modification on the
LoongArch architecture:

1. Add bpf_arch_text_copy() for instruction block copying.
2. Add bpf_arch_text_poke() for runtime instruction patching.
3. Add bpf_arch_text_invalidate() for code invalidation.

On LoongArch, symbol addresses in the direct mapping region cannot be
reached via relative jump instructions from the paged mapping region, so
we use the move_imm+jirl instruction pair as an absolute jump. This pair
requires 2-5 instructions, so we reserve 5 NOP instructions in the
program as placeholders for function jumps.

The larch_insn_text_copy() function is used solely for BPF, and it
requires the destination range to be PAGE_SIZE aligned. Currently, only
the size of the BPF trampoline is page-aligned.

Co-developed-by: George Guo <[email protected]>
Signed-off-by: George Guo <[email protected]>
Signed-off-by: Chenghao Duan <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
1 parent ed1a1fe commit 9fbd18c
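For illustration, the absolute jump that replaces the reserved NOPs could look like the sequence below. This is a hedged sketch, not part of the commit: the exact instructions come from the JIT's move_imm() helper, which emits only as many immediate-building instructions as the target address requires (hence 2-5 instructions), with any unused slots left as NOPs.

	lu12i.w  $t1, addr[31:12]       # bits 31:12 of the target
	ori      $t1, $t1, addr[11:0]   # bits 11:0
	lu32i.d  $t1, addr[51:32]       # bits 51:32
	lu52i.d  $t1, $t1, addr[63:52]  # bits 63:52
	jirl     $t0, $t1, 0            # jump; $t0 links for calls, $zero for plain jumps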

File tree: 3 files changed (+151, -1)

  arch/loongarch/include/asm/inst.h
  arch/loongarch/kernel/inst.c
  arch/loongarch/net/bpf_jit.c


arch/loongarch/include/asm/inst.h (1 addition, 0 deletions)

@@ -497,6 +497,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs);
 int larch_insn_read(void *addr, u32 *insnp);
 int larch_insn_write(void *addr, u32 insn);
 int larch_insn_patch_text(void *addr, u32 insn);
+int larch_insn_text_copy(void *dst, void *src, size_t len);
 
 u32 larch_insn_gen_nop(void);
 u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);

arch/loongarch/kernel/inst.c (46 additions, 0 deletions)

@@ -4,6 +4,8 @@
  */
 #include <linux/sizes.h>
 #include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/inst.h>
@@ -218,6 +220,50 @@ int larch_insn_patch_text(void *addr, u32 insn)
 	return ret;
 }
 
+struct insn_copy {
+	void *dst;
+	void *src;
+	size_t len;
+	unsigned int cpu;
+};
+
+static int text_copy_cb(void *data)
+{
+	int ret = 0;
+	struct insn_copy *copy = data;
+
+	if (smp_processor_id() == copy->cpu) {
+		ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
+		if (ret)
+			pr_err("%s: operation failed\n", __func__);
+	}
+
+	flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
+
+	return ret;
+}
+
+int larch_insn_text_copy(void *dst, void *src, size_t len)
+{
+	int ret = 0;
+	size_t start, end;
+	struct insn_copy copy = {
+		.dst = dst,
+		.src = src,
+		.len = len,
+		.cpu = smp_processor_id(),
+	};
+
+	start = round_down((size_t)dst, PAGE_SIZE);
+	end = round_up((size_t)dst + len, PAGE_SIZE);
+
+	set_memory_rw(start, (end - start) / PAGE_SIZE);
+	ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+	set_memory_rox(start, (end - start) / PAGE_SIZE);
+
+	return ret;
+}
+
 u32 larch_insn_gen_nop(void)
 {
 	return INSN_NOP;
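Note the design here: stop_machine() parks all online CPUs while the initiating CPU performs the write, then every CPU flushes its instruction cache over the patched range, so no CPU can execute a half-written instruction. A minimal caller sketch under the commit's stated constraints (patch_bpf_text() and its arguments are hypothetical; the destination must lie in a PAGE_SIZE-aligned allocation because the set_memory_rw()/set_memory_rox() calls flip permissions on whole pages):

	/* Hypothetical helper: replace `len` bytes of JITed text at `dst`.
	 * Currently only the BPF trampoline is allocated page-aligned. */
	static int patch_bpf_text(void *dst, void *src, size_t len)
	{
		int ret;

		mutex_lock(&text_mutex);	/* serialize with other text patchers */
		ret = larch_insn_text_copy(dst, src, len);
		mutex_unlock(&text_mutex);

		return ret;
	}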

arch/loongarch/net/bpf_jit.c (104 additions, 1 deletion)

@@ -4,8 +4,12 @@
  *
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
+#include <linux/memory.h>
 #include "bpf_jit.h"
 
+#define LOONGARCH_LONG_JUMP_NINSNS 5
+#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
+
 #define REG_TCC LOONGARCH_GPR_A6
 #define TCC_SAVED LOONGARCH_GPR_S5
 
@@ -88,7 +92,7 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
  */
 static void build_prologue(struct jit_ctx *ctx)
 {
-	int stack_adjust = 0, store_offset, bpf_stack_adjust;
+	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
 
 	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
 
@@ -98,6 +102,10 @@ static void build_prologue(struct jit_ctx *ctx)
 	stack_adjust = round_up(stack_adjust, 16);
 	stack_adjust += bpf_stack_adjust;
 
+	/* Reserve space for the move_imm + jirl instructions */
+	for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+		emit_insn(ctx, nop);
+
 	/*
 	 * First instruction initializes the tail call count (TCC).
 	 * On tail call we skip this instruction, and the TCC is
@@ -1194,6 +1202,101 @@ static int validate_ctx(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
+{
+	if (!target) {
+		pr_err("bpf_jit: jump target address is invalid\n");
+		return -EFAULT;
+	}
+
+	move_imm(ctx, LOONGARCH_GPR_T1, target, false);
+	emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
+
+	return 0;
+}
+
+static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
+{
+	int i;
+	struct jit_ctx ctx;
+
+	ctx.idx = 0;
+	ctx.image = (union loongarch_instruction *)insns;
+
+	if (!target) {
+		for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+			emit_insn((&ctx), nop);
+		return 0;
+	}
+
+	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+	int ret;
+
+	mutex_lock(&text_mutex);
+	ret = larch_insn_text_copy(dst, src, len);
+	mutex_unlock(&text_mutex);
+
+	return ret ? ERR_PTR(-EINVAL) : dst;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+		       void *old_addr, void *new_addr)
+{
+	int ret;
+	bool is_call = (poke_type == BPF_MOD_CALL);
+	u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+	u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+
+	if (!is_kernel_text((unsigned long)ip) &&
+	    !is_bpf_text_address((unsigned long)ip))
+		return -ENOTSUPP;
+
+	ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
+	if (ret)
+		return ret;
+
+	if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
+		return -EFAULT;
+
+	ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
+	if (ret)
+		return ret;
+
+	mutex_lock(&text_mutex);
+	if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
+		ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
+	mutex_unlock(&text_mutex);
+
+	return ret;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+	int i;
+	int ret = 0;
+	u32 *inst;
+
+	inst = kvmalloc(len, GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	for (i = 0; i < (len / sizeof(u32)); i++)
+		inst[i] = INSN_BREAK;
+
+	mutex_lock(&text_mutex);
+	if (larch_insn_text_copy(dst, inst, len))
+		ret = -EINVAL;
+	mutex_unlock(&text_mutex);
+
+	kvfree(inst);
+
+	return ret;
+}
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	bool tmp_blinded = false, extra_pass = false;
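For context, a hedged sketch of how the generic BPF core would drive bpf_arch_text_poke() when attaching and then detaching a trampoline at a patch site. `ip` and `tramp` are hypothetical; per emit_jump_or_nops() above, a NULL address on either side stands for the 5-NOP state:

	/* Hypothetical attach/detach sequence at a poke site `ip` that
	 * currently holds the five reserved prologue NOPs. */
	int err;

	/* NOPs -> call tramp (old_addr == NULL means "ip holds NOPs") */
	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp);

	/* later: call tramp -> NOPs (new_addr == NULL restores NOPs) */
	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, tramp, NULL);

Note that bpf_arch_text_poke() verifies the old instruction block with memcmp() before writing the new one, so a stale or mismatched expectation fails with -EFAULT rather than corrupting live text.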
