/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

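/*
 * JIT context carried across the build passes. A rough guide to the
 * fields (see bpf_jit.c for the authoritative usage): @idx is the index
 * of the next LoongArch instruction to emit, @offset maps each BPF
 * instruction to the index of its first LoongArch instruction,
 * @epilogue_offset is where the shared epilogue starts, and @image /
 * @ro_image are the writable and final read-only instruction buffers.
 */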
struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
	int num_exentries;
	union loongarch_instruction *image;
	union loongarch_instruction *ro_image;
	u32 stack_size;
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

static inline void emit_nop(union loongarch_instruction *insn)
{
	insn->word = INSN_NOP;
}

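/*
 * Two-pass emission: while ctx->image is NULL the macro only advances
 * ctx->idx, so a first pass can size the image; once the buffer is
 * allocated, a second pass writes the instructions for real. As an
 * illustration, emit_insn(ctx, addiw, rd, rj, 0) expands to a call to
 * emit_addiw(insn, rd, rj, 0), one of the emitters generated in
 * <asm/inst.h>.
 */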
#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)

#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)

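/*
 * Convert a BPF jump offset into a LoongArch one, in units of LoongArch
 * instructions. Illustrative example (assuming ctx->offset[] holds the
 * index of the first LoongArch instruction of each BPF instruction): a
 * BPF branch at index i with off = 2 targets BPF instruction i + 3, so
 * the result is ctx->offset[i + 3] minus the index of the branch
 * itself, which is the last instruction emitted for i, i.e.
 * ctx->offset[i + 1] - 1.
 */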
static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions encode the offset relative to
	 * the branch itself, so subtract 1 to point back at the branch,
	 * which is the last instruction emitted for the current BPF
	 * instruction.
	 */
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

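/*
 * The two helpers below lean on LoongArch instruction semantics:
 * "lu32i.d rd, 0" writes zeroes into bits 63:32 of rd (the 20-bit
 * immediate, here 0, is sign-extended into the upper word), and
 * "addi.w rd, rd, 0" sign-extends the low 32 bits of rd into 64 bits.
 */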
/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, lu32id, reg, 0);
}

/* Sign-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, addiw, reg, reg, 0);
}

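/*
 * Build a full 64-bit address in rd with the canonical 4-instruction
 * sequence. Worked example (illustrative) for addr = 0x9000000012345678:
 *
 *   lu12iw rd, 0x12345      rd = 0x0000000012345000
 *   ori    rd, rd, 0x678    rd = 0x0000000012345678
 *   lu32id rd, 0x00000      rd = 0x0000000012345678
 *   lu52id rd, rd, 0x900    rd = 0x9000000012345678
 */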
static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (addr >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = addr & 0xfff;
	emit_insn(ctx, ori, rd, rd, imm_11_0);

	/* lu32id rd, imm_51_32 */
	imm_51_32 = (addr >> 32) & 0xfffff;
	emit_insn(ctx, lu32id, rd, imm_51_32);

	/* lu52id rd, rd, imm_63_52 */
	imm_63_52 = (addr >> 52) & 0xfff;
	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
}

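/*
 * Load an arbitrary immediate, preferring the shortest sequence: zero
 * uses a single "or" from $zero; values that fit in a signed or
 * unsigned 12-bit immediate need one instruction; values with only
 * bits 63:52 set need just lu52id; everything else builds the low
 * 32 bits with lu12iw/ori and appends lu32id/lu52id only when the
 * upper bits are not already correct by sign extension.
 */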
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bits 51:31 are all 0 or all 1, bits 51:32
			 * already hold the sign extension produced by
			 * lu12iw, so no lu32id is needed to fill them.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}

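/* Register-to-register move: "or rd, rj, $zero" is the canonical idiom */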
static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

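/*
 * Return the BPF condition that tests the opposite of @cond, used to
 * synthesize far branches (see cond_jmp_offs26() below). BPF_JSET has
 * no direct inverse here: the JIT is expected to have reduced it to a
 * compare against zero, so it inverts to BPF_JEQ just as BPF_JNE does.
 */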
static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

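/*
 * Far conditional branch: LoongArch has no conditional branch with a
 * 26-bit offset, so invert the condition to hop over an unconditional
 * "b", which does have 26 bits of range. The inverted branch skips
 * 2 instructions, i.e. past the "b". Sketch for "beq rj, rd, far":
 *
 *   bne rj, rd, 2	// fall through to "b" only when rj == rd
 *   b   far
 */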
static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}

static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field
	 * of the native conditional branch instruction, which would force a
	 * conversion to an absolute jump; that jump sequence is particularly
	 * nasty. For now, use cond_jmp_offs26() directly to keep things
	 * simple. In the future we could add support for far branching, but
	 * branch relaxation requires more than two passes to converge and
	 * the resulting code is hard to follow, so it is not clear the extra
	 * pain is worth it. Leave it as it is for readability.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

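/*
 * Tail-call branches only ever jump a short, fixed distance within the
 * generated tail-call sequence, so the 16-bit conditional branch range
 * is assumed to be sufficient and no far-branch fallback is provided.
 */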
static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

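/*
 * Make the freshly written instructions visible to instruction fetch
 * before the program is run.
 */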
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
317