// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 SUSE LLC */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

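/*
 * These programs validate the verifier's precision backtracking
 * (mark_chain_precision): each one forces a register to be marked
 * precise and matches the expected backtracking steps in the
 * verifier log via __msg().
 */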
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
__naked int bpf_neg(void)
{
	asm volatile (
		"r2 = 8;"
		"r2 = -r2;"
		"if r2 != -8 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* mark_precise */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_le(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = le16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* mark_precise */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_be(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = be16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* mark_precise */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

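/*
 * BPF ISA v4 instructions (e.g. bswap) require Clang 18+ and a target
 * that supports them, hence the guard below.
 */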
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
    __clang_major__ >= 18

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_bswap(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = bswap16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* mark_precise */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}

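/*
 * Load-acquire/store-release are exercised only with the atomics tests
 * enabled on targets that support them (arm64, x86); see the guard below.
 */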
#if defined(ENABLE_ATOMICS_TESTS) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_load_acquire(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(load_acquire_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
		: __clobber_all);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_store_release(void)
{
	asm volatile (
		"r1 = 8;"
		".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
		"r1 = *(u64 *)(r10 - 8);"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(store_release_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
		: __clobber_all);
}

#endif /* load-acquire, store-release */
#endif /* v4 instruction */

SEC("?raw_tp")
__success __log_level(2)
/*
 * Without the bug fix, there would be no history between the
 * "last_idx 3 first_idx 3" and "parent state regs=" lines. The "R0_w=6"
 * parts are here to anchor the expected log messages to one specific
 * mark_chain_precision operation.
 *
 * This is quite fragile: if the verifier's checkpointing heuristic
 * changes, this might need adjusting.
 */
__msg("2: (07) r0 += 1 ; R0_w=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
__msg("3: R0_w=6")
__naked int state_loop_first_last_equal(void)
{
	asm volatile (
		"r0 = 0;"
	"l0_%=:"
		"r0 += 1;"
		"r0 += 1;"
		/* every few iterations we'll have a checkpoint here with
		 * first_idx == last_idx, potentially confusing precision
		 * backtracking logic
		 */
		"if r0 >= 10 goto l1_%=;"	/* checkpoint + mark_precise */
		"goto l0_%=;"
	"l1_%=:"
		"exit;"
		::: __clobber_common
	);
}

char _license[] SEC("license") = "GPL";