// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#define QDISC_OP_IDX(op) (offsetof(struct Qdisc_ops, op) / sizeof(void (*)(void)))
#define QDISC_MOFF_IDX(moff) (moff / sizeof(void (*)(void)))

static struct bpf_struct_ops bpf_Qdisc_ops;

struct bpf_sched_data {
        struct qdisc_watchdog watchdog;
};

struct bpf_sk_buff_ptr {
        struct sk_buff *skb;
};

static int bpf_qdisc_init(struct btf *btf)
{
        return 0;
}

BTF_ID_LIST_SINGLE(bpf_qdisc_ids, struct, Qdisc)
BTF_ID_LIST_SINGLE(bpf_sk_buff_ids, struct, sk_buff)
BTF_ID_LIST_SINGLE(bpf_sk_buff_ptr_ids, struct, bpf_sk_buff_ptr)

static bool bpf_qdisc_is_valid_access(int off, int size,
                                      enum bpf_access_type type,
                                      const struct bpf_prog *prog,
                                      struct bpf_insn_access_aux *info)
{
        struct btf *btf = prog->aux->attach_btf;
        u32 arg;

        arg = btf_ctx_arg_idx(btf, prog->aux->attach_func_proto, off);
        if (prog->aux->attach_st_ops_member_off == offsetof(struct Qdisc_ops, enqueue)) {
                if (arg == 2 && type == BPF_READ) {
                        info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED;
                        info->btf = btf;
                        info->btf_id = bpf_sk_buff_ptr_ids[0];
                        return true;
                }
        }

        return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_qdisc_qdisc_access(struct bpf_verifier_log *log,
                                  const struct bpf_reg_state *reg,
                                  int off, size_t *end)
{
        switch (off) {
        case offsetof(struct Qdisc, limit):
                *end = offsetofend(struct Qdisc, limit);
                break;
        case offsetof(struct Qdisc, q) + offsetof(struct qdisc_skb_head, qlen):
                *end = offsetof(struct Qdisc, q) + offsetofend(struct qdisc_skb_head, qlen);
                break;
        case offsetof(struct Qdisc, qstats) ... offsetofend(struct Qdisc, qstats) - 1:
                *end = offsetofend(struct Qdisc, qstats);
                break;
        default:
                return -EACCES;
        }

        return 0;
}

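/* Writable sk_buff fields for BPF qdisc programs are whitelisted here:
 * only tstamp and the private qdisc cb area may be written.
 */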
static int bpf_qdisc_sk_buff_access(struct bpf_verifier_log *log,
                                    const struct bpf_reg_state *reg,
                                    int off, size_t *end)
{
        switch (off) {
        case offsetof(struct sk_buff, tstamp):
                *end = offsetofend(struct sk_buff, tstamp);
                break;
        case offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data[0]) ...
             offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb,
                                                     data[QDISC_CB_PRIV_LEN - 1]):
                *end = offsetof(struct sk_buff, cb) +
                       offsetofend(struct qdisc_skb_cb, data[QDISC_CB_PRIV_LEN - 1]);
                break;
        default:
                return -EACCES;
        }

        return 0;
}

static int bpf_qdisc_btf_struct_access(struct bpf_verifier_log *log,
                                       const struct bpf_reg_state *reg,
                                       int off, int size)
{
        const struct btf_type *t, *skbt, *qdisct;
        size_t end;
        int err;

        skbt = btf_type_by_id(reg->btf, bpf_sk_buff_ids[0]);
        qdisct = btf_type_by_id(reg->btf, bpf_qdisc_ids[0]);
        t = btf_type_by_id(reg->btf, reg->btf_id);

        if (t == skbt) {
                err = bpf_qdisc_sk_buff_access(log, reg, off, &end);
        } else if (t == qdisct) {
                err = bpf_qdisc_qdisc_access(log, reg, off, &end);
        } else {
                bpf_log(log, "only read is supported\n");
                return -EACCES;
        }

        if (err) {
                bpf_log(log, "no write support to %s at off %d\n",
                        btf_name_by_offset(reg->btf, t->name_off), off);
                return -EACCES;
        }

        if (off + size > end) {
                bpf_log(log,
                        "write access at off %d with size %d beyond the member of %s ended at %zu\n",
                        off, size, btf_name_by_offset(reg->btf, t->name_off), end);
                return -EACCES;
        }

        return 0;
}

BTF_ID_LIST(bpf_qdisc_init_prologue_ids)
BTF_ID(func, bpf_qdisc_init_prologue)

static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
                                  const struct bpf_prog *prog)
{
        struct bpf_insn *insn = insn_buf;

        if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, init))
                return 0;

        /* r6 = r1; // r6 will be "u64 *ctx". r1 is "u64 *ctx".
         * r2 = r1[16]; // r2 will be "struct netlink_ext_ack *extack"
         * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
         * r0 = bpf_qdisc_init_prologue(r1, r2);
         * if r0 == 0 goto pc+1;
         * BPF_EXIT;
         * r1 = r6; // r1 will be "u64 *ctx".
         */
        *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 16);
        *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
        *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_init_prologue_ids[0]);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1);
        *insn++ = BPF_EXIT_INSN();
        *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
        *insn++ = prog->insnsi[0];

        return insn - insn_buf;
}

BTF_ID_LIST(bpf_qdisc_reset_destroy_epilogue_ids)
BTF_ID(func, bpf_qdisc_reset_destroy_epilogue)

static int bpf_qdisc_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
                                  s16 ctx_stack_off)
{
        struct bpf_insn *insn = insn_buf;

        if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, reset) &&
            prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, destroy))
                return 0;

        /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
         * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
         * r0 = bpf_qdisc_reset_destroy_epilogue(r1);
         * BPF_EXIT;
         */
        *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
        *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
        *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_reset_destroy_epilogue_ids[0]);
        *insn++ = BPF_EXIT_INSN();

        return insn - insn_buf;
}

__bpf_kfunc_start_defs();

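/* kfuncs callable from BPF qdisc programs. bpf_qdisc_kfunc_filter() below
 * further restricts which Qdisc_ops member may call which kfunc.
 */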
/* bpf_skb_get_hash - Get the flow hash of an skb.
 * @skb: The skb to get the flow hash from.
 */
__bpf_kfunc u32 bpf_skb_get_hash(struct sk_buff *skb)
{
        return skb_get_hash(skb);
}

/* bpf_kfree_skb - Release an skb's reference and drop it immediately.
 * @skb: The skb whose reference will be released and dropped.
 */
__bpf_kfunc void bpf_kfree_skb(struct sk_buff *skb)
{
        kfree_skb(skb);
}

/* bpf_qdisc_skb_drop - Drop an skb by adding it to a deferred free list.
 * @skb: The skb whose reference will be released and dropped.
 * @to_free_list: The list of skbs to be dropped.
 */
__bpf_kfunc void bpf_qdisc_skb_drop(struct sk_buff *skb,
                                    struct bpf_sk_buff_ptr *to_free_list)
{
        __qdisc_drop(skb, (struct sk_buff **)to_free_list);
}

/* bpf_qdisc_watchdog_schedule - Schedule a qdisc to a later time using a timer.
 * @sch: The qdisc to be scheduled.
 * @expire: The expiry time of the timer.
 * @delta_ns: The slack range of the timer.
 */
__bpf_kfunc void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire, u64 delta_ns)
{
        struct bpf_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_schedule_range_ns(&q->watchdog, expire, delta_ns);
}

/* bpf_qdisc_init_prologue - Hidden kfunc called in prologue of .init. */
__bpf_kfunc int bpf_qdisc_init_prologue(struct Qdisc *sch,
                                        struct netlink_ext_ack *extack)
{
        struct bpf_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct Qdisc *p;

        qdisc_watchdog_init(&q->watchdog, sch);

        if (sch->parent != TC_H_ROOT) {
                /* If qdisc_lookup() returns NULL, it means .init is called by
                 * qdisc_create_dflt() in mq/mqprio_init and the parent qdisc
                 * has not been added to qdisc_hash yet.
                 */
                p = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
                if (p && !(p->flags & TCQ_F_MQROOT)) {
                        NL_SET_ERR_MSG(extack, "BPF qdisc only supported on root or mq");
                        return -EINVAL;
                }
        }

        return 0;
}

/* bpf_qdisc_reset_destroy_epilogue - Hidden kfunc called in epilogue of .reset
 * and .destroy.
 */
__bpf_kfunc void bpf_qdisc_reset_destroy_epilogue(struct Qdisc *sch)
{
        struct bpf_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
}

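/* The hidden prologue/epilogue kfuncs above pair up: .init initializes the
 * watchdog via bpf_qdisc_init_prologue(), and .reset/.destroy cancel it via
 * bpf_qdisc_reset_destroy_epilogue(), so a BPF qdisc never has to manage the
 * watchdog lifetime itself.
 */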
/* bpf_qdisc_bstats_update - Update Qdisc basic statistics
 * @sch: The qdisc from which an skb is dequeued.
 * @skb: The skb to be dequeued.
 */
__bpf_kfunc void bpf_qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(qdisc_kfunc_ids)
BTF_ID_FLAGS(func, bpf_skb_get_hash, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfree_skb, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_qdisc_skb_drop, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_watchdog_schedule, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_init_prologue, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_reset_destroy_epilogue, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_bstats_update, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(qdisc_kfunc_ids)

BTF_SET_START(qdisc_common_kfunc_set)
BTF_ID(func, bpf_skb_get_hash)
BTF_ID(func, bpf_kfree_skb)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_SET_END(qdisc_common_kfunc_set)

BTF_SET_START(qdisc_enqueue_kfunc_set)
BTF_ID(func, bpf_qdisc_skb_drop)
BTF_ID(func, bpf_qdisc_watchdog_schedule)
BTF_SET_END(qdisc_enqueue_kfunc_set)

BTF_SET_START(qdisc_dequeue_kfunc_set)
BTF_ID(func, bpf_qdisc_watchdog_schedule)
BTF_ID(func, bpf_qdisc_bstats_update)
BTF_SET_END(qdisc_dequeue_kfunc_set)

enum qdisc_ops_kf_flags {
        QDISC_OPS_KF_COMMON = 0,
        QDISC_OPS_KF_ENQUEUE = 1 << 0,
        QDISC_OPS_KF_DEQUEUE = 1 << 1,
};

static const u32 qdisc_ops_context_flags[] = {
        [QDISC_OP_IDX(enqueue)] = QDISC_OPS_KF_ENQUEUE,
        [QDISC_OP_IDX(dequeue)] = QDISC_OPS_KF_DEQUEUE,
        [QDISC_OP_IDX(init)] = QDISC_OPS_KF_COMMON,
        [QDISC_OP_IDX(reset)] = QDISC_OPS_KF_COMMON,
        [QDISC_OP_IDX(destroy)] = QDISC_OPS_KF_COMMON,
};

static int bpf_qdisc_kfunc_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
        u32 moff, flags;

        if (!btf_id_set8_contains(&qdisc_kfunc_ids, kfunc_id))
                return 0;

        if (prog->aux->st_ops != &bpf_Qdisc_ops)
                return -EACCES;

        moff = prog->aux->attach_st_ops_member_off;
        flags = qdisc_ops_context_flags[QDISC_MOFF_IDX(moff)];

        if ((flags & QDISC_OPS_KF_ENQUEUE) &&
            btf_id_set_contains(&qdisc_enqueue_kfunc_set, kfunc_id))
                return 0;

        if ((flags & QDISC_OPS_KF_DEQUEUE) &&
            btf_id_set_contains(&qdisc_dequeue_kfunc_set, kfunc_id))
                return 0;

        if (btf_id_set_contains(&qdisc_common_kfunc_set, kfunc_id))
                return 0;

        return -EACCES;
}

static const struct btf_kfunc_id_set bpf_qdisc_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &qdisc_kfunc_ids,
        .filter = bpf_qdisc_kfunc_filter,
};

static const struct bpf_verifier_ops bpf_qdisc_verifier_ops = {
        .get_func_proto = bpf_base_func_proto,
        .is_valid_access = bpf_qdisc_is_valid_access,
        .btf_struct_access = bpf_qdisc_btf_struct_access,
        .gen_prologue = bpf_qdisc_gen_prologue,
        .gen_epilogue = bpf_qdisc_gen_epilogue,
};

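/* Fill in Qdisc_ops members the kernel must control. Returning 1 tells the
 * struct_ops infrastructure that the member has been handled here; returning
 * 0 defers to the generic struct_ops handling.
 */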
static int bpf_qdisc_init_member(const struct btf_type *t,
                                 const struct btf_member *member,
                                 void *kdata, const void *udata)
{
        const struct Qdisc_ops *uqdisc_ops;
        struct Qdisc_ops *qdisc_ops;
        u32 moff;

        uqdisc_ops = (const struct Qdisc_ops *)udata;
        qdisc_ops = (struct Qdisc_ops *)kdata;

        moff = __btf_member_bit_offset(t, member) / 8;
        switch (moff) {
        case offsetof(struct Qdisc_ops, priv_size):
                if (uqdisc_ops->priv_size)
                        return -EINVAL;
                qdisc_ops->priv_size = sizeof(struct bpf_sched_data);
                return 1;
        case offsetof(struct Qdisc_ops, peek):
                qdisc_ops->peek = qdisc_peek_dequeued;
                return 0;
        case offsetof(struct Qdisc_ops, id):
                if (bpf_obj_name_cpy(qdisc_ops->id, uqdisc_ops->id,
                                     sizeof(qdisc_ops->id)) <= 0)
                        return -EINVAL;
                return 1;
        }

        return 0;
}

static int bpf_qdisc_reg(void *kdata, struct bpf_link *link)
{
        return register_qdisc(kdata);
}

static void bpf_qdisc_unreg(void *kdata, struct bpf_link *link)
{
        return unregister_qdisc(kdata);
}

static int bpf_qdisc_validate(void *kdata)
{
        struct Qdisc_ops *ops = (struct Qdisc_ops *)kdata;

        if (!ops->enqueue || !ops->dequeue || !ops->init ||
            !ops->reset || !ops->destroy)
                return -EINVAL;

        return 0;
}

/* Stub implementations below are never executed; they only provide the
 * function signatures used as CFI targets for the struct_ops trampolines.
 */
static int Qdisc_ops__enqueue(struct sk_buff *skb__ref, struct Qdisc *sch,
                              struct sk_buff **to_free)
{
        return 0;
}

static struct sk_buff *Qdisc_ops__dequeue(struct Qdisc *sch)
{
        return NULL;
}

static int Qdisc_ops__init(struct Qdisc *sch, struct nlattr *arg,
                           struct netlink_ext_ack *extack)
{
        return 0;
}

static void Qdisc_ops__reset(struct Qdisc *sch)
{
}

static void Qdisc_ops__destroy(struct Qdisc *sch)
{
}

static struct Qdisc_ops __bpf_ops_qdisc_ops = {
        .enqueue = Qdisc_ops__enqueue,
        .dequeue = Qdisc_ops__dequeue,
        .init = Qdisc_ops__init,
        .reset = Qdisc_ops__reset,
        .destroy = Qdisc_ops__destroy,
};

static struct bpf_struct_ops bpf_Qdisc_ops = {
        .verifier_ops = &bpf_qdisc_verifier_ops,
        .reg = bpf_qdisc_reg,
        .unreg = bpf_qdisc_unreg,
        .validate = bpf_qdisc_validate,
        .init_member = bpf_qdisc_init_member,
        .init = bpf_qdisc_init,
        .name = "Qdisc_ops",
        .cfi_stubs = &__bpf_ops_qdisc_ops,
        .owner = THIS_MODULE,
};

BTF_ID_LIST(bpf_sk_buff_dtor_ids)
BTF_ID(func, bpf_kfree_skb)

static int __init bpf_qdisc_kfunc_init(void)
{
        int ret;
        const struct btf_id_dtor_kfunc skb_kfunc_dtors[] = {
                {
                        .btf_id = bpf_sk_buff_ids[0],
                        .kfunc_btf_id = bpf_sk_buff_dtor_ids[0]
                },
        };

        ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_qdisc_kfunc_set);
        ret = ret ?: register_btf_id_dtor_kfuncs(skb_kfunc_dtors,
                                                 ARRAY_SIZE(skb_kfunc_dtors),
                                                 THIS_MODULE);
        ret = ret ?: register_bpf_struct_ops(&bpf_Qdisc_ops, Qdisc_ops);

        return ret;
}
late_initcall(bpf_qdisc_kfunc_init);
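
/* Illustrative sketch (not part of this file): what a minimal FIFO-style
 * BPF qdisc could look like from the BPF program side, using the kfuncs and
 * writable fields exposed above. Program and map names are hypothetical;
 * queueing of the skb itself is elided.
 *
 *	SEC("struct_ops/bpf_fifo_enqueue")
 *	int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
 *		     struct bpf_sk_buff_ptr *to_free)
 *	{
 *		if (sch->q.qlen == sch->limit) {
 *			bpf_qdisc_skb_drop(skb, to_free);
 *			return NET_XMIT_DROP;
 *		}
 *		...store the skb, e.g. in a BPF map...
 *		sch->q.qlen++;
 *		return NET_XMIT_SUCCESS;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct Qdisc_ops fifo = {
 *		.enqueue = (void *)bpf_fifo_enqueue,
 *		...
 *		.id = "bpf_fifo",
 *	};
 */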