/linux/drivers/net/wireguard/selftest/

allowedips.c
    459: #define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \    [macro]
    516: insert(4, a, 192, 168, 4, 0, 24);    [in wg_allowedips_selftest()]
    517: insert(4, b, 192, 168, 4, 4, 32);    [in wg_allowedips_selftest()]
    518: insert(4, c, 192, 168, 0, 0, 16);    [in wg_allowedips_selftest()]
    519: insert(4, d, 192, 95, 5, 64, 27);    [in wg_allowedips_selftest()]
    521: insert(4, c, 192, 95, 5, 65, 27);    [in wg_allowedips_selftest()]
    522: insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);    [in wg_allowedips_selftest()]
    523: insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);    [in wg_allowedips_selftest()]
    524: insert(4, e, 0, 0, 0, 0, 0);    [in wg_allowedips_selftest()]
    525: insert( ...    [in wg_allowedips_selftest()]
    ...
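The selftest lines above populate the allowedips trie with (address, prefix) entries pointing at peers a through e and then verify longest-prefix lookups, including a /0 catch-all. As a rough, self-contained illustration of the matching rule being exercised (this is not WireGuard code; the helper name and sample addresses are only illustrative), here is a sketch of testing whether an IPv4 address falls inside a CIDR prefix:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: does addr fall inside net/cidr? (host byte order) */
    bool ipv4_prefix_matches(uint32_t addr, uint32_t net, unsigned cidr)
    {
        /* cidr == 0 means "match everything"; avoid shifting by 32 */
        uint32_t mask = cidr ? ~(uint32_t)0 << (32 - cidr) : 0;

        return (addr & mask) == (net & mask);
    }

    int main(void)
    {
        uint32_t host = (192u << 24) | (168u << 16) | (4u << 8) | 4;  /* 192.168.4.4 */
        uint32_t net  = (192u << 24) | (168u << 16) | (4u << 8);      /* 192.168.4.0 */

        /* 192.168.4.4 is inside 192.168.4.0/24, and a /32 on .4 matches too;
         * a longest-prefix lookup would prefer the more specific /32 entry. */
        printf("%d %d\n", ipv4_prefix_matches(host, net, 24),
                          ipv4_prefix_matches(host, host, 32));
        return 0;
    }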
/linux/fs/bcachefs/

data_update.c
    115: struct bkey_i *insert)    [trace_io_move_finish2() argument]
    129: prt_str_indented(&buf, "insert:\t");    [in trace_io_move_finish2()]
    130: bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));    [in trace_io_move_finish2()]
    141: struct bkey_i *insert,    [trace_io_move_fail2() argument]
    154: if (insert) {    [in trace_io_move_fail2()]
    162: (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&    [in trace_io_move_fail2()]
    184: if (insert) {    [in trace_io_move_fail2()]
    186: bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));    [in trace_io_move_fail2()]
    196: struct bkey_i *insert)    [trace_data_update2() argument]
    206: bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));    [in trace_data_update2()]
    215: trace_io_move_created_rebalance2(struct data_update *m, struct bkey_s_c old, struct bkey_s_c k, struct bkey_i *insert)    [trace_io_move_created_rebalance2() argument]
    238: data_update_invalid_bkey(struct data_update *m, struct bkey_s_c old, struct bkey_s_c k, struct bkey_i *insert)    [data_update_invalid_bkey() argument]
    277: struct bkey_i *insert = NULL;    [__bch2_data_update_index_update() local]
    ...
str_hash.h
    259: struct bkey_i *insert,    [bch2_hash_set_or_get_in_snapshot() argument]
    269: SPOS(insert->k.p.inode,    [in bch2_hash_set_or_get_in_snapshot()]
    270: desc.hash_bkey(info, bkey_i_to_s_c(insert)),    [in bch2_hash_set_or_get_in_snapshot()]
    272: POS(insert->k.p.inode, U64_MAX),    [in bch2_hash_set_or_get_in_snapshot()]
    275: if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))    [in bch2_hash_set_or_get_in_snapshot()]
    307: insert->k.p = iter->pos;    [in bch2_hash_set_or_get_in_snapshot()]
    308: ret = bch2_trans_update(trans, iter, insert, flags);    [in bch2_hash_set_or_get_in_snapshot()]
    319: struct bkey_i *insert,    [bch2_hash_set_in_snapshot() argument]
    324: snapshot, insert, flags);    [in bch2_hash_set_in_snapshot()]
    341: struct bkey_i *insert,    [bch2_hash_set() argument]
    ...
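bch2_hash_set_or_get_in_snapshot() probes slots in the btree starting at the key's hash, hash_bkey(insert), and walks forward up to U64_MAX: if a key that compares equal is found it is returned, otherwise the new key is written into the first free slot. A loose userspace analogue of that probe-then-insert loop over a flat array (toy types and names, nothing here is bcachefs API):

    #include <stdint.h>

    #define TABLE_SLOTS 64                  /* illustrative fixed-size table */

    struct slot {
        int used;
        uint64_t key;
    };

    /*
     * Set-or-get with linear probing: walk slots starting at hash(key); if an
     * equal key is found return its index ("get"), otherwise claim the first
     * free slot ("set").  Returns -1 if the table is full.  The real code does
     * this over btree positions inside a transaction, not an array.
     */
    int hash_set_or_get(struct slot table[TABLE_SLOTS], uint64_t key)
    {
        unsigned start = key % TABLE_SLOTS;

        for (unsigned i = 0; i < TABLE_SLOTS; i++) {
            unsigned pos = (start + i) % TABLE_SLOTS;

            if (table[pos].used && table[pos].key == key)
                return pos;                 /* already present */
            if (!table[pos].used) {
                table[pos].used = 1;
                table[pos].key = key;
                return pos;                 /* inserted */
            }
        }
        return -1;
    }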
btree_update.c
    36:  struct bkey_i **insert,    [extent_front_merge() argument]
    51:  if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))    [in extent_front_merge()]
    55:  bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);    [in extent_front_merge()]
    65:  *insert = update;    [in extent_front_merge()]
    71:  struct bkey_i *insert,    [extent_back_merge() argument]
    80:  ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:    [in extent_back_merge()]
    87:  bch2_bkey_merge(c, bkey_i_to_s(insert), k);    [in extent_back_merge()]
    268: struct bkey_i *insert,    [bch2_trans_update_extent() argument]
    276: bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),    [in bch2_trans_update_extent()]
    280: k = bch2_btree_iter_peek_max(trans, &iter, POS(insert ...    [in bch2_trans_update_extent()]
    ...
btree_trans_commit.c
    100: * a new bset to insert into:    [in bch2_btree_node_prep_for_write()]
    150: /* Inserting into a given leaf node (last stage of insert): */
    152: /* Handle overwrites and do insert, for non extents: */
    157: struct bkey_i *insert)    [bch2_btree_bset_insert_key() argument]
    164: EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));    [in bch2_btree_bset_insert_key()]
    165: EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));    [in bch2_btree_bset_insert_key()]
    166: EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));    [in bch2_btree_bset_insert_key()]
    167: EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));    [in bch2_btree_bset_insert_key()]
    168: EBUG_ON(!b->c.level && !bpos_eq(insert ...    [in bch2_btree_bset_insert_key()]
    292: bch2_btree_insert_key_leaf(struct btree_trans *trans, struct btree_path *path, struct bkey_i *insert, u64 journal_seq)    [bch2_btree_insert_key_leaf() argument]
    ...
io_misc.c
    397: bool insert = shift > 0;    [__bch2_resume_logged_op_finsert() local]
    422: if (insert) {    [in __bch2_resume_logged_op_finsert()]
    458: k = insert ...    [in __bch2_resume_logged_op_finsert()]
    473: if (insert &&    [in __bch2_resume_logged_op_finsert()]
    492: op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);    [in __bch2_resume_logged_op_finsert()]
    512: if (!insert) {    [in __bch2_resume_logged_op_finsert()]
    540: u64 offset, u64 len, bool insert,    [bch2_fcollapse_finsert() argument]
    544: s64 shift = insert ? len : -len;    [in bch2_fcollapse_finsert()]
    551: op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);    [in bch2_fcollapse_finsert()]
bset.c
    49:  * over keys that compare equal to our insert key, and then insert immediately
    50:  * prior to the first key greater than the key we're inserting - our insert
    51:  * position will be after all keys that compare equal to our insert key, which
    52:  * by the time we actually do the insert will all be deleted.
    239: struct bkey_packed *insert, unsigned clobber_u64s)    [__bch2_verify_insert_pos() argument]
    248: bkey_iter_cmp(b, prev, insert) > 0);    [in __bch2_verify_insert_pos()]
    251: bkey_iter_cmp(b, prev, insert) > 0) {    [in __bch2_verify_insert_pos()]
    253: struct bkey k2 = bkey_unpack_key(b, insert);    [in __bch2_verify_insert_pos()]
    259: panic("prev > insert ...    [in __bch2_verify_insert_pos()]
    288: bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, struct bkey_packed *insert, unsigned clobber_u64s)    [bch2_verify_insert_pos() argument]
    989: bch2_bset_insert(struct btree *b, struct bkey_packed *where, struct bkey_i *insert, unsigned clobber_u64s)    [bch2_bset_insert() argument]
    ...
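The comment at lines 49-52 says the chosen insert position sits after every key that compares equal to the insert key (those duplicates will have been deleted by the time the insert happens), i.e. immediately before the first strictly greater key. A minimal standalone sketch of that "upper bound" position search, using plain integers rather than packed bkeys:

    #include <stddef.h>

    /*
     * Return the index of the first element strictly greater than 'key'
     * (an upper bound).  Inserting at this index lands after all keys that
     * compare equal, which is the position the bset.c comment describes.
     */
    size_t upper_bound(const int *keys, size_t n, int key)
    {
        size_t lo = 0, hi = n;

        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;

            if (keys[mid] <= key)
                lo = mid + 1;   /* equal keys stay to our left */
            else
                hi = mid;
        }
        return lo;
    }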
backpointers.h
    102: bool insert)    [bch2_bucket_backpointer_mod() argument]
    105: return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert);    [in bch2_bucket_backpointer_mod()]
    107: if (!insert) {    [in bch2_bucket_backpointer_mod()]
buckets.c
    422: struct bkey_s_c k, bool insert, enum bch_sb_error_id id)    [bucket_ref_update_err() argument]
    435: if (insert) {    [in bucket_ref_update_err()]
    449: if (print || insert)    [in bucket_ref_update_err()]
    600: bool insert)    [__mark_pointer() argument]
    610: if (insert)    [in __mark_pointer()]
    623: bool insert = !(flags & BTREE_TRIGGER_overwrite);    [bch2_trigger_pointer() local]
    630: *sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;    [in bch2_trigger_pointer()]
    634: if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)    [in bch2_trigger_pointer()]
    641: if (insert) {    [in bch2_trigger_pointer()]
    651: __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);    [in bch2_trigger_pointer()]
    875: bool insert = !(flags & BTREE_TRIGGER_overwrite);    [__trigger_extent() local]
    ...
btree_update_interior.c
    760:  * @b is the node we did the final insert into:    [in btree_update_nodes_written()]
    931:  struct bkey_i *insert = &b->key;    [btree_update_updated_root() local]
    936:  BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >    [in btree_update_updated_root()]
    943:  insert, insert->k.u64s);    [in btree_update_updated_root()]
    1381: struct bkey_i *insert)    [bch2_insert_fixup_btree_ptr() argument]
    1388: BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&    [in bch2_insert_fixup_btree_ptr()]
    1389: !btree_ptr_sectors_written(bkey_i_to_s_c(insert)));    [in bch2_insert_fixup_btree_ptr()]
    1392: bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);    [in bch2_insert_fixup_btree_ptr()]
    1400: if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), fro ...    [in bch2_insert_fixup_btree_ptr()]
    1442: struct bkey_i *insert = bch2_keylist_front(keys);    [bch2_btree_insert_keys_interior() local]
    ...
/linux/virt/kvm/

coalesced_mmio.c
    49: __u32 insert;    [coalesced_mmio_write() local]
    62: insert = READ_ONCE(ring->last);    [in coalesced_mmio_write()]
    63: if (insert >= KVM_COALESCED_MMIO_MAX ||    [in coalesced_mmio_write()]
    64: (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {    [in coalesced_mmio_write()]
    71: ring->coalesced_mmio[insert].phys_addr = addr;    [in coalesced_mmio_write()]
    72: ring->coalesced_mmio[insert].len = len;    [in coalesced_mmio_write()]
    73: memcpy(ring->coalesced_mmio[insert].data, val, len);    [in coalesced_mmio_write()]
    74: ring->coalesced_mmio[insert].pio = dev->zone.pio;    [in coalesced_mmio_write()]
    76: ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;    [in coalesced_mmio_write()]
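coalesced_mmio_write() is a producer appending to a fixed-size ring that userspace drains: it snapshots the producer index, bails out if advancing it would collide with the consumer index (one slot stays empty to distinguish full from empty), fills the entry, then publishes the new index; the indices are read with READ_ONCE() because the ring is shared with userspace. A minimal standalone sketch of the same insert shape, with toy types rather than KVM's:

    #include <stdbool.h>

    #define RING_MAX 32             /* illustrative; KVM uses KVM_COALESCED_MMIO_MAX */

    struct ring {
        unsigned first;             /* consumer index */
        unsigned last;              /* producer index */
        unsigned long entries[RING_MAX];
    };

    /*
     * Producer-side insert: the ring is full when advancing 'last' would
     * collide with 'first', so one slot is always left unused.
     */
    bool ring_insert(struct ring *r, unsigned long val)
    {
        unsigned insert = r->last;

        if (insert >= RING_MAX || (insert + 1) % RING_MAX == r->first)
            return false;           /* full (or corrupted index) */

        r->entries[insert] = val;
        r->last = (insert + 1) % RING_MAX;
        return true;
    }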
/linux/drivers/accessibility/speakup/

DefaultKeyAssignments
    6:  We have remapped the insert or zero key on the keypad to act as a
    8:  InsKeyPad-period means hold down the insert key like a shift key and
    31: InsKeyPad-9 Move reading cursor to top of screen (insert pgup)
    32: InsKeyPad-3 Move reading cursor to bottom of screen (insert pgdn)
    33: InsKeyPad-7 Move reading cursor to left edge of screen (insert home)
    34: InsKeyPad-1 Move reading cursor to right edge of screen (insert end)
/linux/tools/testing/selftests/timers/

leap-a-day.c
    7:   * This test signals the kernel to insert a leap second
    184: int insert = 1;    [main() local]
    261: /* Set the leap second insert flag */    [in main()]
    263: if (insert)    [in main()]
    320: if (insert)    [in main()]
    360: insert = !insert;    [in main()]
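The test alternates between arming a leap-second insertion and a deletion (line 360 toggles the flag each iteration); the insert flag is handed to the kernel through the NTP adjtimex() interface. A minimal sketch of just the arming step is below; it needs CAP_SYS_TIME, and clearing the flag again would issue another ADJ_STATUS call with STA_INS dropped from tx.status:

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
        struct timex tx = { 0 };

        tx.modes = ADJ_STATUS;
        tx.status = STA_INS;    /* insert a leap second at the end of the UTC day */

        if (adjtimex(&tx) < 0) {
            perror("adjtimex");
            return 1;
        }
        return 0;
    }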
/linux/fs/notify/

notification.c
    74:  * The group can use the @insert hook to insert the event into hash table.
    85:  void (*insert)(struct fsnotify_group *,    [in fsnotify_insert_event()]
    123: if (insert)    [in fsnotify_insert_event()]
    124: insert(group, event);    [in fsnotify_insert_event()]
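Here @insert is an optional callback parameter of fsnotify_insert_event(): once the event has been queued, the hook is invoked only if the group supplied one, letting it also file the event in its hash table. A trimmed-down illustration of that optional-hook pattern (all types and names below are made up, not the fsnotify API):

    #include <stdio.h>

    struct group { const char *name; };
    struct event { int id; };

    /*
     * Queue an event and, if the caller passed an 'insert' hook, let it do
     * extra bookkeeping (queueing itself is elided in this sketch).
     */
    int queue_event(struct group *group, struct event *event,
                    void (*insert)(struct group *, struct event *))
    {
        /* ... merge checks and list insertion would happen here ... */

        if (insert)
            insert(group, event);
        return 0;
    }

    void hash_insert(struct group *group, struct event *event)
    {
        printf("%s: hashed event %d\n", group->name, event->id);
    }

    int main(void)
    {
        struct group g = { "demo" };
        struct event ev = { 42 };

        return queue_event(&g, &ev, hash_insert);
    }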
/linux/include/linux/

dmar.h
    146: extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
    163: static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)    [dmar_iommu_hotplug() argument]
    170: extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
    172: static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)    [dmar_ir_hotplug() argument]
/linux/tools/perf/util/

dsos.c
    215: int insert = dsos->cnt; /* Default to inserting at the end. */    [__dsos__add() local]
    225: insert = mid;    [in __dsos__add()]
    228: memmove(&dsos->dsos[insert + 1], &dsos->dsos[insert],    [in __dsos__add()]
    229: (dsos->cnt - insert) * sizeof(struct dso *));    [in __dsos__add()]
    231: dsos->dsos[insert] = dso__get(dso);    [in __dsos__add()]
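__dsos__add() keeps the dsos array sorted: a binary search picks the insertion index (defaulting to the end), memmove() shifts the tail up by one slot, and the new pointer is dropped into the gap. A self-contained sketch of that pattern with plain ints standing in for struct dso pointers (the array is assumed to have room for one more element):

    #include <stddef.h>
    #include <string.h>

    /*
     * Insert 'val' into the sorted array 'arr' of '*cnt' elements: binary
     * search for the first element >= val, shift the tail up, drop val in.
     */
    void sorted_insert(int *arr, size_t *cnt, int val)
    {
        size_t lo = 0, hi = *cnt, insert = *cnt;    /* default: append at the end */

        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;

            if (arr[mid] >= val) {
                insert = mid;
                hi = mid;
            } else {
                lo = mid + 1;
            }
        }

        memmove(&arr[insert + 1], &arr[insert], (*cnt - insert) * sizeof(arr[0]));
        arr[insert] = val;
        (*cnt)++;
    }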
/linux/tools/testing/selftests/bpf/prog_tests/

sock_iter_batch.c
    25: static int insert(__u64 cookie, struct sock_count counts[], int counts_len)    [insert() function]
    27: int insert = -1;    [insert() local]
    32: insert = i;    [in insert()]
    34: insert = i;    [in insert()]
    38: if (insert < 0)    [in insert()]
    39: return insert;    [in insert()]
    41: counts[insert].cookie = cookie;    [in insert()]
    42: counts[insert].count++;    [in insert()]
    44: return counts[insert].count;    [in insert()]
    58: ASSERT_GE(insert(ou ...    [in read_n()]
    ...
/linux/net/rds/

cong.c
    105: struct rds_cong_map *insert)    [rds_cong_tree_walk() argument]
    126: if (insert) {    [in rds_cong_tree_walk()]
    127: rb_link_node(&insert->m_rb_node, parent, p);    [in rds_cong_tree_walk()]
    128: rb_insert_color(&insert->m_rb_node, &rds_cong_tree);    [in rds_cong_tree_walk()]
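rds_cong_tree_walk() follows the usual kernel rbtree search-or-insert idiom: descend from the root while remembering the parent and the child link that would be taken, and if an optional insert node was supplied, attach it at that spot with rb_link_node() and rebalance with rb_insert_color(). A generic sketch of that idiom (struct item and its key are illustrative, not RDS types):

    #include <linux/rbtree.h>

    struct item {
        struct rb_node node;
        unsigned long key;
    };

    /*
     * Walk the tree looking for 'key'.  If found, return the existing item;
     * otherwise, if 'insert' is non-NULL, link it at the spot where the
     * search ended and rebalance.
     */
    static struct item *tree_walk(struct rb_root *root, unsigned long key,
                                  struct item *insert)
    {
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
            struct item *cur = rb_entry(*p, struct item, node);

            parent = *p;
            if (key < cur->key)
                p = &(*p)->rb_left;
            else if (key > cur->key)
                p = &(*p)->rb_right;
            else
                return cur;             /* already present */
        }

        if (insert) {
            rb_link_node(&insert->node, parent, p);
            rb_insert_color(&insert->node, root);
        }
        return NULL;
    }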
/linux/fs/ocfs2/

alloc.c
    83:   * If this extent tree is supported by an extent map, insert
    590:  * Tree depth may change during truncate, or insert. If we're    [in ocfs2_reinit_path()]
    671:  * safely removed if we ever need to insert extent block    [in ocfs2_path_insert_eb()]
    2044: * - When our insert into the right path leaf is at the leftmost edge
    2371: * - In the case that the rotation requires a post-insert update,
    2446: "Owner %llu: error during insert of %u "    [in ocfs2_rotate_tree_right()]
    2466: * before the record insert is that an error    [in ocfs2_rotate_tree_right()]
    2504: * instead of a real insert, then we have to    [in ocfs2_rotate_tree_right()]
    3713: * Since the leftright insert always covers the entire    [in ocfs2_try_to_merge_extent()]
    3714: * extent, this call will delete the insert recor ...    [in ocfs2_try_to_merge_extent()]
    3876: ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_extent_list *el, struct ocfs2_insert_type *insert)    [ocfs2_insert_at_leaf() argument]
    4175: ocfs2_insert_path(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, struct ocfs2_extent_rec *insert_rec, struct ocfs2_insert_type *insert)    [ocfs2_insert_path() argument]
    4492: ocfs2_figure_contig_type(struct ocfs2_extent_tree *et, struct ocfs2_insert_type *insert, struct ocfs2_extent_list *el, struct ocfs2_extent_rec *insert_rec)    [ocfs2_figure_contig_type() argument]
    4537: ocfs2_figure_appending_type(struct ocfs2_insert_type *insert, struct ocfs2_extent_list *el, struct ocfs2_extent_rec *insert_rec)    [ocfs2_figure_appending_type() argument]
    4588: ocfs2_figure_insert_type(struct ocfs2_extent_tree *et, struct buffer_head **last_eb_bh, struct ocfs2_extent_rec *insert_rec, int *free_records, struct ocfs2_insert_type *insert)    [ocfs2_figure_insert_type() argument]
    4725: struct ocfs2_insert_type insert = {0, };    [ocfs2_insert_extent() local]
    4930: struct ocfs2_insert_type insert;    [ocfs2_split_and_insert() local]
    5297: struct ocfs2_insert_type insert;    [ocfs2_split_tree() local]
    ...
/linux/rust/macros/

vtable.rs
    48: consts.insert(const_name);    [in vtable()]
    77: consts.insert(gen_const_name);    [in vtable()]
/linux/Documentation/sphinx/

rstFlatTable.py
    34:  * fill-cells: instead of autospann missing cells, insert missing cells
    131: tableNode.insert(0, title)
    255: self.rows[y].insert(x+c+1, None)
    263: self.rows[y+r+1].insert(x+c, None)
    346: cellElements.insert(0, target)
/linux/drivers/net/can/dev/

rx-offload.c
    80:  struct sk_buff *pos, *insert = NULL;    [__skb_queue_add_sort() local]
    97:  insert = pos;    [in __skb_queue_add_sort()]
    100: if (!insert)    [in __skb_queue_add_sort()]
    103: __skb_queue_after(head, insert, new);    [in __skb_queue_add_sort()]
/linux/Documentation/userspace-api/media/v4l/

selection-api-intro.rst
    9:  can insert the image into larger one. Some video output devices can crop
    10: part of an input image, scale it up or down and insert it at an
/linux/scripts/

.gitignore
    5: /insert-sys-cert
/linux/lib/

rbtree_test.c
    33:  static void insert(struct test_node *node, struct rb_root_cached *root)    [insert() function]
    257: insert(nodes + j, &root);    [in basic_check()]
    266: printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",    [in basic_check()]
    282: printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",    [in basic_check()]
    286: insert(nodes + i, &root);    [in basic_check()]
    333: insert(nodes + j, &root);    [in basic_check()]
    367: printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);    [in augmented_check()]
    382: printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);    [in augmented_check()]