
Searched refs:kdata (Results 1 – 20 of 20) sorted by relevance

/linux/kernel/
capability.c
143 struct __user_cap_data_struct kdata[2]; in SYSCALL_DEFINE2() local
164 kdata[0].effective = pE.val; kdata[1].effective = pE.val >> 32; in SYSCALL_DEFINE2()
165 kdata[0].permitted = pP.val; kdata[1].permitted = pP.val >> 32; in SYSCALL_DEFINE2()
166 kdata[0].inheritable = pI.val; kdata[1].inheritable = pI.val >> 32; in SYSCALL_DEFINE2()
187 if (copy_to_user(dataptr, kdata, tocopy * sizeof(kdata[0]))) in SYSCALL_DEFINE2()
218 struct __user_cap_data_struct kdata[ in SYSCALL_DEFINE2() local
[all...]
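
Note: the capability.c hits are the legacy capget()/capset() ABI path, where the kernel's 64-bit capability sets are split across two 32-bit __user_cap_data_struct entries before being copied out. A minimal sketch of that packing, assuming a hypothetical copy_caps_to_user() helper around the real uapi struct:

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical helper: split 64-bit capability sets into the two 32-bit
 * legacy structs and copy 'tocopy' of them to userspace.
 */
static int copy_caps_to_user(struct __user_cap_data_struct __user *dataptr,
                             u64 effective, u64 permitted, u64 inheritable,
                             unsigned int tocopy)
{
        struct __user_cap_data_struct kdata[2];

        kdata[0].effective   = (u32)effective;
        kdata[1].effective   = (u32)(effective >> 32);
        kdata[0].permitted   = (u32)permitted;
        kdata[1].permitted   = (u32)(permitted >> 32);
        kdata[0].inheritable = (u32)inheritable;
        kdata[1].inheritable = (u32)(inheritable >> 32);

        if (copy_to_user(dataptr, kdata, tocopy * sizeof(kdata[0])))
                return -EFAULT;
        return 0;
}

Callers that only speak _LINUX_CAPABILITY_VERSION_1 pass tocopy == 1 and never see the upper 32 bits of each set.
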
/linux/drivers/xen/
privcmd.c
634 struct privcmd_dm_op kdata; in privcmd_ioctl_dm_op() local
643 if (copy_from_user(&kdata, udata, sizeof(kdata))) in privcmd_ioctl_dm_op()
647 if (data->domid != DOMID_INVALID && data->domid != kdata.dom) in privcmd_ioctl_dm_op()
650 if (kdata.num == 0) in privcmd_ioctl_dm_op()
653 if (kdata.num > privcmd_dm_op_max_num) in privcmd_ioctl_dm_op()
656 kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL); in privcmd_ioctl_dm_op()
660 if (copy_from_user(kbufs, kdata.ubufs, in privcmd_ioctl_dm_op()
661 sizeof(*kbufs) * kdata.num)) { in privcmd_ioctl_dm_op()
666 for (i = 0; i < kdata in privcmd_ioctl_dm_op()
740 struct privcmd_mmap_resource kdata; in privcmd_ioctl_mmap_resource() local
854 struct privcmd_pcidev_get_gsi kdata; in privcmd_ioctl_pcidev_get_gsi() local
[all...]
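
Note: privcmd_ioctl_dm_op() shows the usual two-stage ioctl copy-in: a fixed-size header struct is copied and validated first, and only then is a variable-length descriptor array allocated with kcalloc() and copied. A rough sketch of the shape, with demo_hdr/demo_buf standing in for the real privcmd uapi types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Illustrative header: a count plus a user pointer to 'num' descriptors. */
struct demo_hdr {
        unsigned int num;
        void __user *ubufs;
};

struct demo_buf {
        void __user *uptr;
        size_t size;
};

static int demo_copy_in(void __user *udata, unsigned int max_num,
                        struct demo_buf **out)
{
        struct demo_hdr kdata;
        struct demo_buf *kbufs;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* Validate the count before trusting it for an allocation. */
        if (kdata.num == 0 || kdata.num > max_num)
                return -EINVAL;

        kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
        if (!kbufs)
                return -ENOMEM;

        if (copy_from_user(kbufs, kdata.ubufs, sizeof(*kbufs) * kdata.num)) {
                kfree(kbufs);
                return -EFAULT;
        }

        *out = kbufs;
        return 0;
}
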
/linux/kernel/trace/
trace_hwlat.c
163 struct hwlat_kthread_data *kdata = get_cpu_data(); in trace_hwlat_callback() local
165 if (!kdata->kthread) in trace_hwlat_callback()
174 kdata->nmi_ts_start = time_get(); in trace_hwlat_callback()
176 kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start; in trace_hwlat_callback()
180 kdata->nmi_count++; in trace_hwlat_callback()
201 struct hwlat_kthread_data *kdata = get_cpu_data(); in get_sample() local
214 kdata->nmi_total_ts = 0; in get_sample()
215 kdata->nmi_count = 0; in get_sample()
285 if (kdata in get_sample()
393 struct hwlat_kthread_data *kdata = get_cpu_data(); in stop_single_kthread() local
418 struct hwlat_kthread_data *kdata = get_cpu_data(); in start_single_kthread() local
[all...]
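
Note: the trace_hwlat.c matches are the per-CPU bookkeeping the hardware-latency detector uses to discount NMI time: the NMI callback stamps a start time, accumulates the delta into nmi_total_ts and bumps nmi_count, and get_sample() zeroes both before a measurement window. A rough per-CPU sketch of that accounting (the demo_* names and the ktime helper are stand-ins; the real code keys on its own time_get()):

#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_nmi_data {
        u64 nmi_ts_start;
        u64 nmi_total_ts;
        int nmi_count;
};

static DEFINE_PER_CPU(struct demo_nmi_data, demo_nmi_data);

/* Called from NMI entry and exit; 'enter' distinguishes the two. */
static void demo_nmi_callback(bool enter)
{
        struct demo_nmi_data *kdata = this_cpu_ptr(&demo_nmi_data);

        if (enter) {
                kdata->nmi_ts_start = ktime_get_mono_fast_ns();
        } else {
                kdata->nmi_total_ts += ktime_get_mono_fast_ns() -
                                       kdata->nmi_ts_start;
                kdata->nmi_count++;
        }
}

/* Mirrors the reset at the top of get_sample(). */
static void demo_reset_sample_window(void)
{
        struct demo_nmi_data *kdata = this_cpu_ptr(&demo_nmi_data);

        kdata->nmi_total_ts = 0;
        kdata->nmi_count = 0;
}
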
/linux/net/ipv4/
bpf_tcp_ca.c
212 void *kdata, const void *udata) in bpf_tcp_ca_init_member() argument
219 tcp_ca = (struct tcp_congestion_ops *)kdata; in bpf_tcp_ca_init_member()
238 static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link) in bpf_tcp_ca_reg() argument
240 return tcp_register_congestion_control(kdata); in bpf_tcp_ca_reg()
243 static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link) in bpf_tcp_ca_unreg() argument
245 tcp_unregister_congestion_control(kdata); in bpf_tcp_ca_unreg()
248 static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link) in bpf_tcp_ca_update() argument
250 return tcp_update_congestion_control(kdata, old_kdata); in bpf_tcp_ca_update()
253 static int bpf_tcp_ca_validate(void *kdata) in bpf_tcp_ca_validate() argument
255 return tcp_validate_congestion_control(kdata); in bpf_tcp_ca_validate()
[all...]
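
Note: in the BPF struct_ops callbacks, kdata is the kernel-side copy of the ops structure being attached; each callback casts it to the concrete type and forwards it to the subsystem's normal register/unregister/validate API. A minimal sketch for a made-up demo_ops type, following the callback signatures that also appear in the include/linux/bpf.h hits below (demo_register()/demo_unregister() are stubs, not real kernel APIs):

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/errno.h>

/* Illustrative ops structure implemented by a struct_ops BPF program. */
struct demo_ops {
        int (*handle)(int arg);
        char name[16];
};

/* Stubs standing in for a subsystem's real registration API. */
static int demo_register(struct demo_ops *ops) { return 0; }
static void demo_unregister(struct demo_ops *ops) { }

static int demo_reg(void *kdata, struct bpf_link *link)
{
        struct demo_ops *ops = kdata;   /* kernel copy of the ops struct */

        return demo_register(ops);
}

static void demo_unreg(void *kdata, struct bpf_link *link)
{
        demo_unregister(kdata);
}

static int demo_validate(void *kdata)
{
        struct demo_ops *ops = kdata;

        return ops->handle ? 0 : -EINVAL;      /* require the mandatory op */
}

static int demo_init_member(const struct btf_type *t,
                            const struct btf_member *member,
                            void *kdata, const void *udata)
{
        /*
         * Non-function members (e.g. 'name') can be copied from the user
         * image (udata) into the kernel copy (kdata) here; returning 0
         * leaves function pointers to the generic struct_ops code.
         */
        return 0;
}
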
/linux/drivers/dma-buf/
dma-heap.c
129 char *kdata = stack_kdata; in dma_heap_ioctl() local
154 kdata = kmalloc(ksize, GFP_KERNEL); in dma_heap_ioctl()
155 if (!kdata) in dma_heap_ioctl()
159 if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) { in dma_heap_ioctl()
166 memset(kdata + in_size, 0, ksize - in_size); in dma_heap_ioctl()
170 ret = dma_heap_ioctl_allocate(file, kdata); in dma_heap_ioctl()
177 if (copy_to_user((void __user *)arg, kdata, out_size) != 0) in dma_heap_ioctl()
180 if (kdata != stack_kdata) in dma_heap_ioctl()
181 kfree(kdata); in dma_heap_ioctl()
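
Note: dma_heap_ioctl() uses the drm_ioctl-style argument buffer: a small on-stack kdata copy for the common case, a kmalloc() fallback for larger argument structs, the unread tail zeroed so fields userspace did not supply read as defaults, and kfree() only when the heap copy was used. A condensed sketch of that flow (sizes and the demo_dispatch() hook are placeholders; callers are assumed to guarantee in_size and out_size never exceed ksize):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long demo_ioctl(unsigned long arg, unsigned int ksize,
                       unsigned int in_size, unsigned int out_size,
                       int (*demo_dispatch)(void *kdata))
{
        char stack_kdata[128];
        char *kdata = stack_kdata;
        long ret;

        /* Fall back to the heap only when the argument struct is large. */
        if (ksize > sizeof(stack_kdata)) {
                kdata = kmalloc(ksize, GFP_KERNEL);
                if (!kdata)
                        return -ENOMEM;
        }

        if (copy_from_user(kdata, (void __user *)arg, in_size)) {
                ret = -EFAULT;
                goto out;
        }

        /* Zero whatever the caller did not supply. */
        if (ksize > in_size)
                memset(kdata + in_size, 0, ksize - in_size);

        ret = demo_dispatch(kdata);
        if (ret)
                goto out;

        if (copy_to_user((void __user *)arg, kdata, out_size))
                ret = -EFAULT;
out:
        if (kdata != stack_kdata)
                kfree(kdata);
        return ret;
}
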
/linux/tools/testing/selftests/bpf/test_kmods/
bpf_test_no_cfi.c
20 void *kdata, const void *udata) in dummy_init_member() argument
25 static int dummy_reg(void *kdata, struct bpf_link *link) in dummy_reg() argument
30 static void dummy_unreg(void *kdata, struct bpf_link *link) in dummy_unreg() argument
bpf_testmod.c
1117 void *kdata, const void *udata) in bpf_testmod_ops_init_member() argument
1125 ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data; in bpf_testmod_ops_init_member()
1145 static int bpf_dummy_reg(void *kdata, struct bpf_link *link) in bpf_dummy_reg() argument
1147 struct bpf_testmod_ops *ops = kdata; in bpf_dummy_reg()
1160 static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) in bpf_dummy_unreg() argument
1216 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link) in bpf_dummy_reg2() argument
1218 struct bpf_testmod_ops2 *ops = kdata; in bpf_dummy_reg2()
1239 static int st_ops3_reg(void *kdata, struct bpf_link *link) in st_ops3_reg() argument
1249 st_ops3 = kdata; in st_ops3_reg()
1256 static void st_ops3_unreg(void *kdata, struc argument
1477 st_ops_reg(void *kdata, struct bpf_link *link) in st_ops_reg() argument
1494 st_ops_unreg(void *kdata, struct bpf_link *link) in st_ops_unreg() argument
1515 st_ops_init_member(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata) in st_ops_init_member() argument
[all...]
/linux/drivers/hid/bpf/
hid_bpf_struct_ops.c
151 void *kdata, const void *udata) in hid_bpf_ops_init_member() argument
158 khid_bpf_ops = (struct hid_bpf_ops *)kdata; in hid_bpf_ops_init_member()
180 static int hid_bpf_reg(void *kdata, struct bpf_link *link) in hid_bpf_reg() argument
182 struct hid_bpf_ops *ops = kdata; in hid_bpf_reg()
239 static void hid_bpf_unreg(void *kdata, struct bpf_link *link) in hid_bpf_unreg() argument
241 struct hid_bpf_ops *ops = kdata; in hid_bpf_unreg()
/linux/net/sched/
bpf_qdisc.c
357 void *kdata, const void *udata) in bpf_qdisc_init_member() argument
364 qdisc_ops = (struct Qdisc_ops *)kdata; in bpf_qdisc_init_member()
386 static int bpf_qdisc_reg(void *kdata, struct bpf_link *link) in bpf_qdisc_reg() argument
388 return register_qdisc(kdata); in bpf_qdisc_reg()
391 static void bpf_qdisc_unreg(void *kdata, struct bpf_link *link) in bpf_qdisc_unreg() argument
393 return unregister_qdisc(kdata); in bpf_qdisc_unreg()
396 static int bpf_qdisc_validate(void *kdata) in bpf_qdisc_validate() argument
398 struct Qdisc_ops *ops = (struct Qdisc_ops *)kdata; in bpf_qdisc_validate()
/linux/drivers/gpu/drm/radeon/
radeon_cs.c
109 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; in radeon_cs_parser_relocs()
348 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); in radeon_cs_parser_init()
350 if (p->chunks[i].kdata == NULL) { in radeon_cs_parser_init()
353 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in radeon_cs_parser_init()
357 p->cs_flags = p->chunks[i].kdata[0]; in radeon_cs_parser_init()
359 ring = p->chunks[i].kdata[1]; in radeon_cs_parser_init()
361 priority = (s32)p->chunks[i].kdata[2]; in radeon_cs_parser_init()
458 kvfree(parser->chunks[i].kdata); in radeon_cs_parser_fini()
662 if (ib_chunk->kdata) in radeon_cs_ib_fill()
663 memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chun in radeon_cs_ib_fill()
[all...]
radeon.h
1011 uint32_t *kdata; member
1051 if (ibc->kdata) in radeon_get_ib_value()
1052 return ibc->kdata[idx]; in radeon_get_ib_value()
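
Note: the radeon command-submission hits copy each user chunk into a kvmalloc'd kdata dword array, parse flags/ring/priority straight out of the first dwords, and kvfree() the copy in the parser teardown; radeon_get_ib_value() in radeon.h reads from that copy when it exists. A small sketch of the copy-in and indexed read, with demo_chunk standing in for struct radeon_cs_chunk:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_chunk {
        uint32_t *kdata;        /* kernel copy of the chunk dwords */
        uint32_t length_dw;
};

static int demo_chunk_copy_in(struct demo_chunk *chunk,
                              const void __user *cdata, uint32_t length_dw)
{
        chunk->kdata = kvmalloc_array(length_dw, sizeof(uint32_t), GFP_KERNEL);
        if (!chunk->kdata)
                return -ENOMEM;

        if (copy_from_user(chunk->kdata, cdata,
                           length_dw * sizeof(uint32_t))) {
                kvfree(chunk->kdata);
                chunk->kdata = NULL;
                return -EFAULT;
        }
        chunk->length_dw = length_dw;
        return 0;
}

/*
 * Read a dword from the kernel copy when it exists; the real
 * radeon_get_ib_value() falls back to the mapped IB instead of 0.
 */
static uint32_t demo_get_value(const struct demo_chunk *chunk,
                               unsigned int idx)
{
        return chunk->kdata ? chunk->kdata[idx] : 0;
}
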
/linux/kernel/bpf/
bpf_struct_ops.c
687 void *udata, *kdata; in bpf_struct_ops_map_update_elem() local
730 kdata = &kvalue->data; in bpf_struct_ops_map_update_elem()
749 *(void **)(kdata + moff) = BPF_MODULE_OWNER; in bpf_struct_ops_map_update_elem()
753 err = st_ops->init_member(t, member, kdata, udata); in bpf_struct_ops_map_update_elem()
836 *(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset(); in bpf_struct_ops_map_update_elem()
849 err = st_ops->validate(kdata); in bpf_struct_ops_map_update_elem()
870 err = st_ops->reg(kdata, NULL); in bpf_struct_ops_map_update_elem()
1153 bool bpf_struct_ops_get(const void *kdata) in bpf_struct_ops_get() argument
1159 kvalue = container_of(kdata, struct bpf_struct_ops_value, data); in bpf_struct_ops_get()
1166 void bpf_struct_ops_put(const void *kdata) in bpf_struct_ops_put() argument
[all...]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c
227 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), in amdgpu_cs_pass1()
229 if (p->chunks[i].kdata == NULL) { in amdgpu_cs_pass1()
235 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_pass1()
247 ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs); in amdgpu_cs_pass1()
256 ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata, in amdgpu_cs_pass1()
270 ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); in amdgpu_cs_pass1()
340 kvfree(p->chunks[i].kdata); in amdgpu_cs_pass1()
355 struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata; in amdgpu_cs_p2_ib()
412 struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata; in amdgpu_cs_p2_dependencies()
484 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; in amdgpu_cs_p2_syncobj_in()
[all …]
amdgpu_cs.h
40 void *kdata; member
/linux/net/bpf/
bpf_dummy_struct_ops.c
271 void *kdata, const void *udata) in bpf_dummy_init_member() argument
276 static int bpf_dummy_reg(void *kdata, struct bpf_link *link) in bpf_dummy_reg() argument
281 static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) in bpf_dummy_unreg() argument
/linux/drivers/net/ethernet/netronome/nfp/flower/
conntrack.c
821 u8 *key, *msk, *kdata, *mdata; in nfp_fl_ct_add_offload() local
868 kdata = flow_pay->unmasked_data; in nfp_fl_ct_add_offload()
872 key = kdata + offset; in nfp_fl_ct_add_offload()
880 key = kdata + offset; in nfp_fl_ct_add_offload()
893 key = kdata + offset; in nfp_fl_ct_add_offload()
916 key = kdata + offset; in nfp_fl_ct_add_offload()
926 key = kdata + offset; in nfp_fl_ct_add_offload()
942 key = kdata + offset; in nfp_fl_ct_add_offload()
953 key = kdata + offset; in nfp_fl_ct_add_offload()
964 key = kdata in nfp_fl_ct_add_offload()
[all...]
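
Note: in nfp_fl_ct_add_offload(), kdata and mdata are the flattened unmasked-key and mask buffers of the hardware flow entry; each match layer is written by pointing key/msk at the current offset and advancing it. A schematic of that layered fill (the helper and its parameters are illustrative only):

#include <linux/string.h>
#include <linux/types.h>

/*
 * Write one match layer into the key and mask buffers at 'offset' and
 * return the offset of the next layer.
 */
static size_t demo_fill_layer(u8 *kdata, u8 *mdata, size_t offset,
                              const void *layer_key, const void *layer_msk,
                              size_t layer_size)
{
        u8 *key = kdata + offset;
        u8 *msk = mdata + offset;

        memcpy(key, layer_key, layer_size);
        memcpy(msk, layer_msk, layer_size);

        return offset + layer_size;
}
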
/linux/drivers/accel/habanalabs/common/
habanalabs_ioctl.c
1233 char *kdata = NULL; in _hl_ioctl() local
1257 kdata = stack_kdata; in _hl_ioctl()
1259 kdata = kzalloc(asize, GFP_KERNEL); in _hl_ioctl()
1260 if (!kdata) { in _hl_ioctl()
1268 if (copy_from_user(kdata, (void __user *)arg, usize)) { in _hl_ioctl()
1274 retcode = func(hpriv, kdata); in _hl_ioctl()
1276 if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize)) in _hl_ioctl()
1285 if (kdata != stack_kdata) in _hl_ioctl()
1286 kfree(kdata); in _hl_ioctl()
/linux/include/linux/
bpf.h
1898 void *kdata, const void *udata);
1899 int (*reg)(void *kdata, struct bpf_link *link);
1900 void (*unreg)(void *kdata, struct bpf_link *link);
1901 int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
1902 int (*validate)(void *kdata);
1961 bool bpf_struct_ops_get(const void *kdata);
1962 void bpf_struct_ops_put(const void *kdata);
/linux/kernel/sched/
ext.c
5866 void *kdata, const void *udata) in bpf_scx_init_member() argument
5869 struct sched_ext_ops *ops = kdata; in bpf_scx_init_member()
5936 static int bpf_scx_reg(void *kdata, struct bpf_link *link) in bpf_scx_reg() argument
5938 return scx_enable(kdata, link); in bpf_scx_reg()
5941 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) in bpf_scx_unreg() argument
5943 struct sched_ext_ops *ops = kdata; in bpf_scx_unreg()
5958 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) in bpf_scx_update() argument
5970 static int bpf_scx_validate(void *kdata) in bpf_scx_validate() argument
/linux/tools/power/pm-graph/
sleepgraph.py
617 def defaultKprobe(self, name, kdata): argument
618 k = kdata