/linux/fs/smb/server/

ksmbd_work.c:
   21  struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP);
   23  if (work) {
   24          work->compound_fid = KSMBD_NO_FID;
   25          work->compound_pfid = KSMBD_NO_FID;
   26          INIT_LIST_HEAD(&work->request_entry);
   27          INIT_LIST_HEAD(&work->async_request_entry);
   28          INIT_LIST_HEAD(&work->fp_entry);
   29          INIT_LIST_HEAD(&work->aux_read_list);
   30          work->iov_alloc_cnt = 4;
   31          work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
  [all …]

ksmbd_work.h:
   86          struct work_struct      work;
   96   * @work: smb work containing response buffer
   98  static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
  100          return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
  105   * @work: smb work containing response buffer
  107  static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
  109          return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
  114   * @work: smb work containing response buffer
  116  static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
  118          return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
  [all …]

smb_common.c:
  129   * @work: smb work
  135  int ksmbd_verify_smb_message(struct ksmbd_work *work)
  137          struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
  141                  return ksmbd_smb2_check_message(work);
  143          hdr = work->request_buf;
  146                  work->conn->outstanding_credits++;
  306   * @work: smb work containing smb header
  310  static u16 get_smb1_cmd_val(struct ksmbd_work *work)
  317   * @work: smb work containing smb request
  321  static int init_smb1_rsp_hdr(struct ksmbd_work *work)
  [all …]

/linux/drivers/gpu/drm/

drm_flip_work.c:
   47  static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
   51          spin_lock_irqsave(&work->lock, flags);
   52          list_add_tail(&task->node, &work->queued);
   53          spin_unlock_irqrestore(&work->lock, flags);
   57   * drm_flip_work_queue - queue work
   58   * @work: the flip-work
   61   * Queues work, that will later be run (passed back to drm_flip_func_t
   62   * func) on a work queue after drm_flip_work_commit() is called.
   64  void drm_flip_work_queue(struct drm_flip_work *work, void *val)
   71          drm_flip_work_queue_task(work, task);
  [all …]

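The excerpt only shows the queueing half; as a rough sketch of the intended pattern (the callback, workqueue, and framebuffer handling below are hypothetical, not taken from the file), a driver initializes the flip work with a cleanup callback, queues stale objects from its flip path, and commits the queue to a workqueue once the flip has happened:

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <linux/workqueue.h>

/* Hypothetical callback: runs later in workqueue context, once per queued value. */
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
        struct drm_framebuffer *fb = val;

        drm_framebuffer_put(fb);        /* sleeping is fine here */
}

static struct drm_flip_work unref_work;
static struct workqueue_struct *flip_wq;        /* hypothetical driver workqueue */

static int example_init(void)
{
        flip_wq = alloc_workqueue("example-flip", 0, 0);
        if (!flip_wq)
                return -ENOMEM;
        drm_flip_work_init(&unref_work, "fb unref", unref_fb_worker);
        return 0;
}

/* Flip path: defer dropping the old framebuffer... */
static void example_queue_old_fb(struct drm_framebuffer *old_fb)
{
        drm_flip_work_queue(&unref_work, old_fb);
}

/* ...and once the flip has completed, let everything queued so far run. */
static void example_flip_done(void)
{
        drm_flip_work_commit(&unref_work, flip_wq);
}
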
drm_vblank_work.c:
   40   * generic delayed work implementation which delays work execution until a
   41   * particular vblank has passed, and then executes the work at realtime
   45   * re-arming work items can be easily implemented.
   50          struct drm_vblank_work *work, *next;
   56          list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
   57                  if (!drm_vblank_passed(count, work->count))
   60                  list_del_init(&work->node);
   62                  kthread_queue_work(vblank->worker, &work->base);
   69  /* Handle cancelling any pending vblank work items and drop respective vblank
   74          struct drm_vblank_work *work, *next;
  [all …]

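A minimal sketch of how a driver might use this (the handler, target-vblank choice, and re-arm step are illustrative assumptions, not taken from the file): initialize the work against a CRTC, then schedule it to run once a particular vblank count has passed:

#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

/* Hypothetical handler: runs on the CRTC's vblank worker (kthread context). */
static void example_vblank_func(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);

        /* ... program state that must only change after the target vblank ... */

        /* One way to get the "re-arming" behaviour the comment mentions. */
        drm_vblank_work_schedule(work, work->count + 1, false);
}

static struct drm_vblank_work example_work;

static void example_arm(struct drm_crtc *crtc)
{
        u64 target = drm_crtc_vblank_count(crtc) + 1;

        drm_vblank_work_init(&example_work, crtc, example_vblank_func);

        /*
         * nextonmiss == false: if the target vblank already passed, queue the
         * work immediately instead of waiting another frame.
         */
        drm_vblank_work_schedule(&example_work, target, false);
}
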
/linux/include/trace/events/

workqueue.h:
   14   * workqueue_queue_work - called when a work gets queued
   17   * @work: pointer to struct work_struct
   19   * This event occurs when a work is queued immediately or once a
   20   * delayed work is actually queued on a workqueue (ie: once the delay
   26                   struct work_struct *work),
   28          TP_ARGS(req_cpu, pwq, work),
   31                  __field( void *,        work    )
   39                  __entry->work           = work;
   40                  __entry->function       = work->func;
   46          TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
  [all …]

/linux/virt/kvm/

async_pf.c:
   45  static void async_pf_execute(struct work_struct *work)
   48          container_of(work, struct kvm_async_pf, work);
   63           * work item is fully processed.
   99  static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
  102           * The async #PF is "done", but KVM must wait for the work item itself,
  105           * after the last call to module_put().  Note, flushing the work item
  111           * need to be flushed (but sanity check that the work wasn't queued).
  113          if (work->wakeup_all)
  114                  WARN_ON_ONCE(work->work.func);
  116                  flush_work(&work->work);
  [all …]

/linux/fs/btrfs/

async-thread.c:
   29          /* List head pointing to ordered work list */
   55  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
   57          return work->wq->fs_info;
  163   * Hook for threshold which will be called before executing the work,
  213          struct btrfs_work *work;
  222                  work = list_first_entry(list, struct btrfs_work, ordered_list);
  223                  if (!test_bit(WORK_DONE_BIT, &work->flags))
  229                   * updates from ordinary work function.
  235                   * we leave the work item on the list as a barrier so
  236                   * that later work items that are done don't have their
  [all …]

/linux/kernel/

task_work.c:
   19   * task_work_add - ask the @task to execute @work->func()
   21   * @work: the callback to run
   24   * Queue @work for task_work_run() below and notify the @task if @notify
   35   * @TWA_RESUME work is run only when the task exits the kernel and returns to
   40   * Fails if the @task is exiting/exited and thus it can't process this @work.
   41   * Otherwise @work->func() will be called when the @task goes through one of
   44   * If the targeted task is exiting, then an error is returned and the work item
   54  int task_work_add(struct task_struct *task, struct callback_head *work,
   65          kasan_record_aux_stack(work);
   72                  work->next = head;
  [all …]

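The kernel-doc above describes the contract; a minimal caller-side sketch (the context struct and callback are made-up names) embeds the callback_head in its own allocation, initializes it, and keeps ownership if task_work_add() refuses an exiting task:

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct example_ctx {
        struct callback_head twork;
        /* ... payload ... */
};

/* Runs in the target task's context, e.g. on its way back to user mode. */
static void example_twork_func(struct callback_head *cb)
{
        struct example_ctx *ctx = container_of(cb, struct example_ctx, twork);

        /* ... act on behalf of 'current' ... */
        kfree(ctx);
}

static int example_queue(struct task_struct *task)
{
        struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;

        init_task_work(&ctx->twork, example_twork_func);

        /* Fails (e.g. -ESRCH) if @task is already exiting; we still own ctx then. */
        if (task_work_add(task, &ctx->twork, TWA_RESUME)) {
                kfree(ctx);
                return -ESRCH;
        }
        return 0;
}
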
irq_work.c:
   57  static bool irq_work_claim(struct irq_work *work)
   61          oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
   63           * If the work is already pending, no need to raise the IPI.
   79  static __always_inline void irq_work_raise(struct irq_work *work)
   82          trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
   87  /* Enqueue on current CPU, work must already be claimed and preempt disabled */
   88  static void __irq_work_queue_local(struct irq_work *work)
   95          work_flags = atomic_read(&work->node.a_flags);
  107          if (!llist_add(&work->node.llist, list))
  110          /* If the work is "lazy", handle it from next tick if any */
  [all …]

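From the caller's side, the claim/raise machinery above is hidden behind irq_work_queue(); a small sketch (callback name invented) of raising work from NMI or hard-IRQ context:

#include <linux/irq_work.h>

/* Hypothetical callback: runs in hard-IRQ context shortly after being raised. */
static void example_irq_work_func(struct irq_work *work)
{
        /* keep this short and non-sleeping; defer heavy lifting elsewhere */
}

static struct irq_work example_work = IRQ_WORK_INIT(example_irq_work_func);

static void example_raise(void)
{
        /*
         * Safe from NMI/IRQ context.  Returns false if the work was already
         * claimed (pending); the callback still runs once either way.
         */
        irq_work_queue(&example_work);
}
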
kthread.c:
  317   * functions which do some additional work in non-modular code such as
  963   * when they finish. There is defined a safe point for freezing when one work
  972          struct kthread_work *work;
  995                  work = NULL;
  998                  work = list_first_entry(&worker->work_list,
 1000                  list_del_init(&work->node);
 1002                  worker->current_work = work;
 1005          if (work) {
 1006                  kthread_work_func_t func = work->func;
 1008                  trace_sched_kthread_work_execute_start(work);
  [all …]

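The loop above is the consumer side of the kthread_worker API; a hedged sketch of the producer side (the worker and function names are placeholders), which is the usual way to get work run by one dedicated kernel thread:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;
static struct kthread_work example_work;

/* Executed by the dedicated kthread, one item at a time, in queueing order. */
static void example_work_fn(struct kthread_work *work)
{
        /* ... */
}

static int example_start(void)
{
        example_worker = kthread_create_worker(0, "example-worker");
        if (IS_ERR(example_worker))
                return PTR_ERR(example_worker);

        kthread_init_work(&example_work, example_work_fn);
        kthread_queue_work(example_worker, &example_work);
        return 0;
}

static void example_stop(void)
{
        kthread_flush_work(&example_work);
        kthread_destroy_worker(example_worker);
}
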
workqueue.c:
   18   * This is the generic async execution mechanism.  Work items as are
   21   * normal work items and the other for high priority ones) and some extra
  238          PWQ_STAT_STARTED,       /* work items started execution */
  239          PWQ_STAT_COMPLETED,     /* work items completed execution */
  245          PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */
  269   * When pwq->nr_active >= max_active, new work item is queued to
  273   * All work items marked with WORK_STRUCT_INACTIVE do not participate in
  274   * nr_active and all work items in pwq->inactive_works are marked with
  275   * WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE work items are
  277   * pool->worklist or worker->scheduled.  Those work itmes are only struct
  [all …]

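The nr_active/max_active bookkeeping described above is what a caller controls through the last argument of alloc_workqueue(); a rough sketch (workqueue name and handler invented) of a queue limited to one in-flight item, so further submissions park as inactive until a slot frees up:

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
        /* runs in process context on a worker thread */
}

static DECLARE_WORK(example_a, example_fn);
static DECLARE_WORK(example_b, example_fn);

static int example(void)
{
        /* max_active = 1: at most one item from this queue is active at a time. */
        struct workqueue_struct *wq = alloc_workqueue("example", WQ_UNBOUND, 1);

        if (!wq)
                return -ENOMEM;

        queue_work(wq, &example_a);
        queue_work(wq, &example_b);     /* parked as inactive while example_a holds the slot */

        flush_workqueue(wq);
        destroy_workqueue(wq);
        return 0;
}
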
/linux/LICENSES/dual/

copyleft-next-0.3.1:
   26  of, publicly perform and publicly display My Work.
   40  Legal Notices contained in My Work (to the extent they remain
   47  If You Distribute a Derived Work, You must license the entire Derived
   48  Work as a whole under this License, with prominent notice of such
   50  separate Distribution of portions of the Derived Work.
   52  If the Derived Work includes material licensed under the GPL, You may
   53  instead license the Derived Work under the GPL.
   57  When Distributing a Covered Work, You may not impose further
   58  restrictions on the exercise of rights in the Covered Work granted under
   64  However, You may Distribute a Covered Work incorporating material
  [all …]

Apache-2.0:
   49  "Work" shall mean the work of authorship, whether in Source or Object form,
   51  is included in or attached to the work (an example is provided in the
   54  "Derivative Works" shall mean any work, whether in Source or Object form,
   55  that is based on (or derived from) the Work and for which the editorial
   57  a whole, an original work of authorship. For the purposes of this License,
   59  merely link (or bind by name) to the interfaces of, the Work and Derivative
   62  "Contribution" shall mean any work of authorship, including the original
   63  version of the Work and any modifications or additions to that Work or
   65  inclusion in the Work by the copyright owner or by an individual or Legal
   72  and improving the Work, but excluding communication that is conspicuously
  [all …]

/linux/include/linux/

workqueue.h:
    3   * workqueue.h --- work queue handling for Linux.
   21   * The first word is the work queue pointer and the flags rolled into
   24  #define work_data_bits(work) ((unsigned long *)(&(work)->data))
   27          WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
   28          WORK_STRUCT_INACTIVE_BIT,       /* work item is inactive */
   30          WORK_STRUCT_LINKED_BIT,         /* next work is linked to this one */
   67   * When a work item is off queue, the high bits encode off-queue flags
  115          struct work_struct work;
  118          /* target workqueue and CPU ->timer uses to queue ->work */
  124          struct work_struct work;
  [all …]

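A typical consumer of these definitions embeds a work_struct (or delayed_work) in its own object and recovers it with container_of(); a minimal sketch with invented names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct example_device {
        struct work_struct reset_work;
        struct delayed_work poll_work;
};

static void example_reset_fn(struct work_struct *work)
{
        struct example_device *dev =
                container_of(work, struct example_device, reset_work);

        pr_debug("resetting %p\n", dev);
}

static void example_poll_fn(struct work_struct *work)
{
        struct example_device *dev =
                container_of(to_delayed_work(work), struct example_device, poll_work);

        /* re-arm: poll again in roughly one second */
        schedule_delayed_work(&dev->poll_work, HZ);
}

static void example_device_setup(struct example_device *dev)
{
        INIT_WORK(&dev->reset_work, example_reset_fn);
        INIT_DELAYED_WORK(&dev->poll_work, example_poll_fn);

        schedule_work(&dev->reset_work);                /* run ASAP on system_wq */
        schedule_delayed_work(&dev->poll_work, HZ);     /* first run after ~1s */
}
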
completion.h:
   35  #define COMPLETION_INITIALIZER(work) \
   36          { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
   38  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
   39          (*({ init_completion_map(&(work), &(map)); &(work); }))
   41  #define COMPLETION_INITIALIZER_ONSTACK(work) \
   42          (*({ init_completion(&work); &work; }))
   46   * @work:  identifier for the completion structure
   52  #define DECLARE_COMPLETION(work) \
   53          struct completion work = COMPLETION_INITIALIZER(work)
   62   * @work:  identifier for the completion structure
  [all …]

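Here "work" is simply the macro parameter naming the completion variable; a small sketch of the usual pairing with a work item (names invented), where one side signals and the other waits with a timeout:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static DECLARE_COMPLETION(example_setup_done);  /* static initializer form */

static void example_async_step(struct work_struct *work)
{
        /* ... do the asynchronous part ... */
        complete(&example_setup_done);          /* wake the waiter below */
}

static DECLARE_WORK(example_setup_work, example_async_step);

static int example_wait(void)
{
        schedule_work(&example_setup_work);

        /* A local would use DECLARE_COMPLETION_ONSTACK() or init_completion() instead. */
        if (!wait_for_completion_timeout(&example_setup_done,
                                         msecs_to_jiffies(1000)))
                return -ETIMEDOUT;
        return 0;
}
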
entry-common.h:
   60   * to be done between establishing state and handling user mode entry work.
   65                                  unsigned long work);
   68   * syscall_enter_from_user_mode_work - Check and handle work before invoking
   75   * architecture specific work.
   84   * It handles the following work items:
   92          unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
   94          if (work & SYSCALL_WORK_ENTER)
   95                  syscall = syscall_trace_enter(regs, syscall, work);
  101   * syscall_enter_from_user_mode - Establish state and check and handle work
  132   * syscall_exit_work - Handle work before returning to user mode
  [all …]

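For orientation, a heavily simplified sketch of how an architecture's syscall path strings these helpers together (the arch function and the invoke_syscall() helper are hypothetical; real entry code also deals with syscall-number ranges, ptrace return values, and more):

#include <linux/entry-common.h>

static void invoke_syscall(struct pt_regs *regs, long nr);      /* hypothetical arch helper */

static void example_arch_do_syscall(struct pt_regs *regs, long nr)
{
        /*
         * Establishes lockdep/RCU/context-tracking state and runs the
         * SYSCALL_WORK_ENTER work (seccomp, syscall tracing, ptrace, ...).
         */
        nr = syscall_enter_from_user_mode(regs, nr);

        if (nr >= 0)
                invoke_syscall(regs, nr);

        /* Runs SYSCALL_WORK_EXIT plus the generic return-to-user work loop. */
        syscall_exit_to_user_mode(regs);
}
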
/linux/tools/perf/

builtin-kwork.c:
  315          struct kwork_work *work;
  319                  work = container_of(node, struct kwork_work, node);
  320                  cmp = work_cmp(sort_list, key, work);
  326                          if (work->name == NULL)
  327                                  work->name = key->name;
  328                          return work;
  362          struct kwork_work *work = zalloc(sizeof(*work));
  364          if (work == NULL) {
  365                  pr_err("Failed to zalloc kwork work\n");
  370                  INIT_LIST_HEAD(&work->atom_list[i]);
  [all …]

/linux/drivers/staging/octeon/

ethernet-rx.c:
   59   * @work: Work queue entry pointing to the packet.
   63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
   68          port = work->word0.pip.cn68xx.pknd;
   70          port = work->word1.cn38xx.ipprt;
   72          if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
   81          if (work->word2.snoip.err_code == 5 ||
   82              work->word2.snoip.err_code == 7) {
   99                          cvmx_phys_to_ptr(work->packet_ptr.s.addr);
  102                  while (i < work->word1.len - 1) {
  111                          work->packet_ptr.s.addr += i + 1;
  [all …]

/linux/rust/kernel/

workqueue.rs:
    3  //! Work queues.
    5  //! This file has two components: The raw work item API, and the safe work item API.
   15  //! The raw API consists of the [`RawWorkItem`] trait, where the work item needs to provide an
   16  //! arbitrary function that knows how to enqueue the work item. It should usually not be used
   21  //! The safe API is used via the [`Work`] struct and [`WorkItem`] traits. Furthermore, it also
   24  //! * The [`Work`] struct is the Rust wrapper for the C `work_struct` type.
   37  //! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
   43  //!     work: Work<MyStruct>,
   47  //!     impl HasWork<Self> for MyStruct { self.work }
   54  //!         work <- new_work!("MyStruct::work"),
  [all …]

/linux/net/wireless/

debugfs.c:
  115          struct wiphy_work work;
  131                                        struct wiphy_work *work)
  133          struct debugfs_read_work *w = container_of(work, typeof(*w), work);
  144          wiphy_work_cancel(w->wiphy, &w->work);
  159          struct debugfs_read_work work = {
  167                  .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
  171                  .cancel_data = &work,
  177          wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work);
  178          wiphy_work_queue(wiphy, &work.work);
  181          wait_for_completion(&work.completion);
  [all …]

/linux/Documentation/filesystems/bcachefs/future/

idle_work.rst:
    1  Idle/background work classes design doc:
    4  would be under sustained load, to keep pending work at a "medium" level, to
    5  let work build up so we can process it in more efficient batches, while also
    8  But for desktops or mobile - scenarios where work is less sustained and power
   11  background work while the system should be idle.
   15  background task may generate work for another.
   27  rebalance, which currently does not actively attempt to let work batch up).
   39  journal), while also letting some work accumululate to be batched (or become
   46  a lot more work to service each write and the system becomes much slower.
   51  When the system becomes idle, we should start flushing our pending work
  [all …]

/linux/LICENSES/deprecated/

CC0-1.0:
   26  and subsequent owner(s) (each and all, an "owner") of an original work of
   27  authorship and/or a database (each, a "Work").
   29  Certain owners wish to permanently relinquish those rights to a Work for
   37  works, or to gain reputation or greater distribution for their Work in
   42  associating CC0 with a Work (the "Affirmer"), to the extent that he or she
   43  is an owner of Copyright and Related Rights in the Work, voluntarily
   44  elects to apply CC0 to the Work and publicly distribute the Work under its
   46  Work and the meaning and intended legal effect of CC0 on those rights.
   48  1. Copyright and Related Rights. A Work made available under CC0 may be
   54  communicate, and translate a Work;
  [all …]

/linux/drivers/md/

dm-cache-background-tracker.h:
   17   * The cache policy decides what background work should be performed,
   19   * is in charge of performing the work, and does so when it sees fit.
   22   * work that the policy has decided upon, and handing (issuing) it to
   32          struct policy_work work;
   47   * Destroy the tracker.  No issued, but not complete, work should
   49   * work.
   56   * Queue some work within the tracker.  'work' should point to the work
   59   * copy of the work.
   61   * returns -EINVAL iff the work is already queued.  -ENOMEM if the work
   65                          struct policy_work *work,
  [all …]

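Pieced together from these comments, the intended call pattern looks roughly like the sketch below; this is an internal dm-cache API, so treat the helper names (btracker_queue, btracker_issue, btracker_complete) and the exact error handling as assumptions rather than a reference:

#include "dm-cache-background-tracker.h"

static int example(struct background_tracker *b, struct policy_work *decided)
{
        struct policy_work *queued;
        int r;

        /* Policy side: hand decided work to the tracker, which keeps its own copy. */
        r = btracker_queue(b, decided, &queued);
        if (r)
                return r;       /* -EINVAL: already queued, -ENOMEM: tracker full */

        /* Target side, later: take a queued item, perform it, then mark it done. */
        r = btracker_issue(b, &queued);
        if (!r) {
                /* ... carry out the migration / writeback described by 'queued' ... */
                btracker_complete(b, queued);
        }
        return 0;
}
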
/linux/kernel/unwind/

deferred.c:
  154          struct unwind_work *work;
  179          list_for_each_entry_srcu(work, &callbacks, list,
  181                  if (test_bit(work->bit, &bits)) {
  182                          work->func(work, &trace, cookie);
  184                          info->cache->unwind_completed |= BIT(work->bit);
  203          task_work_cancel(task, &info->work);
  208   * @work: Unwind descriptor requesting the trace
  211   * Schedule a user space unwind to be done in task work before exiting the
  220   * It's valid to call this function multiple times for the same @work within
  231  int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
  [all …]