
Searched refs:work (Results 1 – 25 of 3218) sorted by relevance


/linux-5.15/virt/kvm/
async_pf.c
45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
100 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
102 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
103 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
109 if (!work->vcpu) in kvm_clear_async_pf_completion_queue()
114 flush_work(&work->work); in kvm_clear_async_pf_completion_queue()
116 if (cancel_work_sync(&work->work)) { in kvm_clear_async_pf_completion_queue()
117 mmput(work->mm); in kvm_clear_async_pf_completion_queue()
119 kmem_cache_free(async_pf_cache, work); in kvm_clear_async_pf_completion_queue()
[all …]
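The handler in this excerpt recovers its enclosing kvm_async_pf from the embedded work_struct via container_of(), the standard idiom for work items embedded in a larger object. A minimal sketch of that idiom under illustrative names (my_job and my_job_fn are not from the kvm code):

```c
/* Illustrative sketch of the embedded-work_struct idiom, not kvm code. */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_job {
	struct work_struct work;	/* embedded, as kvm_async_pf embeds one */
	int payload;
};

static void my_job_fn(struct work_struct *work)
{
	/* Recover the enclosing object from the embedded member. */
	struct my_job *job = container_of(work, struct my_job, work);

	pr_info("payload=%d\n", job->payload);
	kfree(job);
}

static int my_job_submit(int payload)
{
	struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;
	job->payload = payload;
	INIT_WORK(&job->work, my_job_fn);
	schedule_work(&job->work);
	return 0;
}
```

The flush_work()/cancel_work_sync() calls in the excerpt are the teardown half of this pattern: cancel_work_sync() returns true only when it removed a still-pending item, which is why the kvm code drops its references inside that branch.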
/linux-5.15/drivers/gpu/drm/
drm_flip_work.c
57 void drm_flip_work_queue_task(struct drm_flip_work *work, in drm_flip_work_queue_task() argument
62 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
64 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
76 void drm_flip_work_queue(struct drm_flip_work *work, void *val) in drm_flip_work_queue() argument
83 drm_flip_work_queue_task(work, task); in drm_flip_work_queue()
85 DRM_ERROR("%s could not allocate task!\n", work->name); in drm_flip_work_queue()
86 work->func(work, val); in drm_flip_work_queue()
101 void drm_flip_work_commit(struct drm_flip_work *work, in drm_flip_work_commit() argument
106 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_commit()
[all …]
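drm_flip_work queues opaque values under a spinlock, as shown above, and later runs a callback for each on a workqueue. A sketch of typical driver usage, assuming the documented drm_flip_work API (the framebuffer-unref use case and names here are illustrative):

```c
/* Illustrative sketch of drm_flip_work usage, not from a real driver. */
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

static struct drm_flip_work unref_work;

/* Runs on a workqueue once for every value queued since the last commit. */
static void unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_put(val);
}

static void unref_work_setup(void)
{
	drm_flip_work_init(&unref_work, "fb unref", unref_worker);
}

/* Flip-done path: stage the old framebuffer, then kick the queue. */
static void flip_done(struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&unref_work, old_fb);
	drm_flip_work_commit(&unref_work, system_unbound_wq);
}
```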
drm_vblank_work.c
48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
58 list_del_init(&work->node); in drm_handle_vblank_works()
60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
76 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_vblank_cancel_pending_works()
77 list_del_init(&work->node); in drm_vblank_cancel_pending_works()
106 int drm_vblank_work_schedule(struct drm_vblank_work *work, in drm_vblank_work_schedule() argument
109 struct drm_vblank_crtc *vblank = work->vblank; in drm_vblank_work_schedule()
[all …]
/linux-5.15/include/trace/events/
workqueue.h
26 struct work_struct *work),
28 TP_ARGS(req_cpu, pwq, work),
31 __field( void *, work )
39 __entry->work = work;
40 __entry->function = work->func;
47 __entry->work, __entry->function, __get_str(workqueue),
61 TP_PROTO(struct work_struct *work),
63 TP_ARGS(work),
66 __field( void *, work )
70 __entry->work = work;
[all …]
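These lines come from a TRACE_EVENT() definition; the search view strips the surrounding boilerplate. A simplified sketch of a comparable event definition (sample names throughout; the real workqueue event additionally records the workqueue name via __string/__get_str):

```c
/* sample_trace.h -- illustrative TRACE_EVENT sketch, not the kernel's
 * workqueue events. Exactly one .c file must #define CREATE_TRACE_POINTS
 * before including this header.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _SAMPLE_TRACE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>

TRACE_EVENT(sample_work_queued,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field(void *, work)
		__field(void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p function %ps",
		  __entry->work, __entry->function)
);

#endif /* _SAMPLE_TRACE_H */

/* Must stay outside the multi-read guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE sample_trace
#include <trace/define_trace.h>
```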
/linux-5.15/fs/ksmbd/
ksmbd_work.c
21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL); in ksmbd_alloc_work_struct() local
23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
29 INIT_LIST_HEAD(&work->interim_entry); in ksmbd_alloc_work_struct()
31 return work; in ksmbd_alloc_work_struct()
34 void ksmbd_free_work_struct(struct ksmbd_work *work) in ksmbd_free_work_struct() argument
[all …]
server.c
92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
96 if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) { in check_conn_state()
97 rsp_hdr = work->response_buf; in check_conn_state()
107 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
114 if (check_conn_state(work)) in __process_request()
117 if (ksmbd_verify_smb_message(work)) in __process_request()
120 command = conn->ops->get_cmd_val(work); in __process_request()
125 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
132 conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED); in __process_request()
136 if (work->sess && conn->ops->is_sign_req(work, command)) { in __process_request()
[all …]
smb2pdu.c
41 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
43 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
44 *req = ksmbd_req_buf_next(work); in __wbuf()
45 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
47 *req = work->request_buf; in __wbuf()
48 *rsp = work->response_buf; in __wbuf()
94 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
96 struct smb2_hdr *req_hdr = work->request_buf; in smb2_get_ksmbd_tcon()
99 work->tcon = NULL; in smb2_get_ksmbd_tcon()
100 if (work->conn->ops->get_cmd_val(work) == SMB2_TREE_CONNECT_HE || in smb2_get_ksmbd_tcon()
[all …]
connection.h
154 int ksmbd_conn_write(struct ksmbd_work *work);
161 void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
162 int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
174 static inline bool ksmbd_conn_good(struct ksmbd_work *work) in ksmbd_conn_good() argument
176 return work->conn->status == KSMBD_SESS_GOOD; in ksmbd_conn_good()
179 static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work) in ksmbd_conn_need_negotiate() argument
181 return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE; in ksmbd_conn_need_negotiate()
184 static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work) in ksmbd_conn_need_reconnect() argument
186 return work->conn->status == KSMBD_SESS_NEED_RECONNECT; in ksmbd_conn_need_reconnect()
189 static inline bool ksmbd_conn_exiting(struct ksmbd_work *work) in ksmbd_conn_exiting() argument
[all …]
connection.c
101 void ksmbd_conn_enqueue_request(struct ksmbd_work *work) in ksmbd_conn_enqueue_request() argument
103 struct ksmbd_conn *conn = work->conn; in ksmbd_conn_enqueue_request()
106 if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) { in ksmbd_conn_enqueue_request()
108 work->syncronous = true; in ksmbd_conn_enqueue_request()
114 list_add_tail(&work->request_entry, requests_queue); in ksmbd_conn_enqueue_request()
119 int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work) in ksmbd_conn_try_dequeue_request() argument
121 struct ksmbd_conn *conn = work->conn; in ksmbd_conn_try_dequeue_request()
124 if (list_empty(&work->request_entry) && in ksmbd_conn_try_dequeue_request()
125 list_empty(&work->async_request_entry)) in ksmbd_conn_try_dequeue_request()
128 if (!work->multiRsp) in ksmbd_conn_try_dequeue_request()
[all …]
/linux-5.15/kernel/
task_work.c
32 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
38 kasan_record_aux_stack(work); in task_work_add()
44 work->next = head; in task_work_add()
45 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
78 struct callback_head *work; in task_work_cancel_match() local
90 while ((work = READ_ONCE(*pprev))) { in task_work_cancel_match()
91 if (!match(work, data)) in task_work_cancel_match()
92 pprev = &work->next; in task_work_cancel_match()
93 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel_match()
98 return work; in task_work_cancel_match()
[all …]
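task_work_add() pushes a callback_head onto a task's lockless list with the cmpxchg loop shown above; the callback later runs in that task's own context. A sketch of typical in-kernel usage (task_work is largely kernel-internal, and my_deferred and friends are illustrative names):

```c
/* Illustrative sketch of task_work usage, not from the kernel tree. */
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct my_deferred {
	struct callback_head cb;	/* list node + function pointer */
	int data;
};

static void my_deferred_fn(struct callback_head *cb)
{
	struct my_deferred *d = container_of(cb, struct my_deferred, cb);

	/* Runs in the target task's own context. */
	pr_info("deferred data=%d\n", d->data);
	kfree(d);
}

static int my_defer_to_task(struct task_struct *task, int data)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	d->data = data;
	init_task_work(&d->cb, my_deferred_fn);
	/* TWA_RESUME: run when the task next returns to user mode. */
	if (task_work_add(task, &d->cb, TWA_RESUME)) {
		kfree(d);		/* task is already exiting */
		return -ESRCH;
	}
	return 0;
}
```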
irq_work.c
30 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
34 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
53 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
56 if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) { in __irq_work_queue_local()
57 if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) && in __irq_work_queue_local()
61 if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list))) in __irq_work_queue_local()
67 bool irq_work_queue(struct irq_work *work) in irq_work_queue() argument
70 if (!irq_work_claim(work)) in irq_work_queue()
75 __irq_work_queue_local(work); in irq_work_queue()
88 bool irq_work_queue_on(struct irq_work *work, int cpu) in irq_work_queue_on() argument
[all …]
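irq_work lets NMI- or IRQ-context code defer a function to run shortly afterwards via a self-IPI, with the claim/queue split shown above. A minimal usage sketch (names illustrative):

```c
/* Illustrative sketch of irq_work usage, not from the kernel tree. */
#include <linux/irq_work.h>
#include <linux/smp.h>

/* Runs in hard-IRQ context shortly after being raised. */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work = IRQ_WORK_INIT(my_irq_work_fn);

static void raise_it(void)
{
	/* NMI-safe; returns false if the work was already pending. */
	irq_work_queue(&my_work);
}
```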
kthread.c
725 struct kthread_work *work; in kthread_worker_fn() local
748 work = NULL; in kthread_worker_fn()
751 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
753 list_del_init(&work->node); in kthread_worker_fn()
755 worker->current_work = work; in kthread_worker_fn()
758 if (work) { in kthread_worker_fn()
759 kthread_work_func_t func = work->func; in kthread_worker_fn()
761 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
762 work->func(work); in kthread_worker_fn()
767 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
[all …]
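kthread_worker_fn() above is the loop a dedicated kthread_worker runs: pop one kthread_work off the list, execute it between the trace hooks. A sketch of the client side of that API (names illustrative):

```c
/* Illustrative sketch of kthread_worker usage, not from the kernel tree. */
#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker *worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	pr_info("running in the dedicated worker thread\n");
}

static int worker_start(void)
{
	worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(worker, &my_work);
	return 0;
}

static void worker_stop(void)
{
	/* Flushes queued work, then stops and frees the kthread. */
	kthread_destroy_worker(worker);
}
```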
workqueue.c
457 struct work_struct *work = addr; in work_is_static_object() local
459 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
468 struct work_struct *work = addr; in work_fixup_init() local
472 cancel_work_sync(work); in work_fixup_init()
473 debug_object_init(work, &work_debug_descr); in work_fixup_init()
486 struct work_struct *work = addr; in work_fixup_free() local
490 cancel_work_sync(work); in work_fixup_free()
491 debug_object_free(work, &work_debug_descr); in work_fixup_free()
506 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
508 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
[all …]
/linux-5.15/include/linux/
completion.h
35 #define COMPLETION_INITIALIZER(work) \ argument
36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
68 # define DECLARE_COMPLETION_ONSTACK(work) \ argument
69 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
[all …]
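These initializers back the usual completion pattern: one side sleeps in wait_for_completion() while another side calls complete(). A sketch that pairs it with an on-stack work item (names illustrative):

```c
/* Illustrative sketch of the completion pattern, not from the kernel tree. */
#include <linux/completion.h>
#include <linux/workqueue.h>

struct setup_ctx {
	struct work_struct work;
	struct completion done;
};

static void slow_setup(struct work_struct *w)
{
	struct setup_ctx *ctx = container_of(w, struct setup_ctx, work);

	/* ... do the slow part ... */
	complete(&ctx->done);		/* wake the waiter below */
}

static void do_setup_and_wait(void)
{
	struct setup_ctx ctx;

	init_completion(&ctx.done);
	INIT_WORK_ONSTACK(&ctx.work, slow_setup);
	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);	/* sleeps until complete() */
	destroy_work_on_stack(&ctx.work);
}
```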
workqueue.h
21 typedef void (*work_func_t)(struct work_struct *work);
28 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
111 struct work_struct work; member
120 struct work_struct work; member
153 static inline struct delayed_work *to_delayed_work(struct work_struct *work) in to_delayed_work() argument
155 return container_of(work, struct delayed_work, work); in to_delayed_work()
158 static inline struct rcu_work *to_rcu_work(struct work_struct *work) in to_rcu_work() argument
160 return container_of(work, struct rcu_work, work); in to_rcu_work()
164 struct work_struct work; member
187 .work = __WORK_INITIALIZER((n).work, (f)), \
[all …]
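to_delayed_work() exists because a handler only receives the inner work_struct and must climb back to the delayed_work, and from there to any containing object. A self-rearming poller sketch (names illustrative):

```c
/* Illustrative sketch of delayed_work usage, not from the kernel tree. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poller {
	struct delayed_work dwork;
	int interval_ms;
};

static void poll_fn(struct work_struct *work)
{
	/* The handler gets the inner work_struct; climb back out. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct poller *p = container_of(dwork, struct poller, dwork);

	/* ... do one poll pass, then re-arm ... */
	schedule_delayed_work(&p->dwork, msecs_to_jiffies(p->interval_ms));
}

static void poller_start(struct poller *p, int interval_ms)
{
	p->interval_ms = interval_ms;
	INIT_DELAYED_WORK(&p->dwork, poll_fn);
	schedule_delayed_work(&p->dwork, msecs_to_jiffies(interval_ms));
}

static void poller_stop(struct poller *p)
{
	cancel_delayed_work_sync(&p->dwork);	/* also stops the re-arm */
}
```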
jump_label_ratelimit.h
12 struct delayed_work work; member
18 struct delayed_work work; member
24 struct delayed_work work; member
28 __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
30 __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
33 __static_key_deferred_flush((x), &(x)->work)
37 struct delayed_work *work,
39 extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
43 extern void jump_label_update_timeout(struct work_struct *work);
49 .work = __DELAYED_WORK_INITIALIZER((name).work, \
[all …]
kthread.h
87 typedef void (*kthread_work_func_t)(struct kthread_work *work);
112 struct kthread_work work; member
122 #define KTHREAD_WORK_INIT(work, fn) { \ argument
123 .node = LIST_HEAD_INIT((work).node), \
128 .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
136 #define DEFINE_KTHREAD_WORK(work, fn) \ argument
137 struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
165 #define kthread_init_work(work, fn) \ argument
167 memset((work), 0, sizeof(struct kthread_work)); \
168 INIT_LIST_HEAD(&(work)->node); \
[all …]
/linux-5.15/fs/btrfs/
async-thread.c
61 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
63 return work->wq->fs_info; in btrfs_work_owner()
224 struct btrfs_work *work; in run_ordered_work() local
233 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
235 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
244 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
246 trace_btrfs_ordered_sched(work); in run_ordered_work()
248 work->ordered_func(work); in run_ordered_work()
252 list_del(&work->ordered_list); in run_ordered_work()
255 if (work == self) { in run_ordered_work()
[all …]
/linux-5.15/drivers/staging/octeon/
ethernet-rx.c
63 static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) in cvm_oct_check_rcv_error() argument
68 port = work->word0.pip.cn68xx.pknd; in cvm_oct_check_rcv_error()
70 port = work->word1.cn38xx.ipprt; in cvm_oct_check_rcv_error()
72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
81 if (work->word2.snoip.err_code == 5 || in cvm_oct_check_rcv_error()
82 work->word2.snoip.err_code == 7) { in cvm_oct_check_rcv_error()
99 cvmx_phys_to_ptr(work->packet_ptr.s.addr); in cvm_oct_check_rcv_error()
102 while (i < work->word1.len - 1) { in cvm_oct_check_rcv_error()
111 work->packet_ptr.s.addr += i + 1; in cvm_oct_check_rcv_error()
112 work->word1.len -= i + 5; in cvm_oct_check_rcv_error()
[all …]
ethernet-tx.c
516 struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); in cvm_oct_xmit_pow() local
518 if (unlikely(!work)) { in cvm_oct_xmit_pow()
531 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); in cvm_oct_xmit_pow()
561 work->word0.pip.cn38xx.hw_chksum = skb->csum; in cvm_oct_xmit_pow()
562 work->word1.len = skb->len; in cvm_oct_xmit_pow()
563 cvmx_wqe_set_port(work, priv->port); in cvm_oct_xmit_pow()
564 cvmx_wqe_set_qos(work, priv->port & 0x7); in cvm_oct_xmit_pow()
565 cvmx_wqe_set_grp(work, pow_send_group); in cvm_oct_xmit_pow()
566 work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE; in cvm_oct_xmit_pow()
567 work->word1.tag = pow_send_group; /* FIXME */ in cvm_oct_xmit_pow()
[all …]
/linux-5.15/drivers/accessibility/speakup/
selection.c
20 struct work_struct work; member
25 static void __speakup_set_selection(struct work_struct *work) in __speakup_set_selection() argument
28 container_of(work, struct speakup_selection_work, work); in __speakup_set_selection()
58 .work = __WORK_INITIALIZER(speakup_sel_work.work,
87 schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work); in speakup_set_selection()
96 cancel_work_sync(&speakup_sel_work.work); in speakup_cancel_selection()
106 static void __speakup_paste_selection(struct work_struct *work) in __speakup_paste_selection() argument
109 container_of(work, struct speakup_selection_work, work); in __speakup_paste_selection()
117 .work = __WORK_INITIALIZER(speakup_paste_work.work,
129 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); in speakup_paste_selection()
[all …]
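speakup builds its work items statically with __WORK_INITIALIZER inside a wrapper struct; for a bare work_struct, the DECLARE_WORK macro expands to the same initializer. A sketch (names illustrative):

```c
/* Illustrative sketch of a statically initialized work item. */
#include <linux/workqueue.h>

static void hello_fn(struct work_struct *work)
{
	pr_info("hello from a statically initialized work item\n");
}

/* Equivalent to:
 *	static struct work_struct hello_work =
 *		__WORK_INITIALIZER(hello_work, hello_fn);
 */
static DECLARE_WORK(hello_work, hello_fn);

static void trigger(void)
{
	/* WORK_CPU_UNBOUND: let the scheduler pick the CPU. */
	schedule_work_on(WORK_CPU_UNBOUND, &hello_work);
}

static void teardown(void)
{
	cancel_work_sync(&hello_work);
}
```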
/linux-5.15/drivers/net/wireless/st/cw1200/
sta.h
58 void cw1200_event_handler(struct work_struct *work);
59 void cw1200_bss_loss_work(struct work_struct *work);
60 void cw1200_bss_params_work(struct work_struct *work);
61 void cw1200_keep_alive_work(struct work_struct *work);
62 void cw1200_tx_failure_work(struct work_struct *work);
78 void cw1200_join_timeout(struct work_struct *work);
79 void cw1200_unjoin_work(struct work_struct *work);
80 void cw1200_join_complete_work(struct work_struct *work);
81 void cw1200_wep_key_work(struct work_struct *work);
84 void cw1200_update_filtering_work(struct work_struct *work);
[all …]
/linux-5.15/fs/
fs-writeback.c
143 struct wb_writeback_work *work) in finish_writeback_work() argument
145 struct wb_completion *done = work->done; in finish_writeback_work()
147 if (work->auto_free) in finish_writeback_work()
148 kfree(work); in finish_writeback_work()
159 struct wb_writeback_work *work) in wb_queue_work() argument
161 trace_writeback_queue(wb, work); in wb_queue_work()
163 if (work->done) in wb_queue_work()
164 atomic_inc(&work->done->cnt); in wb_queue_work()
169 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
172 finish_writeback_work(wb, work); in wb_queue_work()
[all …]
io-wq.c
60 struct work_struct work; member
161 struct io_wq_work *work) in io_work_get_acct() argument
163 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
358 struct io_wq_work *work) in __io_worker_busy() argument
383 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
385 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
408 struct io_wq_work *work, *tail; in io_get_next_work() local
415 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
418 if (!io_wq_is_hashed(work)) { in io_get_next_work()
420 return work; in io_get_next_work()
[all …]
/linux-5.15/drivers/infiniband/core/
cm.c
92 struct cm_work *work);
183 struct delayed_work work; member
194 struct cm_work work; member
266 static void cm_work_handler(struct work_struct *work);
700 __be32 remote_id = timewait_info->work.remote_id; in cm_insert_remote_id()
706 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
708 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
734 if (be32_lt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
736 else if (be32_gt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
743 res = cm_acquire_id(timewait_info->work.local_id, in cm_find_remote_id()
[all …]
