Searched refs:flush (Results 1 – 25 of 609) sorted by relevance

/linux-5.15/arch/x86/hyperv/
mmu.c 60 struct hv_tlb_flush *flush; in hyperv_flush_tlb_multi() local
83 flush = *flush_pcpu; in hyperv_flush_tlb_multi()
85 if (unlikely(!flush)) { in hyperv_flush_tlb_multi()
95 flush->address_space = virt_to_phys(info->mm->pgd); in hyperv_flush_tlb_multi()
96 flush->address_space &= CR3_ADDR_MASK; in hyperv_flush_tlb_multi()
97 flush->flags = 0; in hyperv_flush_tlb_multi()
99 flush->address_space = 0; in hyperv_flush_tlb_multi()
100 flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES; in hyperv_flush_tlb_multi()
103 flush->processor_mask = 0; in hyperv_flush_tlb_multi()
105 flush->flags |= HV_FLUSH_ALL_PROCESSORS; in hyperv_flush_tlb_multi()
[all …]
nested.c 23 struct hv_guest_mapping_flush *flush; in hyperv_flush_guest_mapping() local
36 flush = *flush_pcpu; in hyperv_flush_guest_mapping()
38 if (unlikely(!flush)) { in hyperv_flush_guest_mapping()
43 flush->address_space = as; in hyperv_flush_guest_mapping()
44 flush->flags = 0; in hyperv_flush_guest_mapping()
47 flush, NULL); in hyperv_flush_guest_mapping()
60 struct hv_guest_mapping_flush_list *flush, in hyperv_fill_flush_guest_mapping_list() argument
77 flush->gpa_list[gpa_n].page.additional_pages = additional_pages; in hyperv_fill_flush_guest_mapping_list()
78 flush->gpa_list[gpa_n].page.largepage = false; in hyperv_fill_flush_guest_mapping_list()
79 flush->gpa_list[gpa_n].page.basepfn = cur; in hyperv_fill_flush_guest_mapping_list()
[all …]
/linux-5.15/fs/btrfs/
space-info.c 297 enum btrfs_reserve_flush_enum flush) in calc_available_free_space() argument
324 if (flush == BTRFS_RESERVE_FLUSH_ALL) in calc_available_free_space()
333 enum btrfs_reserve_flush_enum flush) in btrfs_can_overcommit() argument
343 avail = calc_available_free_space(fs_info, space_info, flush); in btrfs_can_overcommit()
368 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; in btrfs_try_granting_tickets() local
383 flush)) { in btrfs_try_granting_tickets()
398 flush = BTRFS_RESERVE_FLUSH_ALL; in btrfs_try_granting_tickets()
944 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
956 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
999 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
[all …]
delalloc-space.c 118 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA; in btrfs_alloc_data_chunk_ondemand() local
124 flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE; in btrfs_alloc_data_chunk_ondemand()
126 return btrfs_reserve_data_bytes(fs_info, bytes, flush); in btrfs_alloc_data_chunk_ondemand()
295 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; in btrfs_delalloc_reserve_metadata() local
308 flush = BTRFS_RESERVE_NO_FLUSH; in btrfs_delalloc_reserve_metadata()
311 flush = BTRFS_RESERVE_FLUSH_LIMIT; in btrfs_delalloc_reserve_metadata()
334 ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush); in btrfs_delalloc_reserve_metadata()
space-info.h 35 unsigned int flush:1; /* set if we are trying to make space */ member
129 enum btrfs_reserve_flush_enum flush);
134 enum btrfs_reserve_flush_enum flush);
147 enum btrfs_reserve_flush_enum flush);
/linux-5.15/lib/
decompress_inflate.c 44 long (*flush)(void*, unsigned long), in __gunzip()
53 if (flush) { in __gunzip()
82 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() : in __gunzip()
138 if (!flush) { in __gunzip()
159 if (flush && strm->next_out > out_buf) { in __gunzip()
161 if (l != flush(out_buf, l)) { in __gunzip()
193 if (flush) in __gunzip()
202 long (*flush)(void*, unsigned long), in gunzip()
207 return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error); in gunzip()
212 long (*flush)(void*, unsigned long), in __decompress()
[all …]
decompress_unxz.c 253 long (*flush)(void *src, unsigned long size), in unxz()
269 if (fill == NULL && flush == NULL) in unxz()
277 if (flush == NULL) { in unxz()
299 if (fill == NULL && flush == NULL) { in unxz()
325 if (flush != NULL && (b.out_pos == b.out_size in unxz()
332 if (flush(b.out, b.out_pos) != (long)b.out_pos) in unxz()
342 if (flush != NULL) in unxz()
382 if (flush != NULL) in unxz()
400 long (*flush)(void*, unsigned long), in __decompress()
405 return unxz(buf, len, fill, flush, out_buf, pos, error); in __decompress()
decompress_unzstd.c 165 long (*flush)(void*, unsigned long), in __unzstd()
189 if (fill == NULL && flush == NULL) in __unzstd()
225 if (flush != NULL) { in __unzstd()
306 if (flush != NULL && out.pos > 0) { in __unzstd()
307 if (out.pos != flush(out.dst, out.pos)) { in __unzstd()
333 long (*flush)(void*, unsigned long), in unzstd()
338 return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); in unzstd()
343 long (*flush)(void*, unsigned long), in __decompress()
348 return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); in __decompress()
decompress_unlz4.c 33 long (*flush)(void *, unsigned long), in unlz4()
52 } else if (!flush) { in unlz4()
174 if (flush && flush(outp, dest_len) != dest_len) in unlz4()
209 long (*flush)(void*, unsigned long), in __decompress()
215 return unlz4(buf, in_len - 4, fill, flush, output, posp, error); in __decompress()
decompress_unlzo.c 99 long (*flush)(void *, unsigned long), in unlzo()
112 } else if (!flush) { in unlzo()
243 if (flush && flush(out_buf, dst_len) != dst_len) in unlzo()
279 long (*flush)(void*, unsigned long), in __decompress()
284 return unlzo(buf, len, fill, flush, out_buf, pos, error); in __decompress()
/linux-5.15/net/ipv4/
tcp_offload.c 192 int flush = 1; in tcp_gro_receive() local
238 flush = NAPI_GRO_CB(p)->flush; in tcp_gro_receive()
239 flush |= (__force int)(flags & TCP_FLAG_CWR); in tcp_gro_receive()
240 flush |= (__force int)((flags ^ tcp_flag_word(th2)) & in tcp_gro_receive()
242 flush |= (__force int)(th->ack_seq ^ th2->ack_seq); in tcp_gro_receive()
244 flush |= *(u32 *)((u8 *)th + i) ^ in tcp_gro_receive()
254 flush |= NAPI_GRO_CB(p)->flush_id; in tcp_gro_receive()
260 flush |= (len - 1) >= mss; in tcp_gro_receive()
261 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); in tcp_gro_receive()
263 flush |= p->decrypted ^ skb->decrypted; in tcp_gro_receive()
[all …]
/linux-5.15/lib/zlib_deflate/
deflate.c 60 #define DEFLATE_HOOK(strm, flush, bstate) 0 argument
69 typedef block_state (*compress_func) (deflate_state *s, int flush);
73 static block_state deflate_stored (deflate_state *s, int flush);
74 static block_state deflate_fast (deflate_state *s, int flush);
75 static block_state deflate_slow (deflate_state *s, int flush);
331 int flush in zlib_deflate() argument
338 flush > Z_FINISH || flush < 0) { in zlib_deflate()
344 (s->status == FINISH_STATE && flush != Z_FINISH)) { in zlib_deflate()
351 s->last_flush = flush; in zlib_deflate()
393 } else if (strm->avail_in == 0 && flush <= old_flush && in zlib_deflate()
[all …]
/linux-5.15/tools/testing/selftests/drivers/net/netdevsim/
nexthop.sh 147 $IP nexthop flush &> /dev/null
182 $IP nexthop flush &> /dev/null
202 $IP nexthop flush &> /dev/null
247 $IP nexthop flush &> /dev/null
267 $IP nexthop flush &> /dev/null
289 $IP nexthop flush &> /dev/null
314 $IP nexthop flush &> /dev/null
343 $IP nexthop flush &> /dev/null
373 $IP nexthop flush &> /dev/null
422 $IP nexthop flush &> /dev/null
[all …]
/linux-5.15/block/
blk-flush.c 117 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
171 BUG_ON(rq->flush.seq & seq); in blk_flush_complete_seq()
172 rq->flush.seq |= seq; in blk_flush_complete_seq()
186 list_move_tail(&rq->flush.list, pending); in blk_flush_complete_seq()
190 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
202 list_del_init(&rq->flush.list); in blk_flush_complete_seq()
255 list_for_each_entry_safe(rq, n, running, flush.list) { in flush_end_io()
288 list_first_entry(pending, struct request, flush.list); in blk_kick_flush()
432 memset(&rq->flush, 0, sizeof(rq->flush)); in blk_insert_flush()
[all …]
/linux-5.15/Documentation/x86/
tlb.rst 12 from areas other than the one we are trying to flush will be
21 1. The size of the flush being performed. A flush of the entire
25 be no collateral damage caused by doing the global flush, and
26 all of the individual flush will have ended up being wasted
29 damage we do with a full flush. So, the larger the TLB, the
30 more attractive an individual flush looks. Data and
37 especially the contents of the TLB during a given flush. The
38 sizes of the flush will vary greatly depending on the workload as
48 This will cause us to do the global flush for more cases.
53 Despite the fact that a single individual flush on x86 is
[all …]
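
The tlb.rst hits above sketch the trade-off between flushing individual TLB entries and flushing the whole TLB: for a small number of pages, per-page flushes win; past some cutoff, one global flush is cheaper even though it evicts unrelated entries. The sketch below only illustrates that cutoff policy; the stub helpers and the hard-coded ceiling of 33 pages (the assumed x86 default, tunable through /sys/kernel/debug/x86/tlb_single_page_flush_ceiling) are illustrative assumptions, not the kernel's actual flush_tlb_mm_range() implementation.

    /* Illustrative cutoff between per-page and full TLB flushes; the
     * helpers below are stubs, not kernel functions. */
    #define PAGE_SHIFT 12

    static unsigned long single_page_flush_ceiling = 33; /* assumed default */

    static void flush_one_page(unsigned long addr)
    {
    	(void)addr;	/* would issue INVLPG for this address */
    }

    static void flush_everything(void)
    {
    	/* would reload CR3, dropping every non-global TLB entry */
    }

    void flush_range(unsigned long start, unsigned long end)
    {
    	unsigned long pages = (end - start) >> PAGE_SHIFT;

    	if (pages > single_page_flush_ceiling) {
    		/* Large range: one global flush beats many INVLPGs,
    		 * at the cost of evicting unrelated entries. */
    		flush_everything();
    		return;
    	}
    	for (unsigned long addr = start; addr < end;
    	     addr += 1UL << PAGE_SHIFT)
    		flush_one_page(addr);
    }
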
/linux-5.15/arch/x86/kvm/mmu/
tdp_mmu.c 57 gfn_t start, gfn_t end, bool can_yield, bool flush,
690 struct tdp_iter *iter, bool flush, in tdp_mmu_iter_cond_resched() argument
700 if (flush) in tdp_mmu_iter_cond_resched()
738 gfn_t start, gfn_t end, bool can_yield, bool flush, in zap_gfn_range() argument
766 tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { in zap_gfn_range()
767 flush = false; in zap_gfn_range()
787 flush = true; in zap_gfn_range()
799 return flush; in zap_gfn_range()
809 gfn_t end, bool can_yield, bool flush) in __kvm_tdp_mmu_zap_gfn_range() argument
814 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush, in __kvm_tdp_mmu_zap_gfn_range()
[all …]
tdp_mmu.h 23 gfn_t end, bool can_yield, bool flush);
25 gfn_t start, gfn_t end, bool flush) in kvm_tdp_mmu_zap_gfn_range() argument
27 return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush); in kvm_tdp_mmu_zap_gfn_range()
56 bool flush);
71 bool flush);
/linux-5.15/lib/zlib_dfltcc/
dfltcc_deflate.c 102 int flush, in dfltcc_deflate() argument
121 no_flush = flush == Z_NO_FLUSH; in dfltcc_deflate()
127 if (flush == Z_FINISH && strm->avail_in == 0 && !param->cf) { in dfltcc_deflate()
186 need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf; in dfltcc_deflate()
194 if (flush == Z_FINISH && !param->bcf) in dfltcc_deflate()
261 if (flush == Z_FINISH) { in dfltcc_deflate()
270 if (flush == Z_FULL_FLUSH) in dfltcc_deflate()
272 *result = flush == Z_NO_FLUSH ? need_more : block_done; in dfltcc_deflate()
dfltcc.h 109 int flush,
119 int flush, int *ret);
138 #define INFLATE_TYPEDO_HOOK(strm, flush) \ argument
143 action = dfltcc_inflate((strm), (flush), &ret); \
/linux-5.15/drivers/md/
dm-delay.c 37 struct delay_class flush; member
131 if (dc->flush.dev) in delay_dtr()
132 dm_put_device(ti, dc->flush.dev); in delay_dtr()
205 ret = delay_class_ctr(ti, &dc->flush, argv); in delay_ctr()
215 ret = delay_class_ctr(ti, &dc->flush, argv + 3); in delay_ctr()
221 ret = delay_class_ctr(ti, &dc->flush, argv + 6); in delay_ctr()
290 c = &dc->flush; in delay_map()
315 DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops); in delay_status()
326 DMEMIT_DELAY_CLASS(&dc->flush); in delay_status()
348 ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data); in delay_iterate_devices()
/linux-5.15/net/ipv6/
ip6_offload.c 35 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
193 u16 flush = 1; in ipv6_gro_receive() local
209 flush += ntohs(iph->payload_len) != skb_gro_len(skb); in ipv6_gro_receive()
231 flush--; in ipv6_gro_receive()
264 NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); in ipv6_gro_receive()
265 NAPI_GRO_CB(p)->flush |= flush; in ipv6_gro_receive()
275 NAPI_GRO_CB(skb)->flush |= flush; in ipv6_gro_receive()
286 skb_gro_flush_final(skb, pp, flush); in ipv6_gro_receive()
297 NAPI_GRO_CB(skb)->flush = 1; in sit_ip6ip6_gro_receive()
312 NAPI_GRO_CB(skb)->flush = 1; in ip4ip6_gro_receive()
/linux-5.15/drivers/gpu/drm/etnaviv/
etnaviv_buffer.c 92 u32 flush = 0; in etnaviv_cmd_select_pipe() local
103 flush = VIVS_GL_FLUSH_CACHE_PE2D; in etnaviv_cmd_select_pipe()
105 flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR; in etnaviv_cmd_select_pipe()
107 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); in etnaviv_cmd_select_pipe()
236 u32 link_target, flush = 0; in etnaviv_buffer_end() local
243 flush = VIVS_GL_FLUSH_CACHE_PE2D; in etnaviv_buffer_end()
245 flush = VIVS_GL_FLUSH_CACHE_DEPTH | in etnaviv_buffer_end()
251 if (flush) { in etnaviv_buffer_end()
267 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); in etnaviv_buffer_end()
414 u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK | in etnaviv_buffer_queue() local
[all …]
/linux-5.15/Documentation/block/
stat.rst 44 flush I/Os requests number of flush I/Os processed
45 flush ticks milliseconds total wait time for flush requests
53 flush I/Os
56 These values increment when a flush I/O request completes.
58 Block layer combines flush requests and executes at most one at a time.
59 This counts flush requests executed by disk. Not tracked for partitions.
75 read ticks, write ticks, discard ticks, flush ticks
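
The stat.rst hits above document the two flush counters in the per-disk stat file: flush I/Os (requests completed) and flush ticks (total wait time in milliseconds). As a hedged illustration, the sketch below reads /sys/block/<dev>/stat and prints those two fields; it assumes the 17-field layout that includes the flush counters (present only on recent kernels) and uses "sda" purely as a placeholder device name.

    /* Sketch: print the flush counters from a whole-disk stat file.
     * Assumes the 17-field layout where flush I/Os and flush ticks are
     * the 16th and 17th fields; "sda" is a placeholder device. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long long field[17] = { 0 };
    	FILE *fp = fopen("/sys/block/sda/stat", "r");
    	int n = 0;

    	if (!fp) {
    		perror("fopen");
    		return 1;
    	}
    	while (n < 17 && fscanf(fp, "%llu", &field[n]) == 1)
    		n++;
    	fclose(fp);

    	if (n < 17) {
    		/* Older kernels expose fewer fields and no flush stats. */
    		fprintf(stderr, "only %d fields present\n", n);
    		return 1;
    	}
    	printf("flush I/Os:  %llu requests\n", field[15]);
    	printf("flush ticks: %llu ms\n", field[16]);
    	return 0;
    }
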
/linux-5.15/drivers/gpu/drm/vmwgfx/
vmwgfx_overlay.c 69 SVGAEscapeVideoFlush flush; member
83 fill_escape(&cmd->escape, sizeof(cmd->flush)); in fill_flush()
84 cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH; in fill_flush()
85 cmd->flush.streamId = stream_id; in fill_flush()
99 struct vmw_escape_video_flush *flush; in vmw_overlay_send_put() local
123 fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items; in vmw_overlay_send_put()
131 flush = (struct vmw_escape_video_flush *)&items[num_items]; in vmw_overlay_send_put()
170 fill_flush(flush, arg->stream_id); in vmw_overlay_send_put()
190 struct vmw_escape_video_flush flush; in vmw_overlay_send_stop() member
212 fill_flush(&cmds->flush, stream_id); in vmw_overlay_send_stop()
/linux-5.15/arch/arm/mm/
cache-v4.S 40 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
59 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
115 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
