Lines matching references to 'mdsc' (cross-reference listing from fs/ceph/caps.c)
45 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
46 static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
127 void ceph_caps_init(struct ceph_mds_client *mdsc) in ceph_caps_init() argument
129 INIT_LIST_HEAD(&mdsc->caps_list); in ceph_caps_init()
130 spin_lock_init(&mdsc->caps_list_lock); in ceph_caps_init()
133 void ceph_caps_finalize(struct ceph_mds_client *mdsc) in ceph_caps_finalize() argument
137 spin_lock(&mdsc->caps_list_lock); in ceph_caps_finalize()
138 while (!list_empty(&mdsc->caps_list)) { in ceph_caps_finalize()
139 cap = list_first_entry(&mdsc->caps_list, in ceph_caps_finalize()
144 mdsc->caps_total_count = 0; in ceph_caps_finalize()
145 mdsc->caps_avail_count = 0; in ceph_caps_finalize()
146 mdsc->caps_use_count = 0; in ceph_caps_finalize()
147 mdsc->caps_reserve_count = 0; in ceph_caps_finalize()
148 mdsc->caps_min_count = 0; in ceph_caps_finalize()
149 spin_unlock(&mdsc->caps_list_lock); in ceph_caps_finalize()
152 void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc, in ceph_adjust_caps_max_min() argument
155 spin_lock(&mdsc->caps_list_lock); in ceph_adjust_caps_max_min()
156 mdsc->caps_min_count = fsopt->max_readdir; in ceph_adjust_caps_max_min()
157 if (mdsc->caps_min_count < 1024) in ceph_adjust_caps_max_min()
158 mdsc->caps_min_count = 1024; in ceph_adjust_caps_max_min()
159 mdsc->caps_use_max = fsopt->caps_max; in ceph_adjust_caps_max_min()
160 if (mdsc->caps_use_max > 0 && in ceph_adjust_caps_max_min()
161 mdsc->caps_use_max < mdsc->caps_min_count) in ceph_adjust_caps_max_min()
162 mdsc->caps_use_max = mdsc->caps_min_count; in ceph_adjust_caps_max_min()
163 spin_unlock(&mdsc->caps_list_lock); in ceph_adjust_caps_max_min()
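The clamping above keeps the reservation floor sane: caps_min_count tracks the max_readdir mount option but never drops below 1024, and a non-zero caps_max is raised to at least that floor. A standalone sketch of the same arithmetic (userspace model, illustrative names, not the kernel function):

#include <stdio.h>

/* Sketch of ceph_adjust_caps_max_min(): the minimum cap count follows
 * the max_readdir mount option but never drops below 1024, and a
 * non-zero caps_max is clamped so it cannot fall below that minimum. */
static void adjust_caps_max_min(int max_readdir, int caps_max,
				int *min_out, int *use_max_out)
{
	int min = max_readdir;

	if (min < 1024)
		min = 1024;

	if (caps_max > 0 && caps_max < min)
		caps_max = min;

	*min_out = min;
	*use_max_out = caps_max;
}

int main(void)
{
	int min, use_max;

	adjust_caps_max_min(300, 500, &min, &use_max);
	printf("caps_min_count=%d caps_use_max=%d\n", min, use_max); /* 1024 1024 */
	return 0;
}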
166 static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps) in __ceph_unreserve_caps() argument
172 BUG_ON(mdsc->caps_reserve_count < nr_caps); in __ceph_unreserve_caps()
173 mdsc->caps_reserve_count -= nr_caps; in __ceph_unreserve_caps()
174 if (mdsc->caps_avail_count >= in __ceph_unreserve_caps()
175 mdsc->caps_reserve_count + mdsc->caps_min_count) { in __ceph_unreserve_caps()
176 mdsc->caps_total_count -= nr_caps; in __ceph_unreserve_caps()
178 cap = list_first_entry(&mdsc->caps_list, in __ceph_unreserve_caps()
184 mdsc->caps_avail_count += nr_caps; in __ceph_unreserve_caps()
189 mdsc->caps_total_count, mdsc->caps_use_count, in __ceph_unreserve_caps()
190 mdsc->caps_reserve_count, mdsc->caps_avail_count); in __ceph_unreserve_caps()
191 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in __ceph_unreserve_caps()
192 mdsc->caps_reserve_count + in __ceph_unreserve_caps()
193 mdsc->caps_avail_count); in __ceph_unreserve_caps()
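The BUG_ON() lines here (and in ceph_reserve_caps(), ceph_get_cap() and ceph_put_cap() below) all assert the same accounting invariant: with caps_list_lock held, caps_total_count equals caps_use_count + caps_reserve_count + caps_avail_count. A minimal userspace model of that invariant, with illustrative names and no real locking:

#include <assert.h>
#include <stdio.h>

/* Illustrative model of the four counters guarded by caps_list_lock. */
struct cap_pool {
	int total;    /* caps_total_count   */
	int use;      /* caps_use_count     */
	int reserve;  /* caps_reserve_count */
	int avail;    /* caps_avail_count   */
};

/* Mirrors the BUG_ON() invariant checked throughout the listing. */
static void check_invariant(const struct cap_pool *p)
{
	assert(p->total == p->use + p->reserve + p->avail);
}

int main(void)
{
	struct cap_pool p = { .total = 8, .use = 3, .reserve = 2, .avail = 3 };

	check_invariant(&p);

	/* Unreserving moves caps from "reserve" back to "avail"
	 * (or trims "total" when the pool already has enough spares). */
	p.reserve -= 2;
	p.avail   += 2;
	check_invariant(&p);

	printf("total=%d use=%d reserve=%d avail=%d\n",
	       p.total, p.use, p.reserve, p.avail);
	return 0;
}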
200 int ceph_reserve_caps(struct ceph_mds_client *mdsc, in ceph_reserve_caps() argument
216 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
217 if (mdsc->caps_avail_count >= need) in ceph_reserve_caps()
220 have = mdsc->caps_avail_count; in ceph_reserve_caps()
221 mdsc->caps_avail_count -= have; in ceph_reserve_caps()
222 mdsc->caps_reserve_count += have; in ceph_reserve_caps()
223 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_reserve_caps()
224 mdsc->caps_reserve_count + in ceph_reserve_caps()
225 mdsc->caps_avail_count); in ceph_reserve_caps()
226 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
238 for (j = 0; j < mdsc->max_sessions; j++) { in ceph_reserve_caps()
239 s = __ceph_lookup_mds_session(mdsc, j); in ceph_reserve_caps()
242 mutex_unlock(&mdsc->mutex); in ceph_reserve_caps()
246 ceph_trim_caps(mdsc, s, max_caps); in ceph_reserve_caps()
250 mutex_lock(&mdsc->mutex); in ceph_reserve_caps()
254 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
255 if (mdsc->caps_avail_count) { in ceph_reserve_caps()
257 if (mdsc->caps_avail_count >= need - i) in ceph_reserve_caps()
260 more_have = mdsc->caps_avail_count; in ceph_reserve_caps()
264 mdsc->caps_avail_count -= more_have; in ceph_reserve_caps()
265 mdsc->caps_reserve_count += more_have; in ceph_reserve_caps()
268 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
285 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
286 mdsc->caps_total_count += alloc; in ceph_reserve_caps()
287 mdsc->caps_reserve_count += alloc; in ceph_reserve_caps()
288 list_splice(&newcaps, &mdsc->caps_list); in ceph_reserve_caps()
290 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_reserve_caps()
291 mdsc->caps_reserve_count + in ceph_reserve_caps()
292 mdsc->caps_avail_count); in ceph_reserve_caps()
295 __ceph_unreserve_caps(mdsc, have + alloc); in ceph_reserve_caps()
297 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
300 ctx, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_reserve_caps()
301 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_reserve_caps()
305 void ceph_unreserve_caps(struct ceph_mds_client *mdsc, in ceph_unreserve_caps() argument
313 spin_lock(&mdsc->caps_list_lock); in ceph_unreserve_caps()
314 __ceph_unreserve_caps(mdsc, ctx->count); in ceph_unreserve_caps()
317 if (mdsc->caps_use_max > 0 && in ceph_unreserve_caps()
318 mdsc->caps_use_count > mdsc->caps_use_max) in ceph_unreserve_caps()
320 spin_unlock(&mdsc->caps_list_lock); in ceph_unreserve_caps()
323 ceph_reclaim_caps_nr(mdsc, ctx->used); in ceph_unreserve_caps()
326 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc, in ceph_get_cap() argument
335 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
336 mdsc->caps_use_count++; in ceph_get_cap()
337 mdsc->caps_total_count++; in ceph_get_cap()
338 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
340 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
341 if (mdsc->caps_avail_count) { in ceph_get_cap()
342 BUG_ON(list_empty(&mdsc->caps_list)); in ceph_get_cap()
344 mdsc->caps_avail_count--; in ceph_get_cap()
345 mdsc->caps_use_count++; in ceph_get_cap()
346 cap = list_first_entry(&mdsc->caps_list, in ceph_get_cap()
350 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_get_cap()
351 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_get_cap()
353 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
359 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
361 ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_get_cap()
362 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_get_cap()
364 BUG_ON(ctx->count > mdsc->caps_reserve_count); in ceph_get_cap()
365 BUG_ON(list_empty(&mdsc->caps_list)); in ceph_get_cap()
369 mdsc->caps_reserve_count--; in ceph_get_cap()
370 mdsc->caps_use_count++; in ceph_get_cap()
372 cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item); in ceph_get_cap()
375 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_get_cap()
376 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_get_cap()
377 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
381 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) in ceph_put_cap() argument
383 spin_lock(&mdsc->caps_list_lock); in ceph_put_cap()
385 cap, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_put_cap()
386 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_put_cap()
387 mdsc->caps_use_count--; in ceph_put_cap()
392 if (mdsc->caps_avail_count >= mdsc->caps_reserve_count + in ceph_put_cap()
393 mdsc->caps_min_count) { in ceph_put_cap()
394 mdsc->caps_total_count--; in ceph_put_cap()
397 mdsc->caps_avail_count++; in ceph_put_cap()
398 list_add(&cap->caps_item, &mdsc->caps_list); in ceph_put_cap()
401 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_put_cap()
402 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_put_cap()
403 spin_unlock(&mdsc->caps_list_lock); in ceph_put_cap()
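ceph_get_cap() prefers to hand out a cap from the avail list (bumping caps_use_count), and ceph_put_cap() frees a returned cap only when the spares already cover caps_reserve_count + caps_min_count; otherwise the cap goes back onto caps_list. A rough sketch of those two transitions (simplified model, not the kernel code; allocation of brand-new caps is omitted):

#include <stdbool.h>
#include <stdio.h>

struct pool {
	int total, use, reserve, avail, min;
};

/* Take a cap for use: prefer an unreserved spare, mirroring the
 * caps_avail_count branch in ceph_get_cap(); otherwise the caller
 * would allocate a new cap and bump total + use (not modeled here). */
static bool get_cap(struct pool *p)
{
	if (p->avail == 0)
		return false;
	p->avail--;
	p->use++;
	return true;
}

/* Return a cap: free it only when the spares already cover the
 * reserved and minimum counts, as in ceph_put_cap(); otherwise keep
 * it on the avail list for reuse. */
static void put_cap(struct pool *p)
{
	p->use--;
	if (p->avail >= p->reserve + p->min)
		p->total--;          /* cap would be freed */
	else
		p->avail++;          /* cap goes back on caps_list */
}

int main(void)
{
	struct pool p = { .total = 6, .use = 1, .reserve = 2, .avail = 3, .min = 2 };

	if (get_cap(&p))
		printf("after get: use=%d avail=%d\n", p.use, p.avail);
	put_cap(&p);
	printf("after put: total=%d use=%d avail=%d\n", p.total, p.use, p.avail);
	return 0;
}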
410 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_reservation_status() local
412 spin_lock(&mdsc->caps_list_lock); in ceph_reservation_status()
415 *total = mdsc->caps_total_count; in ceph_reservation_status()
417 *avail = mdsc->caps_avail_count; in ceph_reservation_status()
419 *used = mdsc->caps_use_count; in ceph_reservation_status()
421 *reserved = mdsc->caps_reserve_count; in ceph_reservation_status()
423 *min = mdsc->caps_min_count; in ceph_reservation_status()
425 spin_unlock(&mdsc->caps_list_lock); in ceph_reservation_status()
489 static void __cap_set_timeouts(struct ceph_mds_client *mdsc, in __cap_set_timeouts() argument
492 struct ceph_mount_options *opt = mdsc->fsc->mount_options; in __cap_set_timeouts()
507 static void __cap_delay_requeue(struct ceph_mds_client *mdsc, in __cap_delay_requeue() argument
512 if (!mdsc->stopping) { in __cap_delay_requeue()
513 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_requeue()
519 __cap_set_timeouts(mdsc, ci); in __cap_delay_requeue()
520 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue()
522 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_requeue()
531 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, in __cap_delay_requeue_front() argument
535 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_requeue_front()
539 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue_front()
540 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_requeue_front()
548 static void __cap_delay_cancel(struct ceph_mds_client *mdsc, in __cap_delay_cancel() argument
554 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_cancel()
556 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_cancel()
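__cap_delay_requeue() resets the inode's timeouts and appends it to the tail of cap_delay_list, while __cap_delay_requeue_front() moves it to the head so the delayed-work loop sees it first. The sketch below shows only that ordering difference, using a tiny stand-in for the kernel's intrusive list (not <linux/list.h>):

#include <stdio.h>

/* Minimal stand-in for the kernel's intrusive list, just to show the
 * ordering difference between tail and head requeueing. */
struct node {
	const char *name;
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_add(struct node *n, struct node *head)   /* add at front */
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node head, a = { "ino-a" }, b = { "ino-b" }, c = { "ino-c" };
	struct node *p;

	list_init(&head);
	list_add_tail(&a, &head);          /* __cap_delay_requeue(): tail  */
	list_add_tail(&b, &head);
	list_add_tail(&c, &head);

	list_del(&c);                      /* __cap_delay_requeue_front(): */
	list_add(&c, &head);               /* move to head, checked first  */

	for (p = head.next; p != &head; p = p->next)
		printf("%s\n", p->name);   /* ino-c, ino-a, ino-b */
	return 0;
}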
636 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in ceph_add_cap() local
669 atomic64_inc(&mdsc->metric.total_caps); in ceph_add_cap()
704 struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc, in ceph_add_cap()
727 __cap_delay_requeue(mdsc, ci); in ceph_add_cap()
927 ceph_update_cap_hit(&fsc->mdsc->metric); in __ceph_caps_issued_mask_metric()
929 ceph_update_cap_mis(&fsc->mdsc->metric); in __ceph_caps_issued_mask_metric()
1108 struct ceph_mds_client *mdsc; in __ceph_remove_cap() local
1121 mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; in __ceph_remove_cap()
1137 atomic64_dec(&mdsc->metric.total_caps); in __ceph_remove_cap()
1164 ceph_put_cap(mdsc, cap); in __ceph_remove_cap()
1174 __cap_delay_cancel(mdsc, ci); in __ceph_remove_cap()
1532 struct ceph_mds_client *mdsc = session->s_mdsc; in __ceph_flush_snaps() local
1556 spin_lock(&mdsc->cap_dirty_lock); in __ceph_flush_snaps()
1557 capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid; in __ceph_flush_snaps()
1559 &mdsc->cap_flush_list); in __ceph_flush_snaps()
1561 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in __ceph_flush_snaps()
1566 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_flush_snaps()
1625 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in ceph_flush_snaps() local
1651 mutex_lock(&mdsc->mutex); in ceph_flush_snaps()
1652 session = __ceph_lookup_mds_session(mdsc, mds); in ceph_flush_snaps()
1653 mutex_unlock(&mdsc->mutex); in ceph_flush_snaps()
1659 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_flush_snaps()
1670 spin_lock(&mdsc->snap_flush_lock); in ceph_flush_snaps()
1672 spin_unlock(&mdsc->snap_flush_lock); in ceph_flush_snaps()
1683 struct ceph_mds_client *mdsc = in __ceph_mark_dirty_caps() local
1684 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; in __ceph_mark_dirty_caps()
1709 WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem)); in __ceph_mark_dirty_caps()
1716 spin_lock(&mdsc->cap_dirty_lock); in __ceph_mark_dirty_caps()
1718 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_mark_dirty_caps()
1730 __cap_delay_requeue(mdsc, ci); in __ceph_mark_dirty_caps()
1752 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc) in __get_oldest_flush_tid() argument
1754 if (!list_empty(&mdsc->cap_flush_list)) { in __get_oldest_flush_tid()
1756 list_first_entry(&mdsc->cap_flush_list, in __get_oldest_flush_tid()
1767 static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc, in __detach_cap_flush_from_mdsc() argument
1773 if (wake && cf->g_list.prev != &mdsc->cap_flush_list) { in __detach_cap_flush_from_mdsc()
1807 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in __mark_caps_flushing() local
1830 spin_lock(&mdsc->cap_dirty_lock); in __mark_caps_flushing()
1833 cf->tid = ++mdsc->last_cap_flush_tid; in __mark_caps_flushing()
1834 list_add_tail(&cf->g_list, &mdsc->cap_flush_list); in __mark_caps_flushing()
1835 *oldest_flush_tid = __get_oldest_flush_tid(mdsc); in __mark_caps_flushing()
1839 mdsc->num_cap_flushing++; in __mark_caps_flushing()
1841 spin_unlock(&mdsc->cap_dirty_lock); in __mark_caps_flushing()
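Both snap flushes (__ceph_flush_snaps()) and regular flushes (__mark_caps_flushing()) take their tid from ++mdsc->last_cap_flush_tid and append to cap_flush_list under cap_dirty_lock, so the list stays sorted by tid and __get_oldest_flush_tid() only needs the first entry. A compact, single-threaded model of that bookkeeping (illustrative names, fixed-size array instead of a list):

#include <stdio.h>

#define MAX_FLUSHES 16

/* tids of in-flight flushes, kept in issue order like cap_flush_list */
static unsigned long long flush_list[MAX_FLUSHES];
static int flush_count;
static unsigned long long last_cap_flush_tid;

/* Mirrors __mark_caps_flushing(): take the next tid and append. */
static unsigned long long start_flush(void)
{
	unsigned long long tid = ++last_cap_flush_tid;

	flush_list[flush_count++] = tid;
	return tid;
}

/* Mirrors __get_oldest_flush_tid(): first entry, or 0 if none in flight. */
static unsigned long long oldest_flush_tid(void)
{
	return flush_count ? flush_list[0] : 0;
}

int main(void)
{
	start_flush();
	start_flush();
	unsigned long long mine = start_flush();

	printf("my tid=%llu, oldest in flight=%llu\n",
	       mine, oldest_flush_tid());
	return 0;
}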
1903 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); in ceph_check_caps() local
1940 if (!mdsc->stopping && inode->i_nlink > 0) { in ceph_check_caps()
1986 if ((!(flags & CHECK_CAPS_NOINVAL) || mdsc->stopping) && in ceph_check_caps()
2085 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_check_caps()
2102 spin_lock(&mdsc->cap_dirty_lock); in ceph_check_caps()
2103 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_check_caps()
2104 spin_unlock(&mdsc->cap_dirty_lock); in ceph_check_caps()
2124 __cap_delay_requeue(mdsc, ci); in ceph_check_caps()
2139 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in try_flush_caps() local
2159 __kick_flushing_caps(mdsc, session, ci, 0); in try_flush_caps()
2217 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in unsafe_request_wait() local
2256 max = mdsc->max_sessions; in unsafe_request_wait()
2396 struct ceph_mds_client *mdsc = in ceph_write_inode() local
2397 ceph_sb_to_client(inode->i_sb)->mdsc; in ceph_write_inode()
2401 __cap_delay_requeue_front(mdsc, ci); in ceph_write_inode()
2407 static void __kick_flushing_caps(struct ceph_mds_client *mdsc, in __kick_flushing_caps() argument
2485 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, in ceph_early_kick_flushing_caps() argument
2494 spin_lock(&mdsc->cap_dirty_lock); in ceph_early_kick_flushing_caps()
2495 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_early_kick_flushing_caps()
2496 spin_unlock(&mdsc->cap_dirty_lock); in ceph_early_kick_flushing_caps()
2523 __kick_flushing_caps(mdsc, session, ci, in ceph_early_kick_flushing_caps()
2533 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, in ceph_kick_flushing_caps() argument
2544 spin_lock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_caps()
2545 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_kick_flushing_caps()
2546 spin_unlock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_caps()
2558 __kick_flushing_caps(mdsc, session, ci, in ceph_kick_flushing_caps()
2568 struct ceph_mds_client *mdsc = session->s_mdsc; in ceph_kick_flushing_inode_caps() local
2578 spin_lock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_inode_caps()
2581 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_kick_flushing_inode_caps()
2582 spin_unlock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_inode_caps()
2584 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); in ceph_kick_flushing_inode_caps()
2646 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in try_get_cap_refs() local
2668 up_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2711 if (!down_read_trylock(&mdsc->snap_rwsem)) { in try_get_cap_refs()
2722 down_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2752 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) { in try_get_cap_refs()
2771 __ceph_touch_fmode(ci, mdsc, flags); in try_get_cap_refs()
2775 up_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2778 ceph_update_cap_mis(&mdsc->metric); in try_get_cap_refs()
2780 ceph_update_cap_hit(&mdsc->metric); in try_get_cap_refs()
2883 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_get_caps() local
2892 spin_lock(&mdsc->caps_list_lock); in ceph_get_caps()
2893 list_add(&cw.list, &mdsc->cap_wait_list); in ceph_get_caps()
2894 spin_unlock(&mdsc->caps_list_lock); in ceph_get_caps()
2913 spin_lock(&mdsc->caps_list_lock); in ceph_get_caps()
2915 spin_unlock(&mdsc->caps_list_lock); in ceph_get_caps()
3582 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in handle_cap_flush_ack() local
3630 spin_lock(&mdsc->cap_dirty_lock); in handle_cap_flush_ack()
3633 wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc, cf); in handle_cap_flush_ack()
3646 mdsc->num_cap_flushing--; in handle_cap_flush_ack()
3663 spin_unlock(&mdsc->cap_dirty_lock); in handle_cap_flush_ack()
3679 wake_up_all(&mdsc->cap_flushing_wq); in handle_cap_flush_ack()
3688 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in __ceph_remove_capsnap() local
3700 spin_lock(&mdsc->cap_dirty_lock); in __ceph_remove_capsnap()
3704 ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush); in __ceph_remove_capsnap()
3707 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_remove_capsnap()
3732 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in handle_cap_flushsnap_ack() local
3768 wake_up_all(&mdsc->cap_flushing_wq); in handle_cap_flushsnap_ack()
3816 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in handle_cap_export() local
3891 spin_lock(&mdsc->cap_dirty_lock); in handle_cap_export()
3894 spin_unlock(&mdsc->cap_dirty_lock); in handle_cap_export()
3905 tsession = ceph_mdsc_open_export_target_session(mdsc, target); in handle_cap_export()
3916 new_cap = ceph_get_cap(mdsc, NULL); in handle_cap_export()
3933 ceph_put_cap(mdsc, new_cap); in handle_cap_export()
3941 static void handle_cap_import(struct ceph_mds_client *mdsc, in handle_cap_import() argument
3975 new_cap = ceph_get_cap(mdsc, NULL); in handle_cap_import()
3982 ceph_put_cap(mdsc, new_cap); in handle_cap_import()
4024 struct ceph_mds_client *mdsc = session->s_mdsc; in ceph_handle_caps() local
4088 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; in ceph_handle_caps()
4134 inode = ceph_find_inode(mdsc->fsc->sb, vino); in ceph_handle_caps()
4148 cap = ceph_get_cap(mdsc, NULL); in ceph_handle_caps()
4176 down_write(&mdsc->snap_rwsem); in ceph_handle_caps()
4177 ceph_update_snap_trace(mdsc, snaptrace, in ceph_handle_caps()
4180 downgrade_write(&mdsc->snap_rwsem); in ceph_handle_caps()
4182 down_read(&mdsc->snap_rwsem); in ceph_handle_caps()
4185 handle_cap_import(mdsc, inode, h, peer, session, in ceph_handle_caps()
4190 ceph_put_snap_realm(mdsc, realm); in ceph_handle_caps()
4247 ceph_flush_cap_releases(mdsc, session); in ceph_handle_caps()
4263 unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) in ceph_check_delayed_caps() argument
4267 struct ceph_mount_options *opt = mdsc->fsc->mount_options; in ceph_check_delayed_caps()
4273 spin_lock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4274 while (!list_empty(&mdsc->cap_delay_list)) { in ceph_check_delayed_caps()
4275 ci = list_first_entry(&mdsc->cap_delay_list, in ceph_check_delayed_caps()
4290 spin_unlock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4294 spin_lock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4297 spin_unlock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
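ceph_check_delayed_caps() walks cap_delay_list under cap_delay_lock but drops the lock around the per-inode work, then retakes it before looking at the next entry. The sketch below shows only that lock-drop pattern; the timeout handling and iterator details are omitted, and a userspace mutex stands in for the spinlock:

#include <pthread.h>
#include <stdio.h>

/* Sketch of the ceph_check_delayed_caps() pattern: pop entries from a
 * delayed list under a lock, but drop the lock around the per-inode
 * work (ceph_check_caps() may block), then retake it. */

#define N 3

static pthread_mutex_t cap_delay_lock = PTHREAD_MUTEX_INITIALIZER;
static int cap_delay_list[N] = { 101, 102, 103 };  /* fake inode numbers */
static int delay_count = N;

static void check_caps(int ino)            /* stand-in for ceph_check_caps */
{
	printf("checking caps on inode %d (lock not held)\n", ino);
}

static void check_delayed_caps(void)
{
	pthread_mutex_lock(&cap_delay_lock);
	while (delay_count > 0) {
		int ino = cap_delay_list[--delay_count]; /* remove an entry */

		pthread_mutex_unlock(&cap_delay_lock);   /* drop around work */
		check_caps(ino);
		pthread_mutex_lock(&cap_delay_lock);
	}
	pthread_mutex_unlock(&cap_delay_lock);
}

int main(void)
{
	check_delayed_caps();
	return 0;
}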
4307 struct ceph_mds_client *mdsc = s->s_mdsc; in flush_dirty_session_caps() local
4312 spin_lock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4319 spin_unlock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4322 spin_lock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4324 spin_unlock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4328 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) in ceph_flush_dirty_caps() argument
4330 ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true); in ceph_flush_dirty_caps()
4334 struct ceph_mds_client *mdsc, int fmode) in __ceph_touch_fmode() argument
4345 __cap_delay_requeue(mdsc, ci); in __ceph_touch_fmode()
4350 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); in ceph_get_fmode() local
4356 atomic64_inc(&mdsc->metric.opened_files); in ceph_get_fmode()
4373 percpu_counter_inc(&mdsc->metric.opened_inodes); in ceph_get_fmode()
4384 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); in ceph_put_fmode() local
4390 atomic64_dec(&mdsc->metric.opened_files); in ceph_put_fmode()
4409 percpu_counter_dec(&mdsc->metric.opened_inodes); in ceph_put_fmode()
4429 struct ceph_mds_client *mdsc = in ceph_drop_caps_for_unlink() local
4430 ceph_inode_to_client(inode)->mdsc; in ceph_drop_caps_for_unlink()
4431 __cap_delay_requeue_front(mdsc, ci); in ceph_drop_caps_for_unlink()