Symbol: q
function parameter
Defined at the following locations:
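In the block-layer entries that dominate this list, q conventionally names a struct request_queue * passed as the first argument, so each helper operates on one specific queue instance. The short stand-alone C sketch below uses hypothetical struct and function names (not kernel API) to illustrate that calling convention:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a queue structure: just enough state to
 * show the "first parameter is the queue, named q" pattern. */
struct queue {
	unsigned int nr_requests;
	bool frozen;
};

/* Each helper takes the queue it acts on as its first argument. */
static void queue_freeze(struct queue *q)
{
	q->frozen = true;
}

static void queue_set_depth(struct queue *q, unsigned int depth)
{
	q->nr_requests = depth;
}

int main(void)
{
	struct queue q = { .nr_requests = 0, .frozen = false };

	queue_set_depth(&q, 128);
	queue_freeze(&q);
	printf("depth=%u frozen=%d\n", q.nr_requests, q.frozen);
	return 0;
}

Passing the queue explicitly, rather than keeping it in global state, is what lets the same helpers serve many devices at once, which is why the parameter recurs in so many of the signatures below.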
-
arch/x86/crypto/curve25519-x86_64.c:975:34-975:39: static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
-
arch/x86/lib/msr-smp.c:52:49-52:54: int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
arch/x86/lib/msr-smp.c:83:49-83:53: int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:209:54-209:58: int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:225:54-225:59: int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
block/bfq-cgroup.c:343:34-343:56: void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:455:41-455:63: static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
-
block/bfq-iosched.c:2361:8-2361:30: struct request_queue *q)
-
block/bfq-iosched.c:2380:32-2380:54: static void bfq_remove_request(struct request_queue *q,
-
block/bfq-iosched.c:2445:27-2445:49: static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
-
block/bfq-iosched.c:2485:30-2485:52: static int bfq_request_merge(struct request_queue *q, struct request **req,
-
block/bfq-iosched.c:2503:32-2503:54: static void bfq_request_merged(struct request_queue *q, struct request *req,
-
block/bfq-iosched.c:2560:33-2560:55: static void bfq_requests_merged(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3227:33-3227:55: static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3653:33-3653:55: static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:5249:39-5249:61: static void bfq_update_dispatch_stats(struct request_queue *q,
-
block/bfq-iosched.c:6202:37-6202:59: static void bfq_update_insert_stats(struct request_queue *q,
-
block/bfq-iosched.c:7183:27-7183:49: static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-
block/bio.c:972:34-972:56: static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
-
block/bio.c:1001:21-1001:43: int bio_add_hw_page(struct request_queue *q, struct bio *bio,
-
block/bio.c:1053:21-1053:43: int bio_add_pc_page(struct request_queue *q, struct bio *bio,
-
block/blk-cgroup.c:109:34-109:56: static bool blkcg_policy_enabled(struct request_queue *q,
-
block/blk-cgroup.h:248:9-248:31: struct request_queue *q)
-
block/blk-core.c:79:44-79:66: void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:90:46-90:68: void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:104:53-104:75: bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:227:21-227:43: void blk_sync_queue(struct request_queue *q)
-
block/blk-core.c:238:22-238:44: void blk_set_pm_only(struct request_queue *q)
-
block/blk-core.c:244:24-244:46: void blk_clear_pm_only(struct request_queue *q)
-
block/blk-core.c:264:28-264:50: static void blk_free_queue(struct request_queue *q)
-
block/blk-core.c:281:20-281:42: void blk_put_queue(struct request_queue *q)
-
block/blk-core.c:288:28-288:50: void blk_queue_start_drain(struct request_queue *q)
-
block/blk-core.c:307:21-307:43: int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
-
block/blk-core.c:334:23-334:45: int __bio_queue_enter(struct request_queue *q, struct bio *bio)
-
block/blk-core.c:368:21-368:43: void blk_queue_exit(struct request_queue *q)
-
block/blk-core.c:459:20-459:42: bool blk_get_queue(struct request_queue *q)
-
block/blk-core.c:557:50-557:72: static inline blk_status_t blk_check_zone_append(struct request_queue *q,
-
block/blk-core.c:1020:18-1020:40: int blk_lld_busy(struct request_queue *q)
-
block/blk-crypto-profile.c:446:5-446:27: struct request_queue *q)
-
block/blk-flush.c:98:21-98:43: blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-
block/blk-flush.c:287:28-287:50: static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-
block/blk-integrity.c:27:31-27:53: int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
-
block/blk-integrity.c:68:29-68:51: int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-
block/blk-integrity.c:164:29-164:51: bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
-
block/blk-integrity.c:187:30-187:52: bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
-
block/blk-ioc.c:172:22-172:44: void ioc_clear_queue(struct request_queue *q)
-
block/blk-ioc.c:328:30-328:52: struct io_cq *ioc_lookup_icq(struct request_queue *q)
-
block/blk-ioc.c:367:37-367:59: static struct io_cq *ioc_create_icq(struct request_queue *q)
-
block/blk-ioc.c:411:32-411:54: struct io_cq *ioc_find_get_icq(struct request_queue *q)
-
block/blk-iocost.c:665:29-665:51: static struct ioc *q_to_ioc(struct request_queue *q)
-
block/blk-map.c:383:33-383:55: static struct bio *bio_map_kern(struct request_queue *q, void *data,
-
block/blk-map.c:468:34-468:56: static struct bio *bio_copy_kern(struct request_queue *q, void *data,
-
block/blk-map.c:626:25-626:47: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-
block/blk-map.c:681:21-681:43: int blk_rq_map_user(struct request_queue *q, struct request *rq,
-
block/blk-map.c:776:21-776:43: int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-
block/blk-merge.c:52:33-52:55: static inline bool bio_will_gap(struct request_queue *q,
-
block/blk-merge.c:462:33-462:55: static unsigned blk_bvec_map_sg(struct request_queue *q,
-
block/blk-merge.c:507:28-507:50: __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
-
block/blk-merge.c:527:30-527:52: static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:567:21-567:43: int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:673:39-673:61: static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
-
block/blk-merge.c:691:33-691:55: static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-
block/blk-merge.c:805:38-805:60: static struct request *attempt_merge(struct request_queue *q,
-
block/blk-merge.c:887:43-887:65: static struct request *attempt_back_merge(struct request_queue *q,
-
block/blk-merge.c:898:44-898:66: static struct request *attempt_front_merge(struct request_queue *q,
-
block/blk-merge.c:914:28-914:50: bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:1031:56-1031:78: static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
-
block/blk-merge.c:1056:52-1056:74: static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
-
block/blk-merge.c:1103:29-1103:51: bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:1135:25-1135:47: bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
-
block/blk-merge.c:1160:29-1160:51: bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-debugfs.c:653:30-653:52: void blk_mq_debugfs_register(struct request_queue *q)
-
block/blk-mq-debugfs.c:698:35-698:57: void blk_mq_debugfs_register_hctx(struct request_queue *q,
-
block/blk-mq-debugfs.c:726:36-726:58: void blk_mq_debugfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:735:38-735:60: void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:744:36-744:58: void blk_mq_debugfs_register_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:765:38-765:60: void blk_mq_debugfs_unregister_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:814:41-814:63: void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-
block/blk-mq-sched.c:339:29-339:51: bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-sched.c:375:36-375:58: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sched.c:382:43-382:65: static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
-
block/blk-mq-sched.c:406:40-406:62: static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
-
block/blk-mq-sched.c:443:23-443:45: int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/blk-mq-sched.c:509:28-509:50: void blk_mq_sched_free_rqs(struct request_queue *q)
-
block/blk-mq-sched.c:526:24-526:46: void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
-
block/blk-mq-sched.h:37:26-37:48: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sysfs.c:192:26-192:48: void blk_mq_sysfs_deinit(struct request_queue *q)
-
block/blk-mq-sysfs.c:204:24-204:46: void blk_mq_sysfs_init(struct request_queue *q)
-
block/blk-mq-sysfs.c:273:36-273:58: void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-sysfs.c:289:33-289:55: int blk_mq_sysfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-tag.c:307:53-307:75: static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
-
block/blk-mq-tag.c:491:33-491:55: void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
-
block/blk-mq-tag.c:655:42-655:64: void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
-
block/blk-mq.c:51:52-51:74: static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
-
block/blk-mq.c:110:31-110:53: unsigned int blk_mq_in_flight(struct request_queue *q,
-
block/blk-mq.c:120:26-120:48: void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
-
block/blk-mq.c:130:29-130:51: void blk_freeze_queue_start(struct request_queue *q)
-
block/blk-mq.c:144:31-144:53: void blk_mq_freeze_queue_wait(struct request_queue *q)
-
block/blk-mq.c:150:38-150:60: int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
-
block/blk-mq.c:163:23-163:45: void blk_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:176:26-176:48: void blk_mq_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:186:30-186:52: void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
-
block/blk-mq.c:200:28-200:50: void blk_mq_unfreeze_queue(struct request_queue *q)
-
block/blk-mq.c:210:34-210:56: void blk_mq_quiesce_queue_nowait(struct request_queue *q)
-
block/blk-mq.c:248:27-248:49: void blk_mq_quiesce_queue(struct request_queue *q)
-
block/blk-mq.c:264:29-264:51: void blk_mq_unquiesce_queue(struct request_queue *q)
-
block/blk-mq.c:311:26-311:48: void blk_mq_wake_waiters(struct request_queue *q)
-
block/blk-mq.c:321:18-321:40: void blk_rq_init(struct request_queue *q, struct request *rq)
-
block/blk-mq.c:510:45-510:67: static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
-
block/blk-mq.c:535:52-535:74: static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
-
block/blk-mq.c:569:38-569:60: struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
-
block/blk-mq.c:602:43-602:65: struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-
block/blk-mq.c:1486:31-1486:53: void blk_mq_kick_requeue_list(struct request_queue *q)
-
block/blk-mq.c:1492:37-1492:59: void blk_mq_delay_kick_requeue_list(struct request_queue *q,
-
block/blk-mq.c:1517:28-1517:50: bool blk_mq_queue_inflight(struct request_queue *q)
-
block/blk-mq.c:1963:36-1963:58: static void blk_mq_release_budgets(struct request_queue *q,
-
block/blk-mq.c:2258:49-2258:71: static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
-
block/blk-mq.c:2280:27-2280:49: void blk_mq_run_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2308:33-2308:55: void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
-
block/blk-mq.c:2365:28-2365:50: void blk_mq_stop_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2383:29-2383:51: void blk_mq_start_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2403:37-2403:59: void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2699:38-2699:60: static void __blk_mq_flush_plug_list(struct request_queue *q,
-
block/blk-mq.c:2820:38-2820:60: static bool blk_mq_attempt_bio_merge(struct request_queue *q,
-
block/blk-mq.c:2832:48-2832:70: static struct request *blk_mq_get_new_requests(struct request_queue *q,
-
block/blk-mq.c:2869:57-2869:79: static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-
block/blk-mq.c:3575:30-3575:52: static void blk_mq_exit_hctx(struct request_queue *q,
-
block/blk-mq.c:3602:35-3602:57: static void blk_mq_exit_hw_queues(struct request_queue *q,
-
block/blk-mq.c:3615:29-3615:51: static int blk_mq_init_hctx(struct request_queue *q,
-
block/blk-mq.c:3653:19-3653:41: blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
-
block/blk-mq.c:3717:36-3717:58: static void blk_mq_init_cpu_queues(struct request_queue *q,
-
block/blk-mq.c:3801:32-3801:54: static void blk_mq_map_swqueue(struct request_queue *q)
-
block/blk-mq.c:3908:35-3908:57: static void queue_set_hctx_shared(struct request_queue *q, bool shared)
-
block/blk-mq.c:3937:38-3937:60: static void blk_mq_del_queue_tag_set(struct request_queue *q)
-
block/blk-mq.c:3954:10-3954:32: struct request_queue *q)
-
block/blk-mq.c:3975:30-3975:52: static int blk_mq_alloc_ctxs(struct request_queue *q)
-
block/blk-mq.c:4008:21-4008:43: void blk_mq_release(struct request_queue *q)
-
block/blk-mq.c:4065:27-4065:49: void blk_mq_destroy_queue(struct request_queue *q)
-
block/blk-mq.c:4103:45-4103:67: struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
-
block/blk-mq.c:4118:31-4118:53: struct blk_mq_tag_set *set, struct request_queue *q,
-
block/blk-mq.c:4152:7-4152:29: struct request_queue *q)
-
block/blk-mq.c:4194:37-4194:59: static void blk_mq_update_poll_flag(struct request_queue *q)
-
block/blk-mq.c:4206:3-4206:25: struct request_queue *q)
-
block/blk-mq.c:4254:24-4254:46: void blk_mq_exit_queue(struct request_queue *q)
-
block/blk-mq.c:4539:31-4539:53: int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
-
block/blk-mq.c:4607:3-4607:25: struct request_queue *q)
-
block/blk-mq.c:4634:7-4634:29: struct request_queue *q)
-
block/blk-mq.c:4646:7-4646:29: struct request_queue *q)
-
block/blk-mq.c:4743:17-4743:39: int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
-
block/blk-mq.c:4777:30-4777:52: void blk_mq_cancel_work_sync(struct request_queue *q)
-
block/blk-mq.h:83:59-83:81: static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-
block/blk-mq.h:110:54-110:76: static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-
block/blk-mq.h:134:51-134:73: static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-
block/blk-mq.h:146:49-146:71: static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-
block/blk-mq.h:249:47-249:69: static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
-
block/blk-mq.h:256:46-256:68: static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
-
block/blk-pm.c:29:26-29:48: void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-
block/blk-pm.c:59:29-59:51: int blk_pre_runtime_suspend(struct request_queue *q)
-
block/blk-pm.c:120:31-120:53: void blk_post_runtime_suspend(struct request_queue *q, int err)
-
block/blk-pm.c:150:29-150:51: void blk_pre_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:174:30-174:52: void blk_post_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:197:29-197:51: void blk_set_runtime_active(struct request_queue *q)
-
block/blk-pm.h:9:54-9:76: static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
-
block/blk-rq-qos.c:289:18-289:40: void rq_qos_exit(struct request_queue *q)
-
block/blk-rq-qos.h:61:40-61:62: static inline struct rq_qos *rq_qos_id(struct request_queue *q,
-
block/blk-rq-qos.h:72:41-72:63: static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:77:43-77:65: static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:113:35-113:57: static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:119:32-119:54: static inline void rq_qos_done(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:125:33-125:55: static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:131:35-131:57: static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:147:36-147:58: static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:155:33-155:55: static inline void rq_qos_track(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:162:33-162:55: static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:171:47-171:69: static inline void rq_qos_queue_depth_changed(struct request_queue *q)
-
block/blk-settings.c:22:27-22:49: void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
-
block/blk-settings.c:98:29-98:51: void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-
block/blk-settings.c:123:31-123:53: void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-
block/blk-settings.c:167:30-167:52: void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-
block/blk-settings.c:178:36-178:58: void blk_queue_max_discard_sectors(struct request_queue *q,
-
block/blk-settings.c:191:41-191:63: void blk_queue_max_secure_erase_sectors(struct request_queue *q,
-
block/blk-settings.c:204:41-204:63: void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
-
block/blk-settings.c:216:40-216:62: void blk_queue_max_zone_append_sectors(struct request_queue *q,
-
block/blk-settings.c:247:29-247:51: void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-
block/blk-settings.c:268:37-268:59: void blk_queue_max_discard_segments(struct request_queue *q,
-
block/blk-settings.c:284:33-284:55: void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-
block/blk-settings.c:309:35-309:57: void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:338:36-338:58: void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:359:39-359:61: void blk_queue_zone_write_granularity(struct request_queue *q,
-
block/blk-settings.c:383:33-383:55: void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-
block/blk-settings.c:442:23-442:45: void blk_queue_io_min(struct request_queue *q, unsigned int min)
-
block/blk-settings.c:480:23-480:45: void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-
block/blk-settings.c:727:31-727:53: void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-
block/blk-settings.c:739:33-739:55: void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:756:30-756:52: void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:781:30-781:52: void blk_queue_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:801:37-801:59: void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:816:26-816:48: void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-
block/blk-settings.c:831:28-831:50: void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-
block/blk-settings.c:855:43-855:65: void blk_queue_required_elevator_features(struct request_queue *q,
-
block/blk-settings.c:869:40-869:62: bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-
block/blk-stat.c:136:28-136:50: void blk_stat_add_callback(struct request_queue *q,
-
block/blk-stat.c:157:31-157:53: void blk_stat_remove_callback(struct request_queue *q,
-
block/blk-stat.c:187:34-187:56: void blk_stat_disable_accounting(struct request_queue *q)
-
block/blk-stat.c:198:33-198:55: void blk_stat_enable_accounting(struct request_queue *q)
-
block/blk-sysfs.c:63:36-63:58: static ssize_t queue_requests_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:69:22-69:44: queue_requests_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:91:30-91:52: static ssize_t queue_ra_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:102:16-102:38: queue_ra_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:116:39-116:61: static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:123:40-123:62: static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:128:48-128:70: static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-
block/blk-sysfs.c:134:50-134:72: static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:139:44-139:66: static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:144:46-144:68: static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:149:47-149:69: static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:154:41-154:63: static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:159:34-159:56: static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:164:34-164:56: static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:169:47-169:69: static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:174:42-174:64: static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:181:39-181:61: static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:187:40-187:62: static ssize_t queue_discard_max_store(struct request_queue *q,
-
block/blk-sysfs.c:210:47-210:69: static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:215:42-215:64: static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:220:44-220:66: static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:226:50-226:72: static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-
block/blk-sysfs.c:232:43-232:65: static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:240:25-240:47: queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:274:42-274:64: static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:281:46-281:68: static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:286:41-286:63: static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:317:1-317:1: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-
block/blk-sysfs.c:318:1-318:1: QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-
block/blk-sysfs.c:319:1-319:1: QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-
block/blk-sysfs.c:320:1-320:1: QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-
block/blk-sysfs.c:323:33-323:55: static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:335:36-335:58: static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:340:42-340:64: static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:345:44-345:66: static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:350:36-350:58: static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:356:37-356:59: static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:375:39-375:61: static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:384:25-384:47: queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:408:38-408:60: static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:413:39-413:61: static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:419:32-419:54: static ssize_t queue_poll_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:424:33-424:55: static ssize_t queue_poll_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:434:38-434:60: static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:439:39-439:61: static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:454:34-454:56: static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:465:35-465:57: static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:509:30-509:52: static ssize_t queue_wc_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:517:31-517:53: static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:539:31-539:53: static ssize_t queue_fua_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:544:31-544:53: static ssize_t queue_dax_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:1693:32-1693:54: static void throtl_shutdown_wq(struct request_queue *q)
-
block/blk-throttle.c:2455:37-2455:59: ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:2462:38-2462:60: ssize_t blk_throtl_sample_time_store(struct request_queue *q,
-
block/blk-timeout.c:23:32-23:54: bool __blk_should_fake_timeout(struct request_queue *q)
-
block/blk-wbt.c:502:19-502:41: bool wbt_disabled(struct request_queue *q)
-
block/blk-wbt.c:510:21-510:43: u64 wbt_get_min_lat(struct request_queue *q)
-
block/blk-wbt.c:518:22-518:44: void wbt_set_min_lat(struct request_queue *q, u64 val)
-
block/blk-wbt.c:719:26-719:48: void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-
block/blk-wbt.c:756:30-756:52: u64 wbt_default_latency_nsec(struct request_queue *q)
-
block/blk.h:41:40-41:62: static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
-
block/blk.h:79:42-79:64: static inline bool biovec_phys_mergeable(struct request_queue *q,
-
block/blk.h:166:54-166:76: static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-
block/blk.h:346:36-346:58: static inline void req_set_nomerge(struct request_queue *q, struct request *req)
-
block/blk.h:379:41-379:63: static inline bool blk_queue_may_bounce(struct request_queue *q)
-
block/blk.h:387:3-387:25: struct request_queue *q)
-
block/bsg-lib.c:28:35-28:57: static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
block/bsg-lib.c:320:23-320:45: void bsg_remove_queue(struct request_queue *q)
-
block/bsg.c:187:39-187:61: struct bsg_device *bsg_register_queue(struct request_queue *q,
-
block/elevator.c:86:41-86:63: static inline bool elv_support_features(struct request_queue *q,
-
block/elevator.c:116:48-116:70: static struct elevator_type *elevator_find_get(struct request_queue *q,
-
block/elevator.c:131:39-131:61: struct elevator_queue *elevator_alloc(struct request_queue *q,
-
block/elevator.c:159:20-159:42: void elevator_exit(struct request_queue *q)
-
block/elevator.c:179:21-179:43: void elv_rqhash_del(struct request_queue *q, struct request *rq)
-
block/elevator.c:186:21-186:43: void elv_rqhash_add(struct request_queue *q, struct request *rq)
-
block/elevator.c:196:28-196:50: void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
-
block/elevator.c:202:33-202:55: struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
-
block/elevator.c:276:26-276:48: enum elv_merge elv_merge(struct request_queue *q, struct request **req,
-
block/elevator.c:332:31-332:53: bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
-
block/elevator.c:370:25-370:47: void elv_merged_request(struct request_queue *q, struct request *rq,
-
block/elevator.c:384:25-384:47: void elv_merge_requests(struct request_queue *q, struct request *rq,
-
block/elevator.c:396:36-396:58: struct request *elv_latter_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:406:36-406:58: struct request *elv_former_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:463:24-463:46: int elv_register_queue(struct request_queue *q, bool uevent)
-
block/elevator.c:488:27-488:49: void elv_unregister_queue(struct request_queue *q)
-
block/elevator.c:555:40-555:62: static inline bool elv_support_iosched(struct request_queue *q)
-
block/elevator.c:567:51-567:73: static struct elevator_type *elevator_get_default(struct request_queue *q)
-
block/elevator.c:583:55-583:77: static struct elevator_type *elevator_get_by_features(struct request_queue *q)
-
block/elevator.c:609:23-609:45: void elevator_init_mq(struct request_queue *q)
-
block/elevator.c:657:21-657:43: int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
-
block/elevator.c:694:23-694:45: void elevator_disable(struct request_queue *q)
-
block/elevator.c:715:28-715:50: static int elevator_change(struct request_queue *q, const char *elevator_name)
-
block/elevator.c:745:27-745:49: ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
-
block/elevator.c:761:26-761:48: ssize_t elv_iosched_show(struct request_queue *q, char *name)
-
block/elevator.c:790:39-790:61: struct request *elv_rb_former_request(struct request_queue *q,
-
block/elevator.c:802:39-802:61: struct request *elv_rb_latter_request(struct request_queue *q,
-
block/genhd.c:1371:35-1371:57: struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
-
block/kyber-iosched.c:357:56-357:78: static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
-
block/kyber-iosched.c:405:29-405:51: static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/kyber-iosched.c:567:29-567:51: static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
-
block/mq-deadline.c:181:37-181:59: static void deadline_remove_request(struct request_queue *q,
-
block/mq-deadline.c:198:31-198:53: static void dd_request_merged(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:218:32-218:54: static void dd_merged_requests(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:665:26-665:48: static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/mq-deadline.c:716:29-716:51: static int dd_request_merge(struct request_queue *q, struct request **rq,
-
block/mq-deadline.c:748:26-748:48: static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
-
crypto/ecc.c:1341:33-1341:57: const struct ecc_point *p, const struct ecc_point *q,
-
crypto/ecc.c:1364:22-1364:46: const u64 *u2, const struct ecc_point *q,
-
drivers/accel/habanalabs/common/hw_queue.c:31:36-31:56: static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
-
drivers/accel/habanalabs/common/hw_queue.c:83:52-83:72: void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:117:5-117:25: struct hl_hw_queue *q, int num_of_entries,
-
drivers/accel/habanalabs/common/hw_queue.c:166:6-166:26: struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:200:59-200:79: static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:813:59-813:79: static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:854:51-854:71: static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:874:51-874:71: static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:879:51-879:71: static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:884:50-884:70: static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:985:47-985:67: static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:1033:48-1033:68: static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/irq.c:508:40-508:54: int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
-
drivers/accel/habanalabs/common/irq.c:536:41-536:55: void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
-
drivers/accel/habanalabs/common/irq.c:541:42-541:56: void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
-
drivers/accel/habanalabs/common/irq.c:567:40-567:54: int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
-
drivers/accel/habanalabs/common/irq.c:591:41-591:55: void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
-
drivers/accel/habanalabs/common/irq.c:598:42-598:56: void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
-
drivers/ata/libata-pata-timings.c:61:5-61:24: struct ata_timing *q, int T, int UT)
-
drivers/block/drbd/drbd_int.h:1828:17-1828:41: drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_int.h:1838:29-1838:53: drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_nl.c:1192:43-1192:65: static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-
drivers/block/drbd/drbd_nl.c:1244:60-1244:82: static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-
drivers/block/drbd/drbd_nl.c:1259:63-1259:85: static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-
drivers/block/null_blk/zoned.c:61:51-61:73: int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
-
drivers/block/pktcdvd.c:893:63-893:85: static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-
drivers/block/pktcdvd.c:2293:36-2293:58: static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
-
drivers/block/rnbd/rnbd-clt.c:137:41-137:60: static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1052:7-1052:26: struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1311:12-1311:31: struct rnbd_queue *q,
-
drivers/block/virtio_blk.c:780:12-780:34: struct request_queue *q)
-
drivers/char/ipmi/ipmi_msghandler.c:681:32-681:50: static void free_recv_msg_list(struct list_head *q)
-
drivers/char/ipmi/ipmi_msghandler.c:691:31-691:49: static void free_smi_msg_list(struct list_head *q)
-
drivers/clk/clk.c:3125:40-3125:58: bool clk_is_match(const struct clk *p, const struct clk *q)
-
drivers/crypto/cavium/cpt/cptpf_mbox.c:59:55-59:58: static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/cavium/cpt/cptvf_reqmanager.c:15:53-15:75: static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
-
drivers/crypto/cavium/zip/zip_mem.c:57:48-57:52: int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-
drivers/crypto/cavium/zip/zip_mem.c:76:48-76:52: void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-
drivers/crypto/hisilicon/qm.c:2308:8-2308:28: struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2328:37-2328:57: static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2336:31-2336:51: static int hisi_qm_uacce_mmap(struct uacce_queue *q,
-
drivers/crypto/hisilicon/qm.c:2389:38-2389:58: static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2396:38-2396:58: static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2401:33-2401:53: static int hisi_qm_is_q_updated(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2418:28-2418:48: static void qm_set_sqctype(struct uacce_queue *q, u16 type)
-
drivers/crypto/hisilicon/qm.c:2428:33-2428:53: static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
-
drivers/crypto/hisilicon/sec/sec_drv.c:673:47-673:53: static irqreturn_t sec_isr_handle_th(int irq, void *q)
-
drivers/crypto/hisilicon/sec/sec_drv.c:679:44-679:50: static irqreturn_t sec_isr_handle(int irq, void *q)
-
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c:135:63-135:66: static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c:58:7-58:37: struct otx_cpt_pending_queue *q,
-
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c:49:6-49:37: struct otx2_cpt_pending_queue *q,
-
drivers/cxl/core/mbox.c:474:5-474:43: struct cxl_mem_query_commands __user *q)
-
drivers/firmware/arm_scmi/raw_mode.c:258:52-258:75: static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:274:33-274:56: static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
-
drivers/firmware/arm_scmi/raw_mode.c:287:37-287:60: static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
-
drivers/firmware/arm_scmi/raw_mode.c:300:34-300:57: scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:312:56-312:79: static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:324:41-324:64: static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:695:26-695:49: scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:261:39-261:64: static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:581:10-581:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:612:10-612:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4343:30-4343:34: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:798:30-798:34: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:3006:25-3006:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:4149:25-4149:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:3420:25-3420:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1832:25-1832:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:148:24-148:28: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:173:60-173:74: static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:241:63-241:77: static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:297:7-297:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:312:7-312:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:330:9-330:23: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:391:5-391:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:416:4-416:18: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:483:5-483:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:501:5-501:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:621:59-621:73: static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:656:5-656:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:730:5-730:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:789:5-789:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:818:59-818:73: static int update_queue(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1340:5-1340:19: struct queue *q, const uint32_t *restore_sdma_id)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1407:5-1407:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1612:65-1612:79: static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1850:5-1850:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2047:6-2047:20: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2077:4-2077:24: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2097:6-2097:26: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:181:60-181:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:198:5-198:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c:76:64-76:78: static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c:76:64-76:78: static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c:87:63-87:77: static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:229:60-229:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:246:4-246:18: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:49:59-49:84: struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:65:6-65:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:71:45-71:70: static void set_priority(struct cik_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:78:6-78:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:91:3-91:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:145:4-145:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:174:4-174:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:211:4-211:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:225:4-225:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:232:4-232:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:338:3-338:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:344:4-344:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:71:53-71:78: static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:78:3-78:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:91:4-91:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:159:4-159:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:294:4-294:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:308:3-308:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:326:4-326:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:78:53-78:78: static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:85:3-85:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:107:4-107:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:192:10-192:35: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:301:4-301:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:315:3-315:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:338:3-338:28: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:80:44-80:69: static void set_priority(struct v9_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:87:3-87:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:134:4-134:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:224:4-224:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:376:4-376:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:390:3-390:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:408:4-408:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:74:44-74:69: static void set_priority(struct vi_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:81:6-81:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:94:4-94:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:172:4-172:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:243:4-243:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:257:4-257:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:329:4-329:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:342:4-342:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:350:3-350:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:366:4-366:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c:188:3-188:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c:143:3-143:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:495:26-495:40: int kfd_procfs_add_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:655:27-655:41: void kfd_procfs_del_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:181:26-181:41: struct kfd_dev *dev, struct queue **q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:569:5-569:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:648:7-648:21: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:28:29-28:54: void print_queue_properties(struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:46:18-46:32: void print_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:67:16-67:31: int init_queue(struct queue **q, const struct queue_properties *properties)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:81:19-81:33: void uninit_queue(struct queue *q)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:612:30-612:52: static void throttle_release(struct i915_request **q, int count)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:625:7-625:29: struct i915_request **q, int count)
-
drivers/gpu/drm/v3d/v3d_sched.c:291:54-291:69: v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
-
drivers/gpu/ipu-v3/ipu-image-convert.c:1252:5-1252:23: struct list_head *q)
-
drivers/infiniband/hw/hfi1/ipoib_tx.c:841:52-841:65: void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
-
drivers/infiniband/hw/irdma/uk.c:1508:24-1508:30: void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:371:51-371:77: static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:377:6-377:32: struct ocrdma_queue_info *q, u16 len, u16 entry_size)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:403:11-403:37: struct ocrdma_queue_info *q, int queue_type)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1551:32-1551:59: static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1566:30-1566:57: static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1571:39-1571:66: static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1577:33-1577:60: static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1582:33-1582:60: static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/qedr/verbs.c:744:28-744:47: struct qedr_dev *dev, struct qedr_userq *q,
-
drivers/infiniband/hw/qedr/verbs.c:792:12-792:31: struct qedr_userq *q, u64 buf_addr,
-
drivers/infiniband/sw/rxe/rxe_queue.c:46:29-46:47: inline void rxe_queue_reset(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.c:110:26-110:44: static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
-
drivers/infiniband/sw/rxe/rxe_queue.c:147:22-147:40: int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
-
drivers/infiniband/sw/rxe/rxe_queue.c:193:24-193:42: void rxe_queue_cleanup(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:95:36-95:54: static inline u32 queue_next_index(struct rxe_queue *q, int index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:100:38-100:62: static inline u32 queue_get_producer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:127:38-127:62: static inline u32 queue_get_consumer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:154:31-154:49: static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:162:30-162:48: static inline int queue_full(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:170:31-170:55: static inline u32 queue_count(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:179:43-179:61: static inline void queue_advance_producer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:215:43-215:61: static inline void queue_advance_consumer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:250:41-250:59: static inline void *queue_producer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:258:41-258:59: static inline void *queue_consumer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:266:43-266:61: static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:272:41-272:65: static inline u32 queue_index_from_addr(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:279:32-279:50: static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
-
drivers/input/misc/hisi_powerkey.c:29:52-29:58: static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:40:54-40:60: static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:51:55-51:61: static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
-
drivers/input/rmi4/rmi_f54.c:283:32-283:50: static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
-
drivers/input/rmi4/rmi_f54.c:363:36-363:54: static void rmi_f54_stop_streaming(struct vb2_queue *q)
-
drivers/input/touchscreen/atmel_mxt_ts.c:2425:28-2425:46: static int mxt_queue_setup(struct vb2_queue *q,
-
drivers/input/touchscreen/sur40.c:845:30-845:48: static int sur40_queue_setup(struct vb2_queue *q,
-
drivers/md/dm-cache-policy-smq.c:270:20-270:34: static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
-
drivers/md/dm-cache-policy-smq.c:288:28-288:42: static unsigned int q_size(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:296:20-296:34: static void q_push(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:306:26-306:40: static void q_push_front(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:316:27-316:41: static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:326:19-326:33: static void q_del(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:336:29-336:43: static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
-
drivers/md/dm-cache-policy-smq.c:358:28-358:42: static struct entry *q_pop(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:373:40-373:54: static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
-
drivers/md/dm-cache-policy-smq.c:387:37-387:51: static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
-
drivers/md/dm-cache-policy-smq.c:407:27-407:41: static void q_set_targets(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:429:28-429:42: static void q_redistribute(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:472:23-472:37: static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
-
drivers/md/dm-rq.c:64:21-64:43: void dm_start_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:70:20-70:42: void dm_stop_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:171:39-171:61: static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
-
drivers/md/dm-table.c:1350:38-1350:60: static void dm_update_crypto_profile(struct request_queue *q,
-
drivers/md/dm-table.c:1928:51-1928:73: int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
-
drivers/md/dm-zone.c:290:51-290:73: int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
-
drivers/md/dm.c:1965:45-1965:67: static void dm_queue_destroy_crypto_profile(struct request_queue *q)
-
drivers/media/common/saa7146/saa7146_fops.c:49:5-49:30: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:71:7-71:32: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:102:5-102:30: struct saa7146_dmaqueue *q, int vbi)
-
drivers/media/common/saa7146/saa7146_vbi.c:220:24-220:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/common/saa7146/saa7146_vbi.c:290:28-290:46: static void return_buffers(struct vb2_queue *q, int state)
-
drivers/media/common/saa7146/saa7146_vbi.c:380:28-380:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/common/saa7146/saa7146_vbi.c:393:28-393:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/common/saa7146/saa7146_video.c:556:24-556:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/common/saa7146/saa7146_video.c:635:28-635:46: static void return_buffers(struct vb2_queue *q, int state)
-
drivers/media/common/saa7146/saa7146_video.c:653:28-653:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/common/saa7146/saa7146_video.c:666:28-666:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:379:37-379:55: static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:407:30-407:48: static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:479:28-479:46: static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:505:30-505:48: static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:605:24-605:42: bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:627:30-627:48: static bool __buffers_in_use(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:637:24-637:42: void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:647:33-647:51: static int __verify_userptr_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:660:30-660:48: static int __verify_mmap_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:673:32-673:50: static int __verify_dmabuf_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:683:28-683:46: int vb2_verify_memory_type(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-core.c:729:33-729:51: static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:738:36-738:54: static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:747:22-747:40: int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:905:26-905:44: int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:1100:23-1100:41: void vb2_discard_done(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1545:26-1545:44: int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:1585:32-1585:50: static int vb2_start_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1637:19-1637:37: int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1797:35-1797:53: static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-
drivers/media/common/videobuf2/videobuf2-core.c:1878:30-1878:48: static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1911:30-1911:48: int vb2_wait_for_all_buffers(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1940:20-1940:38: int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:2004:32-2004:50: static void __vb2_queue_cancel(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2104:23-2104:41: int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2154:22-2154:40: void vb2_queue_error(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2162:24-2162:42: int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2190:35-2190:53: static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
-
drivers/media/common/videobuf2/videobuf2-core.c:2232:21-2232:39: int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
-
drivers/media/common/videobuf2/videobuf2-core.c:2305:14-2305:32: int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
-
drivers/media/common/videobuf2/videobuf2-core.c:2412:25-2412:43: int vb2_core_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2466:29-2466:47: void vb2_core_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2476:24-2476:42: __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
-
drivers/media/common/videobuf2/videobuf2-core.c:2625:30-2625:48: static int __vb2_init_fileio(struct vb2_queue *q, int read)
-
drivers/media/common/videobuf2/videobuf2-core.c:2743:33-2743:51: static int __vb2_cleanup_fileio(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2767:36-2767:54: static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2928:17-2928:35: size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2935:18-2935:36: size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:3014:22-3014:40: int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
-
drivers/media/common/videobuf2/videobuf2-core.c:3054:21-3054:39: int vb2_thread_stop(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:344:36-344:54: static void set_buffer_cache_hints(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:366:37-366:55: static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:628:36-628:54: struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:653:18-653:36: int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:675:27-675:45: static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:694:35-694:53: static void validate_memory_flags(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:710:17-710:35: int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:723:21-723:39: int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:742:21-742:39: int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:802:14-802:32: int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:823:15-823:33: int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:854:18-854:36: int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:864:19-864:37: int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:874:16-874:34: int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:881:25-881:43: int vb2_queue_init_name(struct vb2_queue *q, const char *name)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:926:20-926:38: int vb2_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:932:24-932:42: void vb2_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:938:27-938:45: int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:952:19-952:37: __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-
drivers/media/pci/bt8xx/bttv-driver.c:1490:32-1490:55: static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv,
-
drivers/media/pci/bt8xx/bttv-driver.c:1591:14-1591:37: buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/pci/bt8xx/bttv-driver.c:1604:16-1604:39: buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/pci/bt8xx/bttv-driver.c:1615:14-1615:37: buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-driver.c:1629:28-1629:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-risc.c:482:15-482:38: bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf)
-
drivers/media/pci/bt8xx/bttv-vbi.c:70:29-70:52: static int vbi_buffer_setup(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:91:31-91:54: static int vbi_buffer_prepare(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:199:18-199:41: vbi_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-vbi.c:214:32-214:55: static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:34:31-34:49: static int cobalt_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cobalt/cobalt-v4l2.c:279:35-279:53: static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:388:35-388:53: static void cobalt_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:36:22-36:41: void cx18_queue_init(struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:44:6-44:25: struct cx18_queue *q, int to_front)
-
drivers/media/pci/cx18/cx18-queue.c:73:54-73:73: struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:60:5-60:24: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:67:9-67:28: struct cx18_queue *q)
-
drivers/media/pci/cx23885/cx23885-417.c:1123:24-1123:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-417.c:1167:36-1167:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-417.c:1194:36-1194:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-core.c:425:7-425:32: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-core.c:1398:9-1398:34: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:88:24-88:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:150:36-150:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-dvb.c:161:36-161:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-vbi.c:87:5-87:30: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:114:24-114:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:217:36-217:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-vbi.c:228:36-228:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-video.c:89:2-89:27: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-video.c:305:7-305:32: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:332:24-332:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:488:36-488:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-video.c:499:36-499:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx25821/cx25821-video.c:59:8-59:33: struct cx25821_dmaqueue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:127:32-127:50: static int cx25821_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:261:36-261:54: static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx25821/cx25821-video.c:274:36-274:54: static void cx25821_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-blackbird.c:658:24-658:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-blackbird.c:702:28-702:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-blackbird.c:752:28-752:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-core.c:521:4-521:26: struct cx88_dmaqueue *q, u32 count)
-
drivers/media/pci/cx88/cx88-dvb.c:75:24-75:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-dvb.c:120:28-120:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-dvb.c:131:28-131:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:73:8-73:30: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-mpeg.c:199:5-199:27: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:216:24-216:42: int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-
drivers/media/pci/cx88/cx88-vbi.c:52:5-52:27: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:99:9-99:31: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-vbi.c:115:24-115:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:194:28-194:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-vbi.c:205:28-205:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-video.c:350:7-350:29: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-video.c:405:12-405:34: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-video.c:420:24-420:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-video.c:529:28-529:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-video.c:540:28-540:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/dt3155/dt3155.c:148:35-148:53: static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
-
drivers/media/pci/dt3155/dt3155.c:176:35-176:53: static void dt3155_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:227:53-227:72: static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:239:28-239:47: static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:304:60-304:79: static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:345:51-345:70: static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:507:52-507:71: static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:586:60-586:79: static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:779:41-779:60: static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1521:54-1521:73: static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1654:55-1654:74: static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1937:59-1937:78: static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:32:22-32:41: void ivtv_queue_init(struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:40:67-40:86: void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:59:57-59:76: struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:335:41-335:59: static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:344:41-344:59: static void netup_unidvb_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:261:5-261:30: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:289:7-289:32: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:302:5-302:30: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:352:54-352:79: void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:1348:8-1348:33: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-ts.c:106:28-106:46: int saa7134_ts_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-vbi.c:128:24-128:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-video.c:750:24-750:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:655:33-655:51: static int solo_enc_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:708:37-708:55: static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:715:37-715:55: static void solo_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:307:29-307:47: static int solo_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:322:33-322:51: static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:330:33-330:51: static void solo_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw5864/tw5864-video.c:182:31-182:49: static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/tw5864/tw5864-video.c:427:35-427:53: static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw5864/tw5864-video.c:446:35-446:53: static void tw5864_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw68/tw68-video.c:358:29-358:47: static int tw68_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/tw68/tw68-video.c:493:33-493:51: static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw68/tw68-video.c:504:33-504:51: static void tw68_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2831:36-2831:54: static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2850:36-2850:54: static void allegro_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/amphion/vpu_v4l2.c:560:36-560:54: static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/amphion/vpu_v4l2.c:593:36-593:54: static void vpu_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/aspeed/aspeed-video.c:1794:37-1794:55: static int aspeed_video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1825:41-1825:59: static int aspeed_video_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1847:41-1847:59: static void aspeed_video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/chips-media/coda-common.c:1967:33-1967:51: static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/chips-media/coda-common.c:2111:33-2111:51: static void coda_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:673:33-673:51: static int mtk_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:849:41-849:59: static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:858:41-858:59: static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:389:40-389:58: static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:411:40-411:58: static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:117:36-117:54: static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:175:36-175:54: static void mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:187:32-187:50: static int mdp_m2m_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c:830:33-830:51: int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c:840:33-840:51: void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c:878:40-878:58: static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c:958:40-958:58: static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/dw100/dw100.c:503:38-503:56: static void dw100_return_all_buffers(struct vb2_queue *q,
-
drivers/media/platform/nxp/dw100/dw100.c:520:34-520:52: static int dw100_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/dw100/dw100.c:544:34-544:52: static void dw100_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1578:33-1578:51: static int mxc_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1611:37-1611:55: static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1633:37-1633:55: static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1737:35-1737:59: static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1764:32-1764:56: static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
-
drivers/media/platform/nxp/imx-pxp.c:1576:32-1576:50: static int pxp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-pxp.c:1585:32-1585:50: static void pxp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:191:40-191:58: static int mxc_isi_m2m_vb2_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:238:44-238:62: static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:250:44-250:62: static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:881:36-881:54: static int mxc_isi_vb2_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:937:40-937:58: static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:975:40-975:58: static void mxc_isi_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/camss/camss-video.c:379:30-379:48: static int video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/camss/camss-video.c:489:34-489:52: static int video_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/camss/camss-video.c:537:34-537:52: static void video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/helpers.c:1542:38-1542:56: void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/vdec.c:884:29-884:47: static int vdec_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/vdec.c:1141:33-1141:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/venus/vdec.c:1238:33-1238:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/venc.c:1052:29-1052:47: static int venc_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/venc.c:1219:33-1219:51: static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1921:33-1921:51: static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1960:33-1960:51: static void fdp1_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rga/rga-buf.c:59:36-59:54: static void rga_buf_return_buffers(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rga/rga-buf.c:76:36-76:54: static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rockchip/rga/rga-buf.c:91:36-91:54: static void rga_buf_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c:1881:41-1881:59: static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c:159:29-159:47: rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:56:36-56:54: static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:78:36-78:54: static void gsc_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:259:28-259:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:290:28-290:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:76:46-76:64: static int isp_video_capture_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:119:46-119:64: static void isp_video_capture_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:305:28-305:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:339:28-339:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:73:28-73:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:80:28-80:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2564:37-2564:55: static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2571:37-2571:55: static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1027:36-1027:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1043:36-1043:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2502:36-2502:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2532:36-2532:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:498:34-498:52: static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:521:34-521:52: static void bdisp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1303:41-1303:59: static int delta_vb2_au_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1397:41-1397:59: static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1527:44-1527:62: static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:157:34-157:52: static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:166:34-166:52: static void dma2d_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/ti/vpe/vpe.c:2124:58-2124:76: static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
-
drivers/media/platform/ti/vpe/vpe.c:2177:32-2177:50: static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/ti/vpe/vpe.c:2199:32-2199:50: static void vpe_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:862:32-862:50: static bool hantro_vq_is_coded(struct vb2_queue *q)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:869:35-869:53: static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:912:20-912:38: hantro_return_bufs(struct vb2_queue *q,
-
drivers/media/platform/verisilicon/hantro_v4l2.c:929:35-929:53: static void hantro_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1509:33-1509:51: static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1548:36-1548:54: static int vicodec_start_streaming(struct vb2_queue *q,
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1636:36-1636:54: static void vicodec_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vim2m.c:1054:34-1054:52: static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/test-drivers/vim2m.c:1069:34-1069:52: static void vim2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/visl/visl-trace-mpeg2.h:88:1-88:1: DEFINE_EVENT(v4l2_ctrl_mpeg2_quant_tmpl, v4l2_ctrl_mpeg2_quantisation,
-
drivers/media/test-drivers/vivid/vivid-core.c:862:10-862:28: struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:39:39-39:57: static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:772:43-772:61: static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:898:43-898:61: static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/go7007/go7007-fw.c:290:70-290:74: static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
-
drivers/media/usb/go7007/go7007-v4l2.c:343:31-343:49: static int go7007_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/go7007/go7007-v4l2.c:397:35-397:53: static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/usb/go7007/go7007-v4l2.c:425:35-425:53: static void go7007_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/gspca/topro.c:1439:50-1439:53: static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
-
drivers/media/usb/gspca/topro.c:1456:53-1456:57: static void setquality(struct gspca_dev *gspca_dev, s32 q)
-
drivers/media/usb/hdpvr/hdpvr-video.c:97:29-97:47: static int hdpvr_free_queue(struct list_head *q)
-
drivers/media/v4l2-core/v4l2-mc.c:302:34-302:52: int v4l_vb2q_enable_media_source(struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:689:9-689:27: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:702:8-702:26: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:730:7-730:25: struct vb2_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:55:43-55:66: struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:76:44-76:67: static int state_neither_active_nor_queued(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:88:21-88:44: int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:121:21-121:44: int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:131:31-131:54: void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:143:31-143:54: void videobuf_queue_core_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:185:28-185:51: int videobuf_queue_is_busy(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:230:28-230:51: static int __videobuf_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:263:28-263:51: void videobuf_queue_cancel(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:298:37-298:60: enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:318:29-318:52: static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
-
drivers/media/v4l2-core/videobuf-core.c:373:24-373:47: int videobuf_mmap_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:384:27-384:50: int __videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:428:25-428:48: int videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:440:22-440:45: int videobuf_reqbufs(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:501:23-501:46: int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:528:19-528:42: int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:632:43-632:66: static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-
drivers/media/v4l2-core/videobuf-core.c:675:31-675:54: static int stream_next_buffer(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:695:20-695:43: int videobuf_dqbuf(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:735:23-735:46: int videobuf_streamon(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:763:33-763:56: static int __videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:773:24-773:47: int videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:786:39-786:62: static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:831:36-831:59: static int __videobuf_copy_to_user(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:848:35-848:58: static int __videobuf_copy_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:874:27-874:50: ssize_t videobuf_read_one(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:961:34-961:57: static int __videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:996:34-996:57: static void __videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1012:25-1012:48: int videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1024:25-1024:48: void videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1032:20-1032:43: void videobuf_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1046:30-1046:53: ssize_t videobuf_read_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1120:10-1120:33: struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1172:26-1172:49: int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-
drivers/media/v4l2-core/videobuf-dma-sg.c:494:30-494:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:569:28-569:51: static int __videobuf_sync(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:584:35-584:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:667:29-667:52: void videobuf_queue_sg_init(struct videobuf_queue *q,
-
drivers/misc/uacce/uacce.c:18:34-18:54: static bool uacce_queue_is_valid(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:23:30-23:50: static int uacce_start_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:40:28-40:48: static int uacce_put_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:103:57-103:77: static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:126:32-126:52: static void uacce_unbind_queue(struct uacce_queue *q)
-
drivers/misc/vmw_vmci/vmci_queue_pair.c:248:27-248:33: static void qp_free_queue(void *q, u64 size)
-
drivers/mmc/core/crypto.c:22:29-22:51: void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
-
drivers/mmc/core/queue.c:177:37-177:59: static void mmc_queue_setup_discard(struct request_queue *q,
-
drivers/net/ethernet/amd/pds_core/core.c:164:24-164:43: static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/asix/ax88796c_main.c:244:44-244:65: ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:168:7-168:30: struct bnx2x_vf_queue *q,
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1415:7-1415:30: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:385:45-385:68: static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:390:54-390:77: static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:398:55-398:78: static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:542:8-542:31: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/brocade/bna/bna.h:238:44-238:62: static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:499:55-499:70: static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:612:48-612:61: static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:821:47-821:62: static void refill_free_list(struct sge *sge, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1167:12-1167:25: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1203:7-1203:20: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1290:58-1290:71: static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1399:40-1399:59: static inline int enough_free_Tx_descs(const struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:169:45-169:68: static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:174:44-174:66: static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:189:11-189:34: const struct sge_rspq *q, unsigned int credits)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:233:51-233:67: static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:282:51-282:67: static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:322:7-322:23: struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:342:37-342:59: static inline int should_restart_tx(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:349:49-349:70: static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:376:48-376:63: static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:438:52-438:67: static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:481:53-481:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:501:44-501:59: static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:574:50-574:65: static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:643:27-643:44: static void t3_reset_qset(struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:672:51-672:68: static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:840:10-840:27: struct sge_rspq *q, unsigned int len,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1045:59-1045:75: static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1089:9-1089:31: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1179:8-1179:24: struct sge_txq *q, unsigned int ndesc,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1246:30-1246:46: struct sge_qset *qs, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1418:58-1418:74: static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1450:45-1450:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1473:44-1473:60: static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1630:6-1630:22: struct sge_txq *q, unsigned int pidx,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1694:44-1694:60: static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1853:36-1853:53: static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1876:8-1876:25: struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2288:7-2288:30: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2293:40-2293:64: static inline void clear_rspq_bufstate(struct sge_rspq * const q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2584:58-2584:75: static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1597:11-1597:34: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1754:52-1754:69: static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:555:27-555:44: static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:913:23-913:40: void cxgb4_quiesce_rx(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:954:44-954:61: void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1240:32-1240:49: int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2424:28-2424:44: static void disable_txq_db(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2433:49-2433:65: static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2519:49-2519:65: static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:57:33-57:50: static void uldrx_flush_handler(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:74:26-74:43: static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:203:9-203:30: struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:208:38-208:60: static inline unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:313:41-313:57: void free_tx_desc(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:341:31-341:53: static inline int reclaimable(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:359:62-359:78: static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:391:55-391:71: void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:438:48-438:63: static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:466:48-466:63: static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:479:53-479:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:535:53-535:68: static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:837:49-837:65: void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:906:57-906:73: void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1025:52-1025:68: inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1106:5-1106:27: const struct sge_txq *q, void *pos)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1131:7-1131:29: const struct sge_txq *q, void *pos,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1232:26-1232:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1238:32-1238:48: static inline void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2108:45-2108:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2653:30-2653:51: static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2749:22-2749:43: static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2907:29-2907:49: static void txq_stop_maperr(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2923:26-2923:46: static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2948:27-2948:47: static void service_ofldq(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3069:22-3069:42: static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3189:10-3189:32: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3220:29-3220:49: static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3675:22-3675:39: int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3829:54-3829:69: static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3855:8-3855:31: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3866:30-3866:47: static inline void rspq_next(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3889:30-3889:47: static int process_responses(struct sge_rspq *q, int budget)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4069:30-4069:47: int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4566:44-4566:60: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4753:56-4753:72: static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4852:37-4852:53: void free_txq(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4899:53-4899:74: void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:688:31-688:53: static unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:693:26-693:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:699:25-699:41: static void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:58:55-58:77: static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:81:43-81:65: static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:86:37-86:53: static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:94:38-94:58: static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:747:6-747:26: struct sge_eth_txq *q, u64 mask,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:802:6-802:26: struct sge_eth_txq *q, u32 tid,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:843:8-843:28: struct sge_eth_txq *q, u64 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:993:8-993:28: struct sge_eth_txq *q, uint32_t tx_chan)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1102:11-1102:31: struct sge_eth_txq *q, u32 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1276:8-1276:28: struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1463:21-1463:41: bool tcp_push, struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1571:5-1571:25: struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1699:6-1699:26: struct sge_eth_txq *q, u32 skb_offset,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1777:10-1777:30: struct sge_eth_txq *q, u32 tls_end_offset)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1896:6-1896:26: struct sge_eth_txq *q)
-
drivers/net/ethernet/emulex/benet/be.h:150:37-150:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:155:37-155:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:160:38-160:60: static inline void *queue_index_node(struct be_queue_info *q, u16 index)
-
drivers/net/ethernet/emulex/benet/be.h:165:35-165:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:175:35-175:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1454:50-1454:72: int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1505:52-1505:74: int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:144:55-144:77: static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:155:55-155:77: static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:50:25-50:44: static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:67:23-67:42: static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:98:30-98:49: static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:127:30-127:49: static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:141:26-141:45: static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:256:9-256:28: get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:296:27-296:46: static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:349:24-349:43: static void advance_cq(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:366:32-366:51: static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:480:29-480:48: static int fun_process_cqes(struct funeth_rxq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:537:31-537:50: static void fun_rxq_free_bufs(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:550:31-550:50: static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:576:32-576:51: static void fun_rxq_free_cache(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:588:21-588:40: int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:678:29-678:48: static void fun_rxq_free_sw(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:698:24-698:43: int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:766:30-766:49: static void fun_rxq_free_dev(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:818:36-818:55: struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:56:22-56:47: static void *txq_end(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:64:32-64:57: static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:78:43-78:68: static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:107:56-107:75: static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:149:57-149:76: static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:311:35-311:60: static unsigned int fun_txq_avail(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:317:31-317:50: static void fun_tx_check_stop(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:338:33-338:52: static bool fun_txq_may_restart(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:386:24-386:49: static u16 txq_hw_head(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:394:35-394:60: static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:424:29-424:48: static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:485:36-485:55: static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:515:17-515:36: bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:602:27-602:46: static void fun_txq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:613:28-613:47: static void fun_xdpq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:666:29-666:48: static void fun_txq_free_sw(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:681:24-681:43: int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:732:30-732:49: static void fun_txq_free_dev(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:790:36-790:55: struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:228:38-228:63: static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:234:34-234:59: static inline void fun_txq_wr_db(const struct funeth_txq *q)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:193:16-193:35: hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:237:51-237:70: static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
-
drivers/net/ethernet/hisilicon/hns/hnae.c:264:29-264:48: static void hnae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:62:50-62:69: static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:193:31-193:50: static void hns_ae_init_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:201:31-201:50: static void hns_ae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:92:28-92:47: void hns_rcb_reset_ring_hw(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:142:26-142:45: void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:159:25-159:44: void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:172:28-172:47: void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:183:27-183:46: void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:197:29-197:48: void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:202:20-202:39: void hns_rcb_start(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:222:29-222:48: void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:234:29-234:48: void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:441:34-441:53: static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4857:31-4857:51: static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:329:12-329:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:380:12-380:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:436:51-436:76: void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:456:30-456:55: void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
-
drivers/net/ethernet/intel/fm10k/fm10k_pf.c:1134:11-1134:36: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/i40e/i40e_trace.h:60:1-60:1: TRACE_EVENT(i40e_napi_poll,
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:124:23-124:49: octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:189:22-189:48: octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
-
drivers/net/ethernet/marvell/octeontx2/af/common.h:47:50-47:64: static inline int qmem_alloc(struct device *dev, struct qmem **q,
-
drivers/net/ethernet/marvell/skge.c:2486:45-2486:49: static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
-
drivers/net/ethernet/marvell/skge.c:2517:47-2517:51: static void skge_qset(struct skge_port *skge, u16 q,
-
drivers/net/ethernet/marvell/sky2.c:1036:45-1036:49: static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
-
drivers/net/ethernet/marvell/sky2.c:1076:43-1076:47: static void sky2_qset(struct sky2_hw *hw, u16 q)
-
drivers/net/ethernet/marvell/sky2.c:1125:53-1125:62: static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
-
drivers/net/ethernet/marvell/sky2.c:2916:62-2916:66: static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:92:46-92:71: mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:100:43-100:68: mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:135:48-135:73: mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:186:48-186:73: mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:257:47-257:72: mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:279:46-279:71: mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:287:50-287:75: mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:310:50-310:75: mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:332:47-332:72: mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:339:52-339:77: int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:133:46-133:70: static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:138:41-138:65: static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:145:31-145:55: mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:151:40-151:64: mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:161:40-161:64: mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:168:39-168:63: static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:173:37-173:61: static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:238:9-238:33: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:248:13-248:37: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:258:10-258:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:265:10-265:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:274:9-274:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:280:46-280:70: static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:287:10-287:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:320:11-320:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:393:10-393:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:444:11-444:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:457:7-457:31: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:468:9-468:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:505:10-505:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:533:10-533:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:620:10-620:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:691:38-691:62: static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:746:36-746:66: static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:752:34-752:64: static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:759:9-759:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:789:10-789:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:804:38-804:62: static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:923:5-923:29: struct mlxsw_pci_queue *q, u8 q_num)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:984:6-984:30: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/netronome/nfp/flower/cmsg.h:685:15-685:18: u8 vnic, u8 q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:823:39-823:51: static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:834:39-834:51: static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:839:33-839:45: static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:863:39-863:51: static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:874:39-874:51: static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:664:41-664:61: void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:697:4-697:24: struct ionic_queue *q, unsigned int index, const char *name,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:725:18-725:38: void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:737:22-737:42: void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:749:21-749:41: void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:761:19-761:39: void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:790:31-790:51: static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:801:22-801:42: void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:301:48-301:68: static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:313:38-313:58: static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:271:29-271:49: static void ionic_adminq_cb(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:292:33-292:53: bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:13:35-13:55: static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:19:35-19:55: static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:25:30-25:50: bool ionic_txq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:57:30-57:50: bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:86:45-86:65: static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:91:32-91:52: static int ionic_rx_page_alloc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:132:32-132:52: static void ionic_rx_page_free(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:152:34-152:54: static bool ionic_rx_buf_recycle(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:175:39-175:59: static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:235:43-235:63: static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:276:28-276:48: static void ionic_rx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:405:41-405:61: static inline void ionic_write_cmb_desc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:413:20-413:40: void ionic_rx_fill(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:506:21-506:41: void ionic_rx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:682:39-682:59: static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:699:37-699:57: static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:716:29-716:49: static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:765:38-765:58: static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:785:28-785:48: static void ionic_tx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:887:21-887:41: void ionic_tx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:956:31-956:51: static void ionic_tx_tso_post(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:993:25-993:45: static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1119:32-1119:52: static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1158:35-1158:55: static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1194:32-1194:52: static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1211:21-1211:41: static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1239:34-1239:54: static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1265:32-1265:52: static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-
drivers/net/ethernet/renesas/ravb_main.c:194:50-194:54: static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
-
drivers/net/ethernet/renesas/ravb_main.c:237:62-237:66: static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:262:61-262:65: static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:289:53-289:57: static void ravb_ring_free(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:328:64-328:68: static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:359:63-359:67: static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:390:55-390:59: static void ravb_ring_format(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:432:64-432:68: static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:445:63-445:67: static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:459:52-459:56: static int ravb_ring_init(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:758:64-758:68: static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:887:63-887:67: static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1002:58-1002:62: static bool ravb_rx(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1120:59-1120:63: static bool ravb_queue_interrupt(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1251:62-1251:66: static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
-
drivers/net/ethernet/sfc/ptp.c:869:38-869:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/ptp.c:1294:57-1294:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:835:38-835:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:1226:57-1226:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/via/via-velocity.c:1759:9-1759:13: int q, int n)
-
drivers/net/hyperv/netvsc_trace.h:65:1-65:1: DEFINE_EVENT(rndis_msg_class, rndis_send,
-
drivers/net/hyperv/netvsc_trace.h:71:1-71:1: DEFINE_EVENT(rndis_msg_class, rndis_recv,
-
drivers/net/tap.c:33:48-33:66: static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:39:29-39:47: static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:49:29-49:47: static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:80:41-80:59: static inline bool tap_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:86:32-86:50: static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-
drivers/net/tap.c:91:39-91:57: static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-
drivers/net/tap.c:145:8-145:26: struct tap_queue *q)
-
drivers/net/tap.c:166:5-166:23: struct tap_queue *q)
-
drivers/net/tap.c:187:30-187:48: static int tap_disable_queue(struct tap_queue *q)
-
drivers/net/tap.c:222:27-222:45: static void tap_put_queue(struct tap_queue *q)
-
drivers/net/tap.c:633:29-633:47: static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
-
drivers/net/tap.c:786:29-786:47: static ssize_t tap_put_user(struct tap_queue *q,
-
drivers/net/tap.c:844:28-844:46: static ssize_t tap_do_read(struct tap_queue *q,
-
drivers/net/tap.c:910:40-910:58: static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
-
drivers/net/tap.c:948:24-948:42: static int set_offload(struct tap_queue *q, unsigned long arg)
-
drivers/net/tap.c:1166:29-1166:47: static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
-
drivers/net/usb/catc.c:572:48-572:67: static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
-
drivers/net/usb/lan78xx.c:2468:49-2468:70: static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:715:45-715:66: static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:774:34-774:55: static void wait_skb_queue_empty(struct sk_buff_head *q)
-
drivers/net/virtio_net.c:586:62-586:66: static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-
drivers/net/wireless/ath/ath5k/trace.h:39:1-39:1: TRACE_EVENT(ath5k_tx,
-
drivers/net/wireless/ath/ath5k/trace.h:65:1-65:1: TRACE_EVENT(ath5k_tx_complete,
-
drivers/net/wireless/ath/ath6kl/txrx.c:845:34-845:55: static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
-
drivers/net/wireless/ath/ath9k/mac.c:46:42-46:46: u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:52:43-52:47: void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
-
drivers/net/wireless/ath/ath9k/mac.c:58:42-58:46: void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:65:46-65:50: u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:170:49-170:53: bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:196:48-196:52: bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:261:48-261:52: bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:337:64-337:68: static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:346:49-346:53: bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:367:47-367:51: bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/broadcom/b43/pio.c:24:28-24:52: static u16 generate_cookie(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:178:39-178:63: static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:192:37-192:61: static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:201:37-201:61: static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:317:33-317:57: static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:370:33-370:57: static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:440:25-440:49: static int pio_tx_frame(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:596:26-596:50: static bool pio_rx_frame(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:762:17-762:41: void b43_pio_rx(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:777:38-777:62: static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:790:37-790:61: static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.h:109:36-109:60: static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:114:36-114:60: static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:119:38-119:62: static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:125:38-125:62: static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:132:36-132:60: static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:137:36-137:60: static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:142:38-142:62: static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:148:38-148:62: static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c:621:61-621:74: static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c:2754:33-2754:46: static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:402:8-402:26: struct list_head *q, int *counter)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:421:6-421:24: struct list_head *q, struct brcmf_usbreq *req,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:433:20-433:38: brcmf_usbdev_qinit(struct list_head *q, int qsize)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:467:30-467:48: static void brcmf_usb_free_q(struct list_head *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4337:9-4337:34: struct ipw2100_bd_queue *q, int entries)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4358:54-4358:79: static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4375:5-4375:30: struct ipw2100_bd_queue *q, u32 base, u32 size,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3668:31-3668:58: static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3680:38-3680:63: static inline int ipw_tx_queue_space(const struct clx2_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3710:51-3710:70: static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3736:9-3736:31: struct clx2_tx_queue *q,
-
drivers/net/wireless/intel/iwlegacy/common.c:2535:19-2535:45: il_rx_queue_space(const struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2552:50-2552:70: il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2905:16-2905:39: il_queue_space(const struct il_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2927:35-2927:52: il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
-
drivers/net/wireless/intel/iwlegacy/common.h:848:15-848:38: il_queue_used(const struct il_queue *q, int i)
-
drivers/net/wireless/intel/iwlegacy/common.h:859:16-859:33: il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:475:59-475:63: static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:677:44-677:66: int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:899:27-899:43: static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:22:41-22:63: static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:92:33-92:55: static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-
drivers/net/wireless/mediatek/mt76/dma.c:184:41-184:60: mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:193:44-193:63: mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:210:43-210:62: mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:255:40-255:59: mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:317:47-317:66: mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:341:43-341:62: mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:348:43-348:62: mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
-
drivers/net/wireless/mediatek/mt76/dma.c:388:40-388:59: mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:442:40-442:59: mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
-
drivers/net/wireless/mediatek/mt76/dma.c:463:49-463:68: mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:493:45-493:64: mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:589:40-589:59: mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:633:46-633:65: int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
-
drivers/net/wireless/mediatek/mt76/dma.c:687:44-687:63: mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:726:43-726:62: mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:775:41-775:60: mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-
drivers/net/wireless/mediatek/mt76/dma.c:802:43-802:62: mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:565:49-565:68: int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:759:57-759:74: static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:795:57-795:74: static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mac80211.c:818:36-818:53: void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:1361:50-1361:67: void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt76.h:1432:41-1432:60: static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mt76.h:1454:24-1454:43: mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-
drivers/net/wireless/mediatek/mt76/mt7603/core.c:6:53-6:70: void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:71:49-71:66: void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:111:46-111:65: mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mac.c:1638:49-1638:66: void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c:67:48-67:65: mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:108:48-108:67: mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:242:54-242:71: void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c:35:50-35:67: void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/dma.c:583:51-583:70: mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:245:46-245:65: mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:273:6-273:23: enum mt76_rxq_id q, u32 *info)
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:1121:49-1121:66: void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c:909:9-909:26: enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:707:49-707:66: void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7921/pci.c:32:48-32:65: mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7996/mac.c:1532:49-1532:66: void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c:252:9-252:26: enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:367:25-367:44: mt76s_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:383:46-383:65: mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:429:57-429:76: static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:517:42-517:61: mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:548:46-548:65: mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:582:49-582:68: static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:239:53-239:72: static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:434:18-434:37: mt76_txq_stopped(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:441:43-441:62: mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/tx.c:699:51-699:70: void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:321:40-321:59: mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-
drivers/net/wireless/mediatek/mt76/usb.c:354:39-354:58: mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:391:42-391:61: mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:439:25-439:44: mt76u_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:603:46-603:65: mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:697:43-697:62: mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:854:42-854:61: mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:888:49-888:68: static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:461:35-461:60: static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:485:7-485:32: struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/tx.c:21:17-21:20: static u8 q2hwq(u8 q)
-
drivers/net/wireless/st/cw1200/debug.c:70:10-70:31: struct cw1200_queue *q)
-
drivers/net/wireless/ti/wlcore/tx.c:508:33-508:36: struct wl1271_link *lnk, u8 q)
-
drivers/nvme/host/apple.c:208:54-208:79: static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:216:44-216:69: static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:273:31-273:56: static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
-
drivers/nvme/host/apple.c:283:35-283:60: static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:569:43-569:68: static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:577:49-577:74: apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:585:42-585:67: static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:607:46-607:71: static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:619:32-619:57: static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:642:34-642:59: static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
-
drivers/nvme/host/apple.c:968:35-968:60: static void apple_nvme_init_queue(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:1295:7-1295:32: struct apple_nvme_queue *q)
-
drivers/nvme/host/core.c:1033:28-1033:50: int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1065:26-1065:48: int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1817:3-1817:25: struct request_queue *q)
-
drivers/nvme/host/core.c:2698:6-2698:42: const struct nvme_core_quirk_entry *q)
-
drivers/nvme/host/ioctl.c:148:48-148:70: static struct request *nvme_alloc_user_request(struct request_queue *q,
-
drivers/nvme/host/ioctl.c:217:33-217:55: static int nvme_submit_user_cmd(struct request_queue *q,
-
drivers/nvme/target/fc.c:2118:22-2118:49: queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
-
drivers/pcmcia/cistpl.c:761:37-761:45: static int parse_strings(u_char *p, u_char *q, int max,
-
drivers/pcmcia/cistpl.c:906:39-906:47: static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
-
drivers/pcmcia/cistpl.c:943:40-943:48: static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
-
drivers/pcmcia/cistpl.c:978:36-978:44: static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-
drivers/pcmcia/cistpl.c:1022:37-1022:45: static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
-
drivers/pcmcia/cistpl.c:1063:37-1063:45: static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
-
drivers/platform/chrome/wilco_ec/event.c:118:38-118:61: static inline bool event_queue_empty(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:124:37-124:60: static inline bool event_queue_full(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:130:41-130:64: static struct ec_event *event_queue_pop(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:148:42-148:65: static struct ec_event *event_queue_push(struct ec_event_queue *q,
-
drivers/platform/chrome/wilco_ec/event.c:161:30-161:53: static void event_queue_free(struct ec_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:644:35-644:60: static void ssam_event_queue_push(struct ssam_event_queue *q,
-
drivers/platform/surface/aggregator/controller.c:659:53-659:78: static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:676:39-676:64: static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
-
drivers/ptp/ptp_clock.c:37:30-37:60: static inline int queue_free(struct timestamp_event_queue *q)
-
drivers/ptp/ptp_private.h:79:29-79:59: static inline int queue_cnt(struct timestamp_event_queue *q)
-
drivers/scsi/aacraid/comminit.c:259:50-259:69: static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
-
drivers/scsi/aacraid/commsup.c:800:44-800:63: int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
-
drivers/scsi/aacraid/commsup.c:832:46-832:64: void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
-
drivers/scsi/aacraid/dpcsup.c:39:34-39:53: unsigned int aac_response_normal(struct aac_queue * q)
-
drivers/scsi/aacraid/dpcsup.c:158:33-158:51: unsigned int aac_command_normal(struct aac_queue *q)
-
drivers/scsi/be2iscsi/be.h:51:37-51:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:56:35-56:57: static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
-
drivers/scsi/be2iscsi/be.h:61:37-61:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:66:35-66:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:71:35-71:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_cmds.c:900:54-900:76: int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:2994:26-2994:48: static int be_fill_queue(struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:3319:53-3319:75: static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_main.c:3329:53-3329:75: static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
-
drivers/scsi/bfa/bfa_cs.h:157:20-157:38: bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
-
drivers/scsi/csiostor/csio_scsi.c:1159:48-1159:66: csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
-
drivers/scsi/csiostor/csio_scsi.c:1233:46-1233:64: csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
-
drivers/scsi/csiostor/csio_wr.c:1000:24-1000:39: csio_wr_avail_qcredits(struct csio_q *q)
-
drivers/scsi/csiostor/csio_wr.c:1042:40-1042:55: csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/csiostor/csio_wr.c:1111:18-1111:33: csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
-
drivers/scsi/csiostor/csio_wr.c:1128:40-1128:55: csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:488:40-488:59: __sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:499:37-499:56: __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
-
drivers/scsi/elx/libefc_sli/sli4.c:545:36-545:55: sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:672:39-672:58: __sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:764:3-764:22: struct sli4_queue *q, u32 n_entries,
-
drivers/scsi/elx/libefc_sli/sli4.c:995:35-995:54: sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:1067:37-1067:56: sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1087:34-1087:53: sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1125:33-1125:52: sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1147:33-1147:52: sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1168:33-1168:52: sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1197:32-1197:51: sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1239:32-1239:51: sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1283:32-1283:51: sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:3632:11-3632:30: struct sli4_queue *q, int num_q, u32 shift,
-
drivers/scsi/hpsa.c:993:53-993:56: static inline u32 next_command(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.c:6935:70-6935:73: static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:489:68-489:71: static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:527:2-527:29: __attribute__((unused)) u8 q)
-
drivers/scsi/hpsa.h:590:71-590:74: static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/libiscsi.c:2769:17-2769:36: iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
-
drivers/scsi/libiscsi.c:2809:22-2809:41: void iscsi_pool_free(struct iscsi_pool *q)
-
drivers/scsi/lpfc/lpfc_attr.c:1337:41-1337:59: lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
-
drivers/scsi/lpfc/lpfc_debugfs.c:4177:28-4177:47: lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
-
drivers/scsi/lpfc/lpfc_debugfs.h:341:20-341:39: lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
-
drivers/scsi/lpfc/lpfc_debugfs.h:389:19-389:38: lpfc_debug_dump_q(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:263:18-263:37: lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
-
drivers/scsi/lpfc/lpfc_sli.c:358:22-358:41: lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
-
drivers/scsi/lpfc/lpfc_sli.c:380:18-380:37: lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
-
drivers/scsi/lpfc/lpfc_sli.c:419:22-419:41: lpfc_sli4_mq_release(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:441:18-441:37: lpfc_sli4_eq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:473:23-473:42: lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:492:27-492:46: lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:513:46-513:65: lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:551:50-551:69: lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:682:18-682:37: lpfc_sli4_cq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:733:46-733:65: lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:766:50-766:69: lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli4.h:1178:34-1178:53: static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
-
drivers/scsi/scsi_bsg.c:12:30-12:52: static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
drivers/scsi/scsi_dh.c:251:22-251:44: int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-
drivers/scsi/scsi_dh.c:298:24-298:46: int scsi_dh_set_params(struct request_queue *q, const char *params)
-
drivers/scsi/scsi_dh.c:320:20-320:42: int scsi_dh_attach(struct request_queue *q, const char *name)
-
drivers/scsi/scsi_dh.c:359:43-359:65: const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-
drivers/scsi/scsi_ioctl.c:216:29-216:51: static int sg_emulated_host(struct request_queue *q, int __user *p)
-
drivers/scsi/scsi_ioctl.c:504:26-504:48: static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
-
drivers/scsi/scsi_lib.c:304:29-304:51: static void scsi_kick_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:447:28-447:50: static void scsi_run_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1132:36-1132:58: struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
-
drivers/scsi/scsi_lib.c:1249:40-1249:62: static inline int scsi_dev_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1338:41-1338:63: static inline int scsi_host_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1395:30-1395:52: static bool scsi_mq_lld_busy(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1648:32-1648:54: static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
-
drivers/scsi/scsi_lib.c:1662:31-1662:53: static int scsi_mq_get_budget(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1877:49-1877:71: void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
-
drivers/scsi/scsi_lib.c:2011:44-2011:66: struct scsi_device *scsi_device_from_queue(struct request_queue *q)
-
drivers/scsi/scsi_transport_fc.c:4344:15-4344:37: fc_bsg_remove(struct request_queue *q)
-
drivers/scsi/sg.c:847:30-847:52: static int max_sectors_bytes(struct request_queue *q)
-
drivers/spi/spi-fsl-qspi.c:278:37-278:54: static inline int needs_swap_endian(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:283:34-283:51: static inline int needs_4x_clock(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:288:37-288:54: static inline int needs_fill_txfifo(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:293:42-293:59: static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:298:42-298:59: static inline int needs_amba_base_offset(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:303:37-303:54: static inline int needs_tdh_setting(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:312:40-312:57: static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-
drivers/spi/spi-fsl-qspi.c:324:25-324:42: static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:332:23-332:40: static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:356:36-356:53: static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
-
drivers/spi/spi-fsl-qspi.c:416:34-416:51: static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:472:37-472:54: static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:492:41-492:58: static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:508:33-508:50: static void fsl_qspi_invalidate(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:526:33-526:50: static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
-
drivers/spi/spi-fsl-qspi.c:552:31-552:48: static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:559:34-559:51: static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:584:34-584:51: static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:605:27-605:44: static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:630:37-630:54: static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
-
drivers/spi/spi-fsl-qspi.c:721:35-721:52: static int fsl_qspi_default_setup(struct fsl_qspi *q)
-
drivers/staging/fieldbus/anybuss/host.c:324:28-324:42: ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd)
-
drivers/staging/fieldbus/anybuss/host.c:336:36-336:50: ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:353:41-353:55: ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:862:48-862:62: static void process_q(struct anybuss_host *cd, struct kfifo *q)
-
drivers/staging/fieldbus/anybuss/host.c:1226:44-1226:58: static int taskq_alloc(struct device *dev, struct kfifo *q)
-
drivers/staging/media/imx/imx-media-csc-scaler.c:501:43-501:61: static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
-
drivers/staging/media/imx/imx-media-csc-scaler.c:550:43-550:61: static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/ipu3/ipu3-css.c:168:36-168:59: static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
-
drivers/staging/media/meson/vdec/vdec.c:164:33-164:51: static void process_num_buffers(struct vb2_queue *q,
-
drivers/staging/media/meson/vdec/vdec.c:189:29-189:47: static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/staging/media/meson/vdec/vdec.c:280:33-280:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/meson/vdec/vdec.c:395:33-395:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/rkvdec/rkvdec.c:549:35-549:53: static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/rkvdec/rkvdec.c:592:35-592:53: static void rkvdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/sunxi/cedrus/cedrus.h:241:11-241:29: struct vb2_queue *q,
-
drivers/ufs/core/ufshcd-crypto.c:236:50-236:72: void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:366:39-366:60: static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:377:51-377:72: static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:384:43-384:64: static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:389:48-389:69: static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:396:46-396:67: static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:401:51-401:72: static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
-
drivers/usb/musb/musb_host.h:46:40-46:58: static inline struct musb_qh *first_qh(struct list_head *q)
-
drivers/usb/serial/digi_acceleport.c:344:2-344:21: wait_queue_head_t *q, long timeout,
-
fs/ext2/inode.c:967:41-967:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/ext2/inode.c:1068:67-1068:75: static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
-
fs/ext2/inode.c:1108:64-1108:72: static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
-
fs/ext4/indirect.c:753:41-753:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/jffs2/compr_rubin.c:164:4-164:18: unsigned long q)
-
fs/minix/itree_common.c:215:42-215:51: static inline int all_zeroes(block_t *p, block_t *q)
-
fs/minix/itree_common.c:263:63-263:72: static inline void free_data(struct inode *inode, block_t *p, block_t *q)
-
fs/minix/itree_common.c:276:60-276:69: static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
-
fs/smb/client/dir.c:804:54-804:67: static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
-
fs/sysv/itree.c:269:46-269:59: static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:326:67-326:80: static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:338:64-338:77: static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
-
fs/xfs/xfs_trans_dquot.c:278:2-278:20: struct xfs_dqtrx *q)
-
include/crypto/b128ops.h:64:53-64:65: static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
-
include/crypto/b128ops.h:70:56-70:69: static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-
include/crypto/b128ops.h:75:56-75:69: static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
-
include/linux/blk-integrity.h:63:40-63:62: blk_integrity_queue_supports_integrity(struct request_queue *q)
-
include/linux/blk-integrity.h:68:53-68:75: static inline void blk_queue_max_integrity_segments(struct request_queue *q,
-
include/linux/blk-integrity.h:75:30-75:58: queue_max_integrity_segments(const struct request_queue *q)
-
include/linux/blk-mq.h:914:44-914:66: static inline bool blk_should_fake_timeout(struct request_queue *q)
-
include/linux/blk-mq.h:1147:33-1147:55: static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
include/linux/blkdev.h:615:32-615:54: static inline bool queue_is_mq(struct request_queue *q)
-
include/linux/blkdev.h:621:48-621:70: static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-
include/linux/blkdev.h:633:23-633:45: blk_queue_zoned_model(struct request_queue *q)
-
include/linux/blkdev.h:640:39-640:61: static inline bool blk_queue_is_zoned(struct request_queue *q)
-
include/linux/blkdev.h:719:44-719:66: static inline unsigned int blk_queue_depth(struct request_queue *q)
-
include/linux/blkdev.h:1092:52-1092:80: static inline unsigned long queue_segment_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1097:49-1097:77: static inline unsigned long queue_virt_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1102:46-1102:74: static inline unsigned int queue_max_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1107:44-1107:66: static inline unsigned int queue_max_bytes(struct request_queue *q)
-
include/linux/blkdev.h:1112:49-1112:77: static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1117:49-1117:77: static inline unsigned short queue_max_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1122:57-1122:85: static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1127:51-1127:79: static inline unsigned int queue_max_segment_size(const struct request_queue *q)
-
include/linux/blkdev.h:1132:58-1132:86: static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1151:49-1151:77: static inline unsigned queue_logical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1166:54-1166:82: static inline unsigned int queue_physical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1176:41-1176:69: static inline unsigned int queue_io_min(const struct request_queue *q)
-
include/linux/blkdev.h:1186:41-1186:69: static inline unsigned int queue_io_opt(const struct request_queue *q)
-
include/linux/blkdev.h:1197:30-1197:58: queue_zone_write_granularity(const struct request_queue *q)
-
include/linux/blkdev.h:1314:39-1314:67: static inline int queue_dma_alignment(const struct request_queue *q)
-
include/linux/blkdev.h:1331:34-1331:56: static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
-
include/linux/blktrace_api.h:63:51-63:73: static inline bool blk_trace_note_message_enabled(struct request_queue *q)
-
include/linux/fortify-string.h:143:35-143:47: char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
-
include/linux/fortify-string.h:170:34-170:46: char *strcat(char * const POS p, const char *q)
-
include/linux/fortify-string.h:268:53-268:76: __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:319:54-319:77: __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:395:35-395:58: char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
-
include/linux/fortify-string.h:652:39-652:63: int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
-
include/linux/fortify-string.h:721:34-721:57: char *strcpy(char * const POS p, const char * const POS q)
-
include/linux/mlx4/qp.h:497:29-497:33: static inline u16 folded_qp(u32 q)
-
include/linux/netdevice.h:688:47-688:74: static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
-
include/linux/netdevice.h:697:49-697:70: static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
-
include/linux/netdevice.h:3568:42-3568:63: static inline void netdev_tx_reset_queue(struct netdev_queue *q)
-
include/linux/sunrpc/sched.h:273:38-273:67: static inline const char * rpc_qname(const struct rpc_wait_queue *q)
-
include/linux/sunrpc/sched.h:278:46-278:69: static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
-
include/media/videobuf-core.h:162:40-162:63: static inline void videobuf_queue_lock(struct videobuf_queue *q)
-
include/media/videobuf-core.h:168:42-168:65: static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-
include/media/videobuf2-core.h:669:49-669:67: static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1119:37-1119:55: static inline bool vb2_is_streaming(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1137:41-1137:59: static inline bool vb2_fileio_is_active(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1148:32-1148:50: static inline bool vb2_is_busy(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1157:38-1157:56: static inline void *vb2_get_drv_priv(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1215:47-1215:65: static inline bool vb2_start_streaming_called(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1224:51-1224:69: static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1239:49-1239:67: static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
-
include/media/videobuf2-v4l2.h:317:38-317:56: static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
-
include/net/inet_frag.h:148:34-148:58: static inline void inet_frag_put(struct inet_frag_queue *q)
-
include/net/ipv6_frag.h:32:33-32:57: static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
-
include/net/pkt_cls.h:171:19-171:33: __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
-
include/net/pkt_cls.h:197:21-197:35: __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
-
include/net/pkt_cls.h:216:10-216:16: void *q, struct tcf_result *res,
-
include/net/pkt_sched.h:23:32-23:46: static inline void *qdisc_priv(struct Qdisc *q)
-
include/net/pkt_sched.h:122:30-122:44: static inline void qdisc_run(struct Qdisc *q)
-
include/net/pkt_sched.h:138:37-138:51: static inline struct net *qdisc_net(struct Qdisc *q)
-
include/net/sch_generic.h:169:42-169:62: static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-
include/net/sch_generic.h:504:30-504:50: static inline int qdisc_qlen(const struct Qdisc *q)
-
include/net/sch_generic.h:509:34-509:54: static inline int qdisc_qlen_sum(const struct Qdisc *q)
-
include/net/sch_generic.h:564:34-564:48: static inline void sch_tree_lock(struct Qdisc *q)
-
include/net/sch_generic.h:572:36-572:50: static inline void sch_tree_unlock(struct Qdisc *q)
-
include/net/sch_generic.h:1294:38-1294:58: static inline void qdisc_synchronize(const struct Qdisc *q)
-
include/net/sctp/structs.h:1135:35-1135:53: static inline void sctp_outq_cork(struct sctp_outq *q)
-
include/trace/events/block.h:256:1-256:1: TRACE_EVENT(block_bio_complete,
-
include/trace/events/block.h:379:1-379:1: TRACE_EVENT(block_plug,
-
include/trace/events/block.h:424:1-424:1: DEFINE_EVENT(block_unplug, block_unplug,
-
include/trace/events/qdisc.h:77:1-77:1: TRACE_EVENT(qdisc_reset,
-
include/trace/events/qdisc.h:102:1-102:1: TRACE_EVENT(qdisc_destroy,
-
include/trace/events/sunrpc.h:431:1-431:1: DEFINE_RPC_QUEUED_EVENT(sleep);
-
include/trace/events/sunrpc.h:432:1-432:1: DEFINE_RPC_QUEUED_EVENT(wakeup);
-
include/trace/events/v4l2.h:181:1-181:1: DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
-
include/trace/events/v4l2.h:245:1-245:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
-
include/trace/events/v4l2.h:250:1-250:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
-
include/trace/events/v4l2.h:255:1-255:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
-
include/trace/events/v4l2.h:260:1-260:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
-
include/trace/events/vb2.h:11:1-11:1: DECLARE_EVENT_CLASS(vb2_event_class,
-
include/trace/events/vb2.h:46:1-46:1: DEFINE_EVENT(vb2_event_class, vb2_buf_done,
-
include/trace/events/vb2.h:51:1-51:1: DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
-
include/trace/events/vb2.h:56:1-56:1: DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
-
include/trace/events/vb2.h:61:1-61:1: DEFINE_EVENT(vb2_event_class, vb2_qbuf,
-
ipc/sem.c:646:61-646:79: static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:719:56-719:74: static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:786:46-786:64: static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
-
ipc/sem.c:799:49-799:67: static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:816:56-816:74: static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:1072:57-1072:75: static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
-
kernel/auditfilter.c:1078:39-1078:60: static void audit_list_rules(int seq, struct sk_buff_head *q)
-
kernel/cgroup/cpuset.c:581:53-581:74: static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
-
kernel/futex/core.c:499:22-499:38: void __futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:513:40-513:56: struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
-
kernel/futex/core.c:543:20-543:36: void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/core.c:573:19-573:35: int futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:620:23-620:39: void futex_unqueue_pi(struct futex_q *q)
-
kernel/futex/futex.h:170:32-170:48: static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/pi.c:683:54-683:70: static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:855:52-855:68: static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:884:39-884:55: int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)
-
kernel/futex/requeue.c:74:20-74:36: void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
-
kernel/futex/requeue.c:92:45-92:61: static inline bool futex_requeue_pi_prepare(struct futex_q *q,
-
kernel/futex/requeue.c:125:46-125:62: static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
-
kernel/futex/requeue.c:156:48-156:64: static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
-
kernel/futex/requeue.c:223:28-223:44: void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
-
kernel/futex/requeue.c:692:8-692:24: struct futex_q *q,
-
kernel/futex/waitwake.c:115:50-115:66: void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q)
-
kernel/futex/waitwake.c:328:53-328:69: void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
-
kernel/futex/waitwake.c:578:8-578:24: struct futex_q *q, struct futex_hash_bucket **hb)
-
kernel/sched/swait.c:6:30-6:55: void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-
kernel/sched/swait.c:21:22-21:47: void swake_up_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:41:26-41:51: void swake_up_all_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:47:19-47:44: void swake_up_one(struct swait_queue_head *q)
-
kernel/sched/swait.c:61:19-61:44: void swake_up_all(struct swait_queue_head *q)
-
kernel/sched/swait.c:84:25-84:50: void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:91:33-91:58: void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:102:29-102:54: long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:125:21-125:46: void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:132:19-132:44: void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/signal.c:450:29-450:46: static void __sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1935:20-1935:37: void sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1960:19-1960:36: int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
-
kernel/trace/blktrace.c:314:28-314:50: static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:380:31-380:53: static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:388:31-388:53: static int __blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:402:22-402:44: int blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:514:31-514:53: static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:621:30-621:52: static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:642:21-642:43: int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:657:35-657:57: static int compat_blk_trace_setup(struct request_queue *q, char *name,
-
kernel/trace/blktrace.c:690:34-690:56: static int __blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:705:25-705:47: int blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:772:25-772:47: void blk_trace_shutdown(struct request_queue *q)
-
kernel/trace/blktrace.c:780:35-780:57: static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:891:31-891:53: static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-
kernel/trace/blktrace.c:915:12-915:34: struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:943:46-943:68: static void blk_add_trace_plug(void *ignore, struct request_queue *q)
-
kernel/trace/blktrace.c:954:48-954:70: static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
-
kernel/trace/blktrace.c:1607:35-1607:57: static int blk_trace_remove_queue(struct request_queue *q)
-
kernel/trace/blktrace.c:1627:34-1627:56: static int blk_trace_setup_queue(struct request_queue *q,
-
lib/bch.c:816:29-816:45: const struct gf_poly *b, struct gf_poly *q)
-
lib/crypto/curve25519-hacl64.c:547:12-547:17: u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:559:24-559:29: u64 *nqpq2, u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:569:7-569:12: u64 *q, u8 byt, u32 i)
-
lib/crypto/curve25519-hacl64.c:580:22-580:27: u64 *nqpq2, u64 *q,
-
lib/crypto/curve25519-hacl64.c:590:47-590:52: static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
-
mm/swapfile.c:1166:6-1166:31: struct swap_info_struct *q)
-
net/core/dev.c:3056:32-3056:46: static void __netif_reschedule(struct Qdisc *q)
-
net/core/dev.c:3070:23-3070:37: void __netif_schedule(struct Qdisc *q)
-
net/core/dev.c:3767:51-3767:65: static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
-
net/core/dev.c:3779:55-3779:69: static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
-
net/core/gen_stats.c:341:10-341:50: const struct gnet_stats_queue __percpu *q)
-
net/core/gen_stats.c:358:6-358:37: const struct gnet_stats_queue *q)
-
net/core/gen_stats.c:389:9-389:34: struct gnet_stats_queue *q, __u32 qlen)
-
net/ieee802154/6lowpan/reassembly.c:36:30-36:54: static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/inet_fragment.c:54:36-54:60: static void fragrun_append_to_last(struct inet_frag_queue *q,
-
net/ipv4/inet_fragment.c:65:28-65:52: static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
-
net/ipv4/inet_fragment.c:287:24-287:48: void inet_frag_destroy(struct inet_frag_queue *q)
-
net/ipv4/inet_fragment.c:383:28-383:52: int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:447:31-447:55: void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:516:29-516:53: void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-
net/ipv4/inet_fragment.c:585:37-585:61: struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
-
net/ipv4/ip_fragment.c:82:27-82:51: static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/ip_fragment.c:96:27-96:51: static void ip4_frag_free(struct inet_frag_queue *q)
-
net/netfilter/nfnetlink_queue.c:103:17-103:40: instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
-
net/netfilter/nfnetlink_queue.c:117:17-117:40: instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
-
net/netfilter/nfnetlink_queue.c:184:18-184:41: instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
-
net/netfilter/nfnetlink_queue.c:1065:25-1065:48: verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
-
net/rds/message.c:75:33-75:61: void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
-
net/rds/rds.h:382:49-382:77: static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
-
net/rose/rose_in.c:102:101-102:105: static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/rose/rose_subr.c:201:56-201:61: int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
-
net/sched/cls_api.c:826:60-826:74: static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:866:63-866:77: static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:984:60-984:74: static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1149:46-1149:61: static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1224:32-1224:46: static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
-
net/sched/cls_api.c:1244:60-1244:74: static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1281:54-1281:68: static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1318:58-1318:73: static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1352:31-1352:45: static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
-
net/sched/cls_api.c:1374:11-1374:25: struct Qdisc *q,
-
net/sched/cls_api.c:1395:11-1395:25: struct Qdisc *q,
-
net/sched/cls_api.c:1410:5-1410:19: struct Qdisc *q,
-
net/sched/cls_api.c:1425:51-1425:65: int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-
net/sched/cls_api.c:1484:46-1484:60: struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
-
net/sched/cls_api.c:1500:49-1500:63: void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1966:5-1966:19: struct Qdisc *q, u32 parent, void *fh,
-
net/sched/cls_api.c:2027:31-2027:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2056:35-2056:49: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2094:31-2094:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2652:53-2652:67: static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
-
net/sched/cls_basic.c:261:71-261:77: static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_bpf.c:636:11-636:17: void *q, unsigned long base)
-
net/sched/cls_flower.c:3421:68-3421:74: static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_fw.c:416:68-416:74: static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_matchall.c:390:70-390:76: static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_route.c:649:72-649:78: static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_u32.c:1250:69-1250:75: static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/sch_api.c:280:21-280:35: void qdisc_hash_add(struct Qdisc *q, bool invisible)
-
net/sched/sch_api.c:291:21-291:35: void qdisc_hash_del(struct Qdisc *q)
-
net/sched/sch_api.c:906:47-906:61: static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-
net/sched/sch_api.c:990:34-990:48: static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
-
net/sched/sch_api.c:1398:23-1398:37: static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
-
net/sched/sch_api.c:1414:15-1414:29: check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
-
net/sched/sch_api.c:1816:48-1816:62: static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_api.c:1868:25-1868:39: struct nlmsghdr *n, struct Qdisc *q,
-
net/sched/sch_api.c:1890:9-1890:23: struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1951:33-1951:47: static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1982:28-1982:42: static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
-
net/sched/sch_api.c:2145:29-2145:43: static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2155:33-2155:47: static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
-
net/sched/sch_cake.c:646:22-646:44: static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-
net/sched/sch_cake.c:1151:40-1151:64: static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
-
net/sched/sch_cake.c:1314:31-1314:55: static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
-
net/sched/sch_cake.c:1349:26-1349:50: static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
-
net/sched/sch_cake.c:1396:28-1396:52: static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
-
net/sched/sch_cake.c:1408:34-1408:64: static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1415:26-1415:50: static void cake_heapify(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1452:29-1452:53: static void cake_heapify_up(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1468:32-1468:56: static int cake_advance_shaper(struct cake_sched_data *q,
-
net/sched/sch_cake.c:2961:25-2961:39: static void cake_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_cbs.c:251:5-251:28: struct cbs_sched_data *q)
-
net/sched/sch_cbs.c:276:55-276:78: static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-
net/sched/sch_cbs.c:309:55-309:78: static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
-
net/sched/sch_choke.c:75:31-75:62: static unsigned int choke_len(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:81:20-81:51: static int use_ecn(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:87:25-87:56: static int use_harddrop(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:93:34-93:59: static void choke_zap_head_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:103:34-103:59: static void choke_zap_tail_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:179:42-179:73: static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
-
net/sched/sch_choke.c:199:32-199:63: static bool choke_match_random(const struct choke_sched_data *q,
-
net/sched/sch_etf.c:297:5-297:28: struct etf_sched_data *q)
-
net/sched/sch_etf.c:319:55-319:78: static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
-
net/sched/sch_ets.c:190:33-190:51: static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
-
net/sched/sch_fifo.c:227:20-227:34: int fifo_set_limit(struct Qdisc *q, unsigned int limit)
-
net/sched/sch_fq.c:172:37-172:59: static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:179:35-179:57: static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:217:19-217:41: static void fq_gc(struct fq_sched_data *q,
-
net/sched/sch_fq.c:261:57-261:79: static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
-
net/sched/sch_fq.c:437:9-437:37: const struct fq_sched_data *q)
-
net/sched/sch_fq.c:499:32-499:54: static void fq_check_throttled(struct fq_sched_data *q, u64 now)
-
net/sched/sch_fq.c:697:23-697:45: static void fq_rehash(struct fq_sched_data *q,
-
net/sched/sch_fq_codel.c:70:35-70:69: static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-
net/sched/sch_fq_codel.c:608:29-608:43: static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_fq_pie.c:73:33-73:65: static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
-
net/sched/sch_generic.c:38:38-38:52: static void qdisc_maybe_clear_missed(struct Qdisc *q,
-
net/sched/sch_generic.c:72:53-72:67: static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:108:57-108:71: static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:118:46-118:60: static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
-
net/sched/sch_generic.c:142:57-142:71: static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-
net/sched/sch_generic.c:178:34-178:48: static void try_bulk_dequeue_skb(struct Qdisc *q,
-
net/sched/sch_generic.c:202:39-202:53: static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
-
net/sched/sch_generic.c:228:36-228:50: static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
-
net/sched/sch_generic.c:314:43-314:57: bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_generic.c:388:34-388:48: static inline bool qdisc_restart(struct Qdisc *q, int *packets)
-
net/sched/sch_generic.c:410:18-410:32: void __qdisc_run(struct Qdisc *q)
-
net/sched/sch_gred.c:114:6-114:30: struct gred_sched_data *q,
-
net/sched/sch_gred.c:129:11-129:35: struct gred_sched_data *q)
-
net/sched/sch_gred.c:136:12-136:36: struct gred_sched_data *q)
-
net/sched/sch_gred.c:142:25-142:49: static int gred_use_ecn(struct gred_sched_data *q)
-
net/sched/sch_gred.c:147:30-147:54: static int gred_use_harddrop(struct gred_sched_data *q)
-
net/sched/sch_gred.c:403:36-403:60: static inline void gred_destroy_vq(struct gred_sched_data *q)
-
net/sched/sch_hfsc.c:219:18-219:37: eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
-
net/sched/sch_hfsc.c:236:18-236:37: eltree_get_minel(struct hfsc_sched *q)
-
net/sched/sch_hhf.c:182:12-182:35: struct hhf_sched_data *q)
-
net/sched/sch_hhf.c:213:8-213:31: struct hhf_sched_data *q)
-
net/sched/sch_htb.c:317:34-317:52: static void htb_add_to_wait_tree(struct htb_sched *q,
-
net/sched/sch_htb.c:364:41-364:59: static inline void htb_add_class_to_row(struct htb_sched *q,
-
net/sched/sch_htb.c:396:46-396:64: static inline void htb_remove_class_from_row(struct htb_sched *q,
-
net/sched/sch_htb.c:426:32-426:50: static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:466:34-466:52: static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:562:23-562:41: htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
-
net/sched/sch_htb.c:593:33-593:51: static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:611:35-611:53: static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:699:30-699:48: static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-
net/sched/sch_htb.c:747:26-747:44: static s64 htb_do_events(struct htb_sched *q, const int level,
-
net/sched/sch_htb.c:872:41-872:59: static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
-
net/sched/sch_htb.c:1043:41-1043:55: static void htb_set_lockdep_class_child(struct Qdisc *q)
-
net/sched/sch_htb.c:1298:41-1298:59: static void htb_offload_aggregate_stats(struct htb_sched *q,
-
net/sched/sch_multiq.c:319:27-319:41: static void multiq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_netem.c:200:25-200:50: static bool loss_4state(struct netem_sched_data *q)
-
net/sched/sch_netem.c:265:27-265:52: static bool loss_gilb_ell(struct netem_sched_data *q)
-
net/sched/sch_netem.c:286:24-286:49: static bool loss_event(struct netem_sched_data *q)
-
net/sched/sch_netem.c:345:36-345:67: static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
-
net/sched/sch_netem.c:629:27-629:52: static void get_slot_next(struct netem_sched_data *q, u64 now)
-
net/sched/sch_netem.c:648:35-648:60: static struct sk_buff *netem_peek(struct netem_sched_data *q)
-
net/sched/sch_netem.c:665:30-665:55: static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
-
net/sched/sch_netem.c:806:22-806:47: static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:828:29-828:54: static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:837:25-837:50: static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:845:25-845:50: static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:853:22-853:47: static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:867:25-867:50: static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:1085:28-1085:59: static int dump_loss_model(const struct netem_sched_data *q,
-
net/sched/sch_prio.c:341:25-341:39: static void prio_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_qfq.c:259:26-259:44: static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:269:43-269:61: static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:283:28-283:46: static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:317:28-317:46: static void qfq_add_to_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:334:29-334:47: static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:347:34-347:52: static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:358:29-358:47: static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:371:35-371:53: static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:720:41-720:59: static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
-
net/sched/sch_qfq.c:737:27-737:45: static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
-
net/sched/sch_qfq.c:760:36-760:54: static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
-
net/sched/sch_qfq.c:767:32-767:50: static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
-
net/sched/sch_qfq.c:793:31-793:49: static void qfq_make_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:948:33-948:51: static void qfq_update_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1022:30-1022:48: static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1055:19-1055:37: qfq_update_agg_ts(struct qfq_sched *q,
-
net/sched/sch_qfq.c:1145:50-1145:68: static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1260:30-1260:48: static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1307:30-1307:48: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:1321:29-1321:47: static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-
net/sched/sch_qfq.c:1344:32-1344:50: static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_red.c:55:31-55:54: static inline int red_use_ecn(struct red_sched_data *q)
-
net/sched/sch_red.c:60:36-60:59: static inline int red_use_harddrop(struct red_sched_data *q)
-
net/sched/sch_red.c:65:27-65:50: static int red_use_nodrop(struct red_sched_data *q)
-
net/sched/sch_sfb.c:123:55-123:78: static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:138:57-138:80: static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:152:11-152:34: struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:167:55-167:78: static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:180:50-180:73: static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:185:50-185:73: static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:190:34-190:57: static void sfb_zero_all_buckets(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:198:56-198:85: static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:218:45-218:68: static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:224:27-224:50: static void sfb_swap_slot(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:234:49-234:72: static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfq.c:150:45-150:68: static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
-
net/sched/sch_sfq.c:157:30-157:59: static unsigned int sfq_hash(const struct sfq_sched_data *q,
-
net/sched/sch_sfq.c:203:29-203:52: static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:228:28-228:51: static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:241:28-241:51: static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:329:26-329:55: static int sfq_prob_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:335:26-335:55: static int sfq_hard_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:340:25-340:54: static int sfq_headdrop(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:841:24-841:38: static void sfq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_skbprio.c:40:31-40:64: static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_skbprio.c:53:30-53:63: static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_taprio.c:109:45-109:66: static void taprio_calculate_gate_durations(struct taprio_sched *q,
-
net/sched/sch_taprio.c:168:35-168:62: static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
-
net/sched/sch_taprio.c:181:32-181:59: static ktime_t taprio_get_time(const struct taprio_sched *q)
-
net/sched/sch_taprio.c:199:30-199:51: static void switch_schedules(struct taprio_sched *q,
-
net/sched/sch_taprio.c:247:31-247:52: static int length_to_duration(struct taprio_sched *q, int len)
-
net/sched/sch_taprio.c:252:31-252:52: static int duration_to_length(struct taprio_sched *q, u64 duration)
-
net/sched/sch_taprio.c:261:41-261:62: static void taprio_update_queue_max_sdu(struct taprio_sched *q,
-
net/sched/sch_taprio.c:422:31-422:52: static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
-
net/sched/sch_taprio.c:664:32-664:53: static void taprio_set_budgets(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1027:29-1027:50: static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1059:30-1059:51: static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
-
net/sched/sch_taprio.c:1078:29-1078:50: static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
-
net/sched/sch_taprio.c:1118:34-1118:55: static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1229:34-1229:55: static void setup_first_end_time(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1282:11-1282:32: struct taprio_sched *q)
-
net/sched/sch_taprio.c:1340:26-1340:47: static void setup_txtime(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1406:43-1406:64: static void taprio_offload_config_changed(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1464:41-1464:62: static void taprio_detect_broken_mqprio(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1481:42-1481:63: static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1493:6-1493:27: struct taprio_sched *q,
-
net/sched/sch_taprio.c:1559:7-1559:28: struct taprio_sched *q,
-
net/sched/sch_taprio.c:2261:7-2261:28: struct taprio_sched *q,
-
net/sched/sch_tbf.c:263:30-263:59: static bool tbf_peak_present(const struct tbf_sched_data *q)
-
net/sctp/inqueue.c:64:20-64:37: void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
-
net/sctp/inqueue.c:234:30-234:47: void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
-
net/sctp/outqueue.c:59:40-59:58: static inline void sctp_outq_head_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:74:57-74:75: static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
-
net/sctp/outqueue.c:80:40-80:58: static inline void sctp_outq_tail_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:191:52-191:70: void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
-
net/sctp/outqueue.c:206:34-206:52: static void __sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:267:25-267:43: void sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:274:21-274:39: void sctp_outq_free(struct sctp_outq *q)
-
net/sctp/outqueue.c:281:21-281:39: void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
-
net/sctp/outqueue.c:450:27-450:45: void sctp_retransmit_mark(struct sctp_outq *q,
-
net/sctp/outqueue.c:537:22-537:40: void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
-
net/sctp/outqueue.c:598:34-598:52: static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
-
net/sctp/outqueue.c:759:23-759:41: void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
-
net/sctp/outqueue.c:1192:29-1192:47: static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
-
net/sctp/outqueue.c:1248:20-1248:38: int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
-
net/sctp/outqueue.c:1415:24-1415:48: int sctp_outq_is_empty(const struct sctp_outq *q)
-
net/sctp/outqueue.c:1435:36-1435:54: static void sctp_check_transmitted(struct sctp_outq *q,
-
net/sctp/outqueue.c:1709:31-1709:49: static void sctp_mark_missing(struct sctp_outq *q,
-
net/sctp/outqueue.c:1822:27-1822:45: void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_interleave.c:1098:33-1098:51: static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_sched.c:53:37-53:55: static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched.c:58:51-58:69: static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched.c:81:42-81:60: static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched.c:234:30-234:48: void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched.c:256:32-256:50: void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched_fc.c:98:35-98:53: static void sctp_sched_fc_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_fc.c:111:49-111:67: static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_fc.c:132:40-132:58: static void sctp_sched_fc_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:215:37-215:55: static void sctp_sched_prio_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:228:51-228:69: static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_prio.c:256:42-256:60: static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:97:35-97:53: static void sctp_sched_rr_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:110:49-110:67: static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_rr.c:133:40-133:58: static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
-
net/sunrpc/sched.c:147:25-147:43: __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
-
net/sunrpc/sched.c:382:40-382:63: static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:391:37-391:60: static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:400:45-400:68: static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:430:27-430:50: void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:447:19-447:42: void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:465:36-465:59: void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:481:28-481:51: void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:1212:3-1212:28: struct workqueue_struct *q)
-
net/sunrpc/sched.c:1221:52-1221:77: static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
-
net/unix/af_unix.c:432:39-432:59: static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
-
net/x25/x25_in.c:208:100-208:104: static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/x25/x25_subr.c:260:72-260:77: int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
-
net/xdp/xsk_queue.c:14:34-14:52: static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
-
net/xdp/xsk_queue.c:49:19-49:37: void xskq_destroy(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:115:52-115:70: static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
-
net/xdp/xsk_queue.h:123:50-123:68: static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:173:44-173:62: static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:184:40-184:58: static inline bool xskq_cons_read_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:202:40-202:58: static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:207:45-207:63: static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-
net/xdp/xsk_queue.h:235:40-235:58: static inline void __xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:240:37-240:55: static inline void __xskq_cons_peek(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:246:42-246:60: static inline void xskq_cons_get_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:252:40-252:58: static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:265:42-265:60: static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:270:50-270:68: static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:277:40-277:58: static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:290:38-290:56: static inline void xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:295:45-295:63: static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:303:37-303:55: static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:317:38-317:56: static inline bool xskq_prod_is_full(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:322:37-322:55: static inline void xskq_prod_cancel(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:327:37-327:55: static inline int xskq_prod_reserve(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:337:42-337:60: static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:349:47-349:65: static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
-
net/xdp/xsk_queue.h:362:42-362:60: static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:379:39-379:57: static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
-
net/xdp/xsk_queue.h:384:37-384:55: static inline void xskq_prod_submit(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:389:42-389:60: static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:399:39-399:57: static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
-
net/xdp/xsk_queue.h:404:39-404:57: static inline bool xskq_prod_is_empty(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:412:41-412:59: static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:417:45-417:63: static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
-
sound/core/seq/oss/seq_oss_event.c:42:55-42:68: snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:95:39-95:52: old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:121:44-121:57: extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:175:45-175:58: chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:196:46-196:59: chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:223:42-223:55: timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:258:41-258:54: local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_readq.c:62:26-62:48: snd_seq_oss_readq_delete(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:74:25-74:47: snd_seq_oss_readq_clear(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:89:24-89:46: snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
-
sound/core/seq/oss/seq_oss_readq.c:123:29-123:51: int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
-
sound/core/seq/oss/seq_oss_readq.c:141:29-141:51: snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
-
sound/core/seq/oss/seq_oss_readq.c:169:24-169:46: snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
-
sound/core/seq/oss/seq_oss_readq.c:181:24-181:46: snd_seq_oss_readq_wait(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:193:24-193:46: snd_seq_oss_readq_free(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:206:24-206:46: snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
-
sound/core/seq/oss/seq_oss_readq.c:216:33-216:55: snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
-
sound/core/seq/oss/seq_oss_readq.c:244:29-244:51: snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
-
sound/core/seq/oss/seq_oss_writeq.c:54:27-54:50: snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:67:26-67:49: snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:83:25-83:48: snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:123:27-123:50: snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
-
sound/core/seq/oss/seq_oss_writeq.c:139:34-139:57: snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:152:31-152:54: snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
-
sound/core/seq/seq_queue.c:50:27-50:49: static int queue_list_add(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:129:26-129:48: static void queue_delete(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:240:26-240:48: void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
-
sound/core/seq/seq_queue.c:354:32-354:54: static inline int check_access(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:362:30-362:52: static int queue_access_lock(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:376:40-376:62: static inline void queue_access_unlock(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:629:35-629:57: static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
-
sound/core/seq/seq_queue.c:652:41-652:63: static void snd_seq_queue_process_event(struct snd_seq_queue *q,
-
sound/core/seq/seq_timer.c:258:24-258:46: int snd_seq_timer_open(struct snd_seq_queue *q)
-
sound/core/seq/seq_timer.c:313:25-313:47: int snd_seq_timer_close(struct snd_seq_queue *q)
-
sound/pci/hda/hda_codec.c:1212:7-1212:29: struct hda_cvt_setup *q)
variable
Defined...
-
arch/x86/crypto/curve25519-x86_64.c:35:2-35:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
arch/x86/include/asm/div64.h:83:2-83:6: u64 q;
-
arch/x86/kernel/cpu/common.c:781:2-781:12: char *p, *q, *s;
-
arch/x86/kvm/svm/sev.c:2118:2-2118:26: struct list_head *pos, *q;
-
arch/x86/kvm/vmx/nested.c:1559:2-1559:9: int i, q;
-
arch/x86/xen/platform-pci-unplug.c:181:2-181:12: char *p, *q;
-
block/bfq-iosched.c:6238:2-6238:34: struct request_queue *q = hctx->queue;
-
block/bfq-iosched.c:6843:2-6843:32: struct request_queue *q = rq->q;
-
block/bio.c:1081:2-1081:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1184:3-1184:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1216:2-1216:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-cgroup.c:119:2-119:34: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:458:2-458:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:561:2-561:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:776:2-776:24: struct request_queue *q;
-
block/blk-cgroup.c:1188:3-1188:35: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:1361:2-1361:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:1461:2-1461:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:1572:2-1572:34: struct request_queue *q = disk->queue;
-
block/blk-core.c:257:2-257:28: struct request_queue *q = container_of(rcu_head,
-
block/blk-core.c:375:2-376:3: struct request_queue *q =
-
block/blk-core.c:383:2-383:28: struct request_queue *q = from_timer(q, t, timeout);
-
block/blk-core.c:394:2-394:24: struct request_queue *q;
-
block/blk-core.c:632:3-632:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-core.c:719:2-719:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-core.c:854:2-854:24: struct request_queue *q;
-
block/blk-crypto-sysfs.c:131:2-131:34: struct request_queue *q = disk->queue;
-
block/blk-crypto.c:420:2-420:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-flush.c:167:2-167:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:217:2-217:38: struct request_queue *q = flush_rq->q;
-
block/blk-flush.c:356:2-356:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:390:2-390:32: struct request_queue *q = rq->q;
-
block/blk-ia-ranges.c:111:2-111:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:154:2-154:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:290:2-290:34: struct request_queue *q = disk->queue;
-
block/blk-ioc.c:76:2-76:33: struct request_queue *q = icq->q;
-
block/blk-ioc.c:117:3-117:34: struct request_queue *q = icq->q;
-
block/blk-iocost.c:3387:2-3387:24: struct request_queue *q;
-
block/blk-map.c:558:2-558:32: struct request_queue *q = rq->q;
-
block/blk-merge.c:593:2-593:32: struct request_queue *q = rq->q;
-
block/blk-mq-debugfs-zoned.c:11:2-11:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:24:2-24:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:32:2-32:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:40:2-40:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:74:2-74:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:111:2-111:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:122:2-122:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:416:2-416:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:433:2-433:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:450:2-450:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:467:2-467:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:798:2-798:40: struct request_queue *q = rqos->disk->queue;
-
block/blk-mq-sched.c:89:2-89:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:217:2-217:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:321:2-321:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.h:62:3-62:33: struct request_queue *q = rq->q;
-
block/blk-mq-sysfs.c:54:2-54:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:160:2-160:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sysfs.c:221:2-221:34: struct request_queue *q = disk->queue;
-
block/blk-mq-sysfs.c:258:2-258:34: struct request_queue *q = disk->queue;
-
block/blk-mq-tag.c:47:3-47:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:83:3-83:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:265:2-265:39: struct request_queue *q = iter_data->q;
-
block/blk-mq.c:286:2-286:24: struct request_queue *q;
-
block/blk-mq.c:300:2-300:24: struct request_queue *q;
-
block/blk-mq.c:343:2-343:34: struct request_queue *q = data->q;
-
block/blk-mq.c:439:2-439:34: struct request_queue *q = data->q;
-
block/blk-mq.c:678:2-678:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:696:2-696:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1034:2-1034:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:1220:2-1220:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1398:2-1398:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1413:2-1413:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1429:2-1430:3: struct request_queue *q =
-
block/blk-mq.c:1467:2-1467:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1606:2-1607:3: struct request_queue *q =
-
block/blk-mq.c:2000:2-2000:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:2479:2-2479:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2560:2-2560:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2753:3-2753:25: struct request_queue *q;
-
block/blk-mq.c:2930:2-2930:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-mq.c:3000:2-3000:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:3926:2-3926:24: struct request_queue *q;
-
block/blk-mq.c:4034:2-4034:24: struct request_queue *q;
-
block/blk-mq.c:4085:2-4085:24: struct request_queue *q;
-
block/blk-mq.c:4668:2-4668:24: struct request_queue *q;
-
block/blk-mq.h:411:3-411:35: struct request_queue *q = hctx->queue;
-
block/blk-rq-qos.c:301:2-301:34: struct request_queue *q = disk->queue;
-
block/blk-rq-qos.c:340:2-340:40: struct request_queue *q = rqos->disk->queue;
-
block/blk-rq-qos.h:141:3-141:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-settings.c:393:2-393:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:917:2-917:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:963:2-963:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-settings.c:976:2-976:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-stat.c:52:2-52:32: struct request_queue *q = rq->q;
-
block/blk-sysfs.c:673:2-673:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:700:2-700:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:717:2-717:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:752:2-752:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:769:2-769:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:854:2-854:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:1171:2-1171:24: struct request_queue *q;
-
block/blk-throttle.c:1248:2-1248:32: struct request_queue *q = td->queue;
-
block/blk-throttle.c:1713:2-1713:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2170:2-2170:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-throttle.c:2304:2-2304:32: struct request_queue *q = rq->q;
-
block/blk-throttle.c:2368:2-2368:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2412:2-2412:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2425:2-2425:34: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:55:3-55:35: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:130:2-130:33: struct request_queue *q = req->q;
-
block/blk-wbt.c:731:2-731:34: struct request_queue *q = disk->queue;
-
block/blk-wbt.c:917:2-917:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:259:2-259:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-zoned.c:463:2-463:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:547:2-547:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:613:2-613:34: struct request_queue *q = disk->queue;
-
block/blk.h:67:2-67:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bsg-lib.c:275:2-275:34: struct request_queue *q = hctx->queue;
-
block/bsg-lib.c:366:2-366:24: struct request_queue *q;
-
block/bsg.c:105:2-105:32: struct request_queue *q = bd->queue;
-
block/elevator.c:62:2-62:32: struct request_queue *q = rq->q;
-
block/genhd.c:606:2-606:34: struct request_queue *q = disk->queue;
-
block/genhd.c:966:2-966:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:1015:2-1015:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:1432:2-1432:24: struct request_queue *q;
-
block/kyber-iosched.c:954:1-954:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-
block/kyber-iosched.c:955:1-955:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
-
block/kyber-iosched.c:956:1-956:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
-
block/kyber-iosched.c:957:1-957:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
-
block/kyber-iosched.c:962:2-962:28: struct request_queue *q = data;
-
block/mq-deadline.c:620:2-620:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:771:2-771:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:829:2-829:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:879:2-879:32: struct request_queue *q = rq->q;
-
block/mq-deadline.c:1045:1-1045:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-
block/mq-deadline.c:1046:1-1046:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-
block/mq-deadline.c:1047:1-1047:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-
block/mq-deadline.c:1048:1-1048:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-
block/mq-deadline.c:1049:1-1049:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-
block/mq-deadline.c:1050:1-1050:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-
block/mq-deadline.c:1055:2-1055:28: struct request_queue *q = data;
-
block/mq-deadline.c:1064:2-1064:28: struct request_queue *q = data;
-
block/mq-deadline.c:1073:2-1073:28: struct request_queue *q = data;
-
block/mq-deadline.c:1082:2-1082:28: struct request_queue *q = data;
-
block/mq-deadline.c:1110:2-1110:28: struct request_queue *q = data;
-
block/mq-deadline.c:1164:1-1164:1: DEADLINE_DISPATCH_ATTR(0);
-
block/mq-deadline.c:1165:1-1165:1: DEADLINE_DISPATCH_ATTR(1);
-
block/mq-deadline.c:1166:1-1166:1: DEADLINE_DISPATCH_ATTR(2);
-
crypto/algapi.c:229:2-229:21: struct crypto_alg *q;
-
crypto/algapi.c:307:2-307:21: struct crypto_alg *q;
-
crypto/algapi.c:364:2-364:21: struct crypto_alg *q;
-
crypto/algapi.c:535:2-535:26: struct crypto_template *q;
-
crypto/algapi.c:613:2-613:26: struct crypto_template *q, *tmpl = NULL;
-
crypto/algapi.c:1051:3-1051:22: struct crypto_alg *q;
-
crypto/api.c:59:2-59:21: struct crypto_alg *q, *alg = NULL;
-
crypto/asymmetric_keys/x509_public_key.c:148:2-148:14: const char *q;
-
crypto/async_tx/async_pq.c:382:3-382:13: void *p, *q, *s;
-
crypto/async_tx/async_raid6_recov.c:158:2-158:19: struct page *p, *q, *a, *b;
-
crypto/async_tx/async_raid6_recov.c:208:2-208:19: struct page *p, *q, *g, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:299:2-299:19: struct page *p, *q, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:476:2-476:19: struct page *p, *q, *dq;
-
crypto/crypto_user_base.c:38:2-38:21: struct crypto_alg *q, *alg = NULL;
-
crypto/dh.c:129:3-129:12: MPI val, q;
-
crypto/ecc.c:568:2-568:22: u64 q[ECC_MAX_DIGITS];
-
crypto/ecc.c:666:2-666:26: u64 q[ECC_MAX_DIGITS * 2];
-
crypto/essiv.c:391:2-391:18: const char *p, *q;
-
drivers/accel/habanalabs/common/hw_queue.c:44:2-44:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:231:2-231:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:271:2-271:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:336:2-336:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:372:2-372:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:632:2-632:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:808:2-808:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:1071:2-1071:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:1111:2-1111:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:1123:2-1123:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1100:2-1100:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1757:2-1757:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1771:2-1771:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2694:2-2694:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2840:2-2840:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2964:2-2964:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:3109:2-3109:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:3242:2-3242:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:4678:2-4678:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:6854:2-6854:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:7282:2-7282:68: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:4832:2-4832:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:7801:2-7801:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9430:2-9430:69: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9515:2-9515:69: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9525:2-9525:44: struct hl_engine_arc_dccm_queue_full_irq *q;
-
drivers/accel/habanalabs/goya/goya.c:1175:2-1175:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/goya/goya.c:4484:2-4484:67: struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
-
drivers/accel/ivpu/ivpu_mmu.c:305:2-305:35: struct ivpu_mmu_queue *q = &mmu->cmdq;
-
drivers/accel/ivpu/ivpu_mmu.c:324:2-324:35: struct ivpu_mmu_queue *q = &mmu->evtq;
-
drivers/accel/ivpu/ivpu_mmu.c:406:2-406:41: struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
-
drivers/accel/ivpu/ivpu_mmu.c:426:2-426:41: struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
-
drivers/acpi/ec.c:1142:2-1142:28: struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
-
drivers/acpi/ec.c:1165:2-1165:24: struct acpi_ec_query *q;
-
drivers/acpi/ec.c:1183:2-1183:24: struct acpi_ec_query *q;
-
drivers/ata/libata-scsi.c:1078:2-1078:34: struct request_queue *q = sdev->request_queue;
-
drivers/block/aoe/aoecmd.c:837:2-837:24: struct request_queue *q;
-
drivers/block/aoe/aoecmd.c:1033:2-1033:24: struct request_queue *q;
-
drivers/block/aoe/aoenet.c:75:2-75:21: register char *p, *q;
-
drivers/block/drbd/drbd_int.h:1854:3-1854:44: struct drbd_work_queue *q = &connection->sender_work;
-
drivers/block/drbd/drbd_main.c:933:3-933:48: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/block/drbd/drbd_main.c:953:3-953:37: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1212:2-1212:36: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1274:2-1274:43: struct request_queue * const q = device->rq_queue;
-
drivers/block/loop.c:761:2-761:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/loop.c:939:2-939:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/nbd.c:832:2-832:39: struct request_queue *q = nbd->disk->queue;
-
drivers/block/null_blk/main.c:1329:2-1329:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/main.c:1337:2-1337:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/zoned.c:160:2-160:35: struct request_queue *q = nullb->q;
-
drivers/block/pktcdvd.c:669:2-669:51: struct request_queue *q = bdev_get_queue(pd->bdev);
-
drivers/block/pktcdvd.c:2120:2-2120:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:2442:2-2442:38: struct request_queue *q = pd->disk->queue;
-
drivers/block/rbd.c:4897:2-4897:24: struct request_queue *q;
-
drivers/block/rnbd/rnbd-clt.c:201:2-201:25: struct rnbd_queue *q = NULL;
-
drivers/block/rnbd/rnbd-clt.c:1099:2-1099:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1160:2-1160:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1323:2-1323:21: struct rnbd_queue *q;
-
drivers/block/ublk_drv.c:207:2-207:41: struct request_queue *q = ub->ub_disk->queue;
-
drivers/block/ublk_drv.c:234:2-234:41: struct request_queue *q = ub->ub_disk->queue;
-
drivers/block/virtio_blk.c:571:2-571:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:600:2-600:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:903:2-903:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:1012:2-1012:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:1319:2-1319:24: struct request_queue *q;
-
drivers/cdrom/cdrom.c:2596:2-2596:23: struct cdrom_subchnl q;
-
drivers/cdrom/cdrom.c:3053:2-3053:23: struct cdrom_subchnl q;
-
drivers/clk/clk-cdce925.c:223:2-223:5: u8 q;
-
drivers/counter/counter-chrdev.c:123:2-123:28: struct counter_comp_node *q, *o;
-
drivers/crypto/cavium/zip/zip_main.c:136:2-136:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:335:2-335:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:489:2-489:10: u32 q = 0;
-
drivers/crypto/ccp/ccp-ops.c:222:2-222:10: u8 *p, *q;
-
drivers/crypto/ccp/ccp-ops.c:247:2-247:10: u8 *p, *q;
-
drivers/crypto/hisilicon/zip/zip_crypto.c:222:2-222:34: struct hisi_zip_req *q = req_q->q;
-
drivers/crypto/intel/keembay/ocs-aes.c:1059:2-1059:9: int i, q;
-
drivers/firewire/core-device.c:1105:2-1105:6: u32 q;
-
drivers/firewire/core-topology.c:42:2-42:6: u32 q;
-
drivers/firewire/core-topology.c:176:2-176:23: u32 *next_sid, *end, q;
-
drivers/firmware/arm_scmi/raw_mode.c:742:2-742:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:846:2-846:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1030:2-1030:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1112:4-1112:27: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1313:2-1313:25: struct scmi_raw_queue *q;
-
drivers/firmware/dmi_scan.c:660:2-660:20: char __iomem *p, *q;
-
drivers/firmware/efi/libstub/vsprintf.c:43:3-43:35: unsigned int q = (r * 0xccd) >> 15;
-
drivers/firmware/efi/libstub/vsprintf.c:62:2-62:42: unsigned int q = (x * 0x346DC5D7ULL) >> 43;
-
drivers/firmware/efi/libstub/vsprintf.c:76:2-76:27: unsigned int d3, d2, d1, q, h;
-
drivers/gpio/gpiolib-of.c:613:2-613:28: const of_find_gpio_quirk *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c:1447:2-1447:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:273:2-273:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:923:2-923:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:973:2-973:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1021:2-1021:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1101:2-1101:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2001:2-2001:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2130:2-2130:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c:131:2-131:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:107:2-107:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:381:2-381:20: struct queue *q = container_of(kobj, struct queue, kobj);
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:239:2-239:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:590:2-590:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:701:2-701:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:927:2-927:16: struct queue *q;
-
drivers/gpu/drm/drm_debugfs.c:275:2-275:26: struct list_head *pos, *q;
-
drivers/gpu/drm/drm_edid.c:5305:2-5305:34: u32 max_avg, min_cll, max, min, q, r;
-
drivers/gpu/drm/i915/display/intel_quirks.c:212:3-212:42: struct intel_quirk *q = &intel_quirks[i];
-
drivers/gpu/drm/i915/gvt/handlers.c:2275:2-2275:2: MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2278:2-2278:2: MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2281:2-2281:2: MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2284:2-2284:2: MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2287:2-2287:2: MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2290:2-2290:2: MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/scheduler.c:1632:2-1632:24: struct list_head *q = workload_q_head(vgpu, engine);
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:168:2-168:2: MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:177:2-177:2: MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:186:2-186:2: MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:199:2-199:2: MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:212:2-212:2: MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:225:2-225:2: MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
-
drivers/gpu/drm/v3d/v3d_drv.c:130:2-130:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:258:2-258:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:442:2-442:17: enum v3d_queue q;
-
drivers/gpu/drm/xen/xen_drm_front.c:54:2-54:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:65:2-65:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:79:2-79:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/hid/hid-quirks.c:1080:2-1080:29: struct quirks_list_struct *q;
-
drivers/hid/hid-quirks.c:1115:2-1115:37: struct quirks_list_struct *q_new, *q;
-
drivers/hid/hid-quirks.c:1171:2-1171:29: struct quirks_list_struct *q, *temp;
-
drivers/i2c/i2c-core-base.c:2104:2-2104:45: const struct i2c_adapter_quirks *q = adap->quirks;
-
drivers/iio/common/st_sensors/st_sensors_core.c:643:2-643:18: int i, len = 0, q, r;
-
drivers/iio/industrialio-buffer.c:948:2-948:30: struct iio_demux_table *p, *q;
-
drivers/infiniband/hw/hfi1/affinity.c:193:2-193:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/hfi1/mad.c:85:2-85:27: struct trap_node *node, *q;
-
drivers/infiniband/hw/hfi1/mad.c:987:2-987:7: u16 *q;
-
drivers/infiniband/hw/hfi1/mad.c:1686:2-1686:24: __be16 *q = (__be16 *)data;
-
drivers/infiniband/hw/irdma/verbs.c:3860:2-3860:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/mlx4/mad.c:1026:2-1026:9: int p, q;
-
drivers/infiniband/hw/mlx4/mad.c:1062:2-1062:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:286:2-286:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:328:2-328:9: int p, q;
-
drivers/infiniband/hw/qib/qib_mad.c:601:2-601:30: __be16 *q = (__be16 *) smp->data;
-
drivers/infiniband/hw/qib/qib_mad.c:1044:2-1044:24: u16 *q = (u16 *) smp->data;
-
drivers/infiniband/sw/rdmavt/qp.c:745:3-745:18: struct rvt_qp *q;
-
drivers/infiniband/sw/rxe/rxe_comp.c:594:2-594:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_queue.c:58:2-58:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_req.c:45:2-45:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:117:2-117:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_req.c:162:2-162:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:676:2-676:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:272:2-272:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:1446:2-1446:31: struct rxe_queue *q = qp->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_srq.c:50:2-50:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_srq.c:149:2-149:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/ulp/srp/ib_srp.c:2861:2-2861:34: struct request_queue *q = sdev->request_queue;
-
drivers/isdn/mISDN/dsp_cmx.c:1303:2-1303:14: u8 *d, *p, *q, *o_q;
-
drivers/isdn/mISDN/dsp_cmx.c:1625:2-1625:10: u8 *p, *q;
-
drivers/md/bcache/super.c:900:2-900:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1008:2-1008:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1398:2-1398:51: struct request_queue *q = bdev_get_queue(dc->bdev);
-
drivers/md/bcache/sysfs.c:1066:3-1066:16: uint16_t q[31], *p, *cached;
-
drivers/md/bcache/util.c:97:2-97:11: uint64_t q;
-
drivers/md/dm-cache-policy-smq.c:883:2-883:25: struct queue *q = &mq->dirty;
-
drivers/md/dm-cache-policy-smq.c:896:2-896:25: struct queue *q = &mq->clean;
-
drivers/md/dm-io.c:316:2-316:54: struct request_queue *q = bdev_get_queue(where->bdev);
-
drivers/md/dm-mpath.c:516:2-516:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:885:2-885:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-mpath.c:941:2-941:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:1626:2-1626:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-mpath.c:2100:2-2100:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-stats.c:956:2-956:14: const char *q;
-
drivers/md/dm-table.c:401:2-401:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:848:2-848:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:1482:2-1482:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1573:2-1573:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1766:2-1766:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1819:2-1819:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1827:2-1827:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-zone.c:126:2-126:32: struct request_queue *q = md->queue;
-
drivers/md/raid5.c:7153:3-7153:36: struct request_queue *q = mddev->queue;
-
drivers/media/common/saa7146/saa7146_fops.c:156:2-156:31: struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/common/saa7146/saa7146_fops.c:346:2-346:20: struct vb2_queue *q;
-
drivers/media/common/saa7146/saa7146_video.c:709:2-709:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/videobuf2/videobuf2-core.c:216:2-216:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:357:2-357:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1046:2-1046:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1130:2-1130:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1246:2-1246:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1382:2-1382:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1394:2-1394:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1929:2-1929:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:2952:2-2952:24: struct vb2_queue *q = data;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:194:2-194:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:213:2-213:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:200:2-200:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:254:2-254:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:276:2-276:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:317:2-317:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:145:2-145:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:178:2-178:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:496:2-496:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:1189:2-1189:30: struct vb2_queue *q = vdev->queue;
-
drivers/media/dvb-core/dvb_demux.c:541:2-541:12: const u8 *q;
-
drivers/media/dvb-core/dvb_vb2.c:165:2-165:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:203:2-203:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:216:2-216:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:233:2-233:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:357:2-357:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:370:2-370:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:387:2-387:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-frontends/rtl2832_sdr.c:1144:2-1144:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/dvb-frontends/sp887x.c:287:2-287:15: unsigned int q, r;
-
drivers/media/i2c/adv7511-v4l2.c:1276:2-1276:9: u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
-
drivers/media/i2c/cx25840/cx25840-core.c:697:2-697:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:775:2-775:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:1034:2-1034:27: struct workqueue_struct *q;
-
drivers/media/pci/bt8xx/bttv-driver.c:2014:2-2014:29: struct videobuf_queue* q = NULL;
-
drivers/media/pci/bt8xx/bttv-driver.c:2048:2-2048:42: struct videobuf_queue *q = bttv_queue(fh);
-
drivers/media/pci/cobalt/cobalt-v4l2.c:125:2-125:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/pci/cobalt/cobalt-v4l2.c:1210:2-1210:28: struct vb2_queue *q = &s->q;
-
drivers/media/pci/cx18/cx18-fileops.c:291:3-291:13: const u8 *q;
-
drivers/media/pci/cx18/cx18-streams.c:690:2-690:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-streams.c:712:2-712:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-vbi.c:99:2-99:10: u8 *q = buf;
-
drivers/media/pci/cx23885/cx23885-417.c:1496:2-1496:20: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-core.c:1649:2-1649:38: struct cx23885_dmaqueue *q = &port->mpegq;
-
drivers/media/pci/cx23885/cx23885-dvb.c:2656:3-2656:21: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-vbi.c:189:2-189:37: struct cx23885_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx23885/cx23885-video.c:462:2-462:40: struct cx23885_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx23885/cx23885-video.c:1239:2-1239:20: struct vb2_queue *q;
-
drivers/media/pci/cx25821/cx25821-video.c:243:2-243:56: struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
-
drivers/media/pci/cx25821/cx25821-video.c:681:3-681:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-blackbird.c:1157:2-1157:20: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-dvb.c:1766:3-1766:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-mpeg.c:274:2-274:34: struct cx88_dmaqueue *q = &dev->mpegq;
-
drivers/media/pci/cx88/cx88-vbi.c:172:2-172:38: struct cx88_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx88/cx88-video.c:507:2-507:38: struct cx88_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx88/cx88-video.c:1262:2-1262:20: struct vb2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:540:2-540:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:802:2-802:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:890:2-891:3: struct cio2_queue *q =
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:978:2-978:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1023:2-1023:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1074:2-1074:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1114:2-1114:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1227:2-1227:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1253:2-1253:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1321:2-1321:25: struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1387:2-1387:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1419:2-1419:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1971:2-1971:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:2004:2-2004:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/ivtv/ivtv-fileops.c:297:3-297:13: const u8 *q;
-
drivers/media/pci/ivtv/ivtv-fileops.c:543:2-543:20: struct ivtv_queue q;
-
drivers/media/pci/ivtv/ivtv-vbi.c:305:2-305:10: u8 *q = buf;
-
drivers/media/pci/saa7134/saa7134-core.c:331:2-331:31: struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/pci/saa7134/saa7134-dvb.c:1218:2-1218:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-empress.c:245:2-245:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-video.c:1641:2-1641:20: struct vb2_queue *q;
-
drivers/media/pci/saa7164/saa7164-cmd.c:73:2-73:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:125:2-125:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:246:2-246:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-dvb.c:195:2-195:24: struct list_head *p, *q;
-
drivers/media/pci/saa7164/saa7164-encoder.c:61:2-61:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/pci/saa7164/saa7164-vbi.c:30:2-30:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/platform/allegro-dvt/allegro-core.c:2810:2-2810:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/amphion/vdec.c:266:2-266:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vdec.c:431:2-431:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/venc.c:220:2-220:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:105:2-105:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:431:2-431:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:534:2-534:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/atmel/atmel-isi.c:1189:2-1189:20: struct vb2_queue *q;
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:1530:2-1530:36: struct mtk_jpeg_q_data *q = &ctx->out_q;
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c:405:2-405:6: u32 q;
-
drivers/media/platform/microchip/microchip-isc-base.c:1799:2-1799:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1991:2-1991:46: struct mxc_jpeg_q_data *q[2] = {out_q, cap_q};
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:1412:2-1412:32: struct vb2_queue *q = &video->vb2_q;
-
drivers/media/platform/qcom/camss/camss-video.c:977:2-977:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/helpers.c:1590:2-1590:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/vdec.c:311:2-311:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/venc.c:236:2-236:20: struct vb2_queue *q;
-
drivers/media/platform/renesas/rcar-vin/rcar-dma.c:1535:2-1535:30: struct vb2_queue *q = &vin->queue;
-
drivers/media/platform/renesas/rcar_drif.c:923:2-923:30: struct vb2_queue *q = &sdr->vb_queue;
-
drivers/media/platform/renesas/renesas-ceu.c:1404:2-1404:33: struct vb2_queue *q = &ceudev->vb2_vq;
-
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c:742:2-742:30: struct vb2_queue *q = &cru->queue;
-
drivers/media/platform/renesas/sh_vou.c:1228:2-1228:20: struct vb2_queue *q;
-
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c:1399:2-1399:20: struct vb2_queue *q;
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:1717:2-1717:39: struct vb2_queue *q = &fimc->vid_cap.vbq;
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:574:2-574:44: struct vb2_queue *q = &isp->video_capture.vb_queue;
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:1245:2-1245:31: struct vb2_queue *q = &fimc->vb_queue;
-
drivers/media/platform/samsung/s3c-camif/camif-capture.c:1103:2-1103:29: struct vb2_queue *q = &vp->vb_queue;
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c:771:2-771:20: struct vb2_queue *q;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1116:2-1116:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1296:2-1296:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1462:2-1462:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1508:2-1508:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1585:2-1585:20: struct vb2_queue *q;
-
drivers/media/platform/st/stm32/stm32-dcmi.c:1931:2-1931:20: struct vb2_queue *q;
-
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c:403:2-403:30: struct vb2_queue *q = &csi->queue;
-
drivers/media/platform/ti/am437x/am437x-vpfe.c:2214:2-2214:20: struct vb2_queue *q;
-
drivers/media/platform/ti/cal/cal-video.c:255:2-255:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/cal/cal-video.c:982:2-982:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/davinci/vpif_capture.c:71:2-71:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/ti/davinci/vpif_capture.c:1401:2-1401:20: struct vb2_queue *q;
-
drivers/media/platform/ti/davinci/vpif_display.c:1123:2-1123:20: struct vb2_queue *q;
-
drivers/media/platform/verisilicon/hantro_drv.c:48:2-48:59: struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
-
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c:84:2-84:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c:179:2-179:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c:86:2-86:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c:317:2-317:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/media/radio/radio-gemtek.c:153:2-153:14: int i, bit, q, mute;
-
drivers/media/radio/radio-gemtek.c:257:2-257:9: int i, q;
-
drivers/media/test-drivers/vimc/vimc-capture.c:403:2-403:20: struct vb2_queue *q;
-
drivers/media/test-drivers/vivid/vivid-sdr-cap.c:469:2-469:30: struct vb2_queue *q = &dev->vb_sdr_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-cap.c:658:2-658:30: struct vb2_queue *q = &dev->vb_vid_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-out.c:454:2-454:30: struct vb2_queue *q = &dev->vb_vid_out_q;
-
drivers/media/tuners/max2165.c:153:2-153:6: u32 q, f = 0;
-
drivers/media/usb/airspy/airspy.c:646:2-646:28: struct vb2_queue *q = &s->vb_queue;
-
drivers/media/usb/au0828/au0828-video.c:290:2-290:36: struct vb2_queue *q = vb->vb2_buf.vb2_queue;
-
drivers/media/usb/au0828/au0828-video.c:1807:2-1807:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-417.c:1739:2-1739:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-video.c:1757:2-1757:20: struct vb2_queue *q;
-
drivers/media/usb/em28xx/em28xx-video.c:1239:2-1239:20: struct vb2_queue *q;
-
drivers/media/usb/go7007/go7007-fw.c:930:2-930:10: int q = 0;
-
drivers/media/usb/gspca/gspca.c:1452:2-1452:20: struct vb2_queue *q;
-
drivers/media/usb/hackrf/hackrf.c:918:2-918:20: struct vb2_queue *q;
-
drivers/media/usb/msi2500/msi2500.c:923:2-923:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/usb/s2255/s2255drv.c:813:2-813:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1098:2-1098:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1590:2-1590:20: struct vb2_queue *q;
-
drivers/media/usb/stk1160/stk1160-v4l.c:487:2-487:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:522:2-522:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:783:2-783:20: struct vb2_queue *q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:393:2-393:34: struct videobuf_queue *q = map->q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:656:2-656:24: struct videobuf_queue q;
-
drivers/misc/uacce/uacce.c:59:2-59:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:137:2-137:22: struct uacce_queue *q;
-
drivers/misc/uacce/uacce.c:187:2-187:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:202:2-202:31: struct uacce_queue *q = vma->vm_private_data;
-
drivers/misc/uacce/uacce.c:217:2-217:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:279:2-279:32: struct uacce_queue *q = file->private_data;
-
drivers/misc/uacce/uacce.c:573:2-573:22: struct uacce_queue *q, *next_q;
-
drivers/mmc/core/block.c:1452:2-1452:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:1520:2-1520:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2036:2-2036:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2192:2-2192:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2779:2-2779:26: struct list_head *pos, *q;
-
drivers/mmc/core/queue.c:86:2-86:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:122:2-122:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:140:2-140:32: struct request_queue *q = mq->queue;
-
drivers/mmc/core/queue.c:231:2-231:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:488:2-488:32: struct request_queue *q = mq->queue;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:1972:2-1972:26: struct list_head *pos, *q, *last;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:2007:2-2007:26: struct list_head *pos, *q, *last;
-
drivers/net/ethernet/amd/pds_core/adminq.c:69:2-69:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/amd/pds_core/adminq.c:160:2-160:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/amd/pds_core/debugfs.c:109:2-109:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:116:2-116:19: unsigned int tc, q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:466:2-466:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:1020:2-1020:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2300:2-2300:15: unsigned int q, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2338:2-2338:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2393:2-2393:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:5387:2-5387:6: int q, rc;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:14374:2-14374:26: struct list_head *pos, *q;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:2071:3-2071:43: struct bnx2x_vf_queue *q = vfq_get(vf, i);
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1536:3-1536:57: struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3555:2-3555:15: unsigned int q;
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3670:2-3670:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:500:2-500:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:708:2-708:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:733:2-733:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1798:2-1798:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2018:2-2018:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2433:2-2433:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2475:2-2475:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2501:2-2501:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2537:2-2537:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2580:2-2580:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2621:2-2621:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2724:2-2724:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2921:2-2921:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2982:2-2982:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:3036:2-3036:18: unsigned int i, q, idx;
-
drivers/net/ethernet/cadence/macb_main.c:3132:2-3132:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:4045:2-4045:21: unsigned int hw_q, q;
-
drivers/net/ethernet/cadence/macb_main.c:4218:2-4218:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4245:2-4245:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4267:2-4267:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4424:2-4424:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:5159:2-5159:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:5249:2-5249:15: unsigned int q;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:461:2-461:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:813:2-813:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_main.c:467:2-467:6: int q, iq;
-
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c:369:2-369:23: int mbox, key, stat, q;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:474:3-474:32: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:530:3-530:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:554:3-554:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:653:3-653:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:677:3-677:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1311:2-1311:31: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1472:2-1472:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1562:2-1562:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1701:2-1701:33: struct cmdQ *q = &sge->cmdQ[qid];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1928:3-1928:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1171:3-1171:41: struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1957:2-1957:72: const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1975:2-1975:22: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2032:2-2032:46: struct qset_params *q = adapter->params.sge.qset;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2152:3-2152:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2253:3-2253:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1268:2-1268:18: struct sge_txq *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1525:2-1525:39: struct sge_txq *q = &qs->txq[TXQ_CTRL];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1745:2-1745:39: struct sge_txq *q = &qs->txq[TXQ_OFLD];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1899:2-1899:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2326:2-2326:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2530:2-2530:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2609:2-2609:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2627:2-2627:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2647:2-2647:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2676:2-2676:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2697:2-2697:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3041:2-3041:42: struct sge_qset *q = &adapter->sge.qs[id];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3214:3-3214:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3237:3-3237:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3370:3-3370:37: struct qset_params *q = p->qset + i;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:952:2-952:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:967:2-967:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:979:2-979:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:927:3-927:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:973:3-973:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1061:3-1061:52: struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:112:2-112:37: struct sge_ofld_rxq *q = rxq_info->uldrxq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:376:3-376:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:391:3-391:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1422:2-1422:27: struct sge_txq *q = &eq->q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1516:2-1516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2674:2-2674:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2794:2-2794:27: struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3098:2-3098:26: struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3991:2-3991:23: struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4149:2-4149:23: struct sge_rspq *q = cookie;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4163:2-4163:34: struct sge_rspq *q = &adap->sge.intrq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4300:3-4300:31: struct sge_eth_txq *q = &s->ptptxq;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:420:2-420:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:471:2-471:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:516:2-516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:580:2-580:65: struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:719:2-719:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1935:2-1935:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:2991:2-2991:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3007:2-3007:24: struct be_queue_info *q, *cq;
-
drivers/net/ethernet/emulex/benet/be_main.c:3039:2-3039:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3103:2-3103:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3564:2-3564:24: struct be_queue_info *q;
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:3983:2-3983:32: struct dpni_queue q = { { 0 } };
-
drivers/net/ethernet/freescale/enetc/enetc.c:2664:2-2664:15: int err, tc, q;
-
drivers/net/ethernet/freescale/fec_main.c:922:2-922:15: unsigned int q;
-
drivers/net/ethernet/freescale/fec_main.c:3250:2-3250:15: unsigned int q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:514:2-514:30: struct funeth_rxq *q = irq->rxq;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:621:2-621:21: struct funeth_rxq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:793:2-793:26: struct funeth_rxq *q = *qp;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:347:2-347:37: struct funeth_txq *q = fp->txqs[qid];
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:472:2-472:30: struct funeth_txq *q = irq->txq;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:576:2-576:21: struct funeth_txq *q, **xdpqs;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:630:2-630:21: struct funeth_txq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:764:2-764:26: struct funeth_txq *q = *qp;
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:297:2-297:21: struct hnae_queue *q;
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:249:2-249:37: struct hnae_queue *q = &ring_pair->q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:2766:3-2766:24: struct netdev_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:5044:2-5044:32: struct hnae3_queue *q = ring->tqp;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:5076:4-5076:24: struct hnae3_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:1811:3-1812:4: struct hclge_comm_tqp *q =
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:959:4-959:58: struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:611:3-611:53: struct fm10k_hw_stats_q *q = &interface->stats.q[i];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:1332:2-1332:6: int q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:868:2-868:6: u16 q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:3871:2-3871:9: int i, q;
-
drivers/net/ethernet/intel/i40e/i40e_xsk.c:66:2-66:16: unsigned long q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2249:2-2249:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:3013:2-3013:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_xsk.c:374:2-374:16: unsigned long q;
-
drivers/net/ethernet/intel/igb/e1000_nvm.c:690:2-690:5: u8 q, hval, rem, result;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:773:2-773:31: int val, cm3_state, host_id, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:834:2-834:22: int val, cm3_state, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1951:2-1951:9: int i, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:2036:2-2036:9: int i, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:159:2-159:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:186:2-186:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:654:2-654:5: u8 q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:688:2-688:10: int q = 0;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:699:2-699:21: u8 srn, num_rings, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:154:2-154:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:220:2-220:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c:140:2-140:6: int q, i;
-
drivers/net/ethernet/marvell/octeon_ep/octep_main.c:763:2-763:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:283:2-283:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:339:2-339:6: int q, b;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:375:2-375:6: int q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:709:2-709:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:822:2-822:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1403:2-1403:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1694:2-1694:67: struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1703:2-1703:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/microsoft/mana/mana_en.c:359:2-359:6: int q;
-
drivers/net/ethernet/microsoft/mana/mana_ethtool.c:115:2-115:6: int q, i = 0;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:77:2-77:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:87:2-87:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:122:2-122:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:576:2-576:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:227:2-227:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:268:2-268:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:313:2-313:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:795:2-795:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:863:2-863:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:1174:2-1174:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3409:2-3409:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3456:2-3456:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:217:2-217:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:325:2-325:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:377:2-377:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:840:2-840:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1288:2-1288:45: struct ionic_queue *q = &lif->hwstamp_txq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1324:2-1324:22: struct ionic_queue *q;
-
drivers/net/ethernet/renesas/ravb_main.c:1179:3-1179:7: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1285:2-1285:23: int q = napi - priv->napi;
-
drivers/net/ethernet/renesas/ravb_main.c:1554:2-1554:6: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1929:2-1929:35: u16 q = skb_get_queue_mapping(skb);
-
drivers/net/ethernet/renesas/ravb_main.c:2622:2-2622:18: int error, irq, q;
-
drivers/net/ethernet/sfc/siena/tx.c:115:2-115:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/sfc/tx.c:298:2-298:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:538:2-538:6: int q, stat;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:650:2-650:6: int q, stat;
-
drivers/net/ethernet/ti/davinci_emac.c:1417:2-1417:6: int q, m, ret;
-
drivers/net/phy/sfp.c:463:2-463:26: const struct sfp_quirk *q;
-
drivers/net/ppp/ppp_generic.c:1924:2-1924:21: unsigned char *p, *q;
-
drivers/net/tap.c:300:2-300:20: struct tap_queue *q, *tmp;
-
drivers/net/tap.c:323:2-323:20: struct tap_queue *q;
-
drivers/net/tap.c:504:2-504:24: struct tap_queue *q = container_of(sk, struct tap_queue, sk);
-
drivers/net/tap.c:513:2-513:20: struct tap_queue *q;
-
drivers/net/tap.c:578:2-578:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:585:2-585:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:776:2-776:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:896:2-896:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:929:2-929:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1007:2-1007:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1230:2-1230:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1251:2-1251:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1268:2-1268:24: struct tap_queue *q = container_of(sock, struct tap_queue,
-
drivers/net/tap.c:1286:2-1286:20: struct tap_queue *q;
-
drivers/net/tap.c:1298:2-1298:20: struct tap_queue *q;
-
drivers/net/tap.c:1312:2-1312:20: struct tap_queue *q;
-
drivers/net/usb/catc.c:472:2-472:50: struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
-
drivers/net/usb/catc.c:501:2-501:21: struct ctrl_queue *q;
-
drivers/net/usb/catc.c:536:2-536:21: struct ctrl_queue *q;
-
drivers/net/wireless/ath/ath10k/mac.c:3957:2-3957:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath11k/mac.c:5881:2-5881:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath12k/mac.c:4521:2-4521:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath9k/mac.c:137:2-137:9: int i, q;
-
drivers/net/wireless/ath/ath9k/mac.c:298:2-298:6: int q;
-
drivers/net/wireless/ath/ath9k/xmit.c:111:2-111:22: struct sk_buff_head q;
-
drivers/net/wireless/ath/ath9k/xmit.c:215:2-215:14: int q = fi->txq;
-
drivers/net/wireless/ath/ath9k/xmit.c:245:2-245:6: int q, ret;
-
drivers/net/wireless/ath/ath9k/xmit.c:807:2-807:20: int q = tid->txq->mac80211_qnum;
-
drivers/net/wireless/ath/ath9k/xmit.c:2344:2-2344:6: int q, ret;
-
drivers/net/wireless/ath/carl9170/tx.c:663:2-663:21: unsigned int r, t, q;
-
drivers/net/wireless/ath/carl9170/tx.c:1278:2-1278:14: uint8_t q = 0;
-
drivers/net/wireless/ath/carl9170/tx.c:1344:2-1344:18: unsigned int i, q;
-
drivers/net/wireless/ath/wil6210/netdev.c:232:2-232:7: bool q;
-
drivers/net/wireless/ath/wil6210/txrx.c:838:2-838:11: bool q = false;
-
drivers/net/wireless/ath/wil6210/wmi.c:1931:3-1931:8: bool q;
-
drivers/net/wireless/broadcom/b43/phy_g.c:2336:2-2336:23: s32 m1, m2, f = 256, q, delta;
-
drivers/net/wireless/broadcom/b43/pio.c:49:2-49:30: struct b43_pio_txqueue *q = NULL;
-
drivers/net/wireless/broadcom/b43/pio.c:126:2-126:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:162:2-162:26: struct b43_pio_rxqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:290:2-290:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:352:2-352:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:422:2-422:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:491:2-491:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:566:2-566:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/sdio.c:39:2-39:31: const struct b43_sdio_quirk *q;
-
drivers/net/wireless/broadcom/b43legacy/phy.c:1947:2-1947:6: s32 q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:49:2-49:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:68:2-68:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:86:2-86:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:109:2-109:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:126:2-126:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:143:2-143:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:222:2-222:23: struct sk_buff_head *q;
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4304:2-4304:42: struct ipw2100_status_queue *q = &priv->status_queue;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3811:2-3811:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:4980:2-4980:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:5009:2-5009:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:10068:2-10068:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:11736:2-11736:24: struct list_head *p, *q;
-
drivers/net/wireless/intel/iwlegacy/3945-mac.c:453:2-453:23: struct il_queue *q = NULL;
-
drivers/net/wireless/intel/iwlegacy/3945.c:275:2-275:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/3945.c:601:2-601:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:1651:2-1651:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2389:2-2389:40: struct il_queue *q = &il->txq[txq_id].q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2455:2-2455:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:3957:2-3957:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2750:2-2750:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2812:2-2812:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3117:2-3117:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3238:2-3238:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4489:2-4489:6: int q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4759:3-4759:20: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4789:2-4789:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/debug.c:818:2-818:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c:1163:2-1163:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:462:2-462:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:681:2-681:6: int q, fifo;
-
drivers/net/wireless/intel/iwlwifi/iwl-io.c:260:2-260:9: int i, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c:4227:4-4227:13: int tid, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/sta.c:1774:3-1774:7: int q;
-
drivers/net/wireless/marvell/mwl8k.c:5384:4-5384:38: int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
-
drivers/net/wireless/mediatek/mt76/debugfs.c:61:3-61:41: struct mt76_queue *q = dev->phy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/debugfs.c:81:3-81:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/dma.c:755:2-755:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/dma.c:972:3-972:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/mac80211.c:842:2-842:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:1002:2-1002:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:1016:2-1016:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c:73:2-73:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:451:4-451:43: struct mt76_queue *q = dev->mphy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:917:2-917:43: struct mt76_queue *q = dev->mphy.q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:1536:2-1536:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c:405:3-405:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:18:2-18:50: struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:172:2-172:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:346:2-346:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c:946:3-946:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c:2421:3-2421:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c:623:2-623:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c:725:2-725:19: enum mt76_rxq_id q;
-
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c:169:3-169:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:122:4-122:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:601:3-601:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:627:3-627:45: struct ieee80211_he_mu_edca_param_ac_rec *q;
-
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c:710:3-710:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7996/mac.c:170:4-170:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c:2570:3-2570:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/sdio.c:306:2-306:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio.c:325:2-325:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:345:2-345:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:614:3-614:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:84:2-84:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:369:2-369:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/testmode.c:36:2-36:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:290:2-290:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:321:2-321:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:509:2-509:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:715:2-715:36: struct mt76_queue *q = phy->q_tx[0];
-
drivers/net/wireless/mediatek/mt76/usb.c:559:2-559:30: struct mt76_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt76/usb.c:644:2-644:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:664:2-664:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:729:3-729:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:743:3-743:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:765:2-765:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:932:2-932:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:974:3-974:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:998:3-998:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:173:2-173:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:194:2-194:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:241:2-241:36: struct mt7601u_tx_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:312:2-312:44: struct mt7601u_tx_queue *q = &dev->tx_q[ep];
-
drivers/net/wireless/microchip/wilc1000/wlan.c:291:2-291:40: struct wilc_tx_queue_status *q = &wl->tx_q_limit;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:182:2-182:27: struct sk_buff_head *q = NULL;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:349:2-349:23: struct sk_buff_head *q;
-
drivers/net/wireless/realtek/rtw88/mac.c:1040:2-1040:6: u32 q;
-
drivers/net/wireless/realtek/rtw88/pci.c:738:2-738:5: u8 q;
-
drivers/net/wireless/ti/wlcore/main.c:1208:2-1208:6: int q, mapping;
-
drivers/net/wireless/ti/wlcore/main.c:1275:2-1275:6: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:485:2-485:14: int i, q = -1, ac;
-
drivers/net/wireless/ti/wlcore/tx.c:658:3-658:7: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:676:2-676:56: int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:489:2-489:33: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:582:3-582:34: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:965:2-965:23: struct sk_buff_head *q;
-
drivers/net/wireless/zydas/zd1211rw/zd_usb.c:1059:2-1059:32: struct sk_buff_head *q = &tx->submitted_skbs;
-
drivers/nvdimm/pmem.c:460:2-460:24: struct request_queue *q;
-
drivers/nvme/host/apple.c:736:2-736:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/apple.c:786:2-786:36: struct apple_nvme_queue *q = set->driver_data;
-
drivers/nvme/host/apple.c:879:2-879:36: struct apple_nvme_queue *q = iod->q;
-
drivers/nvme/host/apple.c:939:2-939:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/auth.c:66:2-66:28: struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
-
drivers/nvme/host/auth.c:894:2-894:11: int ret, q;
-
drivers/nvme/host/fc.c:2483:2-2483:6: int q;
-
drivers/nvme/host/ioctl.c:167:2-167:33: struct request_queue *q = req->q;
-
drivers/nvme/host/ioctl.c:556:2-556:51: struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
-
drivers/nvme/host/ioctl.c:788:2-788:24: struct request_queue *q;
-
drivers/nvme/host/ioctl.c:899:2-899:24: struct request_queue *q;
-
drivers/nvme/host/pci.c:2392:2-2392:45: struct request_queue *q = nvmeq->dev->ctrl.admin_q;
-
drivers/nvme/host/zns.c:12:2-12:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:52:2-52:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:124:2-124:38: struct request_queue *q = ns->disk->queue;
-
drivers/nvme/target/passthru.c:295:2-295:34: struct request_queue *q = ctrl->admin_q;
-
drivers/of/fdt.c:1014:2-1014:18: const char *p, *q, *options = NULL;
-
drivers/parport/probe.c:56:2-56:18: char *p = txt, *q;
-
drivers/pcmcia/cistpl.c:663:2-663:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:795:2-795:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:812:2-812:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:824:2-824:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1081:2-1081:14: u_char *p, *q, features;
-
drivers/pcmcia/cistpl.c:1204:2-1204:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1228:2-1228:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1249:2-1249:14: u_char *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:110:2-110:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:134:2-134:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:1042:2-1042:27: struct resource_map *p, *q;
-
drivers/platform/chrome/wilco_ec/event.c:107:2-107:25: struct ec_event_queue *q;
-
drivers/platform/surface/aggregator/ssh_packet_layer.c:700:2-700:21: struct ssh_packet *q;
-
drivers/scsi/aacraid/commsup.c:361:2-361:21: struct aac_queue * q;
-
drivers/scsi/aacraid/commsup.c:652:6-652:65: struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/commsup.c:875:2-875:21: struct aac_queue * q;
-
drivers/scsi/aacraid/dpcsup.c:278:3-278:61: struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:400:2-400:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:423:2-423:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/src.c:486:2-486:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/be2iscsi/be_main.c:3462:2-3462:24: struct be_queue_info *q;
-
drivers/scsi/be2iscsi/be_main.c:3522:2-3522:24: struct be_queue_info *q, *cq;
-
drivers/scsi/be2iscsi/be_main.c:3632:2-3632:24: struct be_queue_info *q;
-
drivers/scsi/bfa/bfa_core.c:1318:2-1318:7: int q;
-
drivers/scsi/bfa/bfa_core.c:1474:2-1474:6: int q, per_reqq_sz, per_rspq_sz;
-
drivers/scsi/csiostor/csio_isr.c:428:4-428:50: struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
-
drivers/scsi/csiostor/csio_wr.c:191:2-191:17: struct csio_q *q, *flq;
-
drivers/scsi/csiostor/csio_wr.c:747:2-747:51: struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:765:2-765:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:789:2-789:17: struct csio_q *q;
-
drivers/scsi/csiostor/csio_wr.c:867:2-867:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:985:2-985:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:1690:2-1690:17: struct csio_q *q;
-
drivers/scsi/elx/efct/efct_hw_queues.c:406:2-406:15: struct hw_q *q;
-
drivers/scsi/elx/libefc_sli/sli4.c:4121:2-4121:18: enum sli4_qtype q;
-
drivers/scsi/esas2r/esas2r_flash.c:331:2-331:10: u8 *p, *q;
-
drivers/scsi/fnic/fnic_scsi.c:2176:2-2176:32: struct request_queue *q = rq->q;
-
drivers/scsi/hpsa.c:7017:2-7017:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7038:2-7038:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7054:2-7054:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7073:2-7073:17: u8 q = *(u8 *) queue;
-
drivers/scsi/ips.c:2530:2-2530:20: struct scsi_cmnd *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:557:2-557:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:604:2-604:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1886:2-1886:23: struct enode *node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1989:2-1989:29: struct enode *list_node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:2159:2-2159:26: struct edb_node *node, *q;
-
drivers/scsi/qla2xxx/qla_init.c:5333:2-5333:10: __be32 *q;
-
drivers/scsi/qla2xxx/qla_os.c:5129:2-5129:11: bool q = false;
-
drivers/scsi/qla2xxx/qla_os.c:7508:3-7508:12: bool q = false;
-
drivers/scsi/scsi_ioctl.c:866:2-866:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:462:2-462:24: struct request_queue *q;
-
drivers/scsi/scsi_lib.c:536:2-536:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:1711:2-1711:33: struct request_queue *q = req->q;
-
drivers/scsi/scsi_lib.c:2628:2-2628:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_scan.c:283:2-283:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4166:2-4166:35: struct request_queue *q = rport->rqst_q;
-
drivers/scsi/scsi_transport_fc.c:4279:2-4279:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4314:2-4314:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_iscsi.c:1538:2-1538:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:192:2-192:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:252:2-252:54: struct request_queue *q = to_sas_host_attrs(shost)->q;
-
drivers/scsi/sd.c:785:2-785:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:962:2-962:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:2967:2-2967:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3272:2-3272:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:204:2-204:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:831:2-831:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sd_zbc.c:917:2-917:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sg.c:287:2-287:24: struct request_queue *q;
-
drivers/scsi/sg.c:1433:2-1433:36: struct request_queue *q = scsidp->request_queue;
-
drivers/scsi/sg.c:1727:2-1727:51: struct request_queue *q = sfp->parentdp->device->request_queue;
-
drivers/scsi/sym53c8xx_2/sym_malloc.c:97:2-97:11: m_link_p q;
-
drivers/spi/spi-fsl-qspi.c:342:2-342:23: struct fsl_qspi *q = dev_id;
-
drivers/spi/spi-fsl-qspi.c:371:2-371:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:644:2-644:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:706:2-706:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:812:2-812:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:849:2-849:19: struct fsl_qspi *q;
-
drivers/spi/spi-fsl-qspi.c:953:2-953:48: struct fsl_qspi *q = platform_get_drvdata(pdev);
-
drivers/spi/spi-fsl-qspi.c:971:2-971:42: struct fsl_qspi *q = dev_get_drvdata(dev);
-
drivers/spi/spi-pxa2xx.c:816:2-816:16: unsigned long q, q1, q2;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:212:2-212:22: ia_css_queue_t *q = NULL;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:331:2-331:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:359:2-359:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:389:2-389:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:409:2-409:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:431:2-431:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:451:2-451:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:470:2-470:18: ia_css_queue_t *q;
-
drivers/staging/media/deprecated/atmel/atmel-isc-base.c:1849:2-1849:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/staging/media/ipu3/ipu3-css.c:1062:2-1063:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1076:2-1077:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1114:2-1115:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1357:2-1357:6: int q, r, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1394:2-1394:6: int q;
-
drivers/staging/media/ipu3/ipu3-css.c:1425:2-1425:18: unsigned int p, q, i;
-
drivers/staging/media/ipu3/ipu3-css.c:1469:2-1469:18: unsigned int p, q, i, abi_buf_num;
-
drivers/staging/media/ipu3/ipu3-css.c:1506:2-1506:9: int r, q, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1702:2-1702:25: struct imgu_css_queue *q;
-
drivers/staging/media/omap4iss/iss_video.c:1088:2-1088:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c:1848:2-1848:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1300:2-1300:20: struct list_head *q, *buf_head;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1350:2-1350:20: struct list_head *q, *buf_head;
-
drivers/target/target_core_device.c:706:3-706:27: struct se_device_queue *q;
-
drivers/target/target_core_iblock.c:90:2-90:24: struct request_queue *q;
-
drivers/target/target_core_pscsi.c:286:2-286:32: struct request_queue *q = sd->request_queue;
-
drivers/thunderbolt/quirks.c:97:3-97:42: const struct tb_quirk *q = &tb_quirks[i];
-
drivers/tty/vt/consolemap.c:330:2-330:31: struct uni_pagedict *p, *q = NULL;
-
drivers/tty/vt/vt.c:610:3-610:12: u16 *q = p;
-
drivers/tty/vt/vt.c:716:3-716:12: u16 *q = p;
-
drivers/ufs/core/ufs_bsg.c:241:2-241:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:5145:2-5145:34: struct request_queue *q = sdev->request_queue;
-
drivers/ufs/core/ufshcd.c:6286:2-6286:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:6889:2-6889:33: struct request_queue *q = hba->tmf_queue;
-
drivers/ufs/core/ufshpb.c:690:2-690:24: struct request_queue *q;
-
drivers/usb/core/devio.c:677:2-677:24: struct list_head *p, *q, hitlist;
-
drivers/usb/fotg210/fotg210-hcd.c:3338:2-3338:51: union fotg210_shadow *q = &fotg210->pshadow[frame];
-
drivers/usb/fotg210/fotg210-hcd.c:4582:2-4582:23: union fotg210_shadow q, *q_p;
-
drivers/usb/host/ehci-sched.c:2358:2-2358:20: union ehci_shadow q, *q_p;
-
drivers/usb/host/oxu210hp-hcd.c:2270:2-2270:44: union ehci_shadow *q = &oxu->pshadow[frame];
-
drivers/usb/host/oxu210hp-hcd.c:2692:3-2692:21: union ehci_shadow q, *q_p;
-
drivers/vhost/scsi.c:542:4-542:33: struct vhost_scsi_virtqueue *q;
-
drivers/video/fbdev/aty/mach64_ct.c:209:2-209:6: u32 q;
-
drivers/video/fbdev/aty/mach64_ct.c:405:2-405:6: u32 q, memcntl, trp;
-
drivers/video/fbdev/hgafb.c:282:2-282:20: void __iomem *p, *q;
-
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c:414:2-414:20: unsigned itc, ec, q, sc;
-
drivers/xen/events/events_fifo.c:105:2-105:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:279:2-279:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:324:2-324:11: unsigned q;
-
drivers/xen/gntdev-dmabuf.c:678:2-678:24: struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
-
drivers/xen/gntdev-dmabuf.c:723:2-723:24: struct gntdev_dmabuf *q, *gntdev_dmabuf;
-
fs/afs/addr_list.c:136:3-136:15: const char *q, *stop;
-
fs/autofs/expire.c:101:2-101:17: struct dentry *q;
-
fs/ceph/caps.c:900:5-900:21: struct rb_node *q;
-
fs/configfs/dir.c:1608:2-1608:37: struct list_head *p, *q = &cursor->s_sibling;
-
fs/dcache.c:1907:2-1907:14: struct qstr q;
-
fs/efivarfs/super.c:89:2-89:14: struct qstr q;
-
fs/erofs/zdata.c:1592:2-1592:34: struct z_erofs_decompressqueue *q;
-
fs/erofs/zdata.c:1647:2-1647:43: struct z_erofs_decompressqueue *q = bio->bi_private;
-
fs/erofs/zdata.c:1678:2-1678:48: struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-
fs/ext4/namei.c:811:2-811:38: struct dx_entry *at, *entries, *p, *q, *m;
-
fs/ext4/namei.c:1350:2-1350:27: struct dx_map_entry *p, *q, *top = map + count - 1;
-
fs/f2fs/checkpoint.c:1812:2-1812:32: wait_queue_head_t *q = &cprc->ckpt_wait_queue;
-
fs/f2fs/segment.c:551:2-551:31: wait_queue_head_t *q = &fcc->flush_wait_queue;
-
fs/f2fs/segment.c:1786:2-1786:31: wait_queue_head_t *q = &dcc->discard_wait_queue;
-
fs/fs_context.c:409:3-411:31: char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
-
fs/fs_pin.c:88:3-88:22: struct hlist_node *q;
-
fs/gfs2/quota.c:831:2-831:20: struct gfs2_quota q;
-
fs/gfs2/quota.c:985:2-985:20: struct gfs2_quota q;
-
fs/hpfs/alloc.c:122:2-122:14: unsigned i, q;
-
fs/hpfs/ea.c:289:4-289:44: secno q = hpfs_alloc_sector(s, fno, 1, 0);
-
fs/inode.c:2346:2-2346:2: DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
fs/jffs2/compr_rubin.c:202:2-202:35: unsigned long p = rs->p, q = rs->q;
-
fs/namespace.c:1880:2-1880:26: struct mount *res, *p, *q, *r, *parent;
-
fs/namespace.c:2254:3-2254:17: struct mount *q;
-
fs/namespace.c:3456:2-3456:20: struct mount *p, *q;
-
fs/nfs/nfs4proc.c:7479:2-7479:31: wait_queue_head_t *q = &clp->cl_lock_waitq;
-
fs/proc/base.c:504:4-504:8: int q;
-
fs/proc/bootconfig.c:31:2-31:7: char q;
-
fs/smb/client/cached_dir.c:451:2-451:28: struct cached_fid *cfid, *q;
-
fs/smb/client/cached_dir.c:552:2-552:33: struct cached_dirent *dirent, *q;
-
fs/smb/client/cached_dir.c:589:2-589:28: struct cached_fid *cfid, *q;
-
fs/ufs/inode.c:131:2-131:26: Indirect chain[4], *q = chain;
-
fs/xfs/xfs_dquot.c:73:2-73:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_dquot.c:183:2-183:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_qm_syscalls.c:279:2-279:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_quotaops.c:60:2-60:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_trans_dquot.c:626:2-626:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
include/crypto/aria.h:436:2-436:21: int q = 4 - (n / 32);
-
include/linux/blkdev.h:1229:2-1229:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1295:2-1295:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/net/pkt_cls.h:184:2-184:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/pkt_cls.h:208:2-208:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/sch_generic.h:536:2-536:20: struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
-
include/net/sch_generic.h:739:3-739:27: const struct Qdisc *q = rcu_dereference(txq->qdisc);
-
init/initramfs.c:87:2-87:20: struct hash **p, *q;
-
init/initramfs.c:114:2-114:20: struct hash **p, *q;
-
ipc/sem.c:285:2-285:20: struct sem_queue *q, *tq;
-
ipc/sem.c:857:2-857:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:951:2-951:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:1110:2-1110:20: struct sem_queue *q;
-
ipc/sem.c:1146:2-1146:20: struct sem_queue *q, *tq;
-
kernel/audit_tree.c:611:2-611:24: struct list_head *p, *q;
-
kernel/auditsc.c:272:2-272:26: struct audit_tree_refs *q;
-
kernel/auditsc.c:300:2-300:30: struct audit_tree_refs *p, *q;
-
kernel/bpf/cpumap.c:705:2-705:19: struct ptr_ring *q;
-
kernel/cgroup/pids.c:160:2-160:26: struct pids_cgroup *p, *q;
-
kernel/crash_core.c:212:3-212:9: char *q;
-
kernel/events/uprobes.c:319:2-319:26: struct list_head *pos, *q;
-
kernel/events/uprobes.c:1327:2-1327:26: struct list_head *pos, *q;
-
kernel/futex/pi.c:936:2-936:21: struct futex_q q = futex_q_init;
-
kernel/futex/requeue.c:770:2-770:21: struct futex_q q = futex_q_init;
-
kernel/futex/waitwake.c:437:3-437:30: struct futex_q *q = &vs[i].q;
-
kernel/futex/waitwake.c:637:2-637:21: struct futex_q q = futex_q_init;
-
kernel/latencytop.c:123:3-123:7: int q, same = 1;
-
kernel/latencytop.c:180:2-180:9: int i, q;
-
kernel/latencytop.c:253:4-253:8: int q;
-
kernel/ptrace.c:735:2-735:19: struct sigqueue *q;
-
kernel/signal.c:415:2-415:23: struct sigqueue *q = NULL;
-
kernel/signal.c:463:2-463:19: struct sigqueue *q;
-
kernel/signal.c:492:2-492:19: struct sigqueue *q, *n;
-
kernel/signal.c:571:2-571:19: struct sigqueue *q, *first = NULL;
-
kernel/signal.c:714:2-714:19: struct sigqueue *q, *sync = NULL;
-
kernel/signal.c:788:2-788:19: struct sigqueue *q, *n;
-
kernel/signal.c:1082:2-1082:19: struct sigqueue *q;
-
kernel/trace/blktrace.c:732:2-732:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:977:2-977:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1006:2-1006:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1768:2-1768:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:1802:2-1802:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/trace.c:3991:2-3991:8: char *q;
-
kernel/trace/trace_boot.c:564:2-564:8: char *q;
-
kernel/trace/trace_events_filter.c:1377:2-1377:7: char q;
-
kernel/trace/trace_events_filter.c:1562:3-1562:17: char q = str[i];
-
kernel/trace/trace_events_inject.c:105:3-105:17: char q = str[i];
-
kernel/watch_queue.c:315:2-315:28: struct watch_type_filter *q;
-
lib/bch.c:909:2-909:37: struct gf_poly *q = bch->poly_2t[1];
-
lib/bootconfig.c:851:2-851:12: char *p, *q;
-
lib/crc32.c:82:2-82:6: u32 q;
-
lib/crypto/curve25519-hacl64.c:36:2-36:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
lib/crypto/curve25519-hacl64.c:766:2-766:7: u64 *q;
-
lib/mpi/mpih-div.c:248:5-248:16: mpi_limb_t q;
-
lib/mpi/mpih-div.c:315:5-315:16: mpi_limb_t q;
-
lib/raid6/avx2.c:37:2-37:10: u8 *p, *q;
-
lib/raid6/avx2.c:86:2-86:10: u8 *p, *q;
-
lib/raid6/avx2.c:144:2-144:10: u8 *p, *q;
-
lib/raid6/avx2.c:196:2-196:10: u8 *p, *q;
-
lib/raid6/avx2.c:276:2-276:10: u8 *p, *q;
-
lib/raid6/avx2.c:357:2-357:10: u8 *p, *q;
-
lib/raid6/avx512.c:47:2-47:10: u8 *p, *q;
-
lib/raid6/avx512.c:105:2-105:10: u8 *p, *q;
-
lib/raid6/avx512.c:174:2-174:10: u8 *p, *q;
-
lib/raid6/avx512.c:237:2-237:10: u8 *p, *q;
-
lib/raid6/avx512.c:333:2-333:10: u8 *p, *q;
-
lib/raid6/avx512.c:427:2-427:10: u8 *p, *q;
-
lib/raid6/recov.c:23:2-23:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov.c:67:2-67:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx2.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx2.c:189:2-189:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx512.c:27:2-27:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx512.c:230:2-230:10: u8 *p, *q, *dq;
-
lib/raid6/recov_ssse3.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_ssse3.c:194:2-194:10: u8 *p, *q, *dq;
-
lib/raid6/sse2.c:39:2-39:10: u8 *p, *q;
-
lib/raid6/sse2.c:91:2-91:10: u8 *p, *q;
-
lib/raid6/sse2.c:149:2-149:10: u8 *p, *q;
-
lib/raid6/sse2.c:202:2-202:10: u8 *p, *q;
-
lib/raid6/sse2.c:281:2-281:10: u8 *p, *q;
-
lib/raid6/sse2.c:368:2-368:10: u8 *p, *q;
-
lib/reed_solomon/decode_rs.c:23:2-23:14: uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
-
lib/string_helpers.c:180:2-180:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:208:2-208:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:227:2-227:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:251:2-251:24: char *p = *dst, *q = *src;
-
lib/test_hexdump.c:99:3-99:26: const char *q = *result++;
-
lib/ts_kmp.c:45:2-45:22: unsigned int i, q = 0, text_len, consumed = state->offset;
-
lib/ts_kmp.c:77:2-77:18: unsigned int k, q;
-
lib/vsprintf.c:222:2-222:11: unsigned q;
-
lib/vsprintf.c:264:2-264:11: unsigned q;
-
mm/filemap.c:1135:2-1135:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1225:2-1225:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1385:2-1385:21: wait_queue_head_t *q;
-
mm/filemap.c:1482:2-1482:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1678:2-1678:51: struct wait_queue_head *q = folio_waitqueue(folio);
-
mm/z3fold.c:630:3-630:9: void *q;
-
net/atm/lec.c:870:2-870:6: int q;
-
net/bluetooth/hci_core.c:3301:2-3301:11: int cnt, q;
-
net/core/dev.c:2316:3-2316:40: int q = netdev_get_prio_tc_map(dev, i);
-
net/core/dev.c:3090:3-3090:21: struct Qdisc *q = rcu_dereference(txq->qdisc);
-
net/core/dev.c:3101:3-3101:17: struct Qdisc *q;
-
net/core/dev.c:4154:2-4154:16: struct Qdisc *q;
-
net/core/dev.c:5060:4-5060:22: struct Qdisc *q = head;
-
net/core/pktgen.c:3370:2-3370:20: struct list_head *q, *n;
-
net/core/pktgen.c:3392:2-3392:20: struct list_head *q, *n;
-
net/core/pktgen.c:3873:2-3873:20: struct list_head *q, *n;
-
net/core/pktgen.c:3969:2-3969:20: struct list_head *q, *n;
-
net/core/skbuff.c:1613:2-1613:23: struct sk_buff_head *q;
-
net/core/skbuff.c:5071:2-5071:32: struct sk_buff_head *q = &sk->sk_error_queue;
-
net/ieee802154/6lowpan/reassembly.c:70:2-70:26: struct inet_frag_queue *q;
-
net/ipv4/af_inet.c:1932:2-1932:23: struct inet_protosw *q;
-
net/ipv4/inet_fragment.c:255:2-255:30: struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
-
net/ipv4/inet_fragment.c:316:2-316:26: struct inet_frag_queue *q;
-
net/ipv4/inet_fragment.c:338:2-338:26: struct inet_frag_queue *q;
-
net/ipv4/ip_fragment.c:216:2-216:26: struct inet_frag_queue *q;
-
net/ipv4/tcp_fastopen.c:62:2-62:25: struct fastopen_queue *q;
-
net/ipv4/tcp_output.c:1046:2-1046:20: struct list_head *q, *n;
-
net/ipv6/mcast.c:1512:2-1512:22: struct sk_buff_head q;
-
net/ipv6/mcast.c:1616:2-1616:22: struct sk_buff_head q;
-
net/ipv6/netfilter/nf_conntrack_reasm.c:155:2-155:26: struct inet_frag_queue *q;
-
net/ipv6/reassembly.c:93:2-93:26: struct inet_frag_queue *q;
-
net/mac80211/debugfs.c:569:2-569:6: int q, res = 0;
-
net/mac80211/ethtool.c:79:2-79:9: int i, q;
-
net/mac80211/mlme.c:2341:2-2341:6: int q;
-
net/mac80211/tx.c:1697:3-1697:17: int q = info->hw_queue;
-
net/mac80211/tx.c:3793:2-3793:31: int q = vif->hw_queue[txq->ac];
-
net/mac80211/tx.c:4523:2-4523:16: int q = info->hw_queue;
-
net/netfilter/nfnetlink_queue.c:810:2-810:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:957:2-957:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:990:2-990:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1017:2-1017:53: struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
-
net/netfilter/nfnetlink_queue.c:1104:2-1104:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1215:2-1215:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1300:2-1300:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1464:2-1464:25: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1485:3-1485:26: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1550:2-1550:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1567:2-1567:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/xt_quota.c:29:2-29:41: struct xt_quota_info *q = (void *)par->matchinfo;
-
net/netfilter/xt_quota.c:48:2-48:33: struct xt_quota_info *q = par->matchinfo;
-
net/netfilter/xt_quota.c:64:2-64:39: const struct xt_quota_info *q = par->matchinfo;
-
net/rds/message.c:96:2-96:30: struct rds_msg_zcopy_queue *q;
-
net/rds/recv.c:601:2-601:39: struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
-
net/rose/rose_in.c:266:2-266:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/rxrpc/rxkad.c:871:2-871:10: u8 *p, *q, *name, *end;
-
net/sched/cls_api.c:2130:2-2130:16: struct Qdisc *q;
-
net/sched/cls_api.c:2363:2-2363:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2520:2-2520:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2723:2-2723:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2989:2-2989:16: struct Qdisc *q;
-
net/sched/cls_api.c:3117:2-3117:20: struct Qdisc *q = NULL;
-
net/sched/cls_flow.c:505:4-505:50: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_fw.c:77:3-77:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:133:2-133:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:177:2-177:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:205:2-205:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:263:2-263:16: struct Qdisc *q;
-
net/sched/sch_api.c:302:2-302:16: struct Qdisc *q;
-
net/sched/sch_api.c:321:2-321:16: struct Qdisc *q;
-
net/sched/sch_api.c:354:2-354:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:1071:2-1071:20: struct Qdisc *q = old;
-
net/sched/sch_api.c:1452:2-1452:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:1533:2-1533:16: struct Qdisc *q, *p;
-
net/sched/sch_api.c:1709:2-1709:16: struct Qdisc *q;
-
net/sched/sch_api.c:1935:3-1935:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:2013:2-2013:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:2188:2-2188:16: struct Qdisc *q;
-
net/sched/sch_cake.c:1503:2-1503:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1617:2-1617:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1657:2-1657:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1697:2-1697:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1912:2-1912:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1936:2-1936:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1947:2-1947:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2227:2-2227:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2298:2-2298:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2318:2-2318:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2407:2-2407:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2451:2-2451:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2488:2-2488:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2517:2-2517:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2572:2-2572:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2693:2-2693:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2703:2-2703:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2774:2-2774:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2849:2-2849:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2968:2-2968:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2985:2-2985:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:3058:2-3058:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:108:2-108:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:117:2-117:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:134:2-134:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:178:2-178:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:233:2-233:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:241:2-241:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:336:2-336:25: struct cbs_sched_data *q;
-
net/sched/sch_cbs.c:364:2-364:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:404:2-404:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:435:2-435:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:454:2-454:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:481:2-481:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:495:2-495:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:510:2-510:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:116:2-116:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:215:2-215:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:285:2-285:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:306:2-306:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:338:2-338:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:433:2-433:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:461:2-461:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:474:2-474:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:481:2-481:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:91:2-91:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:114:2-114:27: struct codel_sched_data *q;
-
net/sched/sch_codel.c:136:2-136:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:189:2-189:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:215:2-215:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:244:2-244:51: const struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:270:2-270:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:41:2-41:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:58:2-58:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:150:2-150:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:175:2-175:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:278:2-278:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:296:2-296:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:335:2-335:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:370:2-370:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:411:2-411:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:426:2-426:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:441:2-441:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:77:2-77:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:110:2-110:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:122:2-122:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:165:2-165:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:203:2-203:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:233:2-233:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:255:2-255:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:346:2-346:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:419:2-419:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:435:2-435:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:450:2-450:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:462:2-462:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:92:2-92:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:99:2-99:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:108:2-108:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:202:2-202:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:277:2-277:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:287:2-287:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:301:2-301:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:337:2-337:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:353:2-353:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:376:2-376:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:414:2-414:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:458:2-458:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:581:2-581:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:690:2-690:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:709:2-709:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:722:2-722:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:733:2-733:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_fifo.c:256:2-256:16: struct Qdisc *q;
-
net/sched/sch_fq.c:445:2-445:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:528:2-528:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:664:2-664:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:749:2-749:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:809:2-809:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:920:2-920:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:930:2-930:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:969:2-969:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:1010:2-1010:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:79:2-79:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:140:2-140:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:187:2-187:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:258:2-258:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:283:2-283:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:337:2-337:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:370:2-370:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:452:2-452:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:462:2-462:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:524:2-524:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:567:2-567:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:615:2-615:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:632:2-632:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:678:2-678:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:82:2-82:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:131:2-131:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:230:2-230:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:280:2-280:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:371:2-371:32: struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_fq_pie.c:393:2-393:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:446:2-446:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:481:2-481:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:506:2-506:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:525:2-525:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_generic.c:726:2-726:44: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:752:3-752:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:791:3-791:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:805:3-805:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:820:4-820:29: struct gnet_stats_queue *q;
-
net/sched/sch_generic.c:854:3-854:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:873:3-873:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:895:3-895:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:1044:2-1044:20: struct Qdisc *q = container_of(head, struct Qdisc, rcu);
-
net/sched/sch_generic.c:1302:3-1302:17: struct Qdisc *q;
-
net/sched/sch_gred.c:99:3-99:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:168:2-168:30: struct gred_sched_data *q = NULL;
-
net/sched/sch_gred.c:269:3-269:27: struct gred_sched_data *q;
-
net/sched/sch_gred.c:301:3-301:39: struct gred_sched_data *q = t->tab[i];
-
net/sched/sch_gred.c:334:4-334:44: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:488:2-488:43: struct gred_sched_data *q = table->tab[dp];
-
net/sched/sch_gred.c:791:3-791:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:807:3-807:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:857:3-857:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_hfsc.c:866:2-866:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:917:2-917:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1081:2-1081:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1094:2-1094:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1117:2-1117:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1243:2-1243:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1342:2-1342:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1361:2-1361:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1380:2-1380:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1423:2-1423:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1470:2-1470:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1485:2-1485:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1508:2-1508:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1572:2-1572:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:249:2-249:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:351:2-351:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:374:2-374:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:420:2-420:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:474:2-474:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:511:2-511:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:577:2-577:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:653:2-653:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:679:2-679:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:189:2-189:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:223:2-223:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:624:2-624:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:941:2-941:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1000:2-1000:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1035:2-1035:24: struct htb_sched *q = container_of(work, struct htb_sched, work);
-
net/sched/sch_htb.c:1060:2-1060:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1164:2-1164:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1203:2-1203:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1213:2-1213:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1253:2-1253:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1332:2-1332:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1374:2-1374:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1455:2-1455:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1515:2-1515:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1554:2-1554:29: struct Qdisc *q = cl->leaf.q;
-
net/sched/sch_htb.c:1626:2-1626:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1703:2-1703:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1773:2-1773:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2091:2-2091:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2126:2-2126:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:50:2-50:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:64:2-64:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:71:2-71:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:79:2-79:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:102:2-102:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:175:2-175:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:189:2-189:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:196:2-196:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:203:2-203:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:210:2-210:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:218:2-218:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:249:2-249:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_mqprio.c:672:4-672:55: struct netdev_queue *q = netdev_get_tx_queue(dev, i);
-
net/sched/sch_multiq.c:32:2-32:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:89:2-89:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:120:2-120:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:151:2-151:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:162:2-162:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:174:2-174:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:238:2-238:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:263:2-263:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:283:2-283:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:296:2-296:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:304:2-304:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:326:2-326:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:336:2-336:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:349:2-349:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:364:2-364:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:362:2-362:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:380:2-380:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:437:2-437:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:678:2-678:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:757:2-757:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:957:2-957:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1059:2-1059:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1076:2-1076:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1137:2-1137:51: const struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1220:2-1220:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1234:2-1234:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1242:2-1242:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:88:2-88:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:141:2-141:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:422:2-422:29: struct pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_pie.c:438:2-438:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:460:2-460:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:491:2-491:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:516:2-516:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:528:2-528:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:536:2-536:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:90:2-90:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:103:2-103:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:125:2-125:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:161:2-161:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:33:2-33:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:99:2-99:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:113:2-113:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:134:2-134:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:168:2-168:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:179:2-179:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:232:2-232:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:264:2-264:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:289:2-289:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:319:2-319:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:327:2-327:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:348:2-348:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:358:2-358:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:372:2-372:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:387:2-387:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:209:2-209:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:383:2-383:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:402:2-402:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:522:2-522:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:533:2-533:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:558:2-558:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:652:2-652:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:670:2-670:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1070:2-1070:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1192:2-1192:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1394:2-1394:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1403:2-1403:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1441:2-1441:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1457:2-1457:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_red.c:73:2-73:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:153:2-153:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:170:2-170:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:178:2-178:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:186:2-186:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:215:2-215:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:238:2-238:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:322:2-322:29: struct red_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_red.c:335:2-335:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:369:2-369:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:408:2-408:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:445:2-445:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:471:2-471:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:496:2-496:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:509:2-509:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:283:2-283:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:428:2-428:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:446:2-446:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:456:2-456:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:468:2-468:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:493:2-493:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:556:2-556:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:569:2-569:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:598:2-598:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:622:2-622:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:633:2-633:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:670:2-670:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:166:2-166:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:295:2-295:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:348:2-348:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:482:2-482:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:537:2-537:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:607:2-607:29: struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
-
net/sched/sch_sfq.c:625:2-625:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:721:2-721:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:734:2-734:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:790:2-790:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:848:2-848:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:865:2-865:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:884:2-884:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:72:2-72:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:141:2-141:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:182:2-182:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:213:2-213:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:226:2-226:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:253:2-253:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:325:2-325:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:392:2-392:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:474:2-474:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:546:2-546:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:567:2-567:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:632:2-632:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:712:2-712:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:782:2-782:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:833:2-833:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:919:2-919:27: struct taprio_sched *q = container_of(timer, struct taprio_sched,
-
net/sched/sch_taprio.c:1199:2-1199:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1262:2-1262:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1311:2-1311:23: struct taprio_sched *q;
-
net/sched/sch_taprio.c:1602:2-1602:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1732:2-1732:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1834:2-1834:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2001:2-2001:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2016:2-2016:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2056:2-2056:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2125:2-2125:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2168:2-2168:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2294:2-2294:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:143:2-143:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:207:2-207:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:241:2-241:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:270:2-270:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:330:2-330:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:353:2-353:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:480:2-480:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:495:2-495:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:504:2-504:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:547:2-547:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:558:2-558:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:571:2-571:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:79:2-79:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:95:2-95:16: struct Qdisc *q;
-
net/sched/sch_teql.c:132:2-132:16: struct Qdisc *q, *prev;
-
net/sched/sch_teql.c:173:2-173:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:280:2-280:24: struct Qdisc *start, *q;
-
net/sched/sch_teql.c:357:2-357:16: struct Qdisc *q;
-
net/sched/sch_teql.c:417:2-417:16: struct Qdisc *q;
-
net/sctp/output.c:678:2-678:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/outqueue.c:385:2-385:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/socket.c:171:2-171:31: struct sctp_outq *q = &asoc->outqueue;
-
net/smc/smc_llc.c:1836:2-1836:34: struct smc_llc_qentry *qentry, *q;
-
net/sunrpc/auth_gss/auth_gss.c:179:2-179:14: const void *q;
-
net/sunrpc/auth_gss/auth_gss_internal.h:18:2-18:54: const void *q = (const void *)((const char *)p + len);
-
net/sunrpc/auth_gss/auth_gss_internal.h:28:2-28:14: const void *q;
-
net/sunrpc/rpc_pipe.c:634:2-634:18: struct qstr q = QSTR_INIT(name, strlen(name));
-
net/sunrpc/rpc_pipe.c:1304:2-1304:18: struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
-
net/sunrpc/sched.c:171:2-171:20: struct list_head *q;
-
net/sunrpc/sched.c:605:2-605:20: struct list_head *q;
-
net/sunrpc/xdr.c:1054:2-1054:10: __be32 *q;
-
net/sunrpc/xdr.c:1405:2-1405:18: __be32 *q = p + nwords;
-
net/x25/x25_in.c:418:2-418:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/xdp/xsk.c:751:2-751:20: struct xsk_queue *q;
-
net/xdp/xsk.c:1066:3-1066:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1122:3-1122:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1305:2-1305:24: struct xsk_queue *q = NULL;
-
net/xdp/xsk_queue.c:26:2-26:20: struct xsk_queue *q;
-
samples/v4l/v4l2-pci-skeleton.c:750:2-750:20: struct vb2_queue *q;
-
scripts/dtc/libfdt/fdt_ro.c:260:3-260:44: const char *q = memchr(path, '/', end - p);
-
scripts/dtc/libfdt/fdt_ro.c:274:3-274:15: const char *q;
-
security/integrity/evm/evm_main.c:911:2-911:26: struct list_head *pos, *q;
-
security/keys/keyctl_pkey.c:42:2-42:31: char *c = params->info, *p, *q;
-
security/selinux/hooks.c:2540:4-2540:14: char *p, *q;
-
security/selinux/hooks.c:3500:3-3500:15: struct qstr q;
-
sound/core/misc.c:114:2-114:30: const struct snd_pci_quirk *q;
-
sound/core/pcm_lib.c:560:2-560:15: unsigned int q;
-
sound/core/pcm_lib.c:827:3-827:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:866:3-866:23: unsigned int q = i->max;
-
sound/core/pcm_lib.c:943:3-943:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:975:3-975:23: unsigned int q = i->max;
-
sound/core/seq/oss/seq_oss_readq.c:35:2-35:24: struct seq_oss_readq *q;
-
sound/core/seq/oss/seq_oss_writeq.c:27:2-27:25: struct seq_oss_writeq *q;
-
sound/core/seq/seq_clientmgr.c:575:2-575:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1556:2-1556:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1588:2-1588:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1609:2-1609:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1642:2-1642:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1770:3-1770:25: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:71:2-71:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:98:2-98:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:170:2-170:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:189:2-189:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:205:2-205:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:222:2-222:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:303:2-303:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:388:2-388:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:408:2-408:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:475:2-475:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:538:2-538:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:559:2-559:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:592:2-592:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:608:2-608:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:707:2-707:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:737:2-737:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_timer.c:125:2-125:36: struct snd_seq_queue *q = timeri->callback_data;
-
sound/core/seq/seq_timer.c:480:2-480:24: struct snd_seq_queue *q;
-
sound/pci/ac97/ac97_codec.c:2943:2-2943:28: const struct quirk_table *q;
-
sound/pci/atiixp.c:551:2-551:30: const struct snd_pci_quirk *q;
-
sound/pci/emu10k1/memory.c:169:2-169:29: struct snd_emu10k1_memblk *q;
-
sound/pci/emu10k1/memory.c:459:2-459:29: struct snd_emu10k1_memblk *q;
-
sound/pci/hda/hda_auto_parser.c:981:2-981:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1531:2-1531:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1628:2-1628:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1669:2-1669:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:2227:3-2227:31: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_hdmi.c:2007:2-2007:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_realtek.c:1076:2-1076:43: const struct alc_codec_rename_pci_table *q;
-
sound/pci/hda/patch_realtek.c:1149:2-1149:30: const struct snd_pci_quirk *q;
-
sound/pci/nm256/nm256.c:1601:2-1601:30: const struct snd_pci_quirk *q;
-
sound/soc/codecs/tas2552.c:187:3-187:19: unsigned int d, q, t;