Symbol: q
function parameter
Defined...
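The entries below overwhelmingly follow one convention: in the block layer, q names a struct request_queue * passed as the first (often only) parameter. A minimal sketch of that pattern follows; example_drain_queue() is a hypothetical name used purely for illustration, while blk_mq_freeze_queue() and blk_mq_unfreeze_queue() are the real helpers listed under block/blk-mq.c further down.

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /*
     * Illustration only: the q-as-first-parameter convention used
     * throughout the block layer. Freezing blocks new I/O and waits
     * for in-flight requests to finish, after which queue state can
     * be changed safely.
     */
    static void example_drain_queue(struct request_queue *q)
    {
            blk_mq_freeze_queue(q);      /* block new I/O, drain in-flight */
            /* ... adjust limits, swap the elevator, etc. ... */
            blk_mq_unfreeze_queue(q);    /* resume normal operation */
    }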
-
arch/x86/crypto/curve25519-x86_64.c:975:34-975:39: static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
-
arch/x86/lib/msr-smp.c:52:49-52:54: int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
arch/x86/lib/msr-smp.c:83:49-83:53: int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:209:54-209:58: int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:225:54-225:59: int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
block/bfq-cgroup.c:343:34-343:56: void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:455:41-455:63: static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
-
block/bfq-iosched.c:2361:8-2361:30: struct request_queue *q)
-
block/bfq-iosched.c:2380:32-2380:54: static void bfq_remove_request(struct request_queue *q,
-
block/bfq-iosched.c:2445:27-2445:49: static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
-
block/bfq-iosched.c:2485:30-2485:52: static int bfq_request_merge(struct request_queue *q, struct request **req,
-
block/bfq-iosched.c:2503:32-2503:54: static void bfq_request_merged(struct request_queue *q, struct request *req,
-
block/bfq-iosched.c:2560:33-2560:55: static void bfq_requests_merged(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3227:33-3227:55: static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3653:33-3653:55: static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:5249:39-5249:61: static void bfq_update_dispatch_stats(struct request_queue *q,
-
block/bfq-iosched.c:6206:37-6206:59: static void bfq_update_insert_stats(struct request_queue *q,
-
block/bfq-iosched.c:7192:27-7192:49: static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-
block/bio.c:937:29-937:51: bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
-
block/bio.c:965:21-965:43: int bio_add_hw_page(struct request_queue *q, struct bio *bio,
-
block/bio.c:1017:21-1017:43: int bio_add_pc_page(struct request_queue *q, struct bio *bio,
-
block/blk-cgroup.c:113:34-113:56: static bool blkcg_policy_enabled(struct request_queue *q,
-
block/blk-cgroup.h:248:9-248:31: struct request_queue *q)
-
block/blk-core.c:79:44-79:66: void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:90:46-90:68: void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:104:53-104:75: bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:231:21-231:43: void blk_sync_queue(struct request_queue *q)
-
block/blk-core.c:242:22-242:44: void blk_set_pm_only(struct request_queue *q)
-
block/blk-core.c:248:24-248:46: void blk_clear_pm_only(struct request_queue *q)
-
block/blk-core.c:268:28-268:50: static void blk_free_queue(struct request_queue *q)
-
block/blk-core.c:285:20-285:42: void blk_put_queue(struct request_queue *q)
-
block/blk-core.c:292:28-292:50: void blk_queue_start_drain(struct request_queue *q)
-
block/blk-core.c:311:21-311:43: int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
-
block/blk-core.c:338:23-338:45: int __bio_queue_enter(struct request_queue *q, struct bio *bio)
-
block/blk-core.c:372:21-372:43: void blk_queue_exit(struct request_queue *q)
-
block/blk-core.c:464:20-464:42: bool blk_get_queue(struct request_queue *q)
-
block/blk-core.c:562:50-562:72: static inline blk_status_t blk_check_zone_append(struct request_queue *q,
-
block/blk-core.c:1020:18-1020:40: int blk_lld_busy(struct request_queue *q)
-
block/blk-crypto-profile.c:454:5-454:27: struct request_queue *q)
-
block/blk-flush.c:98:21-98:43: blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-
block/blk-flush.c:289:28-289:50: static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-
block/blk-integrity.c:27:31-27:53: int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
-
block/blk-integrity.c:68:29-68:51: int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-
block/blk-integrity.c:164:29-164:51: bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
-
block/blk-integrity.c:187:30-187:52: bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
-
block/blk-ioc.c:171:22-171:44: void ioc_clear_queue(struct request_queue *q)
-
block/blk-ioc.c:324:30-324:52: struct io_cq *ioc_lookup_icq(struct request_queue *q)
-
block/blk-ioc.c:363:37-363:59: static struct io_cq *ioc_create_icq(struct request_queue *q)
-
block/blk-ioc.c:407:32-407:54: struct io_cq *ioc_find_get_icq(struct request_queue *q)
-
block/blk-iocost.c:665:29-665:51: static struct ioc *q_to_ioc(struct request_queue *q)
-
block/blk-map.c:382:33-382:55: static struct bio *bio_map_kern(struct request_queue *q, void *data,
-
block/blk-map.c:467:34-467:56: static struct bio *bio_copy_kern(struct request_queue *q, void *data,
-
block/blk-map.c:625:25-625:47: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-
block/blk-map.c:680:21-680:43: int blk_rq_map_user(struct request_queue *q, struct request *rq,
-
block/blk-map.c:775:21-775:43: int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-
block/blk-merge.c:52:33-52:55: static inline bool bio_will_gap(struct request_queue *q,
-
block/blk-merge.c:462:33-462:55: static unsigned blk_bvec_map_sg(struct request_queue *q,
-
block/blk-merge.c:507:28-507:50: __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
-
block/blk-merge.c:527:30-527:52: static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:567:21-567:43: int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:673:39-673:61: static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
-
block/blk-merge.c:691:33-691:55: static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-
block/blk-merge.c:805:38-805:60: static struct request *attempt_merge(struct request_queue *q,
-
block/blk-merge.c:887:43-887:65: static struct request *attempt_back_merge(struct request_queue *q,
-
block/blk-merge.c:898:44-898:66: static struct request *attempt_front_merge(struct request_queue *q,
-
block/blk-merge.c:914:28-914:50: bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:1031:56-1031:78: static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
-
block/blk-merge.c:1056:52-1056:74: static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
-
block/blk-merge.c:1103:29-1103:51: bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:1135:25-1135:47: bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
-
block/blk-merge.c:1160:29-1160:51: bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-debugfs.c:655:30-655:52: void blk_mq_debugfs_register(struct request_queue *q)
-
block/blk-mq-debugfs.c:700:35-700:57: void blk_mq_debugfs_register_hctx(struct request_queue *q,
-
block/blk-mq-debugfs.c:728:36-728:58: void blk_mq_debugfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:737:38-737:60: void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:746:36-746:58: void blk_mq_debugfs_register_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:767:38-767:60: void blk_mq_debugfs_unregister_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:816:41-816:63: void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-
block/blk-mq-sched.c:339:29-339:51: bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-sched.c:375:36-375:58: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sched.c:382:43-382:65: static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
-
block/blk-mq-sched.c:406:40-406:62: static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
-
block/blk-mq-sched.c:443:23-443:45: int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/blk-mq-sched.c:509:28-509:50: void blk_mq_sched_free_rqs(struct request_queue *q)
-
block/blk-mq-sched.c:526:24-526:46: void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
-
block/blk-mq-sched.h:37:26-37:48: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sysfs.c:192:26-192:48: void blk_mq_sysfs_deinit(struct request_queue *q)
-
block/blk-mq-sysfs.c:204:24-204:46: void blk_mq_sysfs_init(struct request_queue *q)
-
block/blk-mq-sysfs.c:273:36-273:58: void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-sysfs.c:289:33-289:55: int blk_mq_sysfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-tag.c:312:53-312:75: static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
-
block/blk-mq-tag.c:496:33-496:55: void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
-
block/blk-mq-tag.c:660:42-660:64: void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
-
block/blk-mq.c:104:31-104:53: unsigned int blk_mq_in_flight(struct request_queue *q,
-
block/blk-mq.c:114:26-114:48: void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
-
block/blk-mq.c:124:29-124:51: void blk_freeze_queue_start(struct request_queue *q)
-
block/blk-mq.c:138:31-138:53: void blk_mq_freeze_queue_wait(struct request_queue *q)
-
block/blk-mq.c:144:38-144:60: int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
-
block/blk-mq.c:157:23-157:45: void blk_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:170:26-170:48: void blk_mq_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:180:30-180:52: void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
-
block/blk-mq.c:194:28-194:50: void blk_mq_unfreeze_queue(struct request_queue *q)
-
block/blk-mq.c:204:34-204:56: void blk_mq_quiesce_queue_nowait(struct request_queue *q)
-
block/blk-mq.c:242:27-242:49: void blk_mq_quiesce_queue(struct request_queue *q)
-
block/blk-mq.c:258:29-258:51: void blk_mq_unquiesce_queue(struct request_queue *q)
-
block/blk-mq.c:305:26-305:48: void blk_mq_wake_waiters(struct request_queue *q)
-
block/blk-mq.c:315:18-315:40: void blk_rq_init(struct request_queue *q, struct request *rq)
-
block/blk-mq.c:518:45-518:67: static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
-
block/blk-mq.c:543:52-543:74: static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
-
block/blk-mq.c:578:38-578:60: struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
-
block/blk-mq.c:611:43-611:65: struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-
block/blk-mq.c:1500:31-1500:53: void blk_mq_kick_requeue_list(struct request_queue *q)
-
block/blk-mq.c:1506:37-1506:59: void blk_mq_delay_kick_requeue_list(struct request_queue *q,
-
block/blk-mq.c:1531:28-1531:50: bool blk_mq_queue_inflight(struct request_queue *q)
-
block/blk-mq.c:1977:36-1977:58: static void blk_mq_release_budgets(struct request_queue *q,
-
block/blk-mq.c:2273:49-2273:71: static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
-
block/blk-mq.c:2295:27-2295:49: void blk_mq_run_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2323:33-2323:55: void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
-
block/blk-mq.c:2380:28-2380:50: void blk_mq_stop_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2398:29-2398:51: void blk_mq_start_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2418:37-2418:59: void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2717:38-2717:60: static void __blk_mq_flush_plug_list(struct request_queue *q,
-
block/blk-mq.c:2854:38-2854:60: static bool blk_mq_attempt_bio_merge(struct request_queue *q,
-
block/blk-mq.c:2866:48-2866:70: static struct request *blk_mq_get_new_requests(struct request_queue *q,
-
block/blk-mq.c:2903:57-2903:79: static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-
block/blk-mq.c:3608:30-3608:52: static void blk_mq_exit_hctx(struct request_queue *q,
-
block/blk-mq.c:3635:35-3635:57: static void blk_mq_exit_hw_queues(struct request_queue *q,
-
block/blk-mq.c:3648:29-3648:51: static int blk_mq_init_hctx(struct request_queue *q,
-
block/blk-mq.c:3686:19-3686:41: blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
-
block/blk-mq.c:3750:36-3750:58: static void blk_mq_init_cpu_queues(struct request_queue *q,
-
block/blk-mq.c:3834:32-3834:54: static void blk_mq_map_swqueue(struct request_queue *q)
-
block/blk-mq.c:3941:35-3941:57: static void queue_set_hctx_shared(struct request_queue *q, bool shared)
-
block/blk-mq.c:3970:38-3970:60: static void blk_mq_del_queue_tag_set(struct request_queue *q)
-
block/blk-mq.c:3987:10-3987:32: struct request_queue *q)
-
block/blk-mq.c:4008:30-4008:52: static int blk_mq_alloc_ctxs(struct request_queue *q)
-
block/blk-mq.c:4041:21-4041:43: void blk_mq_release(struct request_queue *q)
-
block/blk-mq.c:4098:27-4098:49: void blk_mq_destroy_queue(struct request_queue *q)
-
block/blk-mq.c:4136:45-4136:67: struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
-
block/blk-mq.c:4151:31-4151:53: struct blk_mq_tag_set *set, struct request_queue *q,
-
block/blk-mq.c:4185:7-4185:29: struct request_queue *q)
-
block/blk-mq.c:4227:37-4227:59: static void blk_mq_update_poll_flag(struct request_queue *q)
-
block/blk-mq.c:4239:3-4239:25: struct request_queue *q)
-
block/blk-mq.c:4288:24-4288:46: void blk_mq_exit_queue(struct request_queue *q)
-
block/blk-mq.c:4584:31-4584:53: int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
-
block/blk-mq.c:4652:3-4652:25: struct request_queue *q)
-
block/blk-mq.c:4683:7-4683:29: struct request_queue *q)
-
block/blk-mq.c:4695:7-4695:29: struct request_queue *q)
-
block/blk-mq.c:4795:26-4795:48: static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-
block/blk-mq.c:4822:17-4822:39: int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
-
block/blk-mq.c:4854:30-4854:52: void blk_mq_cancel_work_sync(struct request_queue *q)
-
block/blk-mq.h:78:59-78:81: static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-
block/blk-mq.h:105:54-105:76: static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-
block/blk-mq.h:129:51-129:73: static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-
block/blk-mq.h:141:49-141:71: static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-
block/blk-mq.h:244:47-244:69: static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
-
block/blk-mq.h:251:46-251:68: static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
-
block/blk-pm.c:29:26-29:48: void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-
block/blk-pm.c:59:29-59:51: int blk_pre_runtime_suspend(struct request_queue *q)
-
block/blk-pm.c:120:31-120:53: void blk_post_runtime_suspend(struct request_queue *q, int err)
-
block/blk-pm.c:150:29-150:51: void blk_pre_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:174:30-174:52: void blk_post_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:197:29-197:51: void blk_set_runtime_active(struct request_queue *q)
-
block/blk-pm.h:9:54-9:76: static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
-
block/blk-rq-qos.c:289:18-289:40: void rq_qos_exit(struct request_queue *q)
-
block/blk-rq-qos.h:61:40-61:62: static inline struct rq_qos *rq_qos_id(struct request_queue *q,
-
block/blk-rq-qos.h:72:41-72:63: static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:77:43-77:65: static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:113:35-113:57: static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:119:32-119:54: static inline void rq_qos_done(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:125:33-125:55: static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:131:35-131:57: static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:147:36-147:58: static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:155:33-155:55: static inline void rq_qos_track(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:162:33-162:55: static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:171:47-171:69: static inline void rq_qos_queue_depth_changed(struct request_queue *q)
-
block/blk-settings.c:22:27-22:49: void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
-
block/blk-settings.c:98:29-98:51: void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-
block/blk-settings.c:123:31-123:53: void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-
block/blk-settings.c:167:30-167:52: void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-
block/blk-settings.c:178:36-178:58: void blk_queue_max_discard_sectors(struct request_queue *q,
-
block/blk-settings.c:191:41-191:63: void blk_queue_max_secure_erase_sectors(struct request_queue *q,
-
block/blk-settings.c:204:41-204:63: void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
-
block/blk-settings.c:216:40-216:62: void blk_queue_max_zone_append_sectors(struct request_queue *q,
-
block/blk-settings.c:247:29-247:51: void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-
block/blk-settings.c:268:37-268:59: void blk_queue_max_discard_segments(struct request_queue *q,
-
block/blk-settings.c:284:33-284:55: void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-
block/blk-settings.c:309:35-309:57: void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:338:36-338:58: void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:359:39-359:61: void blk_queue_zone_write_granularity(struct request_queue *q,
-
block/blk-settings.c:383:33-383:55: void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-
block/blk-settings.c:442:23-442:45: void blk_queue_io_min(struct request_queue *q, unsigned int min)
-
block/blk-settings.c:480:23-480:45: void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-
block/blk-settings.c:727:31-727:53: void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-
block/blk-settings.c:739:33-739:55: void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:756:30-756:52: void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:781:30-781:52: void blk_queue_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:801:37-801:59: void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:816:26-816:48: void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-
block/blk-settings.c:831:28-831:50: void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-
block/blk-settings.c:858:43-858:65: void blk_queue_required_elevator_features(struct request_queue *q,
-
block/blk-settings.c:872:40-872:62: bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-
block/blk-stat.c:136:28-136:50: void blk_stat_add_callback(struct request_queue *q,
-
block/blk-stat.c:157:31-157:53: void blk_stat_remove_callback(struct request_queue *q,
-
block/blk-stat.c:187:34-187:56: void blk_stat_disable_accounting(struct request_queue *q)
-
block/blk-stat.c:198:33-198:55: void blk_stat_enable_accounting(struct request_queue *q)
-
block/blk-sysfs.c:50:36-50:58: static ssize_t queue_requests_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:56:22-56:44: queue_requests_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:78:30-78:52: static ssize_t queue_ra_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:89:16-89:38: queue_ra_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:103:39-103:61: static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:110:40-110:62: static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:115:48-115:70: static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-
block/blk-sysfs.c:121:50-121:72: static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:126:44-126:66: static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:131:46-131:68: static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:136:47-136:69: static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:141:41-141:63: static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:146:34-146:56: static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:151:34-151:56: static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:156:47-156:69: static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:161:42-161:64: static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:168:39-168:61: static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:174:40-174:62: static ssize_t queue_discard_max_store(struct request_queue *q,
-
block/blk-sysfs.c:197:47-197:69: static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:202:42-202:64: static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:207:44-207:66: static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:213:50-213:72: static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-
block/blk-sysfs.c:219:43-219:65: static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:227:25-227:47: queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:261:42-261:64: static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:268:46-268:68: static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:273:41-273:63: static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:304:1-304:1: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-
block/blk-sysfs.c:305:1-305:1: QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-
block/blk-sysfs.c:306:1-306:1: QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-
block/blk-sysfs.c:307:1-307:1: QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-
block/blk-sysfs.c:310:33-310:55: static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:322:36-322:58: static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:327:42-327:64: static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:332:44-332:66: static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:337:36-337:58: static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:343:37-343:59: static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:362:39-362:61: static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:371:25-371:47: queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:395:38-395:60: static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:400:39-400:61: static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:406:32-406:54: static ssize_t queue_poll_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:411:33-411:55: static ssize_t queue_poll_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:421:38-421:60: static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:426:39-426:61: static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:441:30-441:52: static ssize_t queue_wc_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:449:31-449:53: static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:466:31-466:53: static ssize_t queue_fua_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:471:31-471:53: static ssize_t queue_dax_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:560:34-560:56: static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:571:35-571:57: static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-
block/blk-throttle.c:1699:32-1699:54: static void throtl_shutdown_wq(struct request_queue *q)
-
block/blk-throttle.c:2455:37-2455:59: ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:2462:38-2462:60: ssize_t blk_throtl_sample_time_store(struct request_queue *q,
-
block/blk-timeout.c:23:32-23:54: bool __blk_should_fake_timeout(struct request_queue *q)
-
block/blk-wbt.c:493:19-493:41: bool wbt_disabled(struct request_queue *q)
-
block/blk-wbt.c:500:21-500:43: u64 wbt_get_min_lat(struct request_queue *q)
-
block/blk-wbt.c:508:22-508:44: void wbt_set_min_lat(struct request_queue *q, u64 val)
-
block/blk-wbt.c:702:26-702:48: void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-
block/blk-wbt.c:739:30-739:52: u64 wbt_default_latency_nsec(struct request_queue *q)
-
block/blk.h:40:40-40:62: static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
-
block/blk.h:82:42-82:64: static inline bool biovec_phys_mergeable(struct request_queue *q,
-
block/blk.h:169:54-169:76: static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-
block/blk.h:348:36-348:58: static inline void req_set_nomerge(struct request_queue *q, struct request *req)
-
block/blk.h:381:41-381:63: static inline bool blk_queue_may_bounce(struct request_queue *q)
-
block/blk.h:389:3-389:25: struct request_queue *q)
-
block/bsg-lib.c:28:35-28:57: static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
block/bsg-lib.c:320:23-320:45: void bsg_remove_queue(struct request_queue *q)
-
block/bsg.c:189:39-189:61: struct bsg_device *bsg_register_queue(struct request_queue *q,
-
block/elevator.c:86:41-86:63: static inline bool elv_support_features(struct request_queue *q,
-
block/elevator.c:116:48-116:70: static struct elevator_type *elevator_find_get(struct request_queue *q,
-
block/elevator.c:131:39-131:61: struct elevator_queue *elevator_alloc(struct request_queue *q,
-
block/elevator.c:159:20-159:42: void elevator_exit(struct request_queue *q)
-
block/elevator.c:179:21-179:43: void elv_rqhash_del(struct request_queue *q, struct request *rq)
-
block/elevator.c:186:21-186:43: void elv_rqhash_add(struct request_queue *q, struct request *rq)
-
block/elevator.c:196:28-196:50: void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
-
block/elevator.c:202:33-202:55: struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
-
block/elevator.c:276:26-276:48: enum elv_merge elv_merge(struct request_queue *q, struct request **req,
-
block/elevator.c:332:31-332:53: bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
-
block/elevator.c:370:25-370:47: void elv_merged_request(struct request_queue *q, struct request *rq,
-
block/elevator.c:384:25-384:47: void elv_merge_requests(struct request_queue *q, struct request *rq,
-
block/elevator.c:396:36-396:58: struct request *elv_latter_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:406:36-406:58: struct request *elv_former_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:463:24-463:46: int elv_register_queue(struct request_queue *q, bool uevent)
-
block/elevator.c:488:27-488:49: void elv_unregister_queue(struct request_queue *q)
-
block/elevator.c:558:40-558:62: static inline bool elv_support_iosched(struct request_queue *q)
-
block/elevator.c:570:51-570:73: static struct elevator_type *elevator_get_default(struct request_queue *q)
-
block/elevator.c:586:55-586:77: static struct elevator_type *elevator_get_by_features(struct request_queue *q)
-
block/elevator.c:612:23-612:45: void elevator_init_mq(struct request_queue *q)
-
block/elevator.c:660:21-660:43: int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
-
block/elevator.c:697:23-697:45: void elevator_disable(struct request_queue *q)
-
block/elevator.c:718:28-718:50: static int elevator_change(struct request_queue *q, const char *elevator_name)
-
block/elevator.c:748:27-748:49: ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
-
block/elevator.c:764:26-764:48: ssize_t elv_iosched_show(struct request_queue *q, char *name)
-
block/elevator.c:793:39-793:61: struct request *elv_rb_former_request(struct request_queue *q,
-
block/elevator.c:805:39-805:61: struct request *elv_rb_latter_request(struct request_queue *q,
-
block/genhd.c:1325:35-1325:57: struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
-
block/kyber-iosched.c:357:56-357:78: static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
-
block/kyber-iosched.c:405:29-405:51: static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/kyber-iosched.c:567:29-567:51: static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
-
block/mq-deadline.c:210:37-210:59: static void deadline_remove_request(struct request_queue *q,
-
block/mq-deadline.c:227:31-227:53: static void dd_request_merged(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:247:32-247:54: static void dd_merged_requests(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:692:26-692:48: static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/mq-deadline.c:743:29-743:51: static int dd_request_merge(struct request_queue *q, struct request **rq,
-
block/mq-deadline.c:775:26-775:48: static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
-
crypto/ecc.c:1341:33-1341:57: const struct ecc_point *p, const struct ecc_point *q,
-
crypto/ecc.c:1364:22-1364:46: const u64 *u2, const struct ecc_point *q,
-
drivers/accel/habanalabs/common/hw_queue.c:31:36-31:56: static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
-
drivers/accel/habanalabs/common/hw_queue.c:83:52-83:72: void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:117:5-117:25: struct hl_hw_queue *q, int num_of_entries,
-
drivers/accel/habanalabs/common/hw_queue.c:166:6-166:26: struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:200:59-200:79: static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:813:59-813:79: static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:854:51-854:71: static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:874:51-874:71: static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:879:51-879:71: static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:884:50-884:70: static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/hw_queue.c:985:47-985:67: static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/accel/habanalabs/common/hw_queue.c:1033:48-1033:68: static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/accel/habanalabs/common/irq.c:508:40-508:54: int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
-
drivers/accel/habanalabs/common/irq.c:536:41-536:55: void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
-
drivers/accel/habanalabs/common/irq.c:541:42-541:56: void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
-
drivers/accel/habanalabs/common/irq.c:567:40-567:54: int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
-
drivers/accel/habanalabs/common/irq.c:591:41-591:55: void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
-
drivers/accel/habanalabs/common/irq.c:598:42-598:56: void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
-
drivers/ata/libata-pata-timings.c:61:5-61:24: struct ata_timing *q, int T, int UT)
-
drivers/block/drbd/drbd_int.h:1828:17-1828:41: drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_int.h:1838:29-1838:53: drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_nl.c:1192:43-1192:65: static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-
drivers/block/drbd/drbd_nl.c:1244:60-1244:82: static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-
drivers/block/drbd/drbd_nl.c:1259:63-1259:85: static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-
drivers/block/null_blk/zoned.c:61:51-61:73: int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
-
drivers/block/pktcdvd.c:941:63-941:85: static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-
drivers/block/pktcdvd.c:2336:36-2336:58: static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
-
drivers/block/rnbd/rnbd-clt.c:137:41-137:60: static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1052:7-1052:26: struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1311:12-1311:31: struct rnbd_queue *q,
-
drivers/block/virtio_blk.c:750:12-750:34: struct request_queue *q)
-
drivers/char/ipmi/ipmi_msghandler.c:681:32-681:50: static void free_recv_msg_list(struct list_head *q)
-
drivers/char/ipmi/ipmi_msghandler.c:691:31-691:49: static void free_smi_msg_list(struct list_head *q)
-
drivers/clk/clk.c:3157:40-3157:58: bool clk_is_match(const struct clk *p, const struct clk *q)
-
drivers/crypto/cavium/cpt/cptpf_mbox.c:59:55-59:58: static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/cavium/cpt/cptvf_reqmanager.c:15:53-15:75: static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
-
drivers/crypto/cavium/zip/zip_mem.c:57:48-57:52: int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-
drivers/crypto/cavium/zip/zip_mem.c:76:48-76:52: void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-
drivers/crypto/hisilicon/qm.c:2320:8-2320:28: struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2340:37-2340:57: static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2348:31-2348:51: static int hisi_qm_uacce_mmap(struct uacce_queue *q,
-
drivers/crypto/hisilicon/qm.c:2401:38-2401:58: static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2408:38-2408:58: static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2413:33-2413:53: static int hisi_qm_is_q_updated(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2430:28-2430:48: static void qm_set_sqctype(struct uacce_queue *q, u16 type)
-
drivers/crypto/hisilicon/qm.c:2440:33-2440:53: static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
-
drivers/crypto/hisilicon/sec/sec_drv.c:673:47-673:53: static irqreturn_t sec_isr_handle_th(int irq, void *q)
-
drivers/crypto/hisilicon/sec/sec_drv.c:679:44-679:50: static irqreturn_t sec_isr_handle(int irq, void *q)
-
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c:135:63-135:66: static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c:58:7-58:37: struct otx_cpt_pending_queue *q,
-
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c:49:6-49:37: struct otx2_cpt_pending_queue *q,
-
drivers/cxl/core/mbox.c:514:5-514:43: struct cxl_mem_query_commands __user *q)
-
drivers/firmware/arm_scmi/raw_mode.c:258:52-258:75: static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:274:33-274:56: static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
-
drivers/firmware/arm_scmi/raw_mode.c:287:37-287:60: static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
-
drivers/firmware/arm_scmi/raw_mode.c:300:34-300:57: scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:312:56-312:79: static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:324:41-324:64: static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
-
drivers/firmware/arm_scmi/raw_mode.c:695:26-695:49: scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:213:39-213:64: static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:510:10-510:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:541:10-541:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4325:30-4325:34: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:820:24-820:28: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:3006:25-3006:29: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:4147:25-4147:29: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:3422:25-3422:29: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1808:25-1808:29: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:616:24-616:28: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
-
drivers/gpu/drm/amd/amdkfd/kfd_debug.c:297:41-297:55: static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:188:60-188:74: static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:256:63-256:77: static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:312:7-312:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:327:7-327:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:345:9-345:23: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:415:5-415:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:444:4-444:18: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:511:5-511:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:529:5-529:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:649:59-649:73: static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:684:5-684:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:761:5-761:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:820:5-820:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:849:59-849:73: static int update_queue(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:961:11-961:25: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1007:11-1007:25: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1475:5-1475:19: struct queue *q, const uint32_t *restore_sdma_id)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1544:5-1544:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1776:65-1776:79: static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2031:6-2031:20: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2057:5-2057:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2267:6-2267:20: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2297:4-2297:24: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2317:6-2317:26: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:3062:31-3062:45: void set_queue_snapshot_entry(struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:135:5-135:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c:76:64-76:78: static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c:76:64-76:78: static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c:89:63-89:77: static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:141:5-141:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:49:60-49:85: struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:65:6-65:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:289:4-289:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:70:45-70:70: static void set_priority(struct cik_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:77:6-77:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:90:3-90:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:144:4-144:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:173:4-173:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:217:10-217:35: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:224:4-224:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:330:3-330:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:336:4-336:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:70:53-70:78: static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:77:3-77:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:90:4-90:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:163:4-163:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:234:6-234:31: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:309:4-309:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:343:3-343:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:361:4-361:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:96:53-96:78: static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:103:3-103:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:125:4-125:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:217:10-217:35: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:288:6-288:31: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:363:4-363:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:397:3-397:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:420:3-420:28: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:42:5-42:30: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:104:44-104:69: static void set_priority(struct v9_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:111:3-111:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:160:4-160:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:239:4-239:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:318:6-318:31: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:406:4-406:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:439:3-439:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:457:4-457:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:521:4-521:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:614:4-614:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:679:9-679:34: struct queue_properties *q, struct mqd_update_info *minfo)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:767:6-767:31: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:73:44-73:69: static void set_priority(struct vi_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:80:6-80:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:93:4-93:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:171:4-171:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:248:10-248:35: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:255:6-255:31: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:321:4-321:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:334:4-334:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:342:3-342:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:358:4-358:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c:212:3-212:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c:143:3-143:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:493:26-493:40: int kfd_procfs_add_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:653:27-653:41: void kfd_procfs_del_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:193:27-193:42: struct kfd_node *dev, struct queue **q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:647:5-647:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:726:7-726:21: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:28:29-28:54: void print_queue_properties(struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:46:18-46:32: void print_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:67:16-67:31: int init_queue(struct queue **q, const struct queue_properties *properties)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:81:19-81:33: void uninit_queue(struct queue *q)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:620:30-620:52: static void throttle_release(struct i915_request **q, int count)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:633:7-633:29: struct i915_request **q, int count)
-
drivers/gpu/drm/v3d/v3d_sched.c:291:54-291:69: v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
-
drivers/gpu/ipu-v3/ipu-image-convert.c:1251:5-1251:23: struct list_head *q)
-
drivers/infiniband/hw/hfi1/ipoib_tx.c:841:52-841:65: void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
-
drivers/infiniband/hw/irdma/uk.c:1491:24-1491:30: void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:371:51-371:77: static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:377:6-377:32: struct ocrdma_queue_info *q, u16 len, u16 entry_size)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:403:11-403:37: struct ocrdma_queue_info *q, int queue_type)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1551:32-1551:59: static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1566:30-1566:57: static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1571:39-1571:66: static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1577:33-1577:60: static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1582:33-1582:60: static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/qedr/verbs.c:744:28-744:47: struct qedr_dev *dev, struct qedr_userq *q,
-
drivers/infiniband/hw/qedr/verbs.c:792:12-792:31: struct qedr_userq *q, u64 buf_addr,
-
drivers/infiniband/sw/rxe/rxe_queue.c:46:29-46:47: inline void rxe_queue_reset(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.c:110:26-110:44: static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
-
drivers/infiniband/sw/rxe/rxe_queue.c:147:22-147:40: int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
-
drivers/infiniband/sw/rxe/rxe_queue.c:193:24-193:42: void rxe_queue_cleanup(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:95:36-95:54: static inline u32 queue_next_index(struct rxe_queue *q, int index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:100:38-100:62: static inline u32 queue_get_producer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:127:38-127:62: static inline u32 queue_get_consumer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:154:31-154:49: static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:162:30-162:48: static inline int queue_full(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:170:31-170:55: static inline u32 queue_count(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:179:43-179:61: static inline void queue_advance_producer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:215:43-215:61: static inline void queue_advance_consumer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:250:41-250:59: static inline void *queue_producer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:258:41-258:59: static inline void *queue_consumer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:266:43-266:61: static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:272:41-272:65: static inline u32 queue_index_from_addr(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:279:32-279:50: static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
-
drivers/input/misc/hisi_powerkey.c:29:52-29:58: static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:40:54-40:60: static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:51:55-51:61: static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
-
drivers/input/rmi4/rmi_f54.c:283:32-283:50: static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
-
drivers/input/rmi4/rmi_f54.c:363:36-363:54: static void rmi_f54_stop_streaming(struct vb2_queue *q)
-
drivers/input/touchscreen/atmel_mxt_ts.c:2482:28-2482:46: static int mxt_queue_setup(struct vb2_queue *q,
-
drivers/input/touchscreen/sur40.c:845:30-845:48: static int sur40_queue_setup(struct vb2_queue *q,
-
drivers/md/dm-cache-policy-smq.c:270:20-270:34: static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
-
drivers/md/dm-cache-policy-smq.c:288:28-288:42: static unsigned int q_size(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:296:20-296:34: static void q_push(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:306:26-306:40: static void q_push_front(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:316:27-316:41: static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:326:19-326:33: static void q_del(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:336:29-336:43: static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
-
drivers/md/dm-cache-policy-smq.c:358:28-358:42: static struct entry *q_pop(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:373:40-373:54: static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
-
drivers/md/dm-cache-policy-smq.c:387:37-387:51: static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
-
drivers/md/dm-cache-policy-smq.c:407:27-407:41: static void q_set_targets(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:429:28-429:42: static void q_redistribute(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:472:23-472:37: static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
-
drivers/md/dm-rq.c:64:21-64:43: void dm_start_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:70:20-70:42: void dm_stop_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:171:39-171:61: static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
-
drivers/md/dm-table.c:1361:38-1361:60: static void dm_update_crypto_profile(struct request_queue *q,
-
drivers/md/dm-table.c:1939:51-1939:73: int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
-
drivers/md/dm-zone.c:289:51-289:73: int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
-
drivers/md/dm.c:1958:45-1958:67: static void dm_queue_destroy_crypto_profile(struct request_queue *q)
-
drivers/media/common/saa7146/saa7146_fops.c:49:5-49:30: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:71:7-71:32: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:102:5-102:30: struct saa7146_dmaqueue *q, int vbi)
-
drivers/media/common/saa7146/saa7146_vbi.c:220:24-220:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/common/saa7146/saa7146_vbi.c:290:28-290:46: static void return_buffers(struct vb2_queue *q, int state)
-
drivers/media/common/saa7146/saa7146_vbi.c:380:28-380:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/common/saa7146/saa7146_vbi.c:393:28-393:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/common/saa7146/saa7146_video.c:556:24-556:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/common/saa7146/saa7146_video.c:635:28-635:46: static void return_buffers(struct vb2_queue *q, int state)
-
drivers/media/common/saa7146/saa7146_video.c:653:28-653:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/common/saa7146/saa7146_video.c:666:28-666:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:379:37-379:55: static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:407:30-407:48: static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:479:28-479:46: static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:505:30-505:48: static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:605:24-605:42: bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:627:30-627:48: static bool __buffers_in_use(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:637:24-637:42: void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:647:33-647:51: static int __verify_userptr_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:660:30-660:48: static int __verify_mmap_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:673:32-673:50: static int __verify_dmabuf_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:683:28-683:46: int vb2_verify_memory_type(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-core.c:729:33-729:51: static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:738:36-738:54: static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:747:22-747:40: int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:905:26-905:44: int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:1100:23-1100:41: void vb2_discard_done(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1545:26-1545:44: int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:1585:32-1585:50: static int vb2_start_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1637:19-1637:37: int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1797:35-1797:53: static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-
drivers/media/common/videobuf2/videobuf2-core.c:1878:30-1878:48: static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1911:30-1911:48: int vb2_wait_for_all_buffers(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1940:20-1940:38: int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:2004:32-2004:50: static void __vb2_queue_cancel(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2104:23-2104:41: int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2154:22-2154:40: void vb2_queue_error(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2162:24-2162:42: int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2190:35-2190:53: static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
-
drivers/media/common/videobuf2/videobuf2-core.c:2232:21-2232:39: int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
-
drivers/media/common/videobuf2/videobuf2-core.c:2305:14-2305:32: int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
-
drivers/media/common/videobuf2/videobuf2-core.c:2412:25-2412:43: int vb2_core_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2466:29-2466:47: void vb2_core_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2476:24-2476:42: __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
-
drivers/media/common/videobuf2/videobuf2-core.c:2625:30-2625:48: static int __vb2_init_fileio(struct vb2_queue *q, int read)
-
drivers/media/common/videobuf2/videobuf2-core.c:2743:33-2743:51: static int __vb2_cleanup_fileio(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2767:36-2767:54: static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2928:17-2928:35: size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2935:18-2935:36: size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:3014:22-3014:40: int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
-
drivers/media/common/videobuf2/videobuf2-core.c:3054:21-3054:39: int vb2_thread_stop(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:344:36-344:54: static void set_buffer_cache_hints(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:366:37-366:55: static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:628:36-628:54: struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:653:18-653:36: int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:675:27-675:45: static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:694:35-694:53: static void validate_memory_flags(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:710:17-710:35: int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:723:21-723:39: int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:742:21-742:39: int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:802:14-802:32: int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:823:15-823:33: int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:854:18-854:36: int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:864:19-864:37: int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:874:16-874:34: int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:881:25-881:43: int vb2_queue_init_name(struct vb2_queue *q, const char *name)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:926:20-926:38: int vb2_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:932:24-932:42: void vb2_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:938:27-938:45: int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:952:19-952:37: __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-
drivers/media/pci/bt8xx/bttv-driver.c:1465:24-1465:42: static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/bt8xx/bttv-driver.c:1537:28-1537:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/bt8xx/bttv-driver.c:1567:28-1567:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/bt8xx/bttv-vbi.c:60:28-60:46: static int queue_setup_vbi(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/bt8xx/bttv-vbi.c:124:32-124:50: static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/bt8xx/bttv-vbi.c:153:32-153:50: static void stop_streaming_vbi(struct vb2_queue *q)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:34:31-34:49: static int cobalt_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cobalt/cobalt-v4l2.c:279:35-279:53: static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:388:35-388:53: static void cobalt_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:36:22-36:41: void cx18_queue_init(struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:44:6-44:25: struct cx18_queue *q, int to_front)
-
drivers/media/pci/cx18/cx18-queue.c:73:54-73:73: struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:60:5-60:24: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:67:9-67:28: struct cx18_queue *q)
-
drivers/media/pci/cx23885/cx23885-417.c:1123:24-1123:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-417.c:1167:36-1167:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-417.c:1194:36-1194:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-core.c:425:7-425:32: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-core.c:1398:9-1398:34: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:88:24-88:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:150:36-150:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-dvb.c:161:36-161:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-vbi.c:87:5-87:30: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:114:24-114:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:217:36-217:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-vbi.c:228:36-228:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-video.c:89:2-89:27: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-video.c:305:7-305:32: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:332:24-332:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:488:36-488:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-video.c:499:36-499:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx25821/cx25821-video.c:59:8-59:33: struct cx25821_dmaqueue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:127:32-127:50: static int cx25821_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:261:36-261:54: static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx25821/cx25821-video.c:274:36-274:54: static void cx25821_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-blackbird.c:658:24-658:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-blackbird.c:702:28-702:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-blackbird.c:752:28-752:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-core.c:521:4-521:26: struct cx88_dmaqueue *q, u32 count)
-
drivers/media/pci/cx88/cx88-dvb.c:75:24-75:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-dvb.c:120:28-120:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-dvb.c:131:28-131:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:73:8-73:30: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-mpeg.c:199:5-199:27: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:216:24-216:42: int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-
drivers/media/pci/cx88/cx88-vbi.c:52:5-52:27: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:99:9-99:31: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-vbi.c:115:24-115:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:194:28-194:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-vbi.c:205:28-205:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-video.c:350:7-350:29: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-video.c:405:12-405:34: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-video.c:420:24-420:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-video.c:529:28-529:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-video.c:540:28-540:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/dt3155/dt3155.c:148:35-148:53: static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
-
drivers/media/pci/dt3155/dt3155.c:176:35-176:53: static void dt3155_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:229:53-229:72: static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:241:28-241:47: static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:306:60-306:79: static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:347:51-347:70: static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:509:52-509:71: static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:588:60-588:79: static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:781:41-781:60: static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1525:54-1525:73: static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1658:55-1658:74: static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1941:59-1941:78: static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:32:22-32:41: void ivtv_queue_init(struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:40:67-40:86: void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:59:57-59:76: struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:335:41-335:59: static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:344:41-344:59: static void netup_unidvb_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:261:5-261:30: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:289:7-289:32: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:302:5-302:30: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:352:54-352:79: void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:1348:8-1348:33: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-ts.c:106:28-106:46: int saa7134_ts_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-vbi.c:128:24-128:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-video.c:750:24-750:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:655:33-655:51: static int solo_enc_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:708:37-708:55: static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:715:37-715:55: static void solo_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:307:29-307:47: static int solo_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:322:33-322:51: static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:330:33-330:51: static void solo_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw5864/tw5864-video.c:182:31-182:49: static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/tw5864/tw5864-video.c:427:35-427:53: static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw5864/tw5864-video.c:446:35-446:53: static void tw5864_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw68/tw68-video.c:358:29-358:47: static int tw68_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/tw68/tw68-video.c:493:33-493:51: static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw68/tw68-video.c:504:33-504:51: static void tw68_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2830:36-2830:54: static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2849:36-2849:54: static void allegro_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/amphion/vpu_v4l2.c:566:36-566:54: static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/amphion/vpu_v4l2.c:599:36-599:54: static void vpu_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/aspeed/aspeed-video.c:1793:37-1793:55: static int aspeed_video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1824:41-1824:59: static int aspeed_video_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1846:41-1846:59: static void aspeed_video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/chips-media/coda-common.c:1967:33-1967:51: static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/chips-media/coda-common.c:2111:33-2111:51: static void coda_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:673:33-673:51: static int mtk_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:849:41-849:59: static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:858:41-858:59: static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:389:40-389:58: static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:411:40-411:58: static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:117:36-117:54: static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:175:36-175:54: static void mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c:187:32-187:50: static int mdp_m2m_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c:840:33-840:51: int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c:850:33-850:51: void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c:865:40-865:58: static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c:944:40-944:58: static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/dw100/dw100.c:503:38-503:56: static void dw100_return_all_buffers(struct vb2_queue *q,
-
drivers/media/platform/nxp/dw100/dw100.c:520:34-520:52: static int dw100_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/dw100/dw100.c:544:34-544:52: static void dw100_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1568:33-1568:51: static int mxc_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1601:37-1601:55: static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1623:37-1623:55: static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1727:35-1727:59: static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1754:32-1754:56: static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
-
drivers/media/platform/nxp/imx-pxp.c:1575:32-1575:50: static int pxp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-pxp.c:1584:32-1584:50: static void pxp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:191:40-191:58: static int mxc_isi_m2m_vb2_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:238:44-238:62: static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c:250:44-250:62: static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:881:36-881:54: static int mxc_isi_vb2_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:937:40-937:58: static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:975:40-975:58: static void mxc_isi_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/camss/camss-video.c:378:30-378:48: static int video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/camss/camss-video.c:488:34-488:52: static int video_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/camss/camss-video.c:536:34-536:52: static void video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/helpers.c:1544:38-1544:56: void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/vdec.c:913:29-913:47: static int vdec_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/vdec.c:1170:33-1170:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/venus/vdec.c:1267:33-1267:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/venc.c:1057:29-1057:47: static int venc_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/venc.c:1224:33-1224:51: static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1922:33-1922:51: static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1961:33-1961:51: static void fdp1_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rga/rga-buf.c:59:36-59:54: static void rga_buf_return_buffers(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rga/rga-buf.c:76:36-76:54: static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rockchip/rga/rga-buf.c:91:36-91:54: static void rga_buf_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c:1881:41-1881:59: static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c:159:29-159:47: rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:56:36-56:54: static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:78:36-78:54: static void gsc_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:259:28-259:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:290:28-290:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:76:46-76:64: static int isp_video_capture_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:119:46-119:64: static void isp_video_capture_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:305:28-305:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:339:28-339:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:73:28-73:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:80:28-80:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2564:37-2564:55: static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2571:37-2571:55: static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1027:36-1027:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1043:36-1043:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2502:36-2502:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2532:36-2532:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:498:34-498:52: static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:521:34-521:52: static void bdisp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1303:41-1303:59: static int delta_vb2_au_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1397:41-1397:59: static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1527:44-1527:62: static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:157:34-157:52: static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:166:34-166:52: static void dma2d_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/ti/vpe/vpe.c:2124:58-2124:76: static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
-
drivers/media/platform/ti/vpe/vpe.c:2177:32-2177:50: static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/ti/vpe/vpe.c:2199:32-2199:50: static void vpe_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:900:32-900:50: static bool hantro_vq_is_coded(struct vb2_queue *q)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:907:35-907:53: static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/verisilicon/hantro_v4l2.c:950:20-950:38: hantro_return_bufs(struct vb2_queue *q,
-
drivers/media/platform/verisilicon/hantro_v4l2.c:967:35-967:53: static void hantro_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/verisilicon/rockchip_av1_entropymode.c:4198:35-4198:39: static int rockchip_av1_get_q_ctx(int q)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1509:33-1509:51: static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1548:36-1548:54: static int vicodec_start_streaming(struct vb2_queue *q,
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1636:36-1636:54: static void vicodec_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vim2m.c:1054:34-1054:52: static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/test-drivers/vim2m.c:1069:34-1069:52: static void vim2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/visl/visl-trace-mpeg2.h:88:1-88:1: DEFINE_EVENT(v4l2_ctrl_mpeg2_quant_tmpl, v4l2_ctrl_mpeg2_quantisation,
-
drivers/media/test-drivers/vivid/vivid-core.c:862:10-862:28: struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:39:39-39:57: static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:772:43-772:61: static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:898:43-898:61: static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/go7007/go7007-fw.c:290:70-290:74: static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
-
drivers/media/usb/go7007/go7007-v4l2.c:343:31-343:49: static int go7007_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/go7007/go7007-v4l2.c:397:35-397:53: static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/usb/go7007/go7007-v4l2.c:425:35-425:53: static void go7007_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/gspca/topro.c:1439:50-1439:53: static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
-
drivers/media/usb/gspca/topro.c:1456:53-1456:57: static void setquality(struct gspca_dev *gspca_dev, s32 q)
-
drivers/media/usb/hdpvr/hdpvr-video.c:97:29-97:47: static int hdpvr_free_queue(struct list_head *q)
-
drivers/media/v4l2-core/v4l2-ctrls-core.c:568:38-568:68: static int validate_av1_quantization(struct v4l2_av1_quantization *q)
-
drivers/media/v4l2-core/v4l2-mc.c:314:34-314:52: int v4l_vb2q_enable_media_source(struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:689:9-689:27: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:702:8-702:26: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:730:7-730:25: struct vb2_queue *q)
-
drivers/misc/uacce/uacce.c:18:34-18:54: static bool uacce_queue_is_valid(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:23:30-23:50: static int uacce_start_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:40:28-40:48: static int uacce_put_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:103:57-103:77: static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:126:32-126:52: static void uacce_unbind_queue(struct uacce_queue *q)
-
drivers/misc/vmw_vmci/vmci_queue_pair.c:248:27-248:33: static void qp_free_queue(void *q, u64 size)
-
drivers/mmc/core/crypto.c:22:29-22:51: void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
-
drivers/mmc/core/queue.c:177:37-177:59: static void mmc_queue_setup_discard(struct request_queue *q,
-
drivers/net/ethernet/amd/pds_core/core.c:164:24-164:43: static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/asix/ax88796c_main.c:244:44-244:65: ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:168:7-168:30: struct bnx2x_vf_queue *q,
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1415:7-1415:30: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:385:45-385:68: static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:390:54-390:77: static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:398:55-398:78: static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:545:8-545:31: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/brocade/bna/bna.h:238:44-238:62: static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:499:55-499:70: static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:612:48-612:61: static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:821:47-821:62: static void refill_free_list(struct sge *sge, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1167:12-1167:25: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1203:7-1203:20: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1290:58-1290:71: static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1399:40-1399:59: static inline int enough_free_Tx_descs(const struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:169:45-169:68: static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:174:44-174:66: static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:189:11-189:34: const struct sge_rspq *q, unsigned int credits)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:233:51-233:67: static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:282:51-282:67: static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:322:7-322:23: struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:342:37-342:59: static inline int should_restart_tx(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:349:49-349:70: static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:376:48-376:63: static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:438:52-438:67: static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:481:53-481:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:501:44-501:59: static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:574:50-574:65: static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:643:27-643:44: static void t3_reset_qset(struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:672:51-672:68: static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:840:10-840:27: struct sge_rspq *q, unsigned int len,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1045:59-1045:75: static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1089:9-1089:31: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1179:8-1179:24: struct sge_txq *q, unsigned int ndesc,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1246:30-1246:46: struct sge_qset *qs, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1418:58-1418:74: static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1450:45-1450:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1473:44-1473:60: static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1630:6-1630:22: struct sge_txq *q, unsigned int pidx,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1694:44-1694:60: static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1853:36-1853:53: static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1876:8-1876:25: struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2287:7-2287:30: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2292:40-2292:64: static inline void clear_rspq_bufstate(struct sge_rspq * const q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2583:58-2583:75: static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1597:11-1597:34: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1754:52-1754:69: static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:555:27-555:44: static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:913:23-913:40: void cxgb4_quiesce_rx(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:954:44-954:61: void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1240:32-1240:49: int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2424:28-2424:44: static void disable_txq_db(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2433:49-2433:65: static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2519:49-2519:65: static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:57:33-57:50: static void uldrx_flush_handler(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:74:26-74:43: static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:203:9-203:30: struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:208:38-208:60: static inline unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:313:41-313:57: void free_tx_desc(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:341:31-341:53: static inline int reclaimable(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:359:62-359:78: static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:391:55-391:71: void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:438:48-438:63: static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:466:48-466:63: static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:479:53-479:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:535:53-535:68: static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:837:49-837:65: void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:906:57-906:73: void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1025:52-1025:68: inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1106:5-1106:27: const struct sge_txq *q, void *pos)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1131:7-1131:29: const struct sge_txq *q, void *pos,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1232:26-1232:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1238:32-1238:48: static inline void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2108:45-2108:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2653:30-2653:51: static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2749:22-2749:43: static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2907:29-2907:49: static void txq_stop_maperr(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2923:26-2923:46: static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2948:27-2948:47: static void service_ofldq(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3069:22-3069:42: static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3189:10-3189:32: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3220:29-3220:49: static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3675:22-3675:39: int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3829:54-3829:69: static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3855:8-3855:31: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3866:30-3866:47: static inline void rspq_next(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3889:30-3889:47: static int process_responses(struct sge_rspq *q, int budget)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4069:30-4069:47: int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4566:44-4566:60: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4753:56-4753:72: static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4852:37-4852:53: void free_txq(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4899:53-4899:74: void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:688:31-688:53: static unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:693:26-693:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:699:25-699:41: static void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:58:55-58:77: static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:81:43-81:65: static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:86:37-86:53: static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:94:38-94:58: static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:747:6-747:26: struct sge_eth_txq *q, u64 mask,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:802:6-802:26: struct sge_eth_txq *q, u32 tid,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:843:8-843:28: struct sge_eth_txq *q, u64 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:993:8-993:28: struct sge_eth_txq *q, uint32_t tx_chan)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1102:11-1102:31: struct sge_eth_txq *q, u32 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1276:8-1276:28: struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1463:21-1463:41: bool tcp_push, struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1571:5-1571:25: struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1699:6-1699:26: struct sge_eth_txq *q, u32 skb_offset,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1777:10-1777:30: struct sge_eth_txq *q, u32 tls_end_offset)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1896:6-1896:26: struct sge_eth_txq *q)
-
drivers/net/ethernet/emulex/benet/be.h:150:37-150:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:155:37-155:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:160:38-160:60: static inline void *queue_index_node(struct be_queue_info *q, u16 index)
-
drivers/net/ethernet/emulex/benet/be.h:165:35-165:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:175:35-175:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1454:50-1454:72: int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1505:52-1505:74: int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:144:55-144:77: static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:155:55-155:77: static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:50:25-50:44: static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:67:23-67:42: static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:98:30-98:49: static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:127:30-127:49: static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:141:26-141:45: static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:256:9-256:28: get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:296:27-296:46: static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:348:24-348:43: static void advance_cq(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:365:32-365:51: static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:479:29-479:48: static int fun_process_cqes(struct funeth_rxq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:536:31-536:50: static void fun_rxq_free_bufs(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:549:31-549:50: static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:575:32-575:51: static void fun_rxq_free_cache(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:587:21-587:40: int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:677:29-677:48: static void fun_rxq_free_sw(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:697:24-697:43: int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:765:30-765:49: static void fun_rxq_free_dev(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:817:36-817:55: struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:56:22-56:47: static void *txq_end(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:64:32-64:57: static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:78:43-78:68: static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:107:56-107:75: static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:149:57-149:76: static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:311:35-311:60: static unsigned int fun_txq_avail(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:317:31-317:50: static void fun_tx_check_stop(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:338:33-338:52: static bool fun_txq_may_restart(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:385:24-385:49: static u16 txq_hw_head(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:393:35-393:60: static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:423:29-423:48: static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:484:36-484:55: static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:514:17-514:36: bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:601:27-601:46: static void fun_txq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:612:28-612:47: static void fun_xdpq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:665:29-665:48: static void fun_txq_free_sw(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:680:24-680:43: int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:731:30-731:49: static void fun_txq_free_dev(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:789:36-789:55: struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:229:38-229:63: static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:235:34-235:59: static inline void fun_txq_wr_db(const struct funeth_txq *q)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:193:16-193:35: hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:237:51-237:70: static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
-
drivers/net/ethernet/hisilicon/hns/hnae.c:264:29-264:48: static void hnae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:62:50-62:69: static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:193:31-193:50: static void hns_ae_init_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:201:31-201:50: static void hns_ae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:88:28-88:47: void hns_rcb_reset_ring_hw(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:138:26-138:45: void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:155:25-155:44: void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:168:28-168:47: void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:179:27-179:46: void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:193:29-193:48: void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:198:20-198:39: void hns_rcb_start(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:218:29-218:48: void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:230:29-230:48: void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:437:34-437:53: static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4872:31-4872:51: static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:329:12-329:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:380:12-380:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:436:51-436:76: void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:456:30-456:55: void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
-
drivers/net/ethernet/intel/fm10k/fm10k_pf.c:1134:11-1134:36: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/i40e/i40e_trace.h:60:1-60:1: TRACE_EVENT(i40e_napi_poll,
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:133:23-133:49: octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:198:22-198:48: octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
-
drivers/net/ethernet/marvell/octeontx2/af/common.h:47:50-47:64: static inline int qmem_alloc(struct device *dev, struct qmem **q,
-
drivers/net/ethernet/marvell/skge.c:2486:45-2486:49: static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
-
drivers/net/ethernet/marvell/skge.c:2517:47-2517:51: static void skge_qset(struct skge_port *skge, u16 q,
-
drivers/net/ethernet/marvell/sky2.c:1035:45-1035:49: static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
-
drivers/net/ethernet/marvell/sky2.c:1075:43-1075:47: static void sky2_qset(struct sky2_hw *hw, u16 q)
-
drivers/net/ethernet/marvell/sky2.c:1124:53-1124:62: static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
-
drivers/net/ethernet/marvell/sky2.c:2915:62-2915:66: static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:91:46-91:71: mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:99:43-99:68: mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:134:48-134:73: mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:185:48-185:73: mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:256:47-256:72: mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:278:46-278:71: mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:286:50-286:75: mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:309:50-309:75: mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:331:47-331:72: mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
-
drivers/net/ethernet/mediatek/mtk_wed_wo.c:338:52-338:77: int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:133:46-133:70: static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:138:41-138:65: static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:145:31-145:55: mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:151:40-151:64: mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:161:40-161:64: mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:168:39-168:63: static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:173:37-173:61: static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:238:9-238:33: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:248:13-248:37: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:258:10-258:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:265:10-265:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:274:9-274:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:280:46-280:70: static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:287:10-287:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:320:11-320:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:393:10-393:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:444:11-444:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:457:7-457:31: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:468:9-468:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:505:10-505:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:537:10-537:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:624:10-624:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:695:38-695:62: static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:750:36-750:66: static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:756:34-756:64: static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:763:9-763:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:793:10-793:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:808:38-808:62: static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:927:5-927:29: struct mlxsw_pci_queue *q, u8 q_num)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:988:6-988:30: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/netronome/nfp/flower/cmsg.h:685:15-685:18: u8 vnic, u8 q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:823:39-823:51: static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:834:39-834:51: static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:839:33-839:45: static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:863:39-863:51: static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:874:39-874:51: static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:664:41-664:61: void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:697:4-697:24: struct ionic_queue *q, unsigned int index, const char *name,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:725:18-725:38: void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:737:22-737:42: void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:749:21-749:41: void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:761:19-761:39: void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:790:31-790:51: static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:801:22-801:42: void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:302:48-302:68: static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:314:38-314:58: static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:271:29-271:49: static void ionic_adminq_cb(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:292:33-292:53: bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:13:35-13:55: static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:19:35-19:55: static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:25:30-25:50: bool ionic_txq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:57:30-57:50: bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:86:45-86:65: static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:91:32-91:52: static int ionic_rx_page_alloc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:132:32-132:52: static void ionic_rx_page_free(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:152:34-152:54: static bool ionic_rx_buf_recycle(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:175:39-175:59: static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:236:43-236:63: static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:277:28-277:48: static void ionic_rx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:406:41-406:61: static inline void ionic_write_cmb_desc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:414:20-414:40: void ionic_rx_fill(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:510:21-510:41: void ionic_rx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:686:39-686:59: static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:703:37-703:57: static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:720:29-720:49: static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:769:38-769:58: static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:789:28-789:48: static void ionic_tx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:891:21-891:41: void ionic_tx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:960:31-960:51: static void ionic_tx_tso_post(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:997:25-997:45: static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1123:32-1123:52: static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1162:35-1162:55: static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1198:32-1198:52: static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1215:21-1215:41: static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1243:34-1243:54: static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1269:32-1269:52: static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-
drivers/net/ethernet/renesas/ravb_main.c:193:50-193:54: static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
-
drivers/net/ethernet/renesas/ravb_main.c:236:62-236:66: static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:261:61-261:65: static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:288:53-288:57: static void ravb_ring_free(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:327:64-327:68: static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:358:63-358:67: static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:389:55-389:59: static void ravb_ring_format(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:431:64-431:68: static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:444:63-444:67: static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:458:52-458:56: static int ravb_ring_init(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:757:64-757:68: static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:886:63-886:67: static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1001:58-1001:62: static bool ravb_rx(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1119:59-1119:63: static bool ravb_queue_interrupt(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1250:62-1250:66: static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
-
drivers/net/ethernet/sfc/ptp.c:816:38-816:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/ptp.c:1171:57-1171:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:835:38-835:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:1226:57-1226:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/via/via-velocity.c:1759:9-1759:13: int q, int n)
-
drivers/net/hyperv/netvsc_trace.h:65:1-65:1: DEFINE_EVENT(rndis_msg_class, rndis_send,
-
drivers/net/hyperv/netvsc_trace.h:71:1-71:1: DEFINE_EVENT(rndis_msg_class, rndis_recv,
-
drivers/net/tap.c:35:48-35:66: static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:41:29-41:47: static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:51:29-51:47: static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:82:41-82:59: static inline bool tap_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:88:32-88:50: static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-
drivers/net/tap.c:93:39-93:57: static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-
drivers/net/tap.c:147:8-147:26: struct tap_queue *q)
-
drivers/net/tap.c:168:5-168:23: struct tap_queue *q)
-
drivers/net/tap.c:189:30-189:48: static int tap_disable_queue(struct tap_queue *q)
-
drivers/net/tap.c:224:27-224:45: static void tap_put_queue(struct tap_queue *q)
-
drivers/net/tap.c:637:29-637:47: static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
-
drivers/net/tap.c:790:29-790:47: static ssize_t tap_put_user(struct tap_queue *q,
-
drivers/net/tap.c:848:28-848:46: static ssize_t tap_do_read(struct tap_queue *q,
-
drivers/net/tap.c:914:40-914:58: static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
-
drivers/net/tap.c:952:24-952:42: static int set_offload(struct tap_queue *q, unsigned long arg)
-
drivers/net/tap.c:1170:29-1170:47: static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
-
drivers/net/usb/catc.c:572:48-572:67: static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
-
drivers/net/usb/lan78xx.c:2468:49-2468:70: static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:715:45-715:66: static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:774:34-774:55: static void wait_skb_queue_empty(struct sk_buff_head *q)
-
drivers/net/virtio_net.c:783:62-783:66: static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-
drivers/net/wireless/ath/ath5k/trace.h:39:1-39:1: TRACE_EVENT(ath5k_tx,
-
drivers/net/wireless/ath/ath5k/trace.h:65:1-65:1: TRACE_EVENT(ath5k_tx_complete,
-
drivers/net/wireless/ath/ath6kl/txrx.c:845:34-845:55: static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
-
drivers/net/wireless/ath/ath9k/mac.c:46:42-46:46: u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:52:43-52:47: void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
-
drivers/net/wireless/ath/ath9k/mac.c:58:42-58:46: void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:65:46-65:50: u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:170:49-170:53: bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:196:48-196:52: bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:261:48-261:52: bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:337:64-337:68: static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:346:49-346:53: bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:367:47-367:51: bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/broadcom/b43/pio.c:24:28-24:52: static u16 generate_cookie(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:178:39-178:63: static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:192:37-192:61: static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:201:37-201:61: static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:317:33-317:57: static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:370:33-370:57: static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:440:25-440:49: static int pio_tx_frame(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:596:26-596:50: static bool pio_rx_frame(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:762:17-762:41: void b43_pio_rx(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:777:38-777:62: static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:790:37-790:61: static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.h:109:36-109:60: static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:114:36-114:60: static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:119:38-119:62: static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:125:38-125:62: static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:132:36-132:60: static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:137:36-137:60: static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:142:38-142:62: static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:148:38-148:62: static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c:621:61-621:74: static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c:2754:33-2754:46: static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:402:8-402:26: struct list_head *q, int *counter)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:421:6-421:24: struct list_head *q, struct brcmf_usbreq *req,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:433:20-433:38: brcmf_usbdev_qinit(struct list_head *q, int qsize)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:467:30-467:48: static void brcmf_usb_free_q(struct list_head *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4337:9-4337:34: struct ipw2100_bd_queue *q, int entries)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4358:54-4358:79: static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4375:5-4375:30: struct ipw2100_bd_queue *q, u32 base, u32 size,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3653:31-3653:58: static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3665:38-3665:63: static inline int ipw_tx_queue_space(const struct clx2_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3695:51-3695:70: static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3721:9-3721:31: struct clx2_tx_queue *q,
-
drivers/net/wireless/intel/iwlegacy/common.c:2535:19-2535:45: il_rx_queue_space(const struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2552:50-2552:70: il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2905:16-2905:39: il_queue_space(const struct il_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2927:35-2927:52: il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
-
drivers/net/wireless/intel/iwlegacy/common.h:848:15-848:38: il_queue_used(const struct il_queue *q, int i)
-
drivers/net/wireless/intel/iwlegacy/common.h:859:16-859:33: il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:475:59-475:63: static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:693:44-693:66: int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:915:27-915:43: static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:22:41-22:63: static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:92:33-92:55: static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-
drivers/net/wireless/mediatek/mt76/dma.c:184:41-184:60: mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:193:44-193:63: mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:210:43-210:62: mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:255:40-255:59: mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:317:47-317:66: mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:341:43-341:62: mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:348:43-348:62: mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
-
drivers/net/wireless/mediatek/mt76/dma.c:388:40-388:59: mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:442:40-442:59: mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
-
drivers/net/wireless/mediatek/mt76/dma.c:463:49-463:68: mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:496:45-496:64: mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:595:40-595:59: mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:639:46-639:65: int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
-
drivers/net/wireless/mediatek/mt76/dma.c:693:44-693:63: mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:732:43-732:62: mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:781:41-781:60: mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-
drivers/net/wireless/mediatek/mt76/dma.c:808:43-808:62: mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:565:49-565:68: int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:761:57-761:74: static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:797:57-797:74: static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mac80211.c:820:36-820:53: void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:1363:50-1363:67: void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt76.h:1537:41-1537:60: static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mt76.h:1559:24-1559:43: mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-
drivers/net/wireless/mediatek/mt76/mt7603/core.c:6:53-6:70: void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:71:49-71:66: void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:111:46-111:65: mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mac.c:1642:49-1642:66: void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c:67:48-67:65: mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:108:48-108:67: mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:242:54-242:71: void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c:35:50-35:67: void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/dma.c:599:51-599:70: mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:245:46-245:65: mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:273:6-273:23: enum mt76_rxq_id q, u32 *info)
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:1073:49-1073:66: void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c:884:9-884:26: enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:590:49-590:66: void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt792x_dma.c:74:53-74:70: void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7996/mac.c:1367:49-1367:66: void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c:252:9-252:26: enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:367:25-367:44: mt76s_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:383:46-383:65: mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:429:57-429:76: static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:517:42-517:61: mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:548:46-548:65: mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:582:49-582:68: static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:239:53-239:72: static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:448:18-448:37: mt76_txq_stopped(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:455:43-455:62: mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/tx.c:713:51-713:70: void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:320:40-320:59: mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-
drivers/net/wireless/mediatek/mt76/usb.c:353:39-353:58: mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:390:42-390:61: mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:438:25-438:44: mt76u_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:602:46-602:65: mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:696:43-696:62: mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:853:42-853:61: mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:887:49-887:68: static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:461:35-461:60: static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:485:7-485:32: struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/tx.c:21:17-21:20: static u8 q2hwq(u8 q)
-
drivers/net/wireless/st/cw1200/debug.c:70:10-70:31: struct cw1200_queue *q)
-
drivers/net/wireless/ti/wlcore/tx.c:508:33-508:36: struct wl1271_link *lnk, u8 q)
-
drivers/nvme/host/apple.c:208:54-208:79: static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:216:44-216:69: static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:273:31-273:56: static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
-
drivers/nvme/host/apple.c:283:35-283:60: static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:569:43-569:68: static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:577:49-577:74: apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:585:42-585:67: static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:607:46-607:71: static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:619:32-619:57: static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:642:34-642:59: static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
-
drivers/nvme/host/apple.c:968:35-968:60: static void apple_nvme_init_queue(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:1295:7-1295:32: struct apple_nvme_queue *q)
-
drivers/nvme/host/core.c:1042:28-1042:50: int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1074:26-1074:48: int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1864:3-1864:25: struct request_queue *q)
-
drivers/nvme/host/core.c:2614:6-2614:42: const struct nvme_core_quirk_entry *q)
-
drivers/nvme/host/ioctl.c:151:48-151:70: static struct request *nvme_alloc_user_request(struct request_queue *q,
-
drivers/nvme/host/ioctl.c:220:33-220:55: static int nvme_submit_user_cmd(struct request_queue *q,
-
drivers/nvme/target/fc.c:2118:22-2118:49: queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
-
drivers/pcmcia/cistpl.c:761:37-761:45: static int parse_strings(u_char *p, u_char *q, int max,
-
drivers/pcmcia/cistpl.c:906:39-906:47: static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
-
drivers/pcmcia/cistpl.c:943:40-943:48: static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
-
drivers/pcmcia/cistpl.c:978:36-978:44: static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-
drivers/pcmcia/cistpl.c:1022:37-1022:45: static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
-
drivers/pcmcia/cistpl.c:1063:37-1063:45: static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
-
drivers/platform/chrome/wilco_ec/event.c:118:38-118:61: static inline bool event_queue_empty(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:124:37-124:60: static inline bool event_queue_full(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:130:41-130:64: static struct ec_event *event_queue_pop(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:148:42-148:65: static struct ec_event *event_queue_push(struct ec_event_queue *q,
-
drivers/platform/chrome/wilco_ec/event.c:161:30-161:53: static void event_queue_free(struct ec_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:644:35-644:60: static void ssam_event_queue_push(struct ssam_event_queue *q,
-
drivers/platform/surface/aggregator/controller.c:659:53-659:78: static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:676:39-676:64: static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
-
drivers/ptp/ptp_clock.c:37:30-37:60: static inline int queue_free(struct timestamp_event_queue *q)
-
drivers/ptp/ptp_private.h:79:29-79:59: static inline int queue_cnt(struct timestamp_event_queue *q)
-
drivers/scsi/aacraid/comminit.c:259:50-259:69: static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
-
drivers/scsi/aacraid/commsup.c:804:44-804:63: int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
-
drivers/scsi/aacraid/commsup.c:836:46-836:64: void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
-
drivers/scsi/aacraid/dpcsup.c:39:34-39:53: unsigned int aac_response_normal(struct aac_queue * q)
-
drivers/scsi/aacraid/dpcsup.c:158:33-158:51: unsigned int aac_command_normal(struct aac_queue *q)
-
drivers/scsi/be2iscsi/be.h:51:37-51:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:56:35-56:57: static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
-
drivers/scsi/be2iscsi/be.h:61:37-61:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:66:35-66:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:71:35-71:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_cmds.c:900:54-900:76: int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:2994:26-2994:48: static int be_fill_queue(struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:3319:53-3319:75: static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_main.c:3329:53-3329:75: static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
-
drivers/scsi/bfa/bfa_cs.h:157:20-157:38: bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
-
drivers/scsi/csiostor/csio_scsi.c:1159:48-1159:66: csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
-
drivers/scsi/csiostor/csio_scsi.c:1233:46-1233:64: csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
-
drivers/scsi/csiostor/csio_wr.c:1000:24-1000:39: csio_wr_avail_qcredits(struct csio_q *q)
-
drivers/scsi/csiostor/csio_wr.c:1042:40-1042:55: csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/csiostor/csio_wr.c:1111:18-1111:33: csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
-
drivers/scsi/csiostor/csio_wr.c:1128:40-1128:55: csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:488:40-488:59: __sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:499:37-499:56: __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
-
drivers/scsi/elx/libefc_sli/sli4.c:545:36-545:55: sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:672:39-672:58: __sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:764:3-764:22: struct sli4_queue *q, u32 n_entries,
-
drivers/scsi/elx/libefc_sli/sli4.c:995:35-995:54: sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:1067:37-1067:56: sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1087:34-1087:53: sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1125:33-1125:52: sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1147:33-1147:52: sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1168:33-1168:52: sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1197:32-1197:51: sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1239:32-1239:51: sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1283:32-1283:51: sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:3628:11-3628:30: struct sli4_queue *q, int num_q, u32 shift,
-
drivers/scsi/hpsa.c:993:53-993:56: static inline u32 next_command(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.c:6935:70-6935:73: static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:489:68-489:71: static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:527:2-527:29: __attribute__((unused)) u8 q)
-
drivers/scsi/hpsa.h:590:71-590:74: static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/libiscsi.c:2769:17-2769:36: iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
-
drivers/scsi/libiscsi.c:2809:22-2809:41: void iscsi_pool_free(struct iscsi_pool *q)
-
drivers/scsi/lpfc/lpfc_attr.c:1337:41-1337:59: lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
-
drivers/scsi/lpfc/lpfc_debugfs.c:4181:28-4181:47: lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
-
drivers/scsi/lpfc/lpfc_debugfs.h:341:20-341:39: lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
-
drivers/scsi/lpfc/lpfc_debugfs.h:389:19-389:38: lpfc_debug_dump_q(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:264:18-264:37: lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
-
drivers/scsi/lpfc/lpfc_sli.c:359:22-359:41: lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
-
drivers/scsi/lpfc/lpfc_sli.c:381:18-381:37: lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
-
drivers/scsi/lpfc/lpfc_sli.c:420:22-420:41: lpfc_sli4_mq_release(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:442:18-442:37: lpfc_sli4_eq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:474:23-474:42: lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:493:27-493:46: lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:514:46-514:65: lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:552:50-552:69: lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:683:18-683:37: lpfc_sli4_cq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:734:46-734:65: lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:767:50-767:69: lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli4.h:1176:34-1176:53: static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
-
drivers/scsi/scsi_bsg.c:12:30-12:52: static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
drivers/scsi/scsi_dh.c:251:22-251:44: int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-
drivers/scsi/scsi_dh.c:298:24-298:46: int scsi_dh_set_params(struct request_queue *q, const char *params)
-
drivers/scsi/scsi_dh.c:320:20-320:42: int scsi_dh_attach(struct request_queue *q, const char *name)
-
drivers/scsi/scsi_dh.c:359:43-359:65: const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-
drivers/scsi/scsi_ioctl.c:216:29-216:51: static int sg_emulated_host(struct request_queue *q, int __user *p)
-
drivers/scsi/scsi_ioctl.c:504:26-504:48: static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write,
-
drivers/scsi/scsi_lib.c:442:28-442:50: static void scsi_run_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1130:36-1130:58: struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
-
drivers/scsi/scsi_lib.c:1247:40-1247:62: static inline int scsi_dev_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1336:41-1336:63: static inline int scsi_host_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1393:30-1393:52: static bool scsi_mq_lld_busy(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1646:32-1646:54: static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
-
drivers/scsi/scsi_lib.c:1660:31-1660:53: static int scsi_mq_get_budget(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1875:49-1875:71: void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
-
drivers/scsi/scsi_lib.c:2011:44-2011:66: struct scsi_device *scsi_device_from_queue(struct request_queue *q)
-
drivers/scsi/scsi_transport_fc.c:4344:15-4344:37: fc_bsg_remove(struct request_queue *q)
-
drivers/scsi/sg.c:847:30-847:52: static int max_sectors_bytes(struct request_queue *q)
-
drivers/spi/spi-fsl-qspi.c:277:37-277:54: static inline int needs_swap_endian(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:282:34-282:51: static inline int needs_4x_clock(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:287:37-287:54: static inline int needs_fill_txfifo(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:292:42-292:59: static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:297:42-297:59: static inline int needs_amba_base_offset(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:302:37-302:54: static inline int needs_tdh_setting(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:311:40-311:57: static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-
drivers/spi/spi-fsl-qspi.c:323:25-323:42: static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:331:23-331:40: static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:355:36-355:53: static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
-
drivers/spi/spi-fsl-qspi.c:415:34-415:51: static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:471:37-471:54: static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:491:41-491:58: static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:507:33-507:50: static void fsl_qspi_invalidate(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:525:33-525:50: static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
-
drivers/spi/spi-fsl-qspi.c:551:31-551:48: static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:558:34-558:51: static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:583:34-583:51: static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:604:27-604:44: static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:629:37-629:54: static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
-
drivers/spi/spi-fsl-qspi.c:720:35-720:52: static int fsl_qspi_default_setup(struct fsl_qspi *q)
-
drivers/staging/fieldbus/anybuss/host.c:324:28-324:42: ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd)
-
drivers/staging/fieldbus/anybuss/host.c:336:36-336:50: ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:353:41-353:55: ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:862:48-862:62: static void process_q(struct anybuss_host *cd, struct kfifo *q)
-
drivers/staging/fieldbus/anybuss/host.c:1226:44-1226:58: static int taskq_alloc(struct device *dev, struct kfifo *q)
-
drivers/staging/media/imx/imx-media-csc-scaler.c:501:43-501:61: static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
-
drivers/staging/media/imx/imx-media-csc-scaler.c:550:43-550:61: static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/ipu3/ipu3-css.c:168:36-168:59: static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
-
drivers/staging/media/meson/vdec/vdec.c:164:33-164:51: static void process_num_buffers(struct vb2_queue *q,
-
drivers/staging/media/meson/vdec/vdec.c:189:29-189:47: static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/staging/media/meson/vdec/vdec.c:280:33-280:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/meson/vdec/vdec.c:395:33-395:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/rkvdec/rkvdec.c:549:35-549:53: static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/rkvdec/rkvdec.c:592:35-592:53: static void rkvdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/sunxi/cedrus/cedrus.h:241:11-241:29: struct vb2_queue *q,
-
drivers/ufs/core/ufshcd-crypto.c:236:50-236:72: void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:367:39-367:60: static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:379:51-379:72: static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:386:43-386:64: static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:391:48-391:69: static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:398:46-398:67: static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:403:51-403:72: static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
-
drivers/ufs/core/ufshcd-priv.h:410:47-410:68: static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
-
drivers/usb/musb/musb_host.h:46:40-46:58: static inline struct musb_qh *first_qh(struct list_head *q)
-
drivers/usb/serial/digi_acceleport.c:344:2-344:21: wait_queue_head_t *q, long timeout,
-
fs/ext2/inode.c:986:41-986:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/ext2/inode.c:1087:67-1087:75: static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
-
fs/ext2/inode.c:1127:64-1127:72: static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
-
fs/ext4/indirect.c:761:41-761:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/jffs2/compr_rubin.c:164:4-164:18: unsigned long q)
-
fs/minix/itree_common.c:215:42-215:51: static inline int all_zeroes(block_t *p, block_t *q)
-
fs/minix/itree_common.c:263:63-263:72: static inline void free_data(struct inode *inode, block_t *p, block_t *q)
-
fs/minix/itree_common.c:276:60-276:69: static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
-
fs/smb/client/dir.c:804:54-804:67: static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
-
fs/sysv/itree.c:273:46-273:59: static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:330:67-330:80: static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:342:64-342:77: static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
-
fs/xfs/xfs_trans_dquot.c:278:2-278:20: struct xfs_dqtrx *q)
-
include/crypto/b128ops.h:60:56-60:69: static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-
include/crypto/b128ops.h:66:56-66:69: static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
-
include/linux/blk-integrity.h:63:40-63:62: blk_integrity_queue_supports_integrity(struct request_queue *q)
-
include/linux/blk-integrity.h:68:53-68:75: static inline void blk_queue_max_integrity_segments(struct request_queue *q,
-
include/linux/blk-integrity.h:75:30-75:58: queue_max_integrity_segments(const struct request_queue *q)
-
include/linux/blk-mq.h:906:44-906:66: static inline bool blk_should_fake_timeout(struct request_queue *q)
-
include/linux/blk-mq.h:1139:33-1139:55: static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
include/linux/blkdev.h:609:32-609:54: static inline bool queue_is_mq(struct request_queue *q)
-
include/linux/blkdev.h:615:48-615:70: static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-
include/linux/blkdev.h:627:23-627:45: blk_queue_zoned_model(struct request_queue *q)
-
include/linux/blkdev.h:634:39-634:61: static inline bool blk_queue_is_zoned(struct request_queue *q)
-
include/linux/blkdev.h:713:44-713:66: static inline unsigned int blk_queue_depth(struct request_queue *q)
-
include/linux/blkdev.h:1085:52-1085:80: static inline unsigned long queue_segment_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1090:49-1090:77: static inline unsigned long queue_virt_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1095:46-1095:74: static inline unsigned int queue_max_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1100:44-1100:66: static inline unsigned int queue_max_bytes(struct request_queue *q)
-
include/linux/blkdev.h:1105:49-1105:77: static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1110:49-1110:77: static inline unsigned short queue_max_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1115:57-1115:85: static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1120:51-1120:79: static inline unsigned int queue_max_segment_size(const struct request_queue *q)
-
include/linux/blkdev.h:1125:58-1125:86: static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1144:49-1144:77: static inline unsigned queue_logical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1159:54-1159:82: static inline unsigned int queue_physical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1169:41-1169:69: static inline unsigned int queue_io_min(const struct request_queue *q)
-
include/linux/blkdev.h:1179:41-1179:69: static inline unsigned int queue_io_opt(const struct request_queue *q)
-
include/linux/blkdev.h:1190:30-1190:58: queue_zone_write_granularity(const struct request_queue *q)
-
include/linux/blkdev.h:1310:39-1310:67: static inline int queue_dma_alignment(const struct request_queue *q)
-
include/linux/blkdev.h:1327:34-1327:56: static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
-
include/linux/blktrace_api.h:63:51-63:73: static inline bool blk_trace_note_message_enabled(struct request_queue *q)
-
include/linux/fortify-string.h:143:35-143:47: char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
-
include/linux/fortify-string.h:241:53-241:76: __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:292:54-292:77: __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:374:36-374:59: size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
-
include/linux/fortify-string.h:428:34-428:46: char *strcat(char * const POS p, const char *q)
-
include/linux/fortify-string.h:458:35-458:58: char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
-
include/linux/fortify-string.h:715:39-715:63: int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
-
include/linux/fortify-string.h:784:34-784:57: char *strcpy(char * const POS p, const char * const POS q)
-
include/linux/mlx4/qp.h:497:29-497:33: static inline u16 folded_qp(u32 q)
-
include/linux/netdevice.h:692:47-692:74: static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
-
include/linux/netdevice.h:701:49-701:70: static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
-
include/linux/netdevice.h:3567:42-3567:63: static inline void netdev_tx_reset_queue(struct netdev_queue *q)
-
include/linux/sunrpc/sched.h:273:38-273:67: static inline const char * rpc_qname(const struct rpc_wait_queue *q)
-
include/linux/sunrpc/sched.h:278:46-278:69: static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
-
include/media/videobuf-core.h:162:40-162:63: static inline void videobuf_queue_lock(struct videobuf_queue *q)
-
include/media/videobuf-core.h:168:42-168:65: static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-
include/media/videobuf2-core.h:669:49-669:67: static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1119:37-1119:55: static inline bool vb2_is_streaming(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1137:41-1137:59: static inline bool vb2_fileio_is_active(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1148:32-1148:50: static inline bool vb2_is_busy(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1157:38-1157:56: static inline void *vb2_get_drv_priv(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1215:47-1215:65: static inline bool vb2_start_streaming_called(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1224:51-1224:69: static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1239:49-1239:67: static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
-
include/media/videobuf2-v4l2.h:317:38-317:56: static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
-
include/net/inet_frag.h:148:34-148:58: static inline void inet_frag_put(struct inet_frag_queue *q)
-
include/net/ipv6_frag.h:32:33-32:57: static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
-
include/net/pkt_cls.h:158:19-158:33: __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
-
include/net/pkt_cls.h:184:21-184:35: __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
-
include/net/pkt_cls.h:203:10-203:16: void *q, struct tcf_result *res,
-
include/net/pkt_sched.h:23:32-23:46: static inline void *qdisc_priv(struct Qdisc *q)
-
include/net/pkt_sched.h:122:30-122:44: static inline void qdisc_run(struct Qdisc *q)
-
include/net/pkt_sched.h:140:37-140:51: static inline struct net *qdisc_net(struct Qdisc *q)
-
include/net/sch_generic.h:176:42-176:62: static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-
include/net/sch_generic.h:511:30-511:50: static inline int qdisc_qlen(const struct Qdisc *q)
-
include/net/sch_generic.h:516:34-516:54: static inline int qdisc_qlen_sum(const struct Qdisc *q)
-
include/net/sch_generic.h:571:34-571:48: static inline void sch_tree_lock(struct Qdisc *q)
-
include/net/sch_generic.h:579:36-579:50: static inline void sch_tree_unlock(struct Qdisc *q)
-
include/net/sch_generic.h:1316:38-1316:58: static inline void qdisc_synchronize(const struct Qdisc *q)
-
include/net/sctp/structs.h:1133:35-1133:53: static inline void sctp_outq_cork(struct sctp_outq *q)
-
include/trace/events/block.h:284:1-284:1: TRACE_EVENT(block_bio_complete,
-
include/trace/events/block.h:407:1-407:1: TRACE_EVENT(block_plug,
-
include/trace/events/block.h:452:1-452:1: DEFINE_EVENT(block_unplug, block_unplug,
-
include/trace/events/qdisc.h:77:1-77:1: TRACE_EVENT(qdisc_reset,
-
include/trace/events/qdisc.h:102:1-102:1: TRACE_EVENT(qdisc_destroy,
-
include/trace/events/sunrpc.h:463:1-463:1: DEFINE_RPC_QUEUED_EVENT(sleep);
-
include/trace/events/sunrpc.h:464:1-464:1: DEFINE_RPC_QUEUED_EVENT(wakeup);
-
include/trace/events/v4l2.h:181:1-181:1: DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
-
include/trace/events/v4l2.h:245:1-245:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
-
include/trace/events/v4l2.h:250:1-250:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
-
include/trace/events/v4l2.h:255:1-255:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
-
include/trace/events/v4l2.h:260:1-260:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
-
include/trace/events/vb2.h:11:1-11:1: DECLARE_EVENT_CLASS(vb2_event_class,
-
include/trace/events/vb2.h:46:1-46:1: DEFINE_EVENT(vb2_event_class, vb2_buf_done,
-
include/trace/events/vb2.h:51:1-51:1: DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
-
include/trace/events/vb2.h:56:1-56:1: DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
-
include/trace/events/vb2.h:61:1-61:1: DEFINE_EVENT(vb2_event_class, vb2_qbuf,
-
ipc/sem.c:646:61-646:79: static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:719:56-719:74: static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:786:46-786:64: static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
-
ipc/sem.c:799:49-799:67: static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:816:56-816:74: static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:1072:57-1072:75: static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
-
kernel/auditfilter.c:1079:39-1079:60: static void audit_list_rules(int seq, struct sk_buff_head *q)
-
kernel/cgroup/cpuset.c:581:53-581:74: static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
-
kernel/futex/core.c:499:22-499:38: void __futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:513:40-513:56: struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
-
kernel/futex/core.c:543:20-543:36: void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/core.c:573:19-573:35: int futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:620:23-620:39: void futex_unqueue_pi(struct futex_q *q)
-
kernel/futex/futex.h:170:32-170:48: static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/pi.c:683:54-683:70: static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:855:52-855:68: static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:884:39-884:55: int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)
-
kernel/futex/requeue.c:74:20-74:36: void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
-
kernel/futex/requeue.c:92:45-92:61: static inline bool futex_requeue_pi_prepare(struct futex_q *q,
-
kernel/futex/requeue.c:125:46-125:62: static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
-
kernel/futex/requeue.c:156:48-156:64: static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
-
kernel/futex/requeue.c:223:28-223:44: void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
-
kernel/futex/requeue.c:692:8-692:24: struct futex_q *q,
-
kernel/futex/waitwake.c:115:50-115:66: void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q)
-
kernel/futex/waitwake.c:328:53-328:69: void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
-
kernel/futex/waitwake.c:578:8-578:24: struct futex_q *q, struct futex_hash_bucket **hb)
-
kernel/sched/swait.c:6:30-6:55: void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-
kernel/sched/swait.c:21:22-21:47: void swake_up_locked(struct swait_queue_head *q, int wake_flags)
-
kernel/sched/swait.c:41:26-41:51: void swake_up_all_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:47:19-47:44: void swake_up_one(struct swait_queue_head *q)
-
kernel/sched/swait.c:61:19-61:44: void swake_up_all(struct swait_queue_head *q)
-
kernel/sched/swait.c:84:25-84:50: void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:91:33-91:58: void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:102:29-102:54: long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:125:21-125:46: void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:132:19-132:44: void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/signal.c:452:29-452:46: static void __sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1953:20-1953:37: void sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1978:19-1978:36: int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
-
kernel/trace/blktrace.c:314:28-314:50: static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:380:31-380:53: static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:388:31-388:53: static int __blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:402:22-402:44: int blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:514:31-514:53: static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:621:30-621:52: static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:642:21-642:43: int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:657:35-657:57: static int compat_blk_trace_setup(struct request_queue *q, char *name,
-
kernel/trace/blktrace.c:690:34-690:56: static int __blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:705:25-705:47: int blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:772:25-772:47: void blk_trace_shutdown(struct request_queue *q)
-
kernel/trace/blktrace.c:780:35-780:57: static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:891:31-891:53: static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-
kernel/trace/blktrace.c:915:12-915:34: struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:943:46-943:68: static void blk_add_trace_plug(void *ignore, struct request_queue *q)
-
kernel/trace/blktrace.c:954:48-954:70: static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
-
kernel/trace/blktrace.c:1607:35-1607:57: static int blk_trace_remove_queue(struct request_queue *q)
-
kernel/trace/blktrace.c:1627:34-1627:56: static int blk_trace_setup_queue(struct request_queue *q,
-
lib/bch.c:782:29-782:45: const struct gf_poly *b, struct gf_poly *q)
-
lib/crypto/curve25519-hacl64.c:545:12-545:17: u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:557:24-557:29: u64 *nqpq2, u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:567:7-567:12: u64 *q, u8 byt, u32 i)
-
lib/crypto/curve25519-hacl64.c:578:22-578:27: u64 *nqpq2, u64 *q,
-
lib/crypto/curve25519-hacl64.c:588:47-588:52: static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
-
mm/swapfile.c:1166:6-1166:31: struct swap_info_struct *q)
-
net/core/dev.c:3115:32-3115:46: static void __netif_reschedule(struct Qdisc *q)
-
net/core/dev.c:3129:23-3129:37: void __netif_schedule(struct Qdisc *q)
-
net/core/dev.c:3762:51-3762:65: static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
-
net/core/dev.c:3774:55-3774:69: static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
-
net/core/gen_stats.c:341:10-341:50: const struct gnet_stats_queue __percpu *q)
-
net/core/gen_stats.c:358:6-358:37: const struct gnet_stats_queue *q)
-
net/core/gen_stats.c:389:9-389:34: struct gnet_stats_queue *q, __u32 qlen)
-
net/ieee802154/6lowpan/reassembly.c:36:30-36:54: static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/inet_fragment.c:54:36-54:60: static void fragrun_append_to_last(struct inet_frag_queue *q,
-
net/ipv4/inet_fragment.c:65:28-65:52: static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
-
net/ipv4/inet_fragment.c:287:24-287:48: void inet_frag_destroy(struct inet_frag_queue *q)
-
net/ipv4/inet_fragment.c:383:28-383:52: int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:447:31-447:55: void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:516:29-516:53: void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-
net/ipv4/inet_fragment.c:585:37-585:61: struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
-
net/ipv4/ip_fragment.c:82:27-82:51: static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/ip_fragment.c:96:27-96:51: static void ip4_frag_free(struct inet_frag_queue *q)
-
net/netfilter/nfnetlink_queue.c:104:17-104:40: instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
-
net/netfilter/nfnetlink_queue.c:118:17-118:40: instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
-
net/netfilter/nfnetlink_queue.c:185:18-185:41: instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
-
net/netfilter/nfnetlink_queue.c:1066:25-1066:48: verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
-
net/rds/message.c:75:33-75:61: void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
-
net/rds/rds.h:382:49-382:77: static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
-
net/rose/rose_in.c:102:101-102:105: static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/rose/rose_subr.c:201:56-201:61: int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
-
net/sched/cls_api.c:826:60-826:74: static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:866:63-866:77: static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:984:60-984:74: static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1149:46-1149:61: static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1224:32-1224:46: static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
-
net/sched/cls_api.c:1244:60-1244:74: static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1281:54-1281:68: static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1318:58-1318:73: static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1352:31-1352:45: static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
-
net/sched/cls_api.c:1374:11-1374:25: struct Qdisc *q,
-
net/sched/cls_api.c:1395:11-1395:25: struct Qdisc *q,
-
net/sched/cls_api.c:1410:5-1410:19: struct Qdisc *q,
-
net/sched/cls_api.c:1425:51-1425:65: int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-
net/sched/cls_api.c:1484:46-1484:60: struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
-
net/sched/cls_api.c:1500:49-1500:63: void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1966:5-1966:19: struct Qdisc *q, u32 parent, void *fh,
-
net/sched/cls_api.c:2027:31-2027:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2056:35-2056:49: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2094:31-2094:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2652:53-2652:67: static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
-
net/sched/cls_basic.c:261:71-261:77: static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_bpf.c:631:11-631:17: void *q, unsigned long base)
-
net/sched/cls_flower.c:3593:68-3593:74: static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_fw.c:415:68-415:74: static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_matchall.c:379:70-379:76: static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_route.c:648:72-648:78: static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_u32.c:1321:69-1321:75: static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/sch_api.c:280:21-280:35: void qdisc_hash_add(struct Qdisc *q, bool invisible)
-
net/sched/sch_api.c:291:21-291:35: void qdisc_hash_del(struct Qdisc *q)
-
net/sched/sch_api.c:912:47-912:61: static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-
net/sched/sch_api.c:996:34-996:48: static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
-
net/sched/sch_api.c:1427:23-1427:37: static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
-
net/sched/sch_api.c:1443:15-1443:29: check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
-
net/sched/sch_api.c:1881:48-1881:62: static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_api.c:1933:25-1933:39: struct nlmsghdr *n, struct Qdisc *q,
-
net/sched/sch_api.c:1955:9-1955:23: struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2016:33-2016:47: static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2047:28-2047:42: static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
-
net/sched/sch_api.c:2210:29-2210:43: static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2220:33-2220:47: static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
-
net/sched/sch_cake.c:647:22-647:44: static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-
net/sched/sch_cake.c:1152:40-1152:64: static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
-
net/sched/sch_cake.c:1315:31-1315:55: static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
-
net/sched/sch_cake.c:1350:26-1350:50: static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
-
net/sched/sch_cake.c:1397:28-1397:52: static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
-
net/sched/sch_cake.c:1409:34-1409:64: static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1416:26-1416:50: static void cake_heapify(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1453:29-1453:53: static void cake_heapify_up(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1469:32-1469:56: static int cake_advance_shaper(struct cake_sched_data *q,
-
net/sched/sch_cake.c:2962:25-2962:39: static void cake_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_cbs.c:251:5-251:28: struct cbs_sched_data *q)
-
net/sched/sch_cbs.c:276:55-276:78: static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-
net/sched/sch_cbs.c:309:55-309:78: static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
-
net/sched/sch_choke.c:75:31-75:62: static unsigned int choke_len(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:81:20-81:51: static int use_ecn(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:87:25-87:56: static int use_harddrop(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:93:34-93:59: static void choke_zap_head_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:103:34-103:59: static void choke_zap_tail_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:179:42-179:73: static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
-
net/sched/sch_choke.c:199:32-199:63: static bool choke_match_random(const struct choke_sched_data *q,
-
net/sched/sch_etf.c:297:5-297:28: struct etf_sched_data *q)
-
net/sched/sch_etf.c:319:55-319:78: static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
-
net/sched/sch_ets.c:190:33-190:51: static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
-
net/sched/sch_fifo.c:227:20-227:34: int fifo_set_limit(struct Qdisc *q, unsigned int limit)
-
net/sched/sch_fq.c:172:37-172:59: static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:179:35-179:57: static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:217:19-217:41: static void fq_gc(struct fq_sched_data *q,
-
net/sched/sch_fq.c:261:57-261:79: static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
-
net/sched/sch_fq.c:437:9-437:37: const struct fq_sched_data *q)
-
net/sched/sch_fq.c:499:32-499:54: static void fq_check_throttled(struct fq_sched_data *q, u64 now)
-
net/sched/sch_fq.c:697:23-697:45: static void fq_rehash(struct fq_sched_data *q,
-
net/sched/sch_fq_codel.c:70:35-70:69: static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-
net/sched/sch_fq_codel.c:608:29-608:43: static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_fq_pie.c:74:33-74:65: static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
-
net/sched/sch_generic.c:38:38-38:52: static void qdisc_maybe_clear_missed(struct Qdisc *q,
-
net/sched/sch_generic.c:72:53-72:67: static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:108:57-108:71: static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:118:46-118:60: static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
-
net/sched/sch_generic.c:142:57-142:71: static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-
net/sched/sch_generic.c:178:34-178:48: static void try_bulk_dequeue_skb(struct Qdisc *q,
-
net/sched/sch_generic.c:202:39-202:53: static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
-
net/sched/sch_generic.c:228:36-228:50: static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
-
net/sched/sch_generic.c:314:43-314:57: bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_generic.c:388:34-388:48: static inline bool qdisc_restart(struct Qdisc *q, int *packets)
-
net/sched/sch_generic.c:410:18-410:32: void __qdisc_run(struct Qdisc *q)
-
net/sched/sch_gred.c:114:6-114:30: struct gred_sched_data *q,
-
net/sched/sch_gred.c:129:11-129:35: struct gred_sched_data *q)
-
net/sched/sch_gred.c:136:12-136:36: struct gred_sched_data *q)
-
net/sched/sch_gred.c:142:25-142:49: static int gred_use_ecn(struct gred_sched_data *q)
-
net/sched/sch_gred.c:147:30-147:54: static int gred_use_harddrop(struct gred_sched_data *q)
-
net/sched/sch_gred.c:403:36-403:60: static inline void gred_destroy_vq(struct gred_sched_data *q)
-
net/sched/sch_hfsc.c:218:18-218:37: eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
-
net/sched/sch_hfsc.c:235:18-235:37: eltree_get_minel(struct hfsc_sched *q)
-
net/sched/sch_hhf.c:182:12-182:35: struct hhf_sched_data *q)
-
net/sched/sch_hhf.c:213:8-213:31: struct hhf_sched_data *q)
-
net/sched/sch_htb.c:316:34-316:52: static void htb_add_to_wait_tree(struct htb_sched *q,
-
net/sched/sch_htb.c:363:41-363:59: static inline void htb_add_class_to_row(struct htb_sched *q,
-
net/sched/sch_htb.c:395:46-395:64: static inline void htb_remove_class_from_row(struct htb_sched *q,
-
net/sched/sch_htb.c:425:32-425:50: static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:465:34-465:52: static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:561:23-561:41: htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
-
net/sched/sch_htb.c:592:33-592:51: static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:610:35-610:53: static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:698:30-698:48: static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-
net/sched/sch_htb.c:746:26-746:44: static s64 htb_do_events(struct htb_sched *q, const int level,
-
net/sched/sch_htb.c:871:41-871:59: static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
-
net/sched/sch_htb.c:1042:41-1042:55: static void htb_set_lockdep_class_child(struct Qdisc *q)
-
net/sched/sch_htb.c:1297:41-1297:59: static void htb_offload_aggregate_stats(struct htb_sched *q,
-
net/sched/sch_multiq.c:319:27-319:41: static void multiq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_netem.c:207:25-207:50: static bool loss_4state(struct netem_sched_data *q)
-
net/sched/sch_netem.c:272:27-272:52: static bool loss_gilb_ell(struct netem_sched_data *q)
-
net/sched/sch_netem.c:294:24-294:49: static bool loss_event(struct netem_sched_data *q)
-
net/sched/sch_netem.c:354:36-354:67: static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
-
net/sched/sch_netem.c:638:27-638:52: static void get_slot_next(struct netem_sched_data *q, u64 now)
-
net/sched/sch_netem.c:657:35-657:60: static struct sk_buff *netem_peek(struct netem_sched_data *q)
-
net/sched/sch_netem.c:674:30-674:55: static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
-
net/sched/sch_netem.c:807:22-807:47: static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:829:29-829:54: static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:838:25-838:50: static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:846:25-846:50: static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:854:22-854:47: static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:868:25-868:50: static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:1096:28-1096:59: static int dump_loss_model(const struct netem_sched_data *q,
-
net/sched/sch_prio.c:341:25-341:39: static void prio_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_qfq.c:257:26-257:44: static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:267:43-267:61: static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:281:28-281:46: static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:315:28-315:46: static void qfq_add_to_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:332:29-332:47: static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:345:34-345:52: static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:356:29-356:47: static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:369:35-369:53: static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:732:41-732:59: static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
-
net/sched/sch_qfq.c:749:27-749:45: static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
-
net/sched/sch_qfq.c:772:36-772:54: static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
-
net/sched/sch_qfq.c:779:32-779:50: static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
-
net/sched/sch_qfq.c:805:31-805:49: static void qfq_make_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:960:33-960:51: static void qfq_update_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1039:30-1039:48: static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1072:19-1072:37: qfq_update_agg_ts(struct qfq_sched *q,
-
net/sched/sch_qfq.c:1169:50-1169:68: static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1284:30-1284:48: static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1331:30-1331:48: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:1345:29-1345:47: static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-
net/sched/sch_qfq.c:1368:32-1368:50: static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_red.c:55:31-55:54: static inline int red_use_ecn(struct red_sched_data *q)
-
net/sched/sch_red.c:60:36-60:59: static inline int red_use_harddrop(struct red_sched_data *q)
-
net/sched/sch_red.c:65:27-65:50: static int red_use_nodrop(struct red_sched_data *q)
-
net/sched/sch_sfb.c:123:55-123:78: static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:138:57-138:80: static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:152:11-152:34: struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:167:55-167:78: static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:180:50-180:73: static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:185:50-185:73: static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:190:34-190:57: static void sfb_zero_all_buckets(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:198:56-198:85: static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:218:45-218:68: static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:224:27-224:50: static void sfb_swap_slot(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:234:49-234:72: static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfq.c:150:45-150:68: static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
-
net/sched/sch_sfq.c:157:30-157:59: static unsigned int sfq_hash(const struct sfq_sched_data *q,
-
net/sched/sch_sfq.c:203:29-203:52: static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:228:28-228:51: static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:241:28-241:51: static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:329:26-329:55: static int sfq_prob_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:335:26-335:55: static int sfq_hard_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:340:25-340:54: static int sfq_headdrop(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:844:24-844:38: static void sfq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_skbprio.c:40:31-40:64: static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_skbprio.c:53:30-53:63: static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_taprio.c:112:45-112:66: static void taprio_calculate_gate_durations(struct taprio_sched *q,
-
net/sched/sch_taprio.c:171:35-171:62: static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
-
net/sched/sch_taprio.c:184:32-184:59: static ktime_t taprio_get_time(const struct taprio_sched *q)
-
net/sched/sch_taprio.c:202:30-202:51: static void switch_schedules(struct taprio_sched *q,
-
net/sched/sch_taprio.c:250:31-250:52: static int length_to_duration(struct taprio_sched *q, int len)
-
net/sched/sch_taprio.c:255:31-255:52: static int duration_to_length(struct taprio_sched *q, u64 duration)
-
net/sched/sch_taprio.c:264:41-264:62: static void taprio_update_queue_max_sdu(struct taprio_sched *q,
-
net/sched/sch_taprio.c:425:31-425:52: static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
-
net/sched/sch_taprio.c:667:32-667:53: static void taprio_set_budgets(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1039:29-1039:50: static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1071:30-1071:51: static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
-
net/sched/sch_taprio.c:1090:29-1090:50: static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
-
net/sched/sch_taprio.c:1130:34-1130:55: static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1246:34-1246:55: static void setup_first_end_time(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1299:11-1299:32: struct taprio_sched *q)
-
net/sched/sch_taprio.c:1357:26-1357:47: static void setup_txtime(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1423:43-1423:64: static void taprio_offload_config_changed(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1481:41-1481:62: static void taprio_detect_broken_mqprio(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1498:42-1498:63: static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1510:6-1510:27: struct taprio_sched *q,
-
net/sched/sch_taprio.c:1576:7-1576:28: struct taprio_sched *q,
-
net/sched/sch_taprio.c:2292:7-2292:28: struct taprio_sched *q,
-
net/sched/sch_tbf.c:264:30-264:59: static bool tbf_peak_present(const struct tbf_sched_data *q)
-
net/sctp/inqueue.c:64:20-64:37: void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
-
net/sctp/inqueue.c:234:30-234:47: void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
-
net/sctp/outqueue.c:59:40-59:58: static inline void sctp_outq_head_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:74:57-74:75: static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
-
net/sctp/outqueue.c:80:40-80:58: static inline void sctp_outq_tail_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:191:52-191:70: void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
-
net/sctp/outqueue.c:206:34-206:52: static void __sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:267:25-267:43: void sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:274:21-274:39: void sctp_outq_free(struct sctp_outq *q)
-
net/sctp/outqueue.c:281:21-281:39: void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
-
net/sctp/outqueue.c:450:27-450:45: void sctp_retransmit_mark(struct sctp_outq *q,
-
net/sctp/outqueue.c:537:22-537:40: void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
-
net/sctp/outqueue.c:598:34-598:52: static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
-
net/sctp/outqueue.c:759:23-759:41: void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
-
net/sctp/outqueue.c:1192:29-1192:47: static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
-
net/sctp/outqueue.c:1248:20-1248:38: int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
-
net/sctp/outqueue.c:1415:24-1415:48: int sctp_outq_is_empty(const struct sctp_outq *q)
-
net/sctp/outqueue.c:1435:36-1435:54: static void sctp_check_transmitted(struct sctp_outq *q,
-
net/sctp/outqueue.c:1709:31-1709:49: static void sctp_mark_missing(struct sctp_outq *q,
-
net/sctp/outqueue.c:1822:27-1822:45: void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_interleave.c:1098:33-1098:51: static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_sched.c:53:37-53:55: static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched.c:58:51-58:69: static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched.c:81:42-81:60: static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched.c:235:30-235:48: void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched.c:257:32-257:50: void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched_fc.c:98:35-98:53: static void sctp_sched_fc_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_fc.c:111:49-111:67: static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_fc.c:132:40-132:58: static void sctp_sched_fc_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:215:37-215:55: static void sctp_sched_prio_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:228:51-228:69: static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_prio.c:256:42-256:60: static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:97:35-97:53: static void sctp_sched_rr_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:110:49-110:67: static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_rr.c:133:40-133:58: static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
-
net/sunrpc/sched.c:147:25-147:43: __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
-
net/sunrpc/sched.c:382:40-382:63: static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:391:37-391:60: static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:400:45-400:68: static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:430:27-430:50: void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:447:19-447:42: void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:465:36-465:59: void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:481:28-481:51: void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:1212:3-1212:28: struct workqueue_struct *q)
-
net/sunrpc/sched.c:1221:52-1221:77: static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
-
net/unix/af_unix.c:444:39-444:59: static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
-
net/x25/x25_in.c:208:100-208:104: static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/x25/x25_subr.c:260:72-260:77: int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
-
net/xdp/xsk_queue.c:14:34-14:52: static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
-
net/xdp/xsk_queue.c:59:19-59:37: void xskq_destroy(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:120:52-120:70: static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
-
net/xdp/xsk_queue.h:128:50-128:68: static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:189:35-189:53: static inline bool xskq_has_descs(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:194:44-194:62: static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:205:40-205:58: static inline bool xskq_cons_read_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:221:40-221:58: static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:226:31-226:49: static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
-
net/xdp/xsk_queue.h:234:31-234:49: u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-
net/xdp/xsk_queue.h:276:40-276:58: static inline void __xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:281:37-281:55: static inline void __xskq_cons_peek(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:287:42-287:60: static inline void xskq_cons_get_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:293:40-293:58: static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:306:42-306:60: static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:311:50-311:68: static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:318:40-318:58: static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:331:38-331:56: static inline void xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:336:39-336:57: static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:341:45-341:63: static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:349:37-349:55: static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:363:38-363:56: static inline bool xskq_prod_is_full(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:368:39-368:57: static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:373:37-373:55: static inline int xskq_prod_reserve(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:383:42-383:60: static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:395:47-395:65: static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
-
net/xdp/xsk_queue.h:408:42-408:60: static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:426:39-426:57: static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
-
net/xdp/xsk_queue.h:431:37-431:55: static inline void xskq_prod_submit(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:436:39-436:57: static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
-
net/xdp/xsk_queue.h:441:39-441:57: static inline bool xskq_prod_is_empty(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:449:41-449:59: static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:454:45-454:63: static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
-
sound/core/seq/oss/seq_oss_event.c:42:55-42:68: snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:95:39-95:52: old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:121:44-121:57: extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:175:45-175:58: chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:196:46-196:59: chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:223:42-223:55: timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:258:41-258:54: local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_readq.c:62:26-62:48: snd_seq_oss_readq_delete(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:74:25-74:47: snd_seq_oss_readq_clear(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:89:24-89:46: snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
-
sound/core/seq/oss/seq_oss_readq.c:123:29-123:51: int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
-
sound/core/seq/oss/seq_oss_readq.c:141:29-141:51: snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
-
sound/core/seq/oss/seq_oss_readq.c:169:24-169:46: snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
-
sound/core/seq/oss/seq_oss_readq.c:181:24-181:46: snd_seq_oss_readq_wait(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:193:24-193:46: snd_seq_oss_readq_free(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:206:24-206:46: snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
-
sound/core/seq/oss/seq_oss_readq.c:216:33-216:55: snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
-
sound/core/seq/oss/seq_oss_readq.c:244:29-244:51: snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
-
sound/core/seq/oss/seq_oss_writeq.c:54:27-54:50: snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:67:26-67:49: snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:83:25-83:48: snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:123:27-123:50: snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
-
sound/core/seq/oss/seq_oss_writeq.c:139:34-139:57: snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:152:31-152:54: snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
-
sound/core/seq/seq_queue.c:50:27-50:49: static int queue_list_add(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:129:26-129:48: static void queue_delete(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:240:26-240:48: void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
-
sound/core/seq/seq_queue.c:354:32-354:54: static inline int check_access(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:362:30-362:52: static int queue_access_lock(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:376:40-376:62: static inline void queue_access_unlock(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:629:35-629:57: static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
-
sound/core/seq/seq_queue.c:652:41-652:63: static void snd_seq_queue_process_event(struct snd_seq_queue *q,
-
sound/core/seq/seq_timer.c:258:24-258:46: int snd_seq_timer_open(struct snd_seq_queue *q)
-
sound/core/seq/seq_timer.c:313:25-313:47: int snd_seq_timer_close(struct snd_seq_queue *q)
-
sound/pci/hda/hda_codec.c:1212:7-1212:29: struct hda_cvt_setup *q)
variable
Defined...
-
arch/x86/crypto/curve25519-x86_64.c:35:2-35:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
arch/x86/include/asm/div64.h:89:2-89:6: u64 q;
-
arch/x86/kernel/cpu/common.c:783:2-783:12: char *p, *q, *s;
-
arch/x86/kvm/svm/sev.c:2132:2-2132:26: struct list_head *pos, *q;
-
arch/x86/kvm/vmx/nested.c:1559:2-1559:9: int i, q;
-
arch/x86/xen/platform-pci-unplug.c:181:2-181:12: char *p, *q;
-
block/bfq-iosched.c:6242:2-6242:34: struct request_queue *q = hctx->queue;
-
block/bfq-iosched.c:6847:2-6847:32: struct request_queue *q = rq->q;
-
block/bio-integrity.c:126:2-126:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1045:2-1045:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1166:3-1166:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1202:2-1202:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-cgroup.c:123:2-123:34: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:474:2-474:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:577:2-577:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:804:2-804:24: struct request_queue *q;
-
block/blk-cgroup.c:1228:3-1228:35: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:1401:2-1401:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:1501:2-1501:34: struct request_queue *q = disk->queue;
-
block/blk-cgroup.c:1616:2-1616:34: struct request_queue *q = disk->queue;
-
block/blk-core.c:261:2-261:28: struct request_queue *q = container_of(rcu_head,
-
block/blk-core.c:379:2-380:3: struct request_queue *q =
-
block/blk-core.c:387:2-387:28: struct request_queue *q = from_timer(q, t, timeout);
-
block/blk-core.c:398:2-398:24: struct request_queue *q;
-
block/blk-core.c:637:3-637:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-core.c:724:2-724:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-core.c:854:2-854:24: struct request_queue *q;
-
block/blk-crypto-sysfs.c:131:2-131:34: struct request_queue *q = disk->queue;
-
block/blk-crypto.c:420:2-420:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-flush.c:167:2-167:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:219:2-219:38: struct request_queue *q = flush_rq->q;
-
block/blk-flush.c:361:2-361:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:405:2-405:32: struct request_queue *q = rq->q;
-
block/blk-ia-ranges.c:111:2-111:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:154:2-154:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:290:2-290:34: struct request_queue *q = disk->queue;
-
block/blk-ioc.c:76:2-76:33: struct request_queue *q = icq->q;
-
block/blk-ioc.c:121:3-121:34: struct request_queue *q = icq->q;
-
block/blk-iocost.c:3396:2-3396:24: struct request_queue *q;
-
block/blk-map.c:557:2-557:32: struct request_queue *q = rq->q;
-
block/blk-merge.c:593:2-593:32: struct request_queue *q = rq->q;
-
block/blk-mq-debugfs-zoned.c:11:2-11:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:24:2-24:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:32:2-32:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:40:2-40:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:74:2-74:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:114:2-114:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:125:2-125:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:418:2-418:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:435:2-435:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:452:2-452:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:469:2-469:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:800:2-800:40: struct request_queue *q = rqos->disk->queue;
-
block/blk-mq-sched.c:89:2-89:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:217:2-217:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:321:2-321:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.h:62:3-62:33: struct request_queue *q = rq->q;
-
block/blk-mq-sysfs.c:54:2-54:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:160:2-160:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sysfs.c:221:2-221:34: struct request_queue *q = disk->queue;
-
block/blk-mq-sysfs.c:258:2-258:34: struct request_queue *q = disk->queue;
-
block/blk-mq-tag.c:48:3-48:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:86:3-86:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:270:2-270:39: struct request_queue *q = iter_data->q;
-
block/blk-mq.c:280:2-280:24: struct request_queue *q;
-
block/blk-mq.c:294:2-294:24: struct request_queue *q;
-
block/blk-mq.c:353:2-353:34: struct request_queue *q = data->q;
-
block/blk-mq.c:438:2-438:34: struct request_queue *q = data->q;
-
block/blk-mq.c:687:2-687:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:702:2-702:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:724:2-724:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1062:2-1062:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:1247:2-1247:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1429:2-1429:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1444:2-1444:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1463:2-1464:3: struct request_queue *q =
-
block/blk-mq.c:1620:2-1621:3: struct request_queue *q =
-
block/blk-mq.c:2014:2-2014:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:2497:2-2497:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2578:2-2578:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2787:3-2787:25: struct request_queue *q;
-
block/blk-mq.c:2965:2-2965:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-mq.c:3033:2-3033:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:3959:2-3959:24: struct request_queue *q;
-
block/blk-mq.c:4067:2-4067:24: struct request_queue *q;
-
block/blk-mq.c:4118:2-4118:24: struct request_queue *q;
-
block/blk-mq.c:4717:2-4717:24: struct request_queue *q;
-
block/blk-mq.c:4833:2-4833:32: struct request_queue *q = rq->q;
-
block/blk-mq.h:406:3-406:35: struct request_queue *q = hctx->queue;
-
block/blk-rq-qos.c:303:2-303:34: struct request_queue *q = disk->queue;
-
block/blk-rq-qos.c:338:2-338:40: struct request_queue *q = rqos->disk->queue;
-
block/blk-rq-qos.h:141:3-141:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-settings.c:393:2-393:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:920:2-920:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:967:2-967:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-settings.c:980:2-980:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-stat.c:52:2-52:32: struct request_queue *q = rq->q;
-
block/blk-sysfs.c:677:2-677:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:691:2-691:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:719:2-719:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:736:2-736:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:772:2-772:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:789:2-789:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:874:2-874:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:1177:2-1177:24: struct request_queue *q;
-
block/blk-throttle.c:1254:2-1254:32: struct request_queue *q = td->queue;
-
block/blk-throttle.c:1719:2-1719:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2176:2-2176:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-throttle.c:2304:2-2304:32: struct request_queue *q = rq->q;
-
block/blk-throttle.c:2368:2-2368:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2412:2-2412:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:2425:2-2425:34: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:55:3-55:35: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:130:2-130:33: struct request_queue *q = req->q;
-
block/blk-wbt.c:714:2-714:34: struct request_queue *q = disk->queue;
-
block/blk-wbt.c:900:2-900:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:253:2-253:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-zoned.c:456:2-456:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:538:2-538:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:621:2-621:34: struct request_queue *q = disk->queue;
-
block/blk.h:66:2-66:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bsg-lib.c:275:2-275:34: struct request_queue *q = hctx->queue;
-
block/bsg-lib.c:366:2-366:24: struct request_queue *q;
-
block/bsg.c:107:2-107:32: struct request_queue *q = bd->queue;
-
block/elevator.c:62:2-62:32: struct request_queue *q = rq->q;
-
block/genhd.c:633:2-633:34: struct request_queue *q = disk->queue;
-
block/genhd.c:947:2-947:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:996:2-996:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:1386:2-1386:24: struct request_queue *q;
-
block/kyber-iosched.c:954:1-954:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-
block/kyber-iosched.c:955:1-955:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
-
block/kyber-iosched.c:956:1-956:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
-
block/kyber-iosched.c:957:1-957:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
-
block/kyber-iosched.c:962:2-962:28: struct request_queue *q = data;
-
block/mq-deadline.c:646:2-646:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:798:2-798:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:868:2-868:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:921:2-921:32: struct request_queue *q = rq->q;
-
block/mq-deadline.c:1089:1-1089:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-
block/mq-deadline.c:1090:1-1090:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-
block/mq-deadline.c:1091:1-1091:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-
block/mq-deadline.c:1092:1-1092:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-
block/mq-deadline.c:1093:1-1093:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-
block/mq-deadline.c:1094:1-1094:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-
block/mq-deadline.c:1099:2-1099:28: struct request_queue *q = data;
-
block/mq-deadline.c:1108:2-1108:28: struct request_queue *q = data;
-
block/mq-deadline.c:1117:2-1117:28: struct request_queue *q = data;
-
block/mq-deadline.c:1126:2-1126:28: struct request_queue *q = data;
-
block/mq-deadline.c:1154:2-1154:28: struct request_queue *q = data;
-
block/mq-deadline.c:1208:1-1208:1: DEADLINE_DISPATCH_ATTR(0);
-
block/mq-deadline.c:1209:1-1209:1: DEADLINE_DISPATCH_ATTR(1);
-
block/mq-deadline.c:1210:1-1210:1: DEADLINE_DISPATCH_ATTR(2);
-
crypto/algapi.c:241:2-241:21: struct crypto_alg *q;
-
crypto/algapi.c:319:2-319:21: struct crypto_alg *q;
-
crypto/algapi.c:376:2-376:21: struct crypto_alg *q;
-
crypto/algapi.c:547:2-547:26: struct crypto_template *q;
-
crypto/algapi.c:625:2-625:26: struct crypto_template *q, *tmpl = NULL;
-
crypto/algapi.c:1063:3-1063:22: struct crypto_alg *q;
-
crypto/api.c:59:2-59:21: struct crypto_alg *q, *alg = NULL;
-
crypto/asymmetric_keys/x509_public_key.c:166:2-166:14: const char *q;
-
crypto/async_tx/async_pq.c:382:3-382:13: void *p, *q, *s;
-
crypto/async_tx/async_raid6_recov.c:158:2-158:19: struct page *p, *q, *a, *b;
-
crypto/async_tx/async_raid6_recov.c:208:2-208:19: struct page *p, *q, *g, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:299:2-299:19: struct page *p, *q, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:476:2-476:19: struct page *p, *q, *dq;
-
crypto/crypto_user_base.c:38:2-38:21: struct crypto_alg *q, *alg = NULL;
-
crypto/dh.c:129:3-129:12: MPI val, q;
-
crypto/ecc.c:568:2-568:22: u64 q[ECC_MAX_DIGITS];
-
crypto/ecc.c:666:2-666:26: u64 q[ECC_MAX_DIGITS * 2];
-
crypto/essiv.c:391:2-391:18: const char *p, *q;
-
drivers/accel/habanalabs/common/hw_queue.c:44:2-44:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:231:2-231:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:271:2-271:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:336:2-336:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:372:2-372:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:632:2-632:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:808:2-808:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/accel/habanalabs/common/hw_queue.c:1071:2-1071:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:1111:2-1111:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/common/hw_queue.c:1123:2-1123:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1093:2-1093:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1749:2-1749:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:1763:2-1763:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2686:2-2686:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2832:2-2832:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:2956:2-2956:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:3101:2-3101:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:3234:2-3234:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:4667:2-4667:35: struct gaudi_internal_qman_info *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:6843:2-6843:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi/gaudi.c:7271:2-7271:68: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:4842:2-4842:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9346:2-9346:69: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9431:2-9431:69: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
-
drivers/accel/habanalabs/gaudi2/gaudi2.c:9441:2-9441:44: struct hl_engine_arc_dccm_queue_full_irq *q;
-
drivers/accel/habanalabs/goya/goya.c:1175:2-1175:22: struct hl_hw_queue *q;
-
drivers/accel/habanalabs/goya/goya.c:4481:2-4481:67: struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
-
drivers/accel/ivpu/ivpu_mmu.c:315:2-315:35: struct ivpu_mmu_queue *q = &mmu->cmdq;
-
drivers/accel/ivpu/ivpu_mmu.c:334:2-334:35: struct ivpu_mmu_queue *q = &mmu->evtq;
-
drivers/accel/ivpu/ivpu_mmu.c:416:2-416:41: struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
-
drivers/accel/ivpu/ivpu_mmu.c:436:2-436:41: struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
-
drivers/acpi/ec.c:1127:2-1127:28: struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
-
drivers/acpi/ec.c:1150:2-1150:24: struct acpi_ec_query *q;
-
drivers/acpi/ec.c:1168:2-1168:24: struct acpi_ec_query *q;
-
drivers/ata/libata-scsi.c:1026:2-1026:34: struct request_queue *q = sdev->request_queue;
-
drivers/block/aoe/aoecmd.c:837:2-837:24: struct request_queue *q;
-
drivers/block/aoe/aoecmd.c:1033:2-1033:24: struct request_queue *q;
-
drivers/block/aoe/aoenet.c:75:2-75:21: register char *p, *q;
-
drivers/block/drbd/drbd_int.h:1854:3-1854:44: struct drbd_work_queue *q = &connection->sender_work;
-
drivers/block/drbd/drbd_main.c:932:3-932:48: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/block/drbd/drbd_main.c:952:3-952:37: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1212:2-1212:36: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1274:2-1274:43: struct request_queue * const q = device->rq_queue;
-
drivers/block/loop.c:761:2-761:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/loop.c:939:2-939:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/nbd.c:832:2-832:39: struct request_queue *q = nbd->disk->queue;
-
drivers/block/null_blk/main.c:1329:2-1329:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/main.c:1337:2-1337:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/zoned.c:160:2-160:35: struct request_queue *q = nullb->q;
-
drivers/block/pktcdvd.c:717:2-717:51: struct request_queue *q = bdev_get_queue(pd->bdev);
-
drivers/block/pktcdvd.c:2162:2-2162:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:2484:2-2484:38: struct request_queue *q = pd->disk->queue;
-
drivers/block/rbd.c:4946:2-4946:24: struct request_queue *q;
-
drivers/block/rnbd/rnbd-clt.c:201:2-201:25: struct rnbd_queue *q = NULL;
-
drivers/block/rnbd/rnbd-clt.c:1099:2-1099:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1160:2-1160:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1323:2-1323:21: struct rnbd_queue *q;
-
drivers/block/ublk_drv.c:270:2-270:43: struct request_queue *q = ublk->ub_disk->queue;
-
drivers/block/ublk_drv.c:497:2-497:41: struct request_queue *q = ub->ub_disk->queue;
-
drivers/block/ublk_drv.c:524:2-524:41: struct request_queue *q = ub->ub_disk->queue;
-
drivers/block/virtio_blk.c:541:2-541:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:570:2-570:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:869:2-869:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:978:2-978:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:1307:2-1307:24: struct request_queue *q;
-
drivers/cdrom/cdrom.c:2582:2-2582:23: struct cdrom_subchnl q;
-
drivers/cdrom/cdrom.c:3039:2-3039:23: struct cdrom_subchnl q;
-
drivers/clk/clk-cdce925.c:97:9-97:9: return mult_frac(parent_rate, (unsigned long)n, (unsigned long)m);
-
drivers/clk/clk-cdce925.c:205:9-205:9: rate = mult_frac(rate, (unsigned long)n, (unsigned long)m);
-
drivers/clk/clk-cdce925.c:223:2-223:5: u8 q;
-
drivers/clk/clk.c:3122:9-3122:9: ret = mult_frac(scale, duty->num, duty->den);
-
drivers/clk/imgtec/clk-boston.c:56:13-56:13: sys_freq = mult_frac(in_freq, mul, sys_div);
-
drivers/clk/imgtec/clk-boston.c:59:13-59:13: cpu_freq = mult_frac(in_freq, mul, cpu_div);
-
drivers/clk/qcom/clk-regmap-mux-div.c:102:18-102:18: parent_rate = mult_frac(req_rate, div, 2);
-
drivers/clk/qcom/clk-regmap-mux-div.c:104:18-104:18: actual_rate = mult_frac(parent_rate, 2, div);
-
drivers/clk/qcom/clk-regmap-mux-div.c:139:18-139:18: parent_rate = mult_frac(rate, div, 2);
-
drivers/clk/qcom/clk-regmap-mux-div.c:141:18-141:18: actual_rate = mult_frac(parent_rate, 2, div);
-
drivers/clk/qcom/clk-regmap-mux-div.c:216:11-216:11: return mult_frac(parent_rate, 2, div + 1);
-
drivers/clk/xilinx/clk-xlnx-clock-wizard.c:387:9-387:9: return mult_frac(parent_rate, 1000, (div * 1000) + frac);
-
drivers/counter/counter-chrdev.c:123:2-123:28: struct counter_comp_node *q, *o;
-
drivers/crypto/cavium/zip/zip_main.c:136:2-136:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:335:2-335:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:489:2-489:10: u32 q = 0;
-
drivers/crypto/ccp/ccp-ops.c:222:2-222:10: u8 *p, *q;
-
drivers/crypto/ccp/ccp-ops.c:247:2-247:10: u8 *p, *q;
-
drivers/crypto/hisilicon/zip/zip_crypto.c:222:2-222:34: struct hisi_zip_req *q = req_q->q;
-
drivers/crypto/intel/keembay/ocs-aes.c:1059:2-1059:9: int i, q;
-
drivers/devfreq/governor_passive.c:121:20-121:20: freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
-
drivers/firewire/core-device.c:1105:2-1105:6: u32 q;
-
drivers/firewire/core-topology.c:42:2-42:6: u32 q;
-
drivers/firewire/core-topology.c:176:2-176:23: u32 *next_sid, *end, q;
-
drivers/firmware/arm_scmi/raw_mode.c:742:2-742:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:849:2-849:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1033:2-1033:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1115:4-1115:27: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/raw_mode.c:1316:2-1316:25: struct scmi_raw_queue *q;
-
drivers/firmware/arm_scmi/scmi_power_control.c:222:21-222:21: adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
-
drivers/firmware/dmi_scan.c:660:2-660:20: char __iomem *p, *q;
-
drivers/firmware/efi/libstub/vsprintf.c:43:3-43:35: unsigned int q = (r * 0xccd) >> 15;
-
drivers/firmware/efi/libstub/vsprintf.c:62:2-62:42: unsigned int q = (x * 0x346DC5D7ULL) >> 43;
-
drivers/firmware/efi/libstub/vsprintf.c:76:2-76:27: unsigned int d3, d2, d1, q, h;
-
drivers/gpio/gpiolib-of.c:629:2-629:28: const of_find_gpio_quirk *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c:1465:2-1465:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:288:2-288:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1040:2-1040:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1090:2-1090:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1147:2-1147:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1227:2-1227:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2221:2-2221:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2350:2-2350:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2772:2-2772:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2847:3-2847:17: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2949:3-2949:17: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c:131:2-131:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:105:2-105:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:379:2-379:20: struct queue *q = container_of(kobj, struct queue, kobj);
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:2152:2-2152:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:252:2-252:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:668:2-668:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:779:2-779:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:999:2-999:16: struct queue *q;
-
drivers/gpu/drm/drm_debugfs.c:317:2-317:26: struct list_head *pos, *q;
-
drivers/gpu/drm/drm_edid.c:5328:2-5328:34: u32 max_avg, min_cll, max, min, q, r;
-
drivers/gpu/drm/i915/display/intel_quirks.c:212:3-212:42: struct intel_quirk *q = &intel_quirks[i];
-
drivers/gpu/drm/i915/gt/intel_llc.c:64:26-64:26: consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
-
drivers/gpu/drm/i915/gt/intel_llc.c:92:15-92:15: ring_freq = mult_frac(gpu_freq, 5, 4);
-
drivers/gpu/drm/i915/gvt/handlers.c:2275:2-2275:2: MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2278:2-2278:2: MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2281:2-2281:2: MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2284:2-2284:2: MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2287:2-2287:2: MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2290:2-2290:2: MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/scheduler.c:1632:2-1632:24: struct list_head *q = workload_q_head(vgpu, engine);
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:168:2-168:2: MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:177:2-177:2: MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:186:2-186:2: MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:199:2-199:2: MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:212:2-212:2: MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:225:2-225:2: MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
-
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c:138:3-138:3: mult_frac(src_height, 1, dst_height) : 1;
-
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c:413:3-413:3: mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
-
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c:415:3-415:3: mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
-
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c:277:18-277:18: phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
-
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c:294:18-294:18: phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
-
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c:585:15-585:15: *out_phase = mult_frac(unit, src, dst);
-
drivers/gpu/drm/msm/dsi/dsi_host.c:580:14-580:14: pclk_bpp = mult_frac(pclk_rate, bpp, 16 * lanes);
-
drivers/gpu/drm/msm/dsi/dsi_host.c:582:14-582:14: pclk_bpp = mult_frac(pclk_rate, bpp, 8 * lanes);
-
drivers/gpu/drm/msm/dsi/dsi_host.c:618:27-618:27: msm_host->src_clk_rate = mult_frac(msm_host->pixel_clk_rate, bpp, 8);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:67:7-67:7: ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:68:8-68:8: lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:178:7-178:7: ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:286:7-286:7: ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:394:7-394:7: ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c:482:7-482:7: ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c:279:4-279:4: mult_frac(ref_clk, sdm_freq_seed, BIT(16));
-
drivers/gpu/drm/v3d/v3d_drv.c:130:2-130:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:258:2-258:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:442:2-442:17: enum v3d_queue q;
-
drivers/gpu/drm/xen/xen_drm_front.c:53:2-53:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:64:2-64:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:78:2-78:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/hid/hid-lg4ff.c:291:21-291:21: new_value = 8192 + mult_frac(value - 8192, max_range, range);
-
drivers/hid/hid-nintendo.c:1189:14-1189:14: value[0] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-
drivers/hid/hid-nintendo.c:1194:14-1194:14: value[1] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-
drivers/hid/hid-nintendo.c:1199:14-1199:14: value[2] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-
drivers/hid/hid-playstation.c:1393:20-1393:20: int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer,
-
drivers/hid/hid-playstation.c:1402:20-1402:20: int calib_data = mult_frac(ds->accel_calib_data[i].sens_numer,
-
drivers/hid/hid-playstation.c:2248:20-2248:20: int calib_data = mult_frac(ds4->gyro_calib_data[i].sens_numer,
-
drivers/hid/hid-playstation.c:2257:20-2257:20: int calib_data = mult_frac(ds4->accel_calib_data[i].sens_numer,
-
drivers/hid/hid-quirks.c:1081:2-1081:29: struct quirks_list_struct *q;
-
drivers/hid/hid-quirks.c:1116:2-1116:37: struct quirks_list_struct *q_new, *q;
-
drivers/hid/hid-quirks.c:1172:2-1172:29: struct quirks_list_struct *q, *temp;
-
drivers/i2c/i2c-core-base.c:2123:2-2123:45: const struct i2c_adapter_quirks *q = adap->quirks;
-
drivers/iio/common/st_sensors/st_sensors_core.c:643:2-643:18: int i, len = 0, q, r;
-
drivers/iio/dac/ltc2688.c:168:8-168:8: fs = mult_frac(fs, 105, 100);
-
drivers/iio/frequency/admv1014.c:246:21-246:21: vcm_comp = 1050 + mult_frac(i, 450, 8);
-
drivers/iio/industrialio-buffer.c:948:2-948:30: struct iio_demux_table *p, *q;
-
drivers/infiniband/hw/hfi1/affinity.c:193:2-193:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/hfi1/mad.c:85:2-85:27: struct trap_node *node, *q;
-
drivers/infiniband/hw/hfi1/mad.c:987:2-987:7: u16 *q;
-
drivers/infiniband/hw/hfi1/mad.c:1686:2-1686:24: __be16 *q = (__be16 *)data;
-
drivers/infiniband/hw/irdma/verbs.c:3939:2-3939:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/mlx4/mad.c:1026:2-1026:9: int p, q;
-
drivers/infiniband/hw/mlx4/mad.c:1062:2-1062:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:286:2-286:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:328:2-328:9: int p, q;
-
drivers/infiniband/hw/qib/qib_mad.c:601:2-601:30: __be16 *q = (__be16 *) smp->data;
-
drivers/infiniband/hw/qib/qib_mad.c:1044:2-1044:24: u16 *q = (u16 *) smp->data;
-
drivers/infiniband/sw/rdmavt/qp.c:745:3-745:18: struct rvt_qp *q;
-
drivers/infiniband/sw/rxe/rxe_comp.c:597:2-597:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_queue.c:58:2-58:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_req.c:45:2-45:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:118:2-118:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_req.c:164:2-164:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:681:2-681:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:272:2-272:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:1456:2-1456:31: struct rxe_queue *q = qp->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_srq.c:48:2-48:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_srq.c:155:2-155:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/ulp/srp/ib_srp.c:2851:2-2851:34: struct request_queue *q = sdev->request_queue;
-
drivers/isdn/mISDN/dsp_cmx.c:1303:2-1303:14: u8 *d, *p, *q, *o_q;
-
drivers/isdn/mISDN/dsp_cmx.c:1625:2-1625:10: u8 *p, *q;
-
drivers/md/bcache/super.c:900:2-900:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1008:2-1008:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1398:2-1398:51: struct request_queue *q = bdev_get_queue(dc->bdev);
-
drivers/md/bcache/sysfs.c:1066:3-1066:16: uint16_t q[31], *p, *cached;
-
drivers/md/bcache/util.c:97:2-97:11: uint64_t q;
-
drivers/md/dm-bufio.c:2835:15-2835:15: mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
-
drivers/md/dm-bufio.c:2842:12-2842:12: if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
-
drivers/md/dm-bufio.c:2843:9-2843:9: mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
-
drivers/md/dm-cache-policy-smq.c:889:2-889:25: struct queue *q = &mq->dirty;
-
drivers/md/dm-cache-policy-smq.c:902:2-902:25: struct queue *q = &mq->clean;
-
drivers/md/dm-io.c:316:2-316:54: struct request_queue *q = bdev_get_queue(where->bdev);
-
drivers/md/dm-mpath.c:516:2-516:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:885:2-885:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-mpath.c:941:2-941:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:1626:2-1626:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-mpath.c:2100:2-2100:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-stats.c:956:2-956:14: const char *q;
-
drivers/md/dm-table.c:405:2-405:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:859:2-859:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:1493:2-1493:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1584:2-1584:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1777:2-1777:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1830:2-1830:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1838:2-1838:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-zone.c:127:2-127:32: struct request_queue *q = md->queue;
-
drivers/md/raid5.c:7170:3-7170:36: struct request_queue *q = mddev->queue;
-
drivers/media/common/saa7146/saa7146_fops.c:156:2-156:31: struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/common/saa7146/saa7146_fops.c:346:2-346:20: struct vb2_queue *q;
-
drivers/media/common/saa7146/saa7146_video.c:709:2-709:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/videobuf2/videobuf2-core.c:216:2-216:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:357:2-357:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1046:2-1046:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1130:2-1130:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1246:2-1246:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1382:2-1382:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1394:2-1394:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1929:2-1929:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:2952:2-2952:24: struct vb2_queue *q = data;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:193:2-193:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:212:2-212:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:200:2-200:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:254:2-254:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:276:2-276:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:317:2-317:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:145:2-145:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:178:2-178:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:496:2-496:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:1189:2-1189:30: struct vb2_queue *q = vdev->queue;
-
drivers/media/dvb-core/dvb_demux.c:541:2-541:12: const u8 *q;
-
drivers/media/dvb-core/dvb_frontend.c:2332:21-2332:21: c->bandwidth_hz = mult_frac(c->symbol_rate, rolloff, 100);
-
drivers/media/dvb-core/dvb_vb2.c:165:2-165:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:203:2-203:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:216:2-216:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:233:2-233:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:357:2-357:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:370:2-370:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:387:2-387:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-frontends/rtl2832_sdr.c:1144:2-1144:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/dvb-frontends/sp887x.c:287:2-287:15: unsigned int q, r;
-
drivers/media/i2c/adv7511-v4l2.c:1276:2-1276:9: u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
-
drivers/media/i2c/cx25840/cx25840-core.c:697:2-697:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:775:2-775:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:1034:2-1034:27: struct workqueue_struct *q;
-
drivers/media/i2c/video-i2c.c:451:14-451:14: u32 delay = mult_frac(1000000UL, data->frame_interval.numerator,
-
drivers/media/pci/bt8xx/bttv-driver.c:1945:2-1945:20: struct vb2_queue *q;
-
drivers/media/pci/bt8xx/bttv-driver.c:3061:2-3061:20: struct vb2_queue *q;
-
drivers/media/pci/cobalt/cobalt-v4l2.c:125:2-125:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/pci/cobalt/cobalt-v4l2.c:1210:2-1210:28: struct vb2_queue *q = &s->q;
-
drivers/media/pci/cx18/cx18-fileops.c:291:3-291:13: const u8 *q;
-
drivers/media/pci/cx18/cx18-streams.c:690:2-690:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-streams.c:712:2-712:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-vbi.c:99:2-99:10: u8 *q = buf;
-
drivers/media/pci/cx23885/cx23885-417.c:1496:2-1496:20: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-core.c:1649:2-1649:38: struct cx23885_dmaqueue *q = &port->mpegq;
-
drivers/media/pci/cx23885/cx23885-dvb.c:2644:3-2644:21: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-vbi.c:189:2-189:37: struct cx23885_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx23885/cx23885-video.c:462:2-462:40: struct cx23885_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx23885/cx23885-video.c:1239:2-1239:20: struct vb2_queue *q;
-
drivers/media/pci/cx25821/cx25821-video.c:243:2-243:56: struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
-
drivers/media/pci/cx25821/cx25821-video.c:681:3-681:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-blackbird.c:1157:2-1157:20: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-dvb.c:1766:3-1766:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-mpeg.c:274:2-274:34: struct cx88_dmaqueue *q = &dev->mpegq;
-
drivers/media/pci/cx88/cx88-vbi.c:172:2-172:38: struct cx88_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx88/cx88-video.c:507:2-507:38: struct cx88_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx88/cx88-video.c:1262:2-1262:20: struct vb2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:542:2-542:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:804:2-804:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:892:2-893:3: struct cio2_queue *q =
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:980:2-980:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1025:2-1025:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1076:2-1076:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1116:2-1116:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1229:2-1229:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1255:2-1255:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1323:2-1323:25: struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1390:2-1390:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1427:2-1427:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:1975:2-1975:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2.c:2008:2-2008:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/ivtv/ivtv-fileops.c:297:3-297:13: const u8 *q;
-
drivers/media/pci/ivtv/ivtv-fileops.c:543:2-543:20: struct ivtv_queue q;
-
drivers/media/pci/ivtv/ivtv-vbi.c:305:2-305:10: u8 *q = buf;
-
drivers/media/pci/saa7134/saa7134-core.c:331:2-331:31: struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/pci/saa7134/saa7134-dvb.c:1218:2-1218:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-empress.c:245:2-245:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-video.c:1641:2-1641:20: struct vb2_queue *q;
-
drivers/media/pci/saa7164/saa7164-cmd.c:73:2-73:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:125:2-125:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:246:2-246:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-dvb.c:195:2-195:24: struct list_head *p, *q;
-
drivers/media/pci/saa7164/saa7164-encoder.c:61:2-61:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/pci/saa7164/saa7164-vbi.c:30:2-30:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/platform/allegro-dvt/allegro-core.c:2809:2-2809:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/amphion/vdec.c:266:2-266:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vdec.c:433:2-433:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/venc.c:220:2-220:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:105:2-105:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:432:2-432:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:540:2-540:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/atmel/atmel-isi.c:1189:2-1189:20: struct vb2_queue *q;
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:1126:2-1126:36: struct mtk_jpeg_q_data *q = &ctx->out_q;
-
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c:405:2-405:6: u32 q;
-
drivers/media/platform/microchip/microchip-isc-base.c:1799:2-1799:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1981:2-1981:46: struct mxc_jpeg_q_data *q[2] = {out_q, cap_q};
-
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c:1412:2-1412:32: struct vb2_queue *q = &video->vb2_q;
-
drivers/media/platform/qcom/camss/camss-video.c:976:2-976:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/helpers.c:1592:2-1592:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/vdec.c:329:2-329:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/venc.c:241:2-241:20: struct vb2_queue *q;
-
drivers/media/platform/renesas/rcar-vin/rcar-dma.c:1535:2-1535:30: struct vb2_queue *q = &vin->queue;
-
drivers/media/platform/renesas/rcar_drif.c:924:2-924:30: struct vb2_queue *q = &sdr->vb_queue;
-
drivers/media/platform/renesas/renesas-ceu.c:1394:2-1394:33: struct vb2_queue *q = &ceudev->vb2_vq;
-
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c:742:2-742:30: struct vb2_queue *q = &cru->queue;
-
drivers/media/platform/renesas/sh_vou.c:1227:2-1227:20: struct vb2_queue *q;
-
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c:1399:2-1399:20: struct vb2_queue *q;
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:1717:2-1717:39: struct vb2_queue *q = &fimc->vid_cap.vbq;
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:574:2-574:44: struct vb2_queue *q = &isp->video_capture.vb_queue;
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:1245:2-1245:31: struct vb2_queue *q = &fimc->vb_queue;
-
drivers/media/platform/samsung/s3c-camif/camif-capture.c:1103:2-1103:29: struct vb2_queue *q = &vp->vb_queue;
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c:771:2-771:20: struct vb2_queue *q;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1116:2-1116:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1296:2-1296:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1462:2-1462:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1508:2-1508:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1585:2-1585:20: struct vb2_queue *q;
-
drivers/media/platform/st/stm32/stm32-dcmi.c:1896:2-1896:20: struct vb2_queue *q;
-
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c:403:2-403:30: struct vb2_queue *q = &csi->queue;
-
drivers/media/platform/ti/am437x/am437x-vpfe.c:2214:2-2214:20: struct vb2_queue *q;
-
drivers/media/platform/ti/cal/cal-video.c:255:2-255:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/cal/cal-video.c:995:2-995:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/davinci/vpif_capture.c:71:2-71:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/ti/davinci/vpif_capture.c:1401:2-1401:20: struct vb2_queue *q;
-
drivers/media/platform/ti/davinci/vpif_display.c:1123:2-1123:20: struct vb2_queue *q;
-
drivers/media/platform/verisilicon/hantro_drv.c:48:2-48:59: struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
-
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c:84:2-84:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c:179:2-179:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c:86:2-86:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c:317:2-317:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/media/radio/radio-gemtek.c:153:2-153:14: int i, bit, q, mute;
-
drivers/media/radio/radio-gemtek.c:257:2-257:9: int i, q;
-
drivers/media/test-drivers/vimc/vimc-capture.c:403:2-403:20: struct vb2_queue *q;
-
drivers/media/test-drivers/vivid/vivid-sdr-cap.c:469:2-469:30: struct vb2_queue *q = &dev->vb_sdr_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-cap.c:675:2-675:30: struct vb2_queue *q = &dev->vb_vid_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-out.c:454:2-454:30: struct vb2_queue *q = &dev->vb_vid_out_q;
-
drivers/media/tuners/max2165.c:153:2-153:6: u32 q, f = 0;
-
drivers/media/usb/airspy/airspy.c:646:2-646:28: struct vb2_queue *q = &s->vb_queue;
-
drivers/media/usb/au0828/au0828-video.c:290:2-290:36: struct vb2_queue *q = vb->vb2_buf.vb2_queue;
-
drivers/media/usb/au0828/au0828-video.c:1807:2-1807:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-417.c:1739:2-1739:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-video.c:1757:2-1757:20: struct vb2_queue *q;
-
drivers/media/usb/em28xx/em28xx-video.c:1239:2-1239:20: struct vb2_queue *q;
-
drivers/media/usb/go7007/go7007-fw.c:930:2-930:10: int q = 0;
-
drivers/media/usb/gspca/gspca.c:1452:2-1452:20: struct vb2_queue *q;
-
drivers/media/usb/hackrf/hackrf.c:918:2-918:20: struct vb2_queue *q;
-
drivers/media/usb/msi2500/msi2500.c:923:2-923:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/usb/s2255/s2255drv.c:813:2-813:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1098:2-1098:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1590:2-1590:20: struct vb2_queue *q;
-
drivers/media/usb/stk1160/stk1160-v4l.c:487:2-487:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:522:2-522:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:783:2-783:20: struct vb2_queue *q;
-
drivers/misc/uacce/uacce.c:59:2-59:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:137:2-137:22: struct uacce_queue *q;
-
drivers/misc/uacce/uacce.c:187:2-187:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:202:2-202:31: struct uacce_queue *q = vma->vm_private_data;
-
drivers/misc/uacce/uacce.c:220:2-220:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:282:2-282:32: struct uacce_queue *q = file->private_data;
-
drivers/misc/uacce/uacce.c:576:2-576:22: struct uacce_queue *q, *next_q;
-
drivers/mmc/core/block.c:1459:2-1459:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:1527:2-1527:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2043:2-2043:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2200:2-2200:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2787:2-2787:26: struct list_head *pos, *q;
-
drivers/mmc/core/queue.c:86:2-86:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:122:2-122:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:140:2-140:32: struct request_queue *q = mq->queue;
-
drivers/mmc/core/queue.c:231:2-231:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:488:2-488:32: struct request_queue *q = mq->queue;
-
drivers/mtd/ubi/build.c:569:10-569:10: limit = mult_frac(device_pebs, max_beb_per1024, 1024);
-
drivers/mtd/ubi/build.c:572:6-572:6: if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
-
drivers/net/dsa/ocelot/felix_vsc9959.c:1988:2-1988:26: struct list_head *pos, *q, *last;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:2023:2-2023:26: struct list_head *pos, *q, *last;
-
drivers/net/ethernet/amd/pds_core/adminq.c:69:2-69:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/amd/pds_core/adminq.c:160:2-160:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/amd/pds_core/debugfs.c:109:2-109:31: struct pdsc_queue *q = &qcq->q;
-
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:116:2-116:19: unsigned int tc, q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:466:2-466:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:1020:2-1020:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2300:2-2300:15: unsigned int q, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2338:2-2338:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2393:2-2393:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:5387:2-5387:6: int q, rc;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:14385:2-14385:26: struct list_head *pos, *q;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:2071:3-2071:43: struct bnx2x_vf_queue *q = vfq_get(vf, i);
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1539:3-1539:57: struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3548:2-3548:15: unsigned int q;
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3663:2-3663:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:499:2-499:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:707:2-707:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:732:2-732:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1800:2-1800:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2020:2-2020:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2435:2-2435:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2477:2-2477:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2503:2-2503:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2539:2-2539:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2582:2-2582:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2623:2-2623:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2729:2-2729:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2930:2-2930:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2991:2-2991:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:3045:2-3045:18: unsigned int i, q, idx;
-
drivers/net/ethernet/cadence/macb_main.c:3141:2-3141:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:4054:2-4054:21: unsigned int hw_q, q;
-
drivers/net/ethernet/cadence/macb_main.c:4225:2-4225:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4252:2-4252:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4274:2-4274:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4431:2-4431:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:5194:2-5194:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:5286:2-5286:15: unsigned int q;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:468:2-468:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:822:2-822:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_main.c:466:2-466:6: int q, iq;
-
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c:369:2-369:23: int mbox, key, stat, q;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:474:3-474:32: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:530:3-530:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:554:3-554:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:653:3-653:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:677:3-677:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1311:2-1311:31: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1472:2-1472:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1562:2-1562:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1701:2-1701:33: struct cmdQ *q = &sge->cmdQ[qid];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1928:3-1928:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1171:3-1171:41: struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1957:2-1957:72: const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1975:2-1975:22: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2032:2-2032:46: struct qset_params *q = adapter->params.sge.qset;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2152:3-2152:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2253:3-2253:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1268:2-1268:18: struct sge_txq *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1525:2-1525:39: struct sge_txq *q = &qs->txq[TXQ_CTRL];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1745:2-1745:39: struct sge_txq *q = &qs->txq[TXQ_OFLD];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1899:2-1899:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2325:2-2325:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2529:2-2529:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2608:2-2608:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2626:2-2626:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2646:2-2646:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2675:2-2675:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2696:2-2696:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3040:2-3040:42: struct sge_qset *q = &adapter->sge.qs[id];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3213:3-3213:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3236:3-3236:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3369:3-3369:37: struct qset_params *q = p->qset + i;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:952:2-952:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:967:2-967:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:979:2-979:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:927:3-927:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:973:3-973:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1061:3-1061:52: struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:112:2-112:37: struct sge_ofld_rxq *q = rxq_info->uldrxq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:376:3-376:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:391:3-391:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1422:2-1422:27: struct sge_txq *q = &eq->q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1516:2-1516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2674:2-2674:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2794:2-2794:27: struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3098:2-3098:26: struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3991:2-3991:23: struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4149:2-4149:23: struct sge_rspq *q = cookie;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4163:2-4163:34: struct sge_rspq *q = &adap->sge.intrq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4300:3-4300:31: struct sge_eth_txq *q = &s->ptptxq;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:420:2-420:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:471:2-471:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:516:2-516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:580:2-580:65: struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:719:2-719:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1935:2-1935:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:2994:2-2994:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3010:2-3010:24: struct be_queue_info *q, *cq;
-
drivers/net/ethernet/emulex/benet/be_main.c:3042:2-3042:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3106:2-3106:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3567:2-3567:24: struct be_queue_info *q;
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:3983:2-3983:32: struct dpni_queue q = { { 0 } };
-
drivers/net/ethernet/freescale/enetc/enetc.c:2678:2-2678:15: int err, tc, q;
-
drivers/net/ethernet/freescale/fec_main.c:923:2-923:15: unsigned int q;
-
drivers/net/ethernet/freescale/fec_main.c:3239:2-3239:15: unsigned int q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:513:2-513:30: struct funeth_rxq *q = irq->rxq;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:620:2-620:21: struct funeth_rxq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:792:2-792:26: struct funeth_rxq *q = *qp;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:347:2-347:37: struct funeth_txq *q = fp->txqs[qid];
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:471:2-471:30: struct funeth_txq *q = irq->txq;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:575:2-575:21: struct funeth_txq *q, **xdpqs;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:629:2-629:21: struct funeth_txq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:763:2-763:26: struct funeth_txq *q = *qp;
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:297:2-297:21: struct hnae_queue *q;
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:245:2-245:37: struct hnae_queue *q = &ring_pair->q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:2775:3-2775:24: struct netdev_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:5059:2-5059:32: struct hnae3_queue *q = ring->tqp;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:5091:4-5091:24: struct hnae3_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:1715:3-1716:4: struct hclge_comm_tqp *q =
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:960:4-960:58: struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:611:3-611:53: struct fm10k_hw_stats_q *q = &interface->stats.q[i];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:1332:2-1332:6: int q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:868:2-868:6: u16 q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:3861:2-3861:9: int i, q;
-
drivers/net/ethernet/intel/i40e/i40e_xsk.c:66:2-66:16: unsigned long q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2061:2-2061:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2712:2-2712:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_tc_lib.c:813:2-813:6: int q;
-
drivers/net/ethernet/intel/ice/ice_xsk.c:371:2-371:16: unsigned long q;
-
drivers/net/ethernet/intel/igb/e1000_nvm.c:690:2-690:5: u8 q, hval, rem, result;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:773:2-773:31: int val, cm3_state, host_id, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:834:2-834:22: int val, cm3_state, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1951:2-1951:9: int i, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:2036:2-2036:9: int i, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:159:2-159:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:186:2-186:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:654:2-654:5: u8 q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:688:2-688:10: int q = 0;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:699:2-699:21: u8 srn, num_rings, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:163:2-163:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:229:2-229:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c:140:2-140:6: int q, i;
-
drivers/net/ethernet/marvell/octeon_ep/octep_main.c:762:2-762:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:280:2-280:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:336:2-336:6: int q, b;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:372:2-372:6: int q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:713:2-713:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:826:2-826:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1407:2-1407:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1698:2-1698:67: struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1707:2-1707:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/microsoft/mana/mana_en.c:442:2-442:6: int q;
-
drivers/net/ethernet/microsoft/mana/mana_ethtool.c:126:2-126:6: int q, i = 0;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:77:2-77:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:87:2-87:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:122:2-122:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:576:2-576:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:228:2-228:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:269:2-269:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:314:2-314:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:791:2-791:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:859:2-859:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:1170:2-1170:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3434:2-3434:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3481:2-3481:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:217:2-217:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:325:2-325:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:378:2-378:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:844:2-844:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1292:2-1292:45: struct ionic_queue *q = &lif->hwstamp_txq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1328:2-1328:22: struct ionic_queue *q;
-
drivers/net/ethernet/renesas/ravb_main.c:1178:3-1178:7: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1284:2-1284:23: int q = napi - priv->napi;
-
drivers/net/ethernet/renesas/ravb_main.c:1553:2-1553:6: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1928:2-1928:35: u16 q = skb_get_queue_mapping(skb);
-
drivers/net/ethernet/renesas/ravb_main.c:2623:2-2623:18: int error, irq, q;
-
drivers/net/ethernet/sfc/siena/tx.c:115:2-115:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/sfc/tx.c:298:2-298:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:545:2-545:6: int q, stat;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:720:2-720:6: int q, stat;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:6916:2-6916:6: int q;
-
drivers/net/ethernet/ti/davinci_emac.c:1417:2-1417:6: int q, m, ret;
-
drivers/net/phy/sfp.c:510:2-510:26: const struct sfp_quirk *q;
-
drivers/net/ppp/ppp_generic.c:1924:2-1924:21: unsigned char *p, *q;
-
drivers/net/tap.c:302:2-302:20: struct tap_queue *q, *tmp;
-
drivers/net/tap.c:325:2-325:20: struct tap_queue *q;
-
drivers/net/tap.c:506:2-506:24: struct tap_queue *q = container_of(sk, struct tap_queue, sk);
-
drivers/net/tap.c:515:2-515:20: struct tap_queue *q;
-
drivers/net/tap.c:580:2-580:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:587:2-587:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:780:2-780:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:900:2-900:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:933:2-933:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1011:2-1011:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1234:2-1234:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1255:2-1255:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1272:2-1272:24: struct tap_queue *q = container_of(sock, struct tap_queue,
-
drivers/net/tap.c:1290:2-1290:20: struct tap_queue *q;
-
drivers/net/tap.c:1302:2-1302:20: struct tap_queue *q;
-
drivers/net/tap.c:1316:2-1316:20: struct tap_queue *q;
-
drivers/net/usb/catc.c:472:2-472:50: struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
-
drivers/net/usb/catc.c:501:2-501:21: struct ctrl_queue *q;
-
drivers/net/usb/catc.c:536:2-536:21: struct ctrl_queue *q;
-
drivers/net/wireless/ath/ath10k/mac.c:3957:2-3957:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath11k/mac.c:6135:2-6135:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath12k/mac.c:4879:2-4879:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath9k/mac.c:137:2-137:9: int i, q;
-
drivers/net/wireless/ath/ath9k/mac.c:298:2-298:6: int q;
-
drivers/net/wireless/ath/ath9k/xmit.c:111:2-111:22: struct sk_buff_head q;
-
drivers/net/wireless/ath/ath9k/xmit.c:215:2-215:14: int q = fi->txq;
-
drivers/net/wireless/ath/ath9k/xmit.c:245:2-245:6: int q, ret;
-
drivers/net/wireless/ath/ath9k/xmit.c:809:2-809:20: int q = tid->txq->mac80211_qnum;
-
drivers/net/wireless/ath/ath9k/xmit.c:2346:2-2346:6: int q, ret;
-
drivers/net/wireless/ath/carl9170/tx.c:663:2-663:21: unsigned int r, t, q;
-
drivers/net/wireless/ath/carl9170/tx.c:1278:2-1278:14: uint8_t q = 0;
-
drivers/net/wireless/ath/carl9170/tx.c:1344:2-1344:18: unsigned int i, q;
-
drivers/net/wireless/ath/wil6210/netdev.c:232:2-232:7: bool q;
-
drivers/net/wireless/ath/wil6210/txrx.c:838:2-838:11: bool q = false;
-
drivers/net/wireless/ath/wil6210/wmi.c:1931:3-1931:8: bool q;
-
drivers/net/wireless/broadcom/b43/phy_g.c:2336:2-2336:23: s32 m1, m2, f = 256, q, delta;
-
drivers/net/wireless/broadcom/b43/pio.c:49:2-49:30: struct b43_pio_txqueue *q = NULL;
-
drivers/net/wireless/broadcom/b43/pio.c:126:2-126:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:162:2-162:26: struct b43_pio_rxqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:290:2-290:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:352:2-352:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:422:2-422:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:491:2-491:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:566:2-566:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/sdio.c:39:2-39:31: const struct b43_sdio_quirk *q;
-
drivers/net/wireless/broadcom/b43legacy/phy.c:1947:2-1947:6: s32 q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:49:2-49:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:68:2-68:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:86:2-86:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:109:2-109:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:126:2-126:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:143:2-143:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:222:2-222:23: struct sk_buff_head *q;
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4304:2-4304:42: struct ipw2100_status_queue *q = &priv->status_queue;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3796:2-3796:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:4965:2-4965:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:4994:2-4994:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:10053:2-10053:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:11721:2-11721:24: struct list_head *p, *q;
-
drivers/net/wireless/intel/iwlegacy/3945-mac.c:453:2-453:23: struct il_queue *q = NULL;
-
drivers/net/wireless/intel/iwlegacy/3945.c:275:2-275:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/3945.c:601:2-601:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:1651:2-1651:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2389:2-2389:40: struct il_queue *q = &il->txq[txq_id].q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2455:2-2455:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:3957:2-3957:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2750:2-2750:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2812:2-2812:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3117:2-3117:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3238:2-3238:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4489:2-4489:6: int q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4759:3-4759:20: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4789:2-4789:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/debug.c:818:2-818:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c:1163:2-1163:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:462:2-462:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:681:2-681:6: int q, fifo;
-
drivers/net/wireless/intel/iwlwifi/iwl-io.c:262:2-262:9: int i, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c:4217:4-4217:13: int tid, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/sta.c:1782:3-1782:7: int q;
-
drivers/net/wireless/marvell/mwl8k.c:5384:4-5384:38: int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
-
drivers/net/wireless/mediatek/mt76/debugfs.c:61:3-61:41: struct mt76_queue *q = dev->phy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/debugfs.c:81:3-81:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/dma.c:761:2-761:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/dma.c:978:3-978:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/mac80211.c:844:2-844:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:1107:2-1107:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:1121:2-1121:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c:73:2-73:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:462:4-462:43: struct mt76_queue *q = dev->mphy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:928:2-928:43: struct mt76_queue *q = dev->mphy.q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:1547:2-1547:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c:405:3-405:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:18:2-18:50: struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:172:2-172:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:346:2-346:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c:938:3-938:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c:2466:3-2466:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c:597:2-597:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c:725:2-725:19: enum mt76_rxq_id q;
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:99:4-99:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:534:3-534:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:560:3-560:45: struct ieee80211_he_mu_edca_param_ac_rec *q;
-
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c:112:3-112:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c:710:3-710:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7996/mac.c:166:4-166:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c:2674:3-2674:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/sdio.c:306:2-306:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio.c:325:2-325:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:345:2-345:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:614:3-614:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:84:2-84:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:369:2-369:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/testmode.c:37:2-37:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:304:2-304:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:335:2-335:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:523:2-523:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:729:2-729:36: struct mt76_queue *q = phy->q_tx[0];
-
drivers/net/wireless/mediatek/mt76/usb.c:558:2-558:30: struct mt76_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt76/usb.c:643:2-643:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:663:2-663:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:728:3-728:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:742:3-742:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:764:2-764:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:931:2-931:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:973:3-973:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:997:3-997:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:173:2-173:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:194:2-194:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:241:2-241:36: struct mt7601u_tx_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:312:2-312:44: struct mt7601u_tx_queue *q = &dev->tx_q[ep];
-
drivers/net/wireless/microchip/wilc1000/wlan.c:291:2-291:40: struct wilc_tx_queue_status *q = &wl->tx_q_limit;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:182:2-182:27: struct sk_buff_head *q = NULL;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:349:2-349:23: struct sk_buff_head *q;
-
drivers/net/wireless/realtek/rtw88/mac.c:1042:2-1042:6: u32 q;
-
drivers/net/wireless/realtek/rtw88/pci.c:738:2-738:5: u8 q;
-
drivers/net/wireless/ti/wlcore/main.c:1208:2-1208:6: int q, mapping;
-
drivers/net/wireless/ti/wlcore/main.c:1275:2-1275:6: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:485:2-485:14: int i, q = -1, ac;
-
drivers/net/wireless/ti/wlcore/tx.c:658:3-658:7: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:676:2-676:56: int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:489:2-489:33: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:582:3-582:34: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:965:2-965:23: struct sk_buff_head *q;
-
drivers/net/wireless/zydas/zd1211rw/zd_usb.c:1059:2-1059:32: struct sk_buff_head *q = &tx->submitted_skbs;
-
drivers/nvdimm/pmem.c:461:2-461:24: struct request_queue *q;
-
drivers/nvme/host/apple.c:736:2-736:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/apple.c:786:2-786:36: struct apple_nvme_queue *q = set->driver_data;
-
drivers/nvme/host/apple.c:879:2-879:36: struct apple_nvme_queue *q = iod->q;
-
drivers/nvme/host/apple.c:939:2-939:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/auth.c:66:2-66:28: struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
-
drivers/nvme/host/auth.c:894:2-894:11: int ret, q;
-
drivers/nvme/host/fc.c:2483:2-2483:6: int q;
-
drivers/nvme/host/ioctl.c:170:2-170:33: struct request_queue *q = req->q;
-
drivers/nvme/host/ioctl.c:561:2-561:51: struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
-
drivers/nvme/host/pci.c:2395:2-2395:45: struct request_queue *q = nvmeq->dev->ctrl.admin_q;
-
drivers/nvme/host/zns.c:12:2-12:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:51:2-51:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:123:2-123:38: struct request_queue *q = ns->disk->queue;
-
drivers/nvme/target/passthru.c:295:2-295:34: struct request_queue *q = ctrl->admin_q;
-
drivers/of/fdt.c:1014:2-1014:18: const char *p, *q, *options = NULL;
-
drivers/parport/probe.c:56:2-56:18: char *p = txt, *q;
-
drivers/pcmcia/cistpl.c:663:2-663:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:795:2-795:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:812:2-812:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:824:2-824:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1081:2-1081:14: u_char *p, *q, features;
-
drivers/pcmcia/cistpl.c:1204:2-1204:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1228:2-1228:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1249:2-1249:14: u_char *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:110:2-110:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:134:2-134:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:1042:2-1042:27: struct resource_map *p, *q;
-
drivers/platform/chrome/wilco_ec/event.c:107:2-107:25: struct ec_event_queue *q;
-
drivers/platform/surface/aggregator/ssh_packet_layer.c:700:2-700:21: struct ssh_packet *q;
-
drivers/scsi/aacraid/commsup.c:365:2-365:21: struct aac_queue * q;
-
drivers/scsi/aacraid/commsup.c:656:6-656:65: struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/commsup.c:879:2-879:21: struct aac_queue * q;
-
drivers/scsi/aacraid/dpcsup.c:278:3-278:61: struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:400:2-400:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:423:2-423:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/src.c:486:2-486:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/be2iscsi/be_main.c:3462:2-3462:24: struct be_queue_info *q;
-
drivers/scsi/be2iscsi/be_main.c:3522:2-3522:24: struct be_queue_info *q, *cq;
-
drivers/scsi/be2iscsi/be_main.c:3632:2-3632:24: struct be_queue_info *q;
-
drivers/scsi/bfa/bfa_core.c:1318:2-1318:7: int q;
-
drivers/scsi/bfa/bfa_core.c:1474:2-1474:6: int q, per_reqq_sz, per_rspq_sz;
-
drivers/scsi/csiostor/csio_isr.c:428:4-428:50: struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
-
drivers/scsi/csiostor/csio_wr.c:191:2-191:17: struct csio_q *q, *flq;
-
drivers/scsi/csiostor/csio_wr.c:747:2-747:51: struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:765:2-765:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:789:2-789:17: struct csio_q *q;
-
drivers/scsi/csiostor/csio_wr.c:867:2-867:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:985:2-985:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:1690:2-1690:17: struct csio_q *q;
-
drivers/scsi/elx/efct/efct_hw_queues.c:406:2-406:15: struct hw_q *q;
-
drivers/scsi/elx/libefc_sli/sli4.c:4117:2-4117:18: enum sli4_qtype q;
-
drivers/scsi/esas2r/esas2r_flash.c:331:2-331:10: u8 *p, *q;
-
drivers/scsi/hpsa.c:7017:2-7017:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7038:2-7038:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7054:2-7054:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7073:2-7073:17: u8 q = *(u8 *) queue;
-
drivers/scsi/ips.c:2530:2-2530:20: struct scsi_cmnd *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:556:2-556:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:603:2-603:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1886:2-1886:23: struct enode *node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1989:2-1989:29: struct enode *list_node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:2159:2-2159:26: struct edb_node *node, *q;
-
drivers/scsi/qla2xxx/qla_init.c:5607:2-5607:10: __be32 *q;
-
drivers/scsi/qla2xxx/qla_os.c:5154:2-5154:11: bool q = false;
-
drivers/scsi/qla2xxx/qla_os.c:7533:3-7533:12: bool q = false;
-
drivers/scsi/scsi_ioctl.c:866:2-866:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:458:2-458:24: struct request_queue *q;
-
drivers/scsi/scsi_lib.c:535:2-535:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:1709:2-1709:33: struct request_queue *q = req->q;
-
drivers/scsi/scsi_lib.c:2630:2-2630:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_scan.c:283:2-283:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4166:2-4166:35: struct request_queue *q = rport->rqst_q;
-
drivers/scsi/scsi_transport_fc.c:4279:2-4279:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4314:2-4314:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_iscsi.c:1538:2-1538:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:192:2-192:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:252:2-252:54: struct request_queue *q = to_sas_host_attrs(shost)->q;
-
drivers/scsi/sd.c:825:2-825:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:1002:2-1002:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3106:2-3106:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3411:2-3411:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:204:2-204:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:831:2-831:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sd_zbc.c:915:2-915:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sg.c:287:2-287:24: struct request_queue *q;
-
drivers/scsi/sg.c:1434:2-1434:36: struct request_queue *q = scsidp->request_queue;
-
drivers/scsi/sg.c:1570:2-1570:41: struct request_queue *q = sdp->device->request_queue;
-
drivers/scsi/sg.c:1738:2-1738:51: struct request_queue *q = sfp->parentdp->device->request_queue;
-
drivers/scsi/sym53c8xx_2/sym_malloc.c:97:2-97:11: m_link_p q;
-
drivers/spi/spi-fsl-qspi.c:341:2-341:23: struct fsl_qspi *q = dev_id;
-
drivers/spi/spi-fsl-qspi.c:370:2-370:70: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
-
drivers/spi/spi-fsl-qspi.c:643:2-643:70: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
-
drivers/spi/spi-fsl-qspi.c:705:2-705:70: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
-
drivers/spi/spi-fsl-qspi.c:811:2-811:70: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
-
drivers/spi/spi-fsl-qspi.c:848:2-848:19: struct fsl_qspi *q;
-
drivers/spi/spi-fsl-qspi.c:952:2-952:48: struct fsl_qspi *q = platform_get_drvdata(pdev);
-
drivers/spi/spi-fsl-qspi.c:970:2-970:42: struct fsl_qspi *q = dev_get_drvdata(dev);
-
drivers/spi/spi-pxa2xx.c:816:2-816:16: unsigned long q, q1, q2;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:212:2-212:22: ia_css_queue_t *q = NULL;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:331:2-331:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:359:2-359:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:389:2-389:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:409:2-409:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:431:2-431:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:451:2-451:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:470:2-470:18: ia_css_queue_t *q;
-
drivers/staging/media/deprecated/atmel/atmel-isc-base.c:1849:2-1849:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/staging/media/ipu3/ipu3-css.c:1062:2-1063:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1076:2-1077:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1114:2-1115:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1357:2-1357:6: int q, r, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1394:2-1394:6: int q;
-
drivers/staging/media/ipu3/ipu3-css.c:1425:2-1425:18: unsigned int p, q, i;
-
drivers/staging/media/ipu3/ipu3-css.c:1467:2-1467:18: unsigned int p, q, i, abi_buf_num;
-
drivers/staging/media/ipu3/ipu3-css.c:1504:2-1504:9: int r, q, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1700:2-1700:25: struct imgu_css_queue *q;
-
drivers/staging/media/omap4iss/iss_video.c:1088:2-1088:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c:1848:2-1848:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1300:2-1300:20: struct list_head *q, *buf_head;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1350:2-1350:20: struct list_head *q, *buf_head;
-
drivers/target/target_core_device.c:706:3-706:27: struct se_device_queue *q;
-
drivers/target/target_core_iblock.c:93:2-93:24: struct request_queue *q;
-
drivers/target/target_core_pscsi.c:286:2-286:32: struct request_queue *q = sd->request_queue;
-
drivers/thunderbolt/quirks.c:106:3-106:42: const struct tb_quirk *q = &tb_quirks[i];
-
drivers/tty/vt/consolemap.c:330:2-330:31: struct uni_pagedict *p, *q = NULL;
-
drivers/tty/vt/vt.c:609:3-609:12: u16 *q = p;
-
drivers/tty/vt/vt.c:715:3-715:12: u16 *q = p;
-
drivers/ufs/core/ufs_bsg.c:242:2-242:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:5098:2-5098:34: struct request_queue *q = sdev->request_queue;
-
drivers/ufs/core/ufshcd.c:6292:2-6292:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:6907:2-6907:33: struct request_queue *q = hba->tmf_queue;
-
drivers/usb/core/devio.c:685:2-685:24: struct list_head *p, *q, hitlist;
-
drivers/usb/fotg210/fotg210-hcd.c:3338:2-3338:51: union fotg210_shadow *q = &fotg210->pshadow[frame];
-
drivers/usb/fotg210/fotg210-hcd.c:4582:2-4582:23: union fotg210_shadow q, *q_p;
-
drivers/usb/host/ehci-sched.c:2359:2-2359:20: union ehci_shadow q, *q_p;
-
drivers/usb/host/oxu210hp-hcd.c:2270:2-2270:44: union ehci_shadow *q = &oxu->pshadow[frame];
-
drivers/usb/host/oxu210hp-hcd.c:2692:3-2692:21: union ehci_shadow q, *q_p;
-
drivers/video/fbdev/aty/mach64_ct.c:209:2-209:6: u32 q;
-
drivers/video/fbdev/aty/mach64_ct.c:405:2-405:6: u32 q, memcntl, trp;
-
drivers/video/fbdev/hgafb.c:282:2-282:20: void __iomem *p, *q;
-
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c:414:2-414:20: unsigned itc, ec, q, sc;
-
drivers/xen/events/events_fifo.c:105:2-105:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:279:2-279:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:324:2-324:11: unsigned q;
-
drivers/xen/gntdev-dmabuf.c:678:2-678:24: struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
-
drivers/xen/gntdev-dmabuf.c:723:2-723:24: struct gntdev_dmabuf *q, *gntdev_dmabuf;
-
fs/afs/addr_list.c:136:3-136:15: const char *q, *stop;
-
fs/autofs/expire.c:101:2-101:17: struct dentry *q;
-
fs/ceph/caps.c:901:5-901:21: struct rb_node *q;
-
fs/configfs/dir.c:1608:2-1608:37: struct list_head *p, *q = &cursor->s_sibling;
-
fs/dcache.c:1906:2-1906:14: struct qstr q;
-
fs/efivarfs/super.c:132:2-132:14: struct qstr q;
-
fs/erofs/zdata.c:1550:2-1550:34: struct z_erofs_decompressqueue *q;
-
fs/erofs/zdata.c:1601:2-1601:43: struct z_erofs_decompressqueue *q = bio->bi_private;
-
fs/erofs/zdata.c:1631:2-1631:48: struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-
fs/ext4/namei.c:813:2-813:38: struct dx_entry *at, *entries, *p, *q, *m;
-
fs/ext4/namei.c:1354:2-1354:27: struct dx_map_entry *p, *q, *top = map + count - 1;
-
fs/f2fs/checkpoint.c:1809:2-1809:32: wait_queue_head_t *q = &cprc->ckpt_wait_queue;
-
fs/f2fs/segment.c:554:2-554:31: wait_queue_head_t *q = &fcc->flush_wait_queue;
-
fs/f2fs/segment.c:1878:2-1878:31: wait_queue_head_t *q = &dcc->discard_wait_queue;
-
fs/fs_context.c:460:3-462:31: char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
-
fs/fs_pin.c:88:3-88:22: struct hlist_node *q;
-
fs/gfs2/quota.c:849:2-849:20: struct gfs2_quota q;
-
fs/gfs2/quota.c:997:2-997:20: struct gfs2_quota q;
-
fs/hpfs/alloc.c:122:2-122:14: unsigned i, q;
-
fs/hpfs/ea.c:289:4-289:44: secno q = hpfs_alloc_sector(s, fno, 1, 0);
-
fs/inode.c:2423:2-2423:2: DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
fs/jffs2/compr_rubin.c:202:2-202:35: unsigned long p = rs->p, q = rs->q;
-
fs/namespace.c:1970:2-1970:26: struct mount *res, *p, *q, *r, *parent;
-
fs/namespace.c:2367:3-2367:17: struct mount *q;
-
fs/namespace.c:3749:2-3749:20: struct mount *p, *q;
-
fs/nfs/nfs4proc.c:7477:2-7477:31: wait_queue_head_t *q = &clp->cl_lock_waitq;
-
fs/proc/base.c:504:4-504:8: int q;
-
fs/proc/bootconfig.c:31:2-31:7: char q;
-
fs/smb/client/cached_dir.c:462:2-462:28: struct cached_fid *cfid, *q;
-
fs/smb/client/cached_dir.c:566:2-566:33: struct cached_dirent *dirent, *q;
-
fs/smb/client/cached_dir.c:588:2-588:28: struct cached_fid *cfid, *q;
-
fs/smb/client/cached_dir.c:653:2-653:28: struct cached_fid *cfid, *q;
-
fs/ufs/inode.c:131:2-131:26: Indirect chain[4], *q = chain;
-
fs/xfs/xfs_dquot.c:73:2-73:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_dquot.c:183:2-183:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_qm_syscalls.c:279:2-279:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_quotaops.c:60:2-60:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_trans_dquot.c:626:2-626:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
include/crypto/aria.h:436:2-436:21: int q = 4 - (n / 32);
-
include/linux/blkdev.h:1222:2-1222:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1291:2-1291:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/net/pkt_cls.h:171:2-171:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/pkt_cls.h:195:2-195:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/sch_generic.h:543:2-543:20: struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
-
include/net/sch_generic.h:773:3-773:27: const struct Qdisc *q = rcu_dereference(txq->qdisc);
-
init/initramfs.c:87:2-87:20: struct hash **p, *q;
-
init/initramfs.c:114:2-114:20: struct hash **p, *q;
-
ipc/sem.c:285:2-285:20: struct sem_queue *q, *tq;
-
ipc/sem.c:857:2-857:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:951:2-951:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:1110:2-1110:20: struct sem_queue *q;
-
ipc/sem.c:1146:2-1146:20: struct sem_queue *q, *tq;
-
kernel/audit_tree.c:611:2-611:24: struct list_head *p, *q;
-
kernel/auditsc.c:274:2-274:26: struct audit_tree_refs *q;
-
kernel/auditsc.c:302:2-302:30: struct audit_tree_refs *p, *q;
-
kernel/bpf/cpumap.c:670:2-670:19: struct ptr_ring *q;
-
kernel/cgroup/pids.c:160:2-160:26: struct pids_cgroup *p, *q;
-
kernel/crash_core.c:219:3-219:9: char *q;
-
kernel/events/uprobes.c:319:2-319:26: struct list_head *pos, *q;
-
kernel/events/uprobes.c:1325:2-1325:26: struct list_head *pos, *q;
-
kernel/futex/pi.c:936:2-936:21: struct futex_q q = futex_q_init;
-
kernel/futex/requeue.c:770:2-770:21: struct futex_q q = futex_q_init;
-
kernel/futex/waitwake.c:437:3-437:30: struct futex_q *q = &vs[i].q;
-
kernel/futex/waitwake.c:637:2-637:21: struct futex_q q = futex_q_init;
-
kernel/latencytop.c:123:3-123:7: int q, same = 1;
-
kernel/latencytop.c:180:2-180:9: int i, q;
-
kernel/latencytop.c:253:4-253:8: int q;
-
kernel/ptrace.c:735:2-735:19: struct sigqueue *q;
-
kernel/signal.c:417:2-417:23: struct sigqueue *q = NULL;
-
kernel/signal.c:465:2-465:19: struct sigqueue *q;
-
kernel/signal.c:494:2-494:19: struct sigqueue *q, *n;
-
kernel/signal.c:577:2-577:19: struct sigqueue *q, *first = NULL;
-
kernel/signal.c:720:2-720:19: struct sigqueue *q, *sync = NULL;
-
kernel/signal.c:794:2-794:19: struct sigqueue *q, *n;
-
kernel/signal.c:1088:2-1088:19: struct sigqueue *q;
-
kernel/trace/blktrace.c:732:2-732:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:977:2-977:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1006:2-1006:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1768:2-1768:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:1802:2-1802:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/trace.c:4020:2-4020:8: char *q;
-
kernel/trace/trace_boot.c:564:2-564:8: char *q;
-
kernel/trace/trace_events_filter.c:1514:2-1514:7: char q;
-
kernel/trace/trace_events_filter.c:1820:3-1820:17: char q = str[i];
-
kernel/trace/trace_events_inject.c:105:3-105:17: char q = str[i];
-
kernel/watch_queue.c:312:2-312:28: struct watch_type_filter *q;
-
lib/bch.c:875:2-875:37: struct gf_poly *q = bch->poly_2t[1];
-
lib/bootconfig.c:851:2-851:12: char *p, *q;
-
lib/crc32.c:82:2-82:6: u32 q;
-
lib/crypto/curve25519-hacl64.c:34:2-34:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
lib/crypto/curve25519-hacl64.c:764:2-764:7: u64 *q;
-
lib/crypto/mpi/mpih-div.c:248:5-248:16: mpi_limb_t q;
-
lib/crypto/mpi/mpih-div.c:315:5-315:16: mpi_limb_t q;
-
lib/raid6/avx2.c:37:2-37:10: u8 *p, *q;
-
lib/raid6/avx2.c:86:2-86:10: u8 *p, *q;
-
lib/raid6/avx2.c:144:2-144:10: u8 *p, *q;
-
lib/raid6/avx2.c:196:2-196:10: u8 *p, *q;
-
lib/raid6/avx2.c:276:2-276:10: u8 *p, *q;
-
lib/raid6/avx2.c:357:2-357:10: u8 *p, *q;
-
lib/raid6/avx512.c:47:2-47:10: u8 *p, *q;
-
lib/raid6/avx512.c:105:2-105:10: u8 *p, *q;
-
lib/raid6/avx512.c:174:2-174:10: u8 *p, *q;
-
lib/raid6/avx512.c:237:2-237:10: u8 *p, *q;
-
lib/raid6/avx512.c:333:2-333:10: u8 *p, *q;
-
lib/raid6/avx512.c:427:2-427:10: u8 *p, *q;
-
lib/raid6/recov.c:22:2-22:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov.c:66:2-66:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx2.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx2.c:189:2-189:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx512.c:27:2-27:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx512.c:230:2-230:10: u8 *p, *q, *dq;
-
lib/raid6/recov_ssse3.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_ssse3.c:194:2-194:10: u8 *p, *q, *dq;
-
lib/raid6/sse2.c:39:2-39:10: u8 *p, *q;
-
lib/raid6/sse2.c:91:2-91:10: u8 *p, *q;
-
lib/raid6/sse2.c:149:2-149:10: u8 *p, *q;
-
lib/raid6/sse2.c:202:2-202:10: u8 *p, *q;
-
lib/raid6/sse2.c:281:2-281:10: u8 *p, *q;
-
lib/raid6/sse2.c:368:2-368:10: u8 *p, *q;
-
lib/reed_solomon/decode_rs.c:23:2-23:14: uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
-
lib/string_helpers.c:180:2-180:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:208:2-208:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:227:2-227:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:251:2-251:24: char *p = *dst, *q = *src;
-
lib/test_hexdump.c:99:3-99:26: const char *q = *result++;
-
lib/ts_kmp.c:45:2-45:22: unsigned int i, q = 0, text_len, consumed = state->offset;
-
lib/ts_kmp.c:77:2-77:18: unsigned int k, q;
-
lib/vsprintf.c:223:2-223:11: unsigned q;
-
lib/vsprintf.c:265:2-265:11: unsigned q;
-
mm/filemap.c:1135:2-1135:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1225:2-1225:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1383:2-1383:21: wait_queue_head_t *q;
-
mm/filemap.c:1477:2-1477:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1643:2-1643:51: struct wait_queue_head *q = folio_waitqueue(folio);
-
mm/z3fold.c:630:3-630:9: void *q;
-
net/atm/lec.c:870:2-870:6: int q;
-
net/bluetooth/hci_core.c:3336:2-3336:11: int cnt, q;
-
net/core/dev.c:2376:3-2376:40: int q = netdev_get_prio_tc_map(dev, i);
-
net/core/dev.c:3149:3-3149:21: struct Qdisc *q = rcu_dereference(txq->qdisc);
-
net/core/dev.c:3160:3-3160:17: struct Qdisc *q;
-
net/core/dev.c:4281:2-4281:16: struct Qdisc *q;
-
net/core/dev.c:5187:4-5187:22: struct Qdisc *q = head;
-
net/core/pktgen.c:3373:2-3373:20: struct list_head *q, *n;
-
net/core/pktgen.c:3395:2-3395:20: struct list_head *q, *n;
-
net/core/pktgen.c:3876:2-3876:20: struct list_head *q, *n;
-
net/core/pktgen.c:3972:2-3972:20: struct list_head *q, *n;
-
net/core/skbuff.c:1652:2-1652:23: struct sk_buff_head *q;
-
net/core/skbuff.c:5147:2-5147:32: struct sk_buff_head *q = &sk->sk_error_queue;
-
net/ieee802154/6lowpan/reassembly.c:70:2-70:26: struct inet_frag_queue *q;
-
net/ipv4/af_inet.c:1950:2-1950:23: struct inet_protosw *q;
-
net/ipv4/inet_fragment.c:255:2-255:30: struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
-
net/ipv4/inet_fragment.c:316:2-316:26: struct inet_frag_queue *q;
-
net/ipv4/inet_fragment.c:338:2-338:26: struct inet_frag_queue *q;
-
net/ipv4/ip_fragment.c:216:2-216:26: struct inet_frag_queue *q;
-
net/ipv4/tcp_fastopen.c:62:2-62:25: struct fastopen_queue *q;
-
net/ipv4/tcp_output.c:1055:2-1055:20: struct list_head *q, *n;
-
net/ipv6/mcast.c:1512:2-1512:22: struct sk_buff_head q;
-
net/ipv6/mcast.c:1616:2-1616:22: struct sk_buff_head q;
-
net/ipv6/netfilter/nf_conntrack_reasm.c:156:2-156:26: struct inet_frag_queue *q;
-
net/ipv6/reassembly.c:93:2-93:26: struct inet_frag_queue *q;
-
net/mac80211/debugfs.c:569:2-569:6: int q, res = 0;
-
net/mac80211/ethtool.c:79:2-79:9: int i, q;
-
net/mac80211/mlme.c:2336:2-2336:6: int q;
-
net/mac80211/tx.c:1683:3-1683:17: int q = info->hw_queue;
-
net/mac80211/tx.c:3797:2-3797:31: int q = vif->hw_queue[txq->ac];
-
net/mac80211/tx.c:4527:2-4527:16: int q = info->hw_queue;
-
net/netfilter/nfnetlink_queue.c:811:2-811:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:958:2-958:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:991:2-991:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1018:2-1018:53: struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
-
net/netfilter/nfnetlink_queue.c:1105:2-1105:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1216:2-1216:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1301:2-1301:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1465:2-1465:25: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1486:3-1486:26: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1551:2-1551:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1568:2-1568:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/xt_quota.c:29:2-29:41: struct xt_quota_info *q = (void *)par->matchinfo;
-
net/netfilter/xt_quota.c:48:2-48:33: struct xt_quota_info *q = par->matchinfo;
-
net/netfilter/xt_quota.c:64:2-64:39: const struct xt_quota_info *q = par->matchinfo;
-
net/rds/message.c:96:2-96:30: struct rds_msg_zcopy_queue *q;
-
net/rds/recv.c:601:2-601:39: struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
-
net/rose/rose_in.c:266:2-266:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/rxrpc/rxkad.c:871:2-871:10: u8 *p, *q, *name, *end;
-
net/sched/cls_api.c:2130:2-2130:16: struct Qdisc *q;
-
net/sched/cls_api.c:2363:2-2363:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2520:2-2520:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2723:2-2723:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2990:2-2990:16: struct Qdisc *q;
-
net/sched/cls_api.c:3118:2-3118:20: struct Qdisc *q = NULL;
-
net/sched/cls_flow.c:505:4-505:50: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_fw.c:77:3-77:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:133:2-133:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:177:2-177:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:205:2-205:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:263:2-263:16: struct Qdisc *q;
-
net/sched/sch_api.c:302:2-302:16: struct Qdisc *q;
-
net/sched/sch_api.c:321:2-321:16: struct Qdisc *q;
-
net/sched/sch_api.c:355:2-355:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:1077:2-1077:20: struct Qdisc *q = old;
-
net/sched/sch_api.c:1481:2-1481:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:1580:2-1580:16: struct Qdisc *q, *p;
-
net/sched/sch_api.c:1774:2-1774:16: struct Qdisc *q;
-
net/sched/sch_api.c:2000:3-2000:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:2078:2-2078:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:2253:2-2253:16: struct Qdisc *q;
-
net/sched/sch_cake.c:1504:2-1504:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1618:2-1618:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1658:2-1658:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1698:2-1698:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1913:2-1913:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1937:2-1937:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1948:2-1948:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2228:2-2228:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2299:2-2299:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2319:2-2319:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2408:2-2408:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2452:2-2452:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2489:2-2489:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2518:2-2518:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2573:2-2573:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2694:2-2694:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2704:2-2704:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2775:2-2775:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2850:2-2850:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2969:2-2969:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2986:2-2986:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:3059:2-3059:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:108:2-108:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:117:2-117:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:134:2-134:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:178:2-178:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:233:2-233:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:241:2-241:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:336:2-336:25: struct cbs_sched_data *q;
-
net/sched/sch_cbs.c:364:2-364:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:404:2-404:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:435:2-435:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:454:2-454:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:481:2-481:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:495:2-495:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:510:2-510:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:116:2-116:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:215:2-215:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:285:2-285:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:306:2-306:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:338:2-338:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:433:2-433:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:461:2-461:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:474:2-474:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:481:2-481:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:91:2-91:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:114:2-114:27: struct codel_sched_data *q;
-
net/sched/sch_codel.c:136:2-136:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:189:2-189:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:215:2-215:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:244:2-244:51: const struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:270:2-270:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:40:2-40:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:57:2-57:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:149:2-149:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:176:2-176:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:279:2-279:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:297:2-297:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:336:2-336:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:371:2-371:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:412:2-412:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:427:2-427:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:442:2-442:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:77:2-77:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:110:2-110:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:122:2-122:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:165:2-165:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:203:2-203:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:233:2-233:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:255:2-255:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:346:2-346:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:419:2-419:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:435:2-435:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:450:2-450:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:462:2-462:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:92:2-92:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:99:2-99:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:108:2-108:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:202:2-202:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:277:2-277:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:287:2-287:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:301:2-301:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:337:2-337:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:353:2-353:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:376:2-376:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:414:2-414:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:458:2-458:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:581:2-581:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:690:2-690:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:709:2-709:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:722:2-722:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:733:2-733:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_fifo.c:256:2-256:16: struct Qdisc *q;
-
net/sched/sch_fq.c:445:2-445:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:528:2-528:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:664:2-664:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:749:2-749:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:809:2-809:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:920:2-920:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:930:2-930:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:969:2-969:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:1010:2-1010:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:79:2-79:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:140:2-140:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:187:2-187:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:258:2-258:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:283:2-283:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:337:2-337:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:370:2-370:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:452:2-452:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:462:2-462:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:524:2-524:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:567:2-567:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:615:2-615:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:632:2-632:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:678:2-678:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:83:2-83:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:132:2-132:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:237:2-237:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:287:2-287:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:378:2-378:32: struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_fq_pie.c:412:2-412:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:465:2-465:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:500:2-500:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:525:2-525:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:544:2-544:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_generic.c:726:2-726:44: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:752:3-752:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:791:3-791:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:805:3-805:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:820:4-820:29: struct gnet_stats_queue *q;
-
net/sched/sch_generic.c:854:3-854:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:873:3-873:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:895:3-895:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:1044:2-1044:20: struct Qdisc *q = container_of(head, struct Qdisc, rcu);
-
net/sched/sch_generic.c:1310:3-1310:17: struct Qdisc *q;
-
net/sched/sch_gred.c:99:3-99:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:168:2-168:30: struct gred_sched_data *q = NULL;
-
net/sched/sch_gred.c:269:3-269:27: struct gred_sched_data *q;
-
net/sched/sch_gred.c:301:3-301:39: struct gred_sched_data *q = t->tab[i];
-
net/sched/sch_gred.c:334:4-334:44: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:488:2-488:43: struct gred_sched_data *q = table->tab[dp];
-
net/sched/sch_gred.c:791:3-791:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:807:3-807:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:857:3-857:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_hfsc.c:865:2-865:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:924:2-924:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1094:2-1094:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1107:2-1107:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1133:2-1133:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1259:2-1259:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1358:2-1358:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1377:2-1377:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1396:2-1396:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1439:2-1439:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1486:2-1486:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1501:2-1501:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1524:2-1524:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1588:2-1588:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:249:2-249:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:351:2-351:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:374:2-374:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:420:2-420:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:474:2-474:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:511:2-511:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:577:2-577:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:653:2-653:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:679:2-679:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:188:2-188:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:222:2-222:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:623:2-623:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:940:2-940:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:999:2-999:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1034:2-1034:24: struct htb_sched *q = container_of(work, struct htb_sched, work);
-
net/sched/sch_htb.c:1059:2-1059:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1163:2-1163:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1202:2-1202:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1212:2-1212:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1252:2-1252:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1331:2-1331:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1373:2-1373:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1454:2-1454:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1514:2-1514:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1553:2-1553:29: struct Qdisc *q = cl->leaf.q;
-
net/sched/sch_htb.c:1625:2-1625:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1702:2-1702:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1774:2-1774:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2090:2-2090:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2124:2-2124:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:51:2-51:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:65:2-65:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:72:2-72:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:80:2-80:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:114:2-114:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:201:2-201:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:215:2-215:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:222:2-222:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:229:2-229:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:236:2-236:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:244:2-244:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:292:2-292:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_mqprio.c:686:4-686:55: struct netdev_queue *q = netdev_get_tx_queue(dev, i);
-
net/sched/sch_multiq.c:32:2-32:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:89:2-89:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:120:2-120:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:151:2-151:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:162:2-162:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:174:2-174:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:238:2-238:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:263:2-263:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:283:2-283:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:296:2-296:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:304:2-304:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:326:2-326:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:336:2-336:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:349:2-349:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:364:2-364:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:371:2-371:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:389:2-389:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:446:2-446:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:687:2-687:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:766:2-766:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:959:2-959:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1070:2-1070:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1087:2-1087:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1148:2-1148:51: const struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1235:2-1235:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1249:2-1249:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1257:2-1257:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:88:2-88:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:141:2-141:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:422:2-422:29: struct pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_pie.c:441:2-441:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:463:2-463:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:494:2-494:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:519:2-519:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:531:2-531:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:539:2-539:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:90:2-90:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:103:2-103:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:125:2-125:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:161:2-161:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:33:2-33:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:99:2-99:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:113:2-113:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:134:2-134:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:168:2-168:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:179:2-179:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:232:2-232:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:264:2-264:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:289:2-289:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:319:2-319:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:327:2-327:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:348:2-348:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:358:2-358:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:372:2-372:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:387:2-387:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:207:2-207:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:381:2-381:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:405:2-405:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:532:2-532:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:543:2-543:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:570:2-570:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:664:2-664:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:682:2-682:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1087:2-1087:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1216:2-1216:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1418:2-1418:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1427:2-1427:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1465:2-1465:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1481:2-1481:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_red.c:73:2-73:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:153:2-153:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:170:2-170:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:178:2-178:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:186:2-186:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:215:2-215:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:238:2-238:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:322:2-322:29: struct red_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_red.c:338:2-338:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:372:2-372:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:411:2-411:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:448:2-448:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:474:2-474:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:499:2-499:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:512:2-512:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:283:2-283:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:428:2-428:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:446:2-446:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:456:2-456:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:468:2-468:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:493:2-493:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:556:2-556:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:569:2-569:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:598:2-598:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:622:2-622:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:633:2-633:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:670:2-670:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:166:2-166:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:295:2-295:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:348:2-348:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:482:2-482:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:537:2-537:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:607:2-607:29: struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
-
net/sched/sch_sfq.c:628:2-628:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:724:2-724:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:737:2-737:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:793:2-793:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:851:2-851:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:868:2-868:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:887:2-887:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:72:2-72:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:141:2-141:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:182:2-182:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:213:2-213:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:226:2-226:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:253:2-253:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:328:2-328:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:395:2-395:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:477:2-477:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:549:2-549:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:570:2-570:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:635:2-635:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:715:2-715:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:785:2-785:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:839:2-839:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:925:2-925:27: struct taprio_sched *q = container_of(timer, struct taprio_sched,
-
net/sched/sch_taprio.c:1216:2-1216:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1279:2-1279:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1328:2-1328:23: struct taprio_sched *q;
-
net/sched/sch_taprio.c:1619:2-1619:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1749:2-1749:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1851:2-1851:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2018:2-2018:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2033:2-2033:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2073:2-2073:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2139:2-2139:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2189:2-2189:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2391:2-2391:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:2453:2-2453:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:144:2-144:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:208:2-208:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:242:2-242:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:271:2-271:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:331:2-331:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:354:2-354:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:481:2-481:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:496:2-496:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:505:2-505:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:548:2-548:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:559:2-559:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:572:2-572:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:79:2-79:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:95:2-95:16: struct Qdisc *q;
-
net/sched/sch_teql.c:132:2-132:16: struct Qdisc *q, *prev;
-
net/sched/sch_teql.c:173:2-173:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:280:2-280:24: struct Qdisc *start, *q;
-
net/sched/sch_teql.c:357:2-357:16: struct Qdisc *q;
-
net/sched/sch_teql.c:417:2-417:16: struct Qdisc *q;
-
net/sctp/output.c:678:2-678:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/outqueue.c:385:2-385:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/socket.c:171:2-171:31: struct sctp_outq *q = &asoc->outqueue;
-
net/smc/smc_llc.c:1856:2-1856:34: struct smc_llc_qentry *qentry, *q;
-
net/sunrpc/auth_gss/auth_gss.c:179:2-179:14: const void *q;
-
net/sunrpc/auth_gss/auth_gss_internal.h:18:2-18:54: const void *q = (const void *)((const char *)p + len);
-
net/sunrpc/auth_gss/auth_gss_internal.h:28:2-28:14: const void *q;
-
net/sunrpc/rpc_pipe.c:634:2-634:18: struct qstr q = QSTR_INIT(name, strlen(name));
-
net/sunrpc/rpc_pipe.c:1304:2-1304:18: struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
-
net/sunrpc/sched.c:171:2-171:20: struct list_head *q;
-
net/sunrpc/sched.c:605:2-605:20: struct list_head *q;
-
net/sunrpc/xdr.c:1104:2-1104:10: __be32 *q;
-
net/sunrpc/xdr.c:1478:2-1478:18: __be32 *q = p + nwords;
-
net/x25/x25_in.c:418:2-418:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/xdp/xsk.c:947:2-947:20: struct xsk_queue *q;
-
net/xdp/xsk.c:1272:3-1272:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1328:3-1328:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1511:2-1511:24: struct xsk_queue *q = NULL;
-
net/xdp/xsk_queue.c:26:2-26:20: struct xsk_queue *q;
-
samples/v4l/v4l2-pci-skeleton.c:750:2-750:20: struct vb2_queue *q;
-
scripts/dtc/libfdt/fdt_ro.c:260:3-260:44: const char *q = memchr(path, '/', end - p);
-
scripts/dtc/libfdt/fdt_ro.c:274:3-274:15: const char *q;
-
security/integrity/evm/evm_main.c:938:2-938:26: struct list_head *pos, *q;
-
security/keys/keyctl_pkey.c:42:2-42:31: char *c = params->info, *p, *q;
-
security/selinux/hooks.c:2591:4-2591:14: char *p, *q;
-
security/selinux/hooks.c:3576:3-3576:15: struct qstr q;
-
sound/core/misc.c:114:2-114:30: const struct snd_pci_quirk *q;
-
sound/core/pcm_lib.c:560:2-560:15: unsigned int q;
-
sound/core/pcm_lib.c:827:3-827:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:866:3-866:23: unsigned int q = i->max;
-
sound/core/pcm_lib.c:943:3-943:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:975:3-975:23: unsigned int q = i->max;
-
sound/core/seq/oss/seq_oss_readq.c:35:2-35:24: struct seq_oss_readq *q;
-
sound/core/seq/oss/seq_oss_writeq.c:27:2-27:25: struct seq_oss_writeq *q;
-
sound/core/seq/seq_clientmgr.c:606:2-606:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1573:2-1573:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1605:2-1605:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1626:2-1626:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1659:2-1659:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1787:3-1787:25: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:71:2-71:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:98:2-98:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:170:2-170:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:189:2-189:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:205:2-205:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:222:2-222:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:303:2-303:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:388:2-388:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:408:2-408:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:475:2-475:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:538:2-538:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:559:2-559:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:592:2-592:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:608:2-608:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:707:2-707:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:737:2-737:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_timer.c:125:2-125:36: struct snd_seq_queue *q = timeri->callback_data;
-
sound/core/seq/seq_timer.c:480:2-480:24: struct snd_seq_queue *q;
-
sound/pci/ac97/ac97_codec.c:2942:2-2942:28: const struct quirk_table *q;
-
sound/pci/atiixp.c:551:2-551:30: const struct snd_pci_quirk *q;
-
sound/pci/emu10k1/memory.c:169:2-169:29: struct snd_emu10k1_memblk *q;
-
sound/pci/emu10k1/memory.c:457:2-457:29: struct snd_emu10k1_memblk *q;
-
sound/pci/hda/hda_auto_parser.c:981:2-981:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1528:2-1528:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1625:2-1625:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1666:2-1666:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:2231:3-2231:31: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_hdmi.c:2007:2-2007:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_realtek.c:1077:2-1077:43: const struct alc_codec_rename_pci_table *q;
-
sound/pci/hda/patch_realtek.c:1150:2-1150:30: const struct snd_pci_quirk *q;
-
sound/pci/nm256/nm256.c:1571:2-1571:30: const struct snd_pci_quirk *q;
-
sound/soc/codecs/tas2552.c:187:3-187:19: unsigned int d, q, t;