Symbol: q
function parameter
Defined in the following locations:
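
Most of the block-layer entries below declare q as a struct request_queue pointer. The helper here is a hypothetical sketch (its name and body are not taken from any file listed in this index); it only illustrates the common shape of such a definition site, using the standard blk_queue_dying() accessor from <linux/blkdev.h>:

	#include <linux/blkdev.h>

	/* Hypothetical example, not an indexed function: shows the usual
	 * shape of a definition site where q is a struct request_queue *.
	 */
	static bool example_queue_is_dying(struct request_queue *q)
	{
		return blk_queue_dying(q);
	}
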
-
arch/x86/crypto/curve25519-x86_64.c:764:34-764:39: static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
-
arch/x86/kernel/pci-iommu_table.c:10:6-10:32: struct iommu_table_entry *q)
-
arch/x86/lib/msr-smp.c:52:49-52:54: int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
arch/x86/lib/msr-smp.c:83:49-83:53: int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:209:54-209:58: int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:225:54-225:59: int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
block/bfq-cgroup.c:350:34-350:56: void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
-
block/bfq-cgroup.c:521:57-521:79: static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
-
block/bfq-iosched.c:403:6-403:28: struct request_queue *q)
-
block/bfq-iosched.c:2164:8-2164:30: struct request_queue *q)
-
block/bfq-iosched.c:2199:32-2199:54: static void bfq_remove_request(struct request_queue *q,
-
block/bfq-iosched.c:2293:30-2293:52: static int bfq_request_merge(struct request_queue *q, struct request **req,
-
block/bfq-iosched.c:2310:32-2310:54: static void bfq_request_merged(struct request_queue *q, struct request *req,
-
block/bfq-iosched.c:2367:33-2367:55: static void bfq_requests_merged(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:2885:33-2885:55: static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3311:33-3311:55: static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:4824:39-4824:61: static void bfq_update_dispatch_stats(struct request_queue *q,
-
block/bfq-iosched.c:5574:37-5574:59: static void bfq_update_insert_stats(struct request_queue *q,
-
block/bfq-iosched.c:6534:27-6534:49: static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-
block/bio.c:717:34-717:56: static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
-
block/bio.c:746:21-746:43: int bio_add_hw_page(struct request_queue *q, struct bio *bio,
-
block/bio.c:801:21-801:43: int bio_add_pc_page(struct request_queue *q, struct bio *bio,
-
block/blk-cgroup.c:58:34-58:56: static bool blkcg_policy_enabled(struct request_queue *q,
-
block/blk-cgroup.c:148:57-148:79: static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
-
block/blk-cgroup.c:202:11-202:33: struct request_queue *q, bool update_hint)
-
block/blk-cgroup.c:230:9-230:31: struct request_queue *q,
-
block/blk-cgroup.c:324:3-324:25: struct request_queue *q)
-
block/blk-cgroup.c:421:30-421:52: static void blkg_destroy_all(struct request_queue *q)
-
block/blk-cgroup.c:546:8-546:30: struct request_queue *q)
-
block/blk-cgroup.c:1156:22-1156:44: int blkcg_init_queue(struct request_queue *q)
-
block/blk-cgroup.c:1209:23-1209:45: void blkcg_exit_queue(struct request_queue *q)
-
block/blk-cgroup.c:1307:27-1307:49: int blkcg_activate_policy(struct request_queue *q,
-
block/blk-cgroup.c:1406:30-1406:52: void blkcg_deactivate_policy(struct request_queue *q,
-
block/blk-cgroup.c:1761:30-1761:52: void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
-
block/blk-core.c:81:44-81:66: void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:92:46-92:68: void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:106:53-106:75: bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:112:18-112:40: void blk_rq_init(struct request_queue *q, struct request *rq)
-
block/blk-core.c:300:21-300:43: void blk_sync_queue(struct request_queue *q)
-
block/blk-core.c:311:22-311:44: void blk_set_pm_only(struct request_queue *q)
-
block/blk-core.c:317:24-317:46: void blk_clear_pm_only(struct request_queue *q)
-
block/blk-core.c:338:20-338:42: void blk_put_queue(struct request_queue *q)
-
block/blk-core.c:344:26-344:48: void blk_set_queue_dying(struct request_queue *q)
-
block/blk-core.c:372:24-372:46: void blk_cleanup_queue(struct request_queue *q)
-
block/blk-core.c:431:21-431:43: int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
-
block/blk-core.c:495:21-495:43: void blk_queue_exit(struct request_queue *q)
-
block/blk-core.c:612:20-612:42: bool blk_get_queue(struct request_queue *q)
-
block/blk-core.c:629:33-629:55: struct request *blk_get_request(struct request_queue *q, unsigned int op,
-
block/blk-core.c:763:50-763:72: static inline blk_status_t blk_check_zone_append(struct request_queue *q,
-
block/blk-core.c:1139:48-1139:70: static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
-
block/blk-core.c:1184:40-1184:62: blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
-
block/blk-core.c:1546:18-1546:40: int blk_lld_busy(struct request_queue *q)
-
block/blk-crypto.c:356:34-356:56: bool blk_crypto_config_supported(struct request_queue *q,
-
block/blk-crypto.c:379:11-379:33: struct request_queue *q)
-
block/blk-crypto.c:399:26-399:48: int blk_crypto_evict_key(struct request_queue *q,
-
block/blk-flush.c:279:28-279:50: static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-
block/blk-integrity.c:27:31-27:53: int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
-
block/blk-integrity.c:68:29-68:51: int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-
block/blk-integrity.c:164:29-164:51: bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
-
block/blk-integrity.c:187:30-187:52: bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
-
block/blk-ioc.c:239:22-239:44: void ioc_clear_queue(struct request_queue *q)
-
block/blk-ioc.c:332:54-332:76: struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
-
block/blk-ioc.c:372:54-372:76: struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
-
block/blk-iocost.c:663:29-663:51: static struct ioc *q_to_ioc(struct request_queue *q)
-
block/blk-iocost.c:668:27-668:49: static const char *q_name(struct request_queue *q)
-
block/blk-iocost.c:2818:28-2818:50: static int blk_iocost_init(struct request_queue *q)
-
block/blk-iocost.c:2900:57-2900:79: static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
-
block/blk-iolatency.c:718:24-718:46: int blk_iolatency_init(struct request_queue *q)
-
block/blk-iolatency.c:939:10-939:32: struct request_queue *q,
-
block/blk-map.c:379:33-379:55: static struct bio *bio_map_kern(struct request_queue *q, void *data,
-
block/blk-map.c:461:34-461:56: static struct bio *bio_copy_kern(struct request_queue *q, void *data,
-
block/blk-map.c:576:25-576:47: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-
block/blk-map.c:618:21-618:43: int blk_rq_map_user(struct request_queue *q, struct request *rq,
-
block/blk-map.c:682:21-682:43: int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-
block/blk-merge.c:16:33-16:55: static inline bool bio_will_gap(struct request_queue *q,
-
block/blk-merge.c:62:42-62:64: static struct bio *blk_bio_discard_split(struct request_queue *q,
-
block/blk-merge.c:106:47-106:69: static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
-
block/blk-merge.c:120:45-120:67: static struct bio *blk_bio_write_same_split(struct request_queue *q,
-
block/blk-merge.c:144:40-144:62: static inline unsigned get_max_io_size(struct request_queue *q,
-
block/blk-merge.c:161:45-161:73: static inline unsigned get_max_segment_size(const struct request_queue *q,
-
block/blk-merge.c:197:29-197:57: static bool bvec_split_segs(const struct request_queue *q,
-
block/blk-merge.c:245:42-245:64: static struct bio *blk_bio_segment_split(struct request_queue *q,
-
block/blk-merge.c:414:33-414:55: static unsigned blk_bvec_map_sg(struct request_queue *q,
-
block/blk-merge.c:459:28-459:50: __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
-
block/blk-merge.c:479:30-479:52: static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:519:21-519:43: int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:609:39-609:61: static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
-
block/blk-merge.c:627:33-627:55: static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-
block/blk-merge.c:727:38-727:60: static struct request *attempt_merge(struct request_queue *q,
-
block/blk-merge.c:819:43-819:65: static struct request *attempt_back_merge(struct request_queue *q,
-
block/blk-merge.c:830:44-830:66: static struct request *attempt_front_merge(struct request_queue *q,
-
block/blk-merge.c:841:27-841:49: int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:974:56-974:78: static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
-
block/blk-merge.c:999:52-999:74: static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
-
block/blk-merge.c:1048:29-1048:51: bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:1086:25-1086:47: bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
-
block/blk-merge.c:1111:29-1111:51: bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-cpumap.c:19:34-19:44: unsigned int nr_queues, const int q)
-
block/blk-mq-debugfs.c:826:30-826:52: void blk_mq_debugfs_register(struct request_queue *q)
-
block/blk-mq-debugfs.c:859:32-859:54: void blk_mq_debugfs_unregister(struct request_queue *q)
-
block/blk-mq-debugfs.c:876:35-876:57: void blk_mq_debugfs_register_hctx(struct request_queue *q,
-
block/blk-mq-debugfs.c:899:36-899:58: void blk_mq_debugfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:908:38-908:60: void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:917:36-917:58: void blk_mq_debugfs_register_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:936:38-936:60: void blk_mq_debugfs_unregister_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:966:43-966:65: void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
-
block/blk-mq-debugfs.c:972:41-972:63: void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-
block/blk-mq-sched.c:347:31-347:53: bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-sched.c:381:36-381:58: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
-
block/blk-mq-sched.c:513:36-513:58: static int blk_mq_sched_alloc_tags(struct request_queue *q,
-
block/blk-mq-sched.c:535:40-535:62: static void blk_mq_sched_tags_teardown(struct request_queue *q)
-
block/blk-mq-sched.c:551:23-551:45: int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/blk-mq-sched.c:611:33-611:55: void blk_mq_sched_free_requests(struct request_queue *q)
-
block/blk-mq-sched.c:622:24-622:46: void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
-
block/blk-mq-sched.h:31:24-31:46: blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-sched.h:41:26-41:48: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sysfs.c:263:48-263:70: void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
-
block/blk-mq-sysfs.c:285:26-285:48: void blk_mq_sysfs_deinit(struct request_queue *q)
-
block/blk-mq-sysfs.c:297:24-297:46: void blk_mq_sysfs_init(struct request_queue *q)
-
block/blk-mq-sysfs.c:312:47-312:69: int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
-
block/blk-mq-sysfs.c:347:30-347:52: void blk_mq_sysfs_unregister(struct request_queue *q)
-
block/blk-mq-sysfs.c:363:27-363:49: int blk_mq_sysfs_register(struct request_queue *q)
-
block/blk-mq-tag.c:410:33-410:55: void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
-
block/blk-mq.c:115:31-115:53: unsigned int blk_mq_in_flight(struct request_queue *q,
-
block/blk-mq.c:125:26-125:48: void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
-
block/blk-mq.c:135:29-135:51: void blk_freeze_queue_start(struct request_queue *q)
-
block/blk-mq.c:149:31-149:53: void blk_mq_freeze_queue_wait(struct request_queue *q)
-
block/blk-mq.c:155:38-155:60: int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
-
block/blk-mq.c:168:23-168:45: void blk_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:181:26-181:48: void blk_mq_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:191:28-191:50: void blk_mq_unfreeze_queue(struct request_queue *q)
-
block/blk-mq.c:208:34-208:56: void blk_mq_quiesce_queue_nowait(struct request_queue *q)
-
block/blk-mq.c:223:27-223:49: void blk_mq_quiesce_queue(struct request_queue *q)
-
block/blk-mq.c:249:29-249:51: void blk_mq_unquiesce_queue(struct request_queue *q)
-
block/blk-mq.c:258:26-258:48: void blk_mq_wake_waiters(struct request_queue *q)
-
block/blk-mq.c:401:38-401:60: struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
-
block/blk-mq.c:429:43-429:65: struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-
block/blk-mq.c:825:31-825:53: void blk_mq_kick_requeue_list(struct request_queue *q)
-
block/blk-mq.c:831:37-831:59: void blk_mq_delay_kick_requeue_list(struct request_queue *q,
-
block/blk-mq.c:867:28-867:50: bool blk_mq_queue_inflight(struct request_queue *q)
-
block/blk-mq.c:1309:36-1309:58: static void blk_mq_release_budgets(struct request_queue *q,
-
block/blk-mq.c:1626:32-1626:54: static bool blk_mq_has_sqsched(struct request_queue *q)
-
block/blk-mq.c:1640:49-1640:71: static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
-
block/blk-mq.c:1663:27-1663:49: void blk_mq_run_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:1691:33-1691:55: void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
-
block/blk-mq.c:1721:27-1721:49: bool blk_mq_queue_stopped(struct request_queue *q)
-
block/blk-mq.c:1760:28-1760:50: void blk_mq_stop_hw_queues(struct request_queue *q)
-
block/blk-mq.c:1778:29-1778:51: void blk_mq_start_hw_queues(struct request_queue *q)
-
block/blk-mq.c:1798:37-1798:59: void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2590:30-2590:52: static void blk_mq_exit_hctx(struct request_queue *q,
-
block/blk-mq.c:2610:35-2610:57: static void blk_mq_exit_hw_queues(struct request_queue *q,
-
block/blk-mq.c:2638:29-2638:51: static int blk_mq_init_hctx(struct request_queue *q,
-
block/blk-mq.c:2669:19-2669:41: blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
-
block/blk-mq.c:2735:36-2735:58: static void blk_mq_init_cpu_queues(struct request_queue *q,
-
block/blk-mq.c:2798:32-2798:54: static void blk_mq_map_swqueue(struct request_queue *q)
-
block/blk-mq.c:2904:35-2904:57: static void queue_set_hctx_shared(struct request_queue *q, bool shared)
-
block/blk-mq.c:2931:38-2931:60: static void blk_mq_del_queue_tag_set(struct request_queue *q)
-
block/blk-mq.c:2948:10-2948:32: struct request_queue *q)
-
block/blk-mq.c:2969:30-2969:52: static int blk_mq_alloc_ctxs(struct request_queue *q)
-
block/blk-mq.c:3002:21-3002:43: void blk_mq_release(struct request_queue *q)
-
block/blk-mq.c:3088:31-3088:53: struct blk_mq_tag_set *set, struct request_queue *q,
-
block/blk-mq.c:3122:7-3122:29: struct request_queue *q)
-
block/blk-mq.c:3199:9-3199:31: struct request_queue *q,
-
block/blk-mq.c:3270:24-3270:46: void blk_mq_exit_queue(struct request_queue *q)
-
block/blk-mq.c:3520:31-3520:53: int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
-
block/blk-mq.c:3583:3-3583:25: struct request_queue *q)
-
block/blk-mq.c:3615:3-3615:25: struct request_queue *q)
-
block/blk-mq.c:3712:35-3712:57: static bool blk_poll_stats_enable(struct request_queue *q)
-
block/blk-mq.c:3721:37-3721:59: static void blk_mq_poll_stats_start(struct request_queue *q)
-
block/blk-mq.c:3745:40-3745:62: static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
-
block/blk-mq.c:3777:38-3777:60: static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-
block/blk-mq.c:3830:32-3830:54: static bool blk_mq_poll_hybrid(struct request_queue *q,
-
block/blk-mq.c:3867:14-3867:36: int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
-
block/blk-mq.h:91:59-91:81: static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-
block/blk-mq.h:104:54-104:76: static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-
block/blk-mq.h:133:51-133:73: static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-
block/blk-mq.h:145:49-145:71: static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-
block/blk-mq.h:190:47-190:69: static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
-
block/blk-mq.h:196:47-196:69: static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
-
block/blk-mq.h:271:44-271:66: static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
-
block/blk-pm.c:31:26-31:48: void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-
block/blk-pm.c:61:29-61:51: int blk_pre_runtime_suspend(struct request_queue *q)
-
block/blk-pm.c:122:31-122:53: void blk_post_runtime_suspend(struct request_queue *q, int err)
-
block/blk-pm.c:152:29-152:51: void blk_pre_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:176:30-176:52: void blk_post_runtime_resume(struct request_queue *q, int err)
-
block/blk-pm.c:207:29-207:51: void blk_set_runtime_active(struct request_queue *q)
-
block/blk-pm.h:9:54-9:76: static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
-
block/blk-rq-qos.c:295:18-295:40: void rq_qos_exit(struct request_queue *q)
-
block/blk-rq-qos.h:60:40-60:62: static inline struct rq_qos *rq_qos_id(struct request_queue *q,
-
block/blk-rq-qos.h:71:41-71:63: static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:76:43-76:65: static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:100:31-100:53: static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-
block/blk-rq-qos.h:109:31-109:53: static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-
block/blk-rq-qos.h:144:35-144:57: static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:150:32-150:54: static inline void rq_qos_done(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:156:33-156:55: static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:162:35-162:57: static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:168:36-168:58: static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:174:36-174:58: static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:185:33-185:55: static inline void rq_qos_track(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:192:33-192:55: static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:199:47-199:69: static inline void rq_qos_queue_depth_changed(struct request_queue *q)
-
block/blk-settings.c:25:27-25:49: void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
-
block/blk-settings.c:103:29-103:51: void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
-
block/blk-settings.c:150:31-150:53: void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-
block/blk-settings.c:187:30-187:52: void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-
block/blk-settings.c:198:36-198:58: void blk_queue_max_discard_sectors(struct request_queue *q,
-
block/blk-settings.c:211:39-211:61: void blk_queue_max_write_same_sectors(struct request_queue *q,
-
block/blk-settings.c:224:41-224:63: void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
-
block/blk-settings.c:236:40-236:62: void blk_queue_max_zone_append_sectors(struct request_queue *q,
-
block/blk-settings.c:267:29-267:51: void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-
block/blk-settings.c:288:37-288:59: void blk_queue_max_discard_segments(struct request_queue *q,
-
block/blk-settings.c:304:33-304:55: void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-
block/blk-settings.c:329:35-329:57: void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:358:36-358:58: void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:379:39-379:61: void blk_queue_zone_write_granularity(struct request_queue *q,
-
block/blk-settings.c:403:33-403:55: void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-
block/blk-settings.c:411:33-411:55: void blk_queue_update_readahead(struct request_queue *q)
-
block/blk-settings.c:461:23-461:45: void blk_queue_io_min(struct request_queue *q, unsigned int min)
-
block/blk-settings.c:499:23-499:45: void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-
block/blk-settings.c:716:31-716:53: void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-
block/blk-settings.c:728:33-728:55: void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:745:30-745:52: void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:770:30-770:52: void blk_queue_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:790:37-790:59: void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:805:26-805:48: void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-
block/blk-settings.c:820:28-820:50: void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-
block/blk-settings.c:844:43-844:65: void blk_queue_required_elevator_features(struct request_queue *q,
-
block/blk-settings.c:858:40-858:62: bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-
block/blk-stat.c:136:28-136:50: void blk_stat_add_callback(struct request_queue *q,
-
block/blk-stat.c:157:31-157:53: void blk_stat_remove_callback(struct request_queue *q,
-
block/blk-stat.c:187:33-187:55: void blk_stat_enable_accounting(struct request_queue *q)
-
block/blk-sysfs.c:61:36-61:58: static ssize_t queue_requests_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:67:22-67:44: queue_requests_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:89:30-89:52: static ssize_t queue_ra_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:98:16-98:38: queue_ra_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:111:39-111:61: static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:118:40-118:62: static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:123:48-123:70: static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-
block/blk-sysfs.c:129:50-129:72: static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:134:44-134:66: static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:139:46-139:68: static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:144:47-144:69: static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:149:41-149:63: static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:154:34-154:56: static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:159:34-159:56: static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:164:47-164:69: static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:169:42-169:64: static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:176:39-176:61: static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:182:40-182:62: static ssize_t queue_discard_max_store(struct request_queue *q,
-
block/blk-sysfs.c:205:47-205:69: static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:210:42-210:64: static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:216:44-216:66: static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:222:50-222:72: static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-
block/blk-sysfs.c:228:43-228:65: static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:236:25-236:47: queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:260:42-260:64: static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:293:1-293:1: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-
block/blk-sysfs.c:294:1-294:1: QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-
block/blk-sysfs.c:295:1-295:1: QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-
block/blk-sysfs.c:296:1-296:1: QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-
block/blk-sysfs.c:299:33-299:55: static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:311:36-311:58: static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:316:42-316:64: static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:321:44-321:66: static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:326:36-326:58: static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:332:37-332:59: static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:351:39-351:61: static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:360:25-360:47: queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:384:38-384:60: static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:396:39-396:61: static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:418:32-418:54: static ssize_t queue_poll_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:423:33-423:55: static ssize_t queue_poll_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:448:38-448:60: static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:453:39-453:61: static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:468:34-468:56: static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:476:35-476:57: static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:520:30-520:52: static ssize_t queue_wc_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:528:31-528:53: static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:550:31-550:53: static ssize_t queue_fua_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:555:31-555:53: static ssize_t queue_dax_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:744:28-744:50: static void blk_exit_queue(struct request_queue *q)
-
block/blk-throttle.c:488:7-488:29: struct request_queue *q,
-
block/blk-throttle.c:1779:32-1779:54: static void throtl_shutdown_wq(struct request_queue *q)
-
block/blk-throttle.c:2383:21-2383:43: int blk_throtl_init(struct request_queue *q)
-
block/blk-throttle.c:2426:22-2426:44: void blk_throtl_exit(struct request_queue *q)
-
block/blk-throttle.c:2436:32-2436:54: void blk_throtl_register_queue(struct request_queue *q)
-
block/blk-throttle.c:2466:37-2466:59: ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:2473:38-2473:60: ssize_t blk_throtl_sample_time_store(struct request_queue *q,
-
block/blk-timeout.c:23:32-23:54: bool __blk_should_fake_timeout(struct request_queue *q)
-
block/blk-wbt.c:421:21-421:43: u64 wbt_get_min_lat(struct request_queue *q)
-
block/blk-wbt.c:429:22-429:44: void wbt_set_min_lat(struct request_queue *q, u64 val)
-
block/blk-wbt.c:626:26-626:48: void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-
block/blk-wbt.c:636:25-636:47: void wbt_enable_default(struct request_queue *q)
-
block/blk-wbt.c:652:30-652:52: u64 wbt_default_latency_nsec(struct request_queue *q)
-
block/blk-wbt.c:696:26-696:48: void wbt_disable_default(struct request_queue *q)
-
block/blk-wbt.c:812:14-812:36: int wbt_init(struct request_queue *q)
-
block/blk-zoned.c:55:39-55:61: static inline sector_t blk_zone_start(struct request_queue *q,
-
block/blk-zoned.c:380:34-380:56: void blk_queue_free_zone_bitmaps(struct request_queue *q)
-
block/blk-zoned.c:553:36-553:58: void blk_queue_clear_zone_settings(struct request_queue *q)
-
block/blk.h:36:21-36:43: blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-
block/blk.h:41:36-41:58: static inline void __blk_get_queue(struct request_queue *q)
-
block/blk.h:63:42-63:64: static inline bool biovec_phys_mergeable(struct request_queue *q,
-
block/blk.h:79:39-79:61: static inline bool __bvec_gap_to_prev(struct request_queue *q,
-
block/blk.h:90:37-90:59: static inline bool bvec_gap_to_prev(struct request_queue *q,
-
block/blk.h:201:34-201:56: static inline void elevator_exit(struct request_queue *q,
-
block/blk.h:247:36-247:58: static inline void req_set_nomerge(struct request_queue *q, struct request *req)
-
block/blk.h:259:52-259:74: static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
-
block/blk.h:272:6-272:28: struct request_queue *q)
-
block/bounce.c:290:32-290:54: static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-
block/bounce.c:363:23-363:45: void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
-
block/bsg-lib.c:324:23-324:45: void bsg_remove_queue(struct request_queue *q)
-
block/bsg.c:135:22-135:44: static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
-
block/bsg.c:266:55-266:77: static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-
block/bsg.c:392:27-392:49: void bsg_unregister_queue(struct request_queue *q)
-
block/bsg.c:409:24-409:46: int bsg_register_queue(struct request_queue *q, struct device *parent,
-
block/bsg.c:466:29-466:51: int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
-
block/elevator.c:140:43-140:65: static struct elevator_type *elevator_get(struct request_queue *q,
-
block/elevator.c:164:39-164:61: struct elevator_queue *elevator_alloc(struct request_queue *q,
-
block/elevator.c:191:22-191:44: void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
-
block/elevator.c:206:21-206:43: void elv_rqhash_del(struct request_queue *q, struct request *rq)
-
block/elevator.c:213:21-213:43: void elv_rqhash_add(struct request_queue *q, struct request *rq)
-
block/elevator.c:223:28-223:50: void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
-
block/elevator.c:229:33-229:55: struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
-
block/elevator.c:303:26-303:48: enum elv_merge elv_merge(struct request_queue *q, struct request **req,
-
block/elevator.c:355:31-355:53: bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
-
block/elevator.c:389:25-389:47: void elv_merged_request(struct request_queue *q, struct request *rq,
-
block/elevator.c:403:25-403:47: void elv_merge_requests(struct request_queue *q, struct request *rq,
-
block/elevator.c:415:36-415:58: struct request *elv_latter_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:425:36-425:58: struct request *elv_former_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:482:24-482:46: int elv_register_queue(struct request_queue *q, bool uevent)
-
block/elevator.c:507:27-507:49: void elv_unregister_queue(struct request_queue *q)
-
block/elevator.c:574:24-574:46: int elevator_switch_mq(struct request_queue *q,
-
block/elevator.c:610:40-610:62: static inline bool elv_support_iosched(struct request_queue *q)
-
block/elevator.c:622:51-622:73: static struct elevator_type *elevator_get_default(struct request_queue *q)
-
block/elevator.c:634:55-634:77: static struct elevator_type *elevator_get_by_features(struct request_queue *q)
-
block/elevator.c:661:23-661:45: void elevator_init_mq(struct request_queue *q)
-
block/elevator.c:703:28-703:50: static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
-
block/elevator.c:723:30-723:52: static int __elevator_change(struct request_queue *q, const char *name)
-
block/elevator.c:755:27-755:49: ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-
block/elevator.c:770:26-770:48: ssize_t elv_iosched_show(struct request_queue *q, char *name)
-
block/elevator.c:805:39-805:61: struct request *elv_rb_former_request(struct request_queue *q,
-
block/elevator.c:817:39-817:61: struct request *elv_rb_latter_request(struct request_queue *q,
-
block/keyslot-manager.c:440:56-440:78: bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q)
-
block/keyslot-manager.c:451:25-451:47: void blk_ksm_unregister(struct request_queue *q)
-
block/kyber-iosched.c:358:56-358:78: static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
-
block/kyber-iosched.c:405:29-405:51: static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/mq-deadline.c:111:37-111:59: static void deadline_remove_request(struct request_queue *q, struct request *rq)
-
block/mq-deadline.c:128:31-128:53: static void dd_request_merged(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:142:32-142:54: static void dd_merged_requests(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:408:26-408:48: static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
-
block/mq-deadline.c:441:29-441:51: static int dd_request_merge(struct request_queue *q, struct request **rq,
-
block/scsi_ioctl.c:46:27-46:49: static int scsi_get_idlun(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:51:25-51:47: static int scsi_get_bus(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:56:27-56:49: static int sg_get_timeout(struct request_queue *q)
-
block/scsi_ioctl.c:61:27-61:49: static int sg_set_timeout(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:71:30-71:52: static int max_sectors_bytes(struct request_queue *q)
-
block/scsi_ioctl.c:80:33-80:55: static int sg_get_reserved_size(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:87:33-87:55: static int sg_set_reserved_size(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:105:29-105:51: static int sg_emulated_host(struct request_queue *q, int __user *p)
-
block/scsi_ioctl.c:220:30-220:52: static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
-
block/scsi_ioctl.c:282:18-282:40: static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
-
block/scsi_ioctl.c:407:19-407:41: int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
-
block/scsi_ioctl.c:522:31-522:53: static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
-
block/scsi_ioctl.c:542:39-542:61: static inline int blk_send_start_stop(struct request_queue *q,
-
block/scsi_ioctl.c:710:35-710:57: static int scsi_cdrom_send_packet(struct request_queue *q,
-
block/scsi_ioctl.c:767:20-767:42: int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
-
crypto/ecc.c:1207:33-1207:57: const struct ecc_point *p, const struct ecc_point *q,
-
crypto/ecc.c:1230:22-1230:46: const u64 *u2, const struct ecc_point *q,
-
drivers/acpi/ec.c:1121:34-1121:56: static void acpi_ec_delete_query(struct acpi_ec_query *q)
-
drivers/ata/libata-pata-timings.c:61:5-61:24: struct ata_timing *q, int T, int UT)
-
drivers/atm/firestream.c:577:65-577:79: static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q)
-
drivers/atm/firestream.c:583:48-583:62: static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe)
-
drivers/atm/firestream.c:627:47-627:61: static void submit_queue (struct fs_dev *dev, struct queue *q,
-
drivers/atm/firestream.c:666:55-666:69: static void process_return_queue (struct fs_dev *dev, struct queue *q)
-
drivers/atm/firestream.c:692:55-692:69: static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
-
drivers/atm/firestream.c:762:51-762:65: static void process_incoming (struct fs_dev *dev, struct queue *q)
-
drivers/block/drbd/drbd_int.h:1890:17-1890:41: drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_int.h:1900:29-1900:53: drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_main.c:906:6-906:28: struct request_queue *q)
-
drivers/block/drbd/drbd_nl.c:1189:43-1189:65: static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-
drivers/block/drbd/drbd_nl.c:1205:4-1205:26: struct request_queue *q,
-
drivers/block/drbd/drbd_nl.c:1239:44-1239:66: static void fixup_discard_if_not_supported(struct request_queue *q)
-
drivers/block/drbd/drbd_nl.c:1251:60-1251:82: static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-
drivers/block/drbd/drbd_nl.c:1267:4-1267:26: struct request_queue *q,
-
drivers/block/null_blk/zoned.c:58:51-58:73: int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
-
drivers/block/pktcdvd.c:924:63-924:85: static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-
drivers/block/pktcdvd.c:2285:36-2285:58: static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
-
drivers/block/rnbd/rnbd-clt.c:162:41-162:60: static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1071:7-1071:26: struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1266:12-1266:31: struct rnbd_queue *q,
-
drivers/block/rsxx/dma.c:243:7-243:25: struct list_head *q, unsigned int done)
-
drivers/block/rsxx/dma.c:601:7-601:25: struct list_head *q,
-
drivers/block/rsxx/dma.c:630:10-630:28: struct list_head *q,
-
drivers/block/sx8.c:663:57-663:79: static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-
drivers/char/ipmi/ipmi_msghandler.c:668:32-668:50: static void free_recv_msg_list(struct list_head *q)
-
drivers/char/ipmi/ipmi_msghandler.c:678:31-678:49: static void free_smi_msg_list(struct list_head *q)
-
drivers/clk/clk.c:2913:40-2913:58: bool clk_is_match(const struct clk *p, const struct clk *q)
-
drivers/crypto/cavium/cpt/cptpf_mbox.c:59:55-59:58: static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/cavium/cpt/cptvf_reqmanager.c:15:53-15:75: static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
-
drivers/crypto/cavium/zip/zip_mem.c:57:48-57:52: int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-
drivers/crypto/cavium/zip/zip_mem.c:76:48-76:52: void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-
drivers/crypto/hisilicon/qm.c:2100:8-2100:28: struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2120:37-2120:57: static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2129:31-2129:51: static int hisi_qm_uacce_mmap(struct uacce_queue *q,
-
drivers/crypto/hisilicon/qm.c:2177:38-2177:58: static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2184:38-2184:58: static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:2189:28-2189:48: static void qm_set_sqctype(struct uacce_queue *q, u16 type)
-
drivers/crypto/hisilicon/qm.c:2199:33-2199:53: static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
-
drivers/crypto/hisilicon/sec/sec_drv.c:673:47-673:53: static irqreturn_t sec_isr_handle_th(int irq, void *q)
-
drivers/crypto/hisilicon/sec/sec_drv.c:679:44-679:50: static irqreturn_t sec_isr_handle(int irq, void *q)
-
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c:135:63-135:66: static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c:58:7-58:37: struct otx_cpt_pending_queue *q,
-
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c:49:6-49:37: struct otx2_cpt_pending_queue *q,
-
drivers/cxl/mem.c:696:5-696:43: struct cxl_mem_query_commands __user *q)
-
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4349:30-4349:34: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:3042:25-3042:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:4213:25-4213:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:3463:25-3463:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:2061:25-2061:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:156:62-156:76: static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:200:5-200:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:216:4-216:18: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:280:5-280:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:298:5-298:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:409:59-409:73: static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:444:5-444:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:454:5-454:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:521:5-521:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:546:59-546:73: static int update_queue(struct device_queue_manager *dqm, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1025:5-1025:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1069:5-1069:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1258:65-1258:79: static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1451:5-1451:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1661:6-1661:20: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:180:60-180:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:197:5-197:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c:84:64-84:78: static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c:80:63-80:77: static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:228:60-228:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:245:4-245:18: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:48:59-48:84: struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:64:6-64:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:45:4-45:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:69:45-69:70: static void set_priority(struct cik_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:76:6-76:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:89:3-89:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:143:4-143:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:188:4-188:29: struct queue_properties *q, unsigned int atc_bit)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:224:4-224:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:230:4-230:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:236:5-236:30: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:308:3-308:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:314:5-314:30: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:45:7-45:32: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:69:53-69:78: static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:76:3-76:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:89:4-89:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:165:9-165:34: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:281:4-281:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:295:3-295:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:322:3-322:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:46:4-46:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:78:44-78:69: static void set_priority(struct v9_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:85:3-85:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:132:4-132:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:215:9-215:34: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:336:4-336:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:350:3-350:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:377:3-377:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:48:4-48:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:72:44-72:69: static void set_priority(struct vi_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:79:6-79:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:92:4-92:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:170:4-170:29: struct queue_properties *q, unsigned int mtype,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:241:4-241:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:247:4-247:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:301:4-301:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:313:4-313:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:320:3-320:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:345:3-345:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c:143:3-143:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c:142:3-142:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:464:26-464:40: int kfd_procfs_add_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:606:27-606:41: void kfd_procfs_del_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:166:26-166:41: struct kfd_dev *dev, struct queue **q,
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:27:29-27:54: void print_queue_properties(struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:45:18-45:32: void print_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:66:16-66:31: int init_queue(struct queue **q, const struct queue_properties *properties)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:80:19-80:33: void uninit_queue(struct queue *q)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:599:30-599:52: static void throttle_release(struct i915_request **q, int count)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:612:7-612:29: struct i915_request **q, int count)
-
drivers/gpu/drm/v3d/v3d_sched.c:296:54-296:69: v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
-
drivers/gpu/ipu-v3/ipu-image-convert.c:1255:5-1255:23: struct list_head *q)
-
drivers/ide/ide-cd.c:532:31-532:53: static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
-
drivers/ide/ide-timings.c:96:55-96:74: static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:371:51-371:77: static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:377:6-377:32: struct ocrdma_queue_info *q, u16 len, u16 entry_size)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:403:11-403:37: struct ocrdma_queue_info *q, int queue_type)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1555:32-1555:59: static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1570:30-1570:57: static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1575:39-1575:66: static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1581:33-1581:60: static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1586:33-1586:60: static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/qedr/verbs.c:742:28-742:47: struct qedr_dev *dev, struct qedr_userq *q,
-
drivers/infiniband/hw/qedr/verbs.c:790:12-790:31: struct qedr_userq *q, u64 buf_addr,
-
drivers/infiniband/sw/rxe/rxe_queue.c:46:29-46:47: inline void rxe_queue_reset(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.c:111:26-111:44: static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
-
drivers/infiniband/sw/rxe/rxe_queue.c:129:22-129:40: int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
-
drivers/infiniband/sw/rxe/rxe_queue.c:174:24-174:42: void rxe_queue_cleanup(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:52:30-52:48: static inline int next_index(struct rxe_queue *q, int index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:57:31-57:49: static inline int queue_empty(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:72:30-72:48: static inline int queue_full(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:87:37-87:55: static inline void advance_producer(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:99:37-99:55: static inline void advance_consumer(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:111:35-111:53: static inline void *producer_addr(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:117:35-117:53: static inline void *consumer_addr(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:123:43-123:61: static inline unsigned int producer_index(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:136:43-136:61: static inline unsigned int consumer_index(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:149:37-149:55: static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:155:44-155:68: static inline unsigned int index_from_addr(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:162:40-162:64: static inline unsigned int queue_count(const struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:168:32-168:50: static inline void *queue_head(struct rxe_queue *q)
-
drivers/input/misc/hisi_powerkey.c:29:52-29:58: static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:40:54-40:60: static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:51:55-51:61: static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
-
drivers/input/rmi4/rmi_f54.c:283:32-283:50: static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
-
drivers/input/rmi4/rmi_f54.c:363:36-363:54: static void rmi_f54_stop_streaming(struct vb2_queue *q)
-
drivers/input/touchscreen/atmel_mxt_ts.c:2379:28-2379:46: static int mxt_queue_setup(struct vb2_queue *q,
-
drivers/input/touchscreen/sur40.c:845:30-845:48: static int sur40_queue_setup(struct vb2_queue *q,
-
drivers/md/dm-cache-policy-smq.c:269:20-269:34: static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
-
drivers/md/dm-cache-policy-smq.c:287:24-287:38: static unsigned q_size(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:295:20-295:34: static void q_push(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:305:26-305:40: static void q_push_front(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:315:27-315:41: static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:325:19-325:33: static void q_del(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:335:29-335:43: static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
-
drivers/md/dm-cache-policy-smq.c:357:28-357:42: static struct entry *q_pop(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:372:40-372:54: static struct entry *__redist_pop_from(struct queue *q, unsigned level)
-
drivers/md/dm-cache-policy-smq.c:386:37-386:51: static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
-
drivers/md/dm-cache-policy-smq.c:405:27-405:41: static void q_set_targets(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:427:28-427:42: static void q_redistribute(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:470:23-470:37: static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
-
drivers/md/dm-rq.c:65:21-65:43: void dm_start_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:71:20-71:42: void dm_stop_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:171:39-171:61: static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
-
drivers/md/dm-table.c:1376:39-1376:61: static void dm_update_keyslot_manager(struct request_queue *q,
-
drivers/md/dm-table.c:1985:52-1985:74: void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
-
drivers/md/dm.c:1779:46-1779:68: static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
-
drivers/media/common/saa7146/saa7146_fops.c:52:47-52:70: void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:69:5-69:30: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:91:7-91:32: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:112:5-112:30: struct saa7146_dmaqueue *q, int vbi)
-
drivers/media/common/saa7146/saa7146_vbi.c:219:27-219:50: static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,enum v4l2_field field)
-
drivers/media/common/saa7146/saa7146_vbi.c:274:25-274:48: static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/common/saa7146/saa7146_vbi.c:289:26-289:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_vbi.c:301:28-301:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_video.c:1038:27-1038:50: static int buffer_prepare(struct videobuf_queue *q,
-
drivers/media/common/saa7146/saa7146_video.c:1120:25-1120:48: static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/common/saa7146/saa7146_video.c:1141:26-1141:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_video.c:1153:28-1153:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:391:30-391:48: static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:473:28-473:46: static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:499:29-499:47: static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:611:24-611:42: bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:633:30-633:48: static bool __buffers_in_use(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:643:24-643:42: void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:653:33-653:51: static int __verify_userptr_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:666:30-666:48: static int __verify_mmap_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:679:32-679:50: static int __verify_dmabuf_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:689:28-689:46: int vb2_verify_memory_type(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-core.c:735:22-735:40: int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:874:26-874:44: int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:1048:23-1048:41: void vb2_discard_done(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1480:26-1480:44: int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:1520:32-1520:50: static int vb2_start_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1572:19-1572:37: int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1721:35-1721:53: static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-
drivers/media/common/videobuf2/videobuf2-core.c:1802:30-1802:48: static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1835:30-1835:48: int vb2_wait_for_all_buffers(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1864:20-1864:38: int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1928:32-1928:50: static void __vb2_queue_cancel(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2025:23-2025:41: int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2070:22-2070:40: void vb2_queue_error(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2078:24-2078:42: int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2106:35-2106:53: static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
-
drivers/media/common/videobuf2/videobuf2-core.c:2132:21-2132:39: int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
-
drivers/media/common/videobuf2/videobuf2-core.c:2203:14-2203:32: int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
-
drivers/media/common/videobuf2/videobuf2-core.c:2317:25-2317:43: int vb2_core_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2360:29-2360:47: void vb2_core_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2370:24-2370:42: __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
-
drivers/media/common/videobuf2/videobuf2-core.c:2519:30-2519:48: static int __vb2_init_fileio(struct vb2_queue *q, int read)
-
drivers/media/common/videobuf2/videobuf2-core.c:2637:33-2637:51: static int __vb2_cleanup_fileio(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2661:36-2661:54: static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2822:17-2822:35: size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2829:18-2829:36: size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2908:22-2908:40: int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
-
drivers/media/common/videobuf2/videobuf2-core.c:2948:21-2948:39: int vb2_thread_stop(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:344:36-344:54: static void set_buffer_cache_hints(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:391:37-391:55: static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:653:24-653:48: int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:679:18-679:36: int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:701:27-701:45: static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:720:17-720:35: int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:729:21-729:39: int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:748:21-748:39: int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:806:14-806:32: int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:827:15-827:33: int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:858:18-858:36: int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:868:19-868:37: int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:878:16-878:34: int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:885:25-885:43: int vb2_queue_init_name(struct vb2_queue *q, const char *name)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:930:20-930:38: int vb2_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:936:24-936:42: void vb2_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:942:19-942:37: __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-
drivers/media/pci/bt8xx/bttv-driver.c:1528:32-1528:55: static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv,
-
drivers/media/pci/bt8xx/bttv-driver.c:1629:14-1629:37: buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/pci/bt8xx/bttv-driver.c:1642:16-1642:39: buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/pci/bt8xx/bttv-driver.c:1653:14-1653:37: buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-driver.c:1667:28-1667:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-risc.c:571:15-571:38: bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf)
-
drivers/media/pci/bt8xx/bttv-vbi.c:70:29-70:52: static int vbi_buffer_setup(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:91:31-91:54: static int vbi_buffer_prepare(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:199:18-199:41: vbi_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-vbi.c:214:32-214:55: static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:34:31-34:49: static int cobalt_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cobalt/cobalt-v4l2.c:279:35-279:53: static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:388:35-388:53: static void cobalt_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:36:22-36:41: void cx18_queue_init(struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:44:6-44:25: struct cx18_queue *q, int to_front)
-
drivers/media/pci/cx18/cx18-queue.c:73:54-73:73: struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:60:5-60:24: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:67:9-67:28: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-streams.c:95:27-95:50: static void cx18_dma_free(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:103:32-103:55: static int cx18_prepare_buffer(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:181:25-181:48: static int buffer_setup(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:200:27-200:50: static int buffer_prepare(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:213:28-213:51: static void buffer_release(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:223:26-223:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/cx23885/cx23885-417.c:1123:24-1123:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-417.c:1167:36-1167:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-417.c:1194:36-1194:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-core.c:425:7-425:32: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-core.c:1393:9-1393:34: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:88:24-88:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:150:36-150:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-dvb.c:161:36-161:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-vbi.c:87:5-87:30: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:114:24-114:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:217:36-217:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-vbi.c:228:36-228:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-video.c:89:2-89:27: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-video.c:305:7-305:32: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:332:24-332:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:487:36-487:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-video.c:498:36-498:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx25821/cx25821-video.c:59:8-59:33: struct cx25821_dmaqueue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:127:32-127:50: static int cx25821_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:261:36-261:54: static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx25821/cx25821-video.c:274:36-274:54: static void cx25821_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-blackbird.c:658:24-658:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-blackbird.c:701:28-701:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-blackbird.c:751:28-751:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-core.c:519:4-519:26: struct cx88_dmaqueue *q, u32 count)
-
drivers/media/pci/cx88/cx88-dvb.c:75:24-75:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-dvb.c:119:28-119:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-dvb.c:130:28-130:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:73:8-73:30: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-mpeg.c:196:5-196:27: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:213:24-213:42: int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-
drivers/media/pci/cx88/cx88-vbi.c:52:5-52:27: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:99:9-99:31: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-vbi.c:115:24-115:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:194:28-194:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-vbi.c:205:28-205:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-video.c:350:7-350:29: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-video.c:405:12-405:34: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-video.c:420:24-420:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-video.c:527:28-527:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-video.c:538:28-538:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/dt3155/dt3155.c:148:35-148:53: static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
-
drivers/media/pci/dt3155/dt3155.c:176:35-176:53: static void dt3155_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:218:53-218:72: static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:230:28-230:47: static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:295:60-295:79: static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:336:51-336:70: static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:498:52-498:71: static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:579:60-579:79: static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:776:41-776:60: static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1522:54-1522:73: static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1659:55-1659:74: static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1941:59-1941:78: static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:32:22-32:41: void ivtv_queue_init(struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:40:67-40:86: void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:59:57-59:76: struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:330:41-330:59: static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:339:41-339:59: static void netup_unidvb_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:264:5-264:30: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:292:7-292:32: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:305:5-305:30: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:355:54-355:79: void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:1373:8-1373:33: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-ts.c:106:28-106:46: int saa7134_ts_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-vbi.c:128:24-128:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-video.c:940:24-940:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:655:33-655:51: static int solo_enc_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:708:37-708:55: static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:715:37-715:55: static void solo_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:307:29-307:47: static int solo_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:322:33-322:51: static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:330:33-330:51: static void solo_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw5864/tw5864-video.c:182:31-182:49: static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/tw5864/tw5864-video.c:427:35-427:53: static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw5864/tw5864-video.c:446:35-446:53: static void tw5864_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw68/tw68-video.c:358:29-358:47: static int tw68_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/tw68/tw68-video.c:491:33-491:51: static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw68/tw68-video.c:502:33-502:51: static void tw68_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2687:36-2687:54: static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2706:36-2706:54: static void allegro_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/aspeed-video.c:1419:37-1419:55: static int aspeed_video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/aspeed-video.c:1450:41-1450:59: static int aspeed_video_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/aspeed-video.c:1468:41-1468:59: static void aspeed_video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/coda/coda-common.c:1957:33-1957:51: static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/coda/coda-common.c:2099:33-2099:51: static void coda_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/exynos-gsc/gsc-m2m.c:56:36-56:54: static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/exynos-gsc/gsc-m2m.c:80:36-80:54: static void gsc_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/exynos4-is/fimc-capture.c:259:28-259:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/exynos4-is/fimc-capture.c:290:28-290:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/exynos4-is/fimc-isp-video.c:76:46-76:64: static int isp_video_capture_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/exynos4-is/fimc-isp-video.c:119:46-119:64: static void isp_video_capture_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/exynos4-is/fimc-lite.c:305:28-305:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/exynos4-is/fimc-lite.c:339:28-339:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/exynos4-is/fimc-m2m.c:73:28-73:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/exynos4-is/fimc-m2m.c:82:28-82:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/imx-pxp.c:1431:32-1431:50: static int pxp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/imx-pxp.c:1440:32-1440:50: static void pxp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c:645:33-645:51: static int mtk_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c:821:41-821:59: static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c:830:41-830:59: static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c:389:40-389:58: static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c:411:40-411:58: static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c:1290:40-1290:58: static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c:1300:40-1300:58: static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c:778:40-778:58: static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c:847:40-847:58: static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/camss/camss-video.c:326:30-326:48: static int video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/camss/camss-video.c:436:34-436:52: static int video_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/camss/camss-video.c:481:34-481:52: static void video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/helpers.c:1405:38-1405:56: void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/vdec.c:804:29-804:47: static int vdec_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/vdec.c:1046:33-1046:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/venus/vdec.c:1143:33-1143:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/venc.c:814:29-814:47: static int venc_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/venc.c:955:33-955:51: static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rcar_fdp1.c:1922:33-1922:51: static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rcar_fdp1.c:1961:33-1961:51: static void fdp1_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rga/rga-buf.c:59:36-59:54: static void rga_buf_return_buffers(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rga/rga-buf.c:76:36-76:54: static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rockchip/rga/rga-buf.c:92:36-92:54: static void rga_buf_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c:1491:41-1491:59: static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c:160:29-160:47: rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
-
drivers/media/platform/s5p-jpeg/jpeg-core.c:2566:37-2566:55: static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/s5p-jpeg/jpeg-core.c:2576:37-2576:55: static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c:994:36-994:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c:1010:36-1010:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c:2496:36-2496:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c:2526:36-2526:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/sti/bdisp/bdisp-v4l2.c:498:34-498:52: static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/sti/bdisp/bdisp-v4l2.c:521:34-521:52: static void bdisp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/sti/delta/delta-v4l2.c:1306:41-1306:59: static int delta_vb2_au_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/sti/delta/delta-v4l2.c:1400:41-1400:59: static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/sti/delta/delta-v4l2.c:1530:44-1530:62: static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/ti-vpe/vpe.c:2124:58-2124:76: static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
-
drivers/media/platform/ti-vpe/vpe.c:2177:32-2177:50: static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/ti-vpe/vpe.c:2199:32-2199:50: static void vpe_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1513:33-1513:51: static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1552:36-1552:54: static int vicodec_start_streaming(struct vb2_queue *q,
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1640:36-1640:54: static void vicodec_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vim2m.c:1064:34-1064:52: static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/test-drivers/vim2m.c:1079:34-1079:52: static void vim2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vivid/vivid-core.c:821:10-821:28: struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:39:39-39:57: static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:772:43-772:61: static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:898:43-898:61: static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/go7007/go7007-fw.c:290:70-290:74: static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
-
drivers/media/usb/go7007/go7007-v4l2.c:343:31-343:49: static int go7007_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/go7007/go7007-v4l2.c:397:35-397:53: static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/usb/go7007/go7007-v4l2.c:428:35-428:53: static void go7007_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/gspca/topro.c:1439:50-1439:53: static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
-
drivers/media/usb/gspca/topro.c:1456:53-1456:57: static void setquality(struct gspca_dev *gspca_dev, s32 q)
-
drivers/media/usb/hdpvr/hdpvr-video.c:97:29-97:47: static int hdpvr_free_queue(struct list_head *q)
-
drivers/media/v4l2-core/v4l2-mc.c:302:34-302:52: int v4l_vb2q_enable_media_source(struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:678:9-678:27: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:691:8-691:26: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:719:7-719:25: struct vb2_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:55:43-55:66: struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:76:44-76:67: static int state_neither_active_nor_queued(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:88:21-88:44: int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:121:21-121:44: int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:131:31-131:54: void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:143:31-143:54: void videobuf_queue_core_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:185:28-185:51: int videobuf_queue_is_busy(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:230:28-230:51: static int __videobuf_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:263:28-263:51: void videobuf_queue_cancel(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:298:37-298:60: enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:318:29-318:52: static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
-
drivers/media/v4l2-core/videobuf-core.c:373:24-373:47: int videobuf_mmap_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:384:27-384:50: int __videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:428:25-428:48: int videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:440:22-440:45: int videobuf_reqbufs(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:501:23-501:46: int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:528:19-528:42: int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:632:43-632:66: static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-
drivers/media/v4l2-core/videobuf-core.c:675:31-675:54: static int stream_next_buffer(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:695:20-695:43: int videobuf_dqbuf(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:735:23-735:46: int videobuf_streamon(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:763:33-763:56: static int __videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:773:24-773:47: int videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:786:39-786:62: static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:831:36-831:59: static int __videobuf_copy_to_user(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:848:35-848:58: static int __videobuf_copy_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:874:27-874:50: ssize_t videobuf_read_one(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:961:34-961:57: static int __videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:996:34-996:57: static void __videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1012:25-1012:48: int videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1024:25-1024:48: void videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1032:20-1032:43: void videobuf_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1046:30-1046:53: ssize_t videobuf_read_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1120:10-1120:33: struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1172:26-1172:49: int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-
drivers/media/v4l2-core/videobuf-dma-contig.c:234:30-234:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:274:35-274:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:347:37-347:60: void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:373:31-373:54: void videobuf_dma_contig_free(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:499:30-499:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:574:28-574:51: static int __videobuf_sync(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:589:35-589:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:672:29-672:52: void videobuf_queue_sg_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:154:30-154:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:209:35-209:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:277:34-277:57: void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:31:36-31:56: static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
-
drivers/misc/habanalabs/common/hw_queue.c:84:4-84:24: struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
-
drivers/misc/habanalabs/common/hw_queue.c:117:5-117:25: struct hl_hw_queue *q, int num_of_entries,
-
drivers/misc/habanalabs/common/hw_queue.c:166:6-166:26: struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:200:59-200:79: static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:698:59-698:79: static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:749:51-749:71: static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:769:51-769:71: static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:774:51-774:71: static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:779:50-779:70: static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:880:47-880:67: static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:928:48-928:68: static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/irq.c:214:40-214:54: int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
-
drivers/misc/habanalabs/common/irq.c:242:41-242:55: void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
-
drivers/misc/habanalabs/common/irq.c:249:42-249:56: void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
-
drivers/misc/habanalabs/common/irq.c:275:40-275:54: int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/habanalabs/common/irq.c:300:41-300:55: void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/habanalabs/common/irq.c:309:42-309:56: void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/uacce/uacce.c:15:30-15:50: static int uacce_start_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:40:28-40:48: static int uacce_put_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:94:57-94:77: static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:117:32-117:52: static void uacce_unbind_queue(struct uacce_queue *q)
-
drivers/misc/vmw_vmci/vmci_queue_pair.c:248:27-248:33: static void qp_free_queue(void *q, u64 size)
-
drivers/mmc/core/crypto.c:22:29-22:51: void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
-
drivers/mmc/core/queue.c:177:37-177:59: static void mmc_queue_setup_discard(struct request_queue *q,
-
drivers/mmc/core/queue.c:222:30-222:52: static void mmc_exit_request(struct request_queue *q, struct request *req)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:168:7-168:30: struct bnx2x_vf_queue *q,
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1401:7-1401:30: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:385:45-385:68: static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:390:54-390:77: static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:398:55-398:78: static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:543:8-543:31: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/brocade/bna/bna.h:238:44-238:62: static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:508:55-508:70: static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:621:48-621:61: static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:830:47-830:62: static void refill_free_list(struct sge *sge, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1176:12-1176:25: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1212:7-1212:20: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1299:58-1299:71: static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1408:40-1408:59: static inline int enough_free_Tx_descs(const struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:167:43-167:64: static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:172:45-172:68: static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:177:44-177:66: static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:192:11-192:34: const struct sge_rspq *q, unsigned int credits)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:236:51-236:67: static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:285:51-285:67: static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:325:7-325:23: struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:345:37-345:59: static inline int should_restart_tx(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:352:49-352:70: static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:380:48-380:63: static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:442:52-442:67: static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:485:53-485:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:505:44-505:59: static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:578:50-578:65: static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:647:27-647:44: static void t3_reset_qset(struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:676:51-676:68: static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:844:10-844:27: struct sge_rspq *q, unsigned int len,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1050:59-1050:75: static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1094:9-1094:31: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1184:8-1184:24: struct sge_txq *q, unsigned int ndesc,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1251:30-1251:46: struct sge_qset *qs, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1423:58-1423:74: static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1455:45-1455:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1478:44-1478:60: static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1633:6-1633:22: struct sge_txq *q, unsigned int pidx,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1697:44-1697:60: static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1855:36-1855:53: static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1878:8-1878:25: struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2289:7-2289:30: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2294:40-2294:64: static inline void clear_rspq_bufstate(struct sge_rspq * const q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2585:58-2585:75: static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1601:11-1601:34: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1758:52-1758:69: static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:556:27-556:44: static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:914:23-914:40: void cxgb4_quiesce_rx(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:955:44-955:61: void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1241:32-1241:49: int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2425:28-2425:44: static void disable_txq_db(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2434:49-2434:65: static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2520:49-2520:65: static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:57:33-57:50: static void uldrx_flush_handler(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:74:26-74:43: static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:203:9-203:30: struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:208:38-208:60: static inline unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:313:41-313:57: void free_tx_desc(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:341:31-341:53: static inline int reclaimable(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:359:62-359:78: static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:391:55-391:71: void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:438:48-438:63: static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:466:48-466:63: static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:479:53-479:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:535:53-535:68: static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:837:49-837:65: void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:906:57-906:73: void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1025:52-1025:68: inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1106:5-1106:27: const struct sge_txq *q, void *pos)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1131:7-1131:29: const struct sge_txq *q, void *pos,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1232:26-1232:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1238:32-1238:48: static inline void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2106:45-2106:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2643:30-2643:51: static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2739:22-2739:43: static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2897:29-2897:49: static void txq_stop_maperr(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2913:26-2913:46: static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2938:27-2938:47: static void service_ofldq(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3059:22-3059:42: static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3179:10-3179:32: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3210:29-3210:49: static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3665:22-3665:39: int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3819:54-3819:69: static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3845:8-3845:31: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3856:30-3856:47: static inline void rspq_next(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3879:30-3879:47: static int process_responses(struct sge_rspq *q, int budget)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4059:30-4059:47: int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4556:44-4556:60: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4743:56-4743:72: static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4842:37-4842:53: void free_txq(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4889:53-4889:74: void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:682:31-682:53: static unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:687:26-687:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:693:25-693:41: static void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:58:55-58:77: static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:81:43-81:65: static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:86:37-86:53: static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:94:38-94:58: static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:734:6-734:26: struct sge_eth_txq *q, u64 mask,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:789:6-789:26: struct sge_eth_txq *q, u32 tid,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:830:8-830:28: struct sge_eth_txq *q, u64 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:980:8-980:28: struct sge_eth_txq *q, uint32_t tx_chan)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1089:11-1089:31: struct sge_eth_txq *q, u32 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1263:8-1263:28: struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1450:21-1450:41: bool tcp_push, struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1559:5-1559:25: struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1675:9-1675:29: struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1735:6-1735:26: struct sge_eth_txq *q, u32 skb_offset,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1807:10-1807:30: struct sge_eth_txq *q, u32 tls_end_offset)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1933:6-1933:26: struct sge_eth_txq *q)
-
drivers/net/ethernet/emulex/benet/be.h:151:37-151:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:156:37-156:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:161:38-161:60: static inline void *queue_index_node(struct be_queue_info *q, u16 index)
-
drivers/net/ethernet/emulex/benet/be.h:166:35-166:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:176:35-176:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1451:50-1451:72: int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1502:52-1502:74: int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:145:55-145:77: static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:156:55-156:77: static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/hisilicon/hns/hnae.c:193:16-193:35: hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:237:51-237:70: static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
-
drivers/net/ethernet/hisilicon/hns/hnae.c:264:29-264:48: static void hnae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:62:50-62:69: static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:194:31-194:50: static void hns_ae_init_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:202:31-202:50: static void hns_ae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:92:28-92:47: void hns_rcb_reset_ring_hw(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:142:26-142:45: void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:159:25-159:44: void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:172:28-172:47: void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:183:27-183:46: void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:197:29-197:48: void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:202:20-202:39: void hns_rcb_start(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:222:29-222:48: void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:234:29-234:48: void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:441:34-441:53: static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:3873:31-3873:51: static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:329:12-329:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:380:12-380:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:436:51-436:76: void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:456:30-456:55: void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
-
drivers/net/ethernet/intel/fm10k/fm10k_pf.c:1134:11-1134:36: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/marvell/octeontx2/af/common.h:51:50-51:64: static inline int qmem_alloc(struct device *dev, struct qmem **q,
-
drivers/net/ethernet/marvell/skge.c:2479:45-2479:49: static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
-
drivers/net/ethernet/marvell/skge.c:2510:47-2510:51: static void skge_qset(struct skge_port *skge, u16 q,
-
drivers/net/ethernet/marvell/sky2.c:1036:45-1036:49: static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
-
drivers/net/ethernet/marvell/sky2.c:1076:43-1076:47: static void sky2_qset(struct sky2_hw *hw, u16 q)
-
drivers/net/ethernet/marvell/sky2.c:1125:53-1125:62: static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
-
drivers/net/ethernet/marvell/sky2.c:2915:62-2915:66: static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:131:46-131:70: static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:136:41-136:65: static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:143:31-143:55: mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:149:40-149:64: mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:159:40-159:64: mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:166:39-166:63: static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:171:37-171:61: static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:236:9-236:33: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:246:13-246:37: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:256:10-256:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:263:10-263:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:272:9-272:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:278:46-278:70: static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:285:10-285:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:314:11-314:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:390:10-390:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:441:11-441:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:454:7-454:31: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:465:9-465:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:502:10-502:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:508:10-508:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:544:10-544:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:607:38-607:62: static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:662:36-662:66: static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:668:34-668:64: static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:675:9-675:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:705:10-705:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:720:38-720:62: static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:839:5-839:29: struct mlxsw_pci_queue *q, u8 q_num)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:900:6-900:30: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/netronome/nfp/flower/cmsg.h:680:15-680:18: u8 vnic, u8 q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:785:37-785:49: static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:811:39-811:51: static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:824:39-824:51: static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:829:33-829:45: static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:853:39-853:51: static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:864:39-864:51: static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:499:41-499:61: void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:533:4-533:24: struct ionic_queue *q, unsigned int index, const char *name,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:561:18-561:38: void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:573:21-573:41: void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:585:19-585:39: void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:607:31-607:51: static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:618:22-618:42: void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:271:48-271:68: static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:283:38-283:58: static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
-
drivers/net/ethernet/pensando/ionic/ionic_lif.h:271:41-271:61: static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:231:29-231:49: static void ionic_adminq_cb(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:22:35-22:55: static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:30:35-30:55: static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:38:45-38:65: static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:43:43-43:63: static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:69:39-69:59: static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:115:43-115:63: static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:149:28-149:48: static void ionic_rx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:256:32-256:52: static int ionic_rx_page_alloc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:297:32-297:52: static void ionic_rx_page_free(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:326:20-326:40: void ionic_rx_fill(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:395:21-395:41: void ionic_rx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:557:39-557:59: static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:574:37-574:57: static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:591:28-591:48: static void ionic_tx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:677:21-677:41: void ionic_tx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:736:31-736:51: static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:768:49-768:69: static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:778:25-778:45: static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:950:31-950:51: static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:990:34-990:54: static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1025:31-1025:51: static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1050:21-1050:41: static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1078:34-1078:54: static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1103:32-1103:52: static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-
drivers/net/ethernet/renesas/ravb_main.c:174:50-174:54: static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
-
drivers/net/ethernet/renesas/ravb_main.c:218:53-218:57: static void ravb_ring_free(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:273:55-273:59: static void ravb_ring_format(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:337:52-337:56: static int ravb_ring_init(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:533:58-533:62: static bool ravb_rx(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:756:59-756:63: static bool ravb_queue_interrupt(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:880:62-880:66: static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
-
drivers/net/ethernet/sfc/ptp.c:835:38-835:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/ptp.c:1226:57-1226:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/via/via-velocity.c:1759:9-1759:13: int q, int n)
-
drivers/net/hyperv/netvsc_trace.h:65:1-65:1: DEFINE_EVENT(rndis_msg_class, rndis_send,
-
drivers/net/hyperv/netvsc_trace.h:71:1-71:1: DEFINE_EVENT(rndis_msg_class, rndis_recv,
-
drivers/net/tap.c:33:48-33:66: static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:39:29-39:47: static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:49:29-49:47: static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:80:41-80:59: static inline bool tap_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:86:32-86:50: static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-
drivers/net/tap.c:91:39-91:57: static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-
drivers/net/tap.c:145:8-145:26: struct tap_queue *q)
-
drivers/net/tap.c:166:5-166:23: struct tap_queue *q)
-
drivers/net/tap.c:187:30-187:48: static int tap_disable_queue(struct tap_queue *q)
-
drivers/net/tap.c:222:27-222:45: static void tap_put_queue(struct tap_queue *q)
-
drivers/net/tap.c:620:29-620:47: static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
-
drivers/net/tap.c:762:29-762:47: static ssize_t tap_put_user(struct tap_queue *q,
-
drivers/net/tap.c:820:28-820:46: static ssize_t tap_do_read(struct tap_queue *q,
-
drivers/net/tap.c:882:40-882:58: static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
-
drivers/net/tap.c:920:24-920:42: static int set_offload(struct tap_queue *q, unsigned long arg)
-
drivers/net/tap.c:1132:29-1132:47: static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
-
drivers/net/usb/catc.c:572:48-572:67: static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
-
drivers/net/usb/lan78xx.c:2211:49-2211:70: static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:701:45-701:66: static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:760:34-760:55: static void wait_skb_queue_empty(struct sk_buff_head *q)
-
drivers/net/virtio_net.c:1411:62-1411:66: static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-
drivers/net/wireless/ath/ath5k/trace.h:39:1-39:1: TRACE_EVENT(ath5k_tx,
-
drivers/net/wireless/ath/ath5k/trace.h:65:1-65:1: TRACE_EVENT(ath5k_tx_complete,
-
drivers/net/wireless/ath/ath6kl/txrx.c:845:34-845:55: static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
-
drivers/net/wireless/ath/ath9k/mac.c:46:42-46:46: u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:52:43-52:47: void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
-
drivers/net/wireless/ath/ath9k/mac.c:58:42-58:46: void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:65:46-65:50: u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:170:49-170:53: bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:196:48-196:52: bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:261:48-261:52: bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:337:64-337:68: static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:346:49-346:53: bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:367:47-367:51: bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/broadcom/b43/pio.c:24:28-24:52: static u16 generate_cookie(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:178:39-178:63: static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:192:37-192:61: static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:201:37-201:61: static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:317:33-317:57: static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:370:33-370:57: static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:440:25-440:49: static int pio_tx_frame(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:596:26-596:50: static bool pio_rx_frame(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:762:17-762:41: void b43_pio_rx(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:777:38-777:62: static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:790:37-790:61: static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.h:109:36-109:60: static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:114:36-114:60: static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:119:38-119:62: static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:125:38-125:62: static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:132:36-132:60: static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:137:36-137:60: static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:142:38-142:62: static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:148:38-148:62: static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c:621:61-621:74: static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c:2747:33-2747:46: static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:402:8-402:26: struct list_head *q, int *counter)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:421:6-421:24: struct list_head *q, struct brcmf_usbreq *req,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:433:20-433:38: brcmf_usbdev_qinit(struct list_head *q, int qsize)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:467:30-467:48: static void brcmf_usb_free_q(struct list_head *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4348:9-4348:34: struct ipw2100_bd_queue *q, int entries)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4369:54-4369:79: static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4386:5-4386:30: struct ipw2100_bd_queue *q, u32 base, u32 size,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3699:31-3699:58: static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3711:38-3711:63: static inline int ipw_tx_queue_space(const struct clx2_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3741:51-3741:70: static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3767:9-3767:31: struct clx2_tx_queue *q,
-
drivers/net/wireless/intel/iwlegacy/common.c:2537:19-2537:45: il_rx_queue_space(const struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2554:50-2554:70: il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2907:16-2907:39: il_queue_space(const struct il_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2929:35-2929:52: il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
-
drivers/net/wireless/intel/iwlegacy/common.h:848:15-848:38: il_queue_used(const struct il_queue *q, int i)
-
drivers/net/wireless/intel/iwlegacy/common.h:859:16-859:33: il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:480:59-480:63: static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:704:44-704:66: int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:926:27-926:43: static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:22:41-22:63: static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:92:33-92:55: static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-
drivers/net/wireless/mediatek/mt76/dma.c:83:44-83:63: mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:121:40-121:59: mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:181:47-181:66: mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:205:41-205:60: mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:214:43-214:62: mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:221:43-221:62: mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
-
drivers/net/wireless/mediatek/mt76/dma.c:262:40-262:59: mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:288:40-288:59: mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
-
drivers/net/wireless/mediatek/mt76/dma.c:309:49-309:68: mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:339:45-339:64: mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:430:40-430:59: mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:468:43-468:62: mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:513:41-513:60: mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-
drivers/net/wireless/mediatek/mt76/dma.c:537:43-537:62: mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:511:36-511:53: void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:950:50-950:67: void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/core.c:6:53-6:70: void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:71:49-71:66: void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:111:46-111:65: mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mac.c:1474:49-1474:66: void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c:80:48-80:65: mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c:216:55-216:74: static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:107:48-107:67: mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:241:54-241:71: void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c:35:50-35:67: void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/dma.c:22:49-22:66: void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/pci.c:82:48-82:65: mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7921/dma.c:22:49-22:66: void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7921/pci.c:21:48-21:65: mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:96:25-96:44: mt76s_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:112:46-112:65: mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:158:57-158:76: static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:239:42-239:61: mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:266:46-266:65: mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:296:49-296:68: static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:385:18-385:37: mt76_txq_stopped(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:392:43-392:62: mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/tx.c:630:51-630:70: void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:374:40-374:59: mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-
drivers/net/wireless/mediatek/mt76/usb.c:409:39-409:58: mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:445:42-445:61: mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:493:25-493:44: mt76u_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:650:46-650:65: mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:740:43-740:62: mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:900:42-900:61: mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:933:49-933:68: static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:460:35-460:60: static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:484:7-484:32: struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/tx.c:21:17-21:20: static u8 q2hwq(u8 q)
-
drivers/net/wireless/st/cw1200/debug.c:70:10-70:31: struct cw1200_queue *q)
-
drivers/net/wireless/ti/wlcore/tx.c:508:33-508:36: struct wl1271_link *lnk, u8 q)
-
drivers/nvdimm/blk.c:231:34-231:40: static void nd_blk_release_queue(void *q)
-
drivers/nvme/host/core.c:602:36-602:58: struct request *nvme_alloc_request(struct request_queue *q,
-
drivers/nvme/host/core.c:614:47-614:69: static struct request *nvme_alloc_request_qid(struct request_queue *q,
-
drivers/nvme/host/core.c:953:36-953:58: static void nvme_execute_rq_polled(struct request_queue *q,
-
drivers/nvme/host/core.c:974:28-974:50: int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1014:26-1014:48: int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1143:33-1143:55: static int nvme_submit_user_cmd(struct request_queue *q,
-
drivers/nvme/host/core.c:2077:3-2077:25: struct request_queue *q)
-
drivers/nvme/host/core.c:2765:6-2765:42: const struct nvme_core_quirk_entry *q)
-
drivers/nvme/host/lightnvm.c:647:47-647:69: static struct request *nvme_nvm_alloc_request(struct request_queue *q,
-
drivers/nvme/host/lightnvm.c:750:37-750:59: static int nvme_nvm_submit_user_cmd(struct request_queue *q,
-
drivers/nvme/target/fc.c:2100:22-2100:49: queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
-
drivers/pcmcia/cistpl.c:761:37-761:45: static int parse_strings(u_char *p, u_char *q, int max,
-
drivers/pcmcia/cistpl.c:906:39-906:47: static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
-
drivers/pcmcia/cistpl.c:943:40-943:48: static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
-
drivers/pcmcia/cistpl.c:978:36-978:44: static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-
drivers/pcmcia/cistpl.c:1022:37-1022:45: static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
-
drivers/pcmcia/cistpl.c:1063:37-1063:45: static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
-
drivers/platform/chrome/wilco_ec/event.c:119:38-119:61: static inline bool event_queue_empty(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:125:37-125:60: static inline bool event_queue_full(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:131:41-131:64: static struct ec_event *event_queue_pop(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:149:42-149:65: static struct ec_event *event_queue_push(struct ec_event_queue *q,
-
drivers/platform/chrome/wilco_ec/event.c:162:30-162:53: static void event_queue_free(struct ec_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:619:35-619:60: static void ssam_event_queue_push(struct ssam_event_queue *q,
-
drivers/platform/surface/aggregator/controller.c:634:53-634:78: static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:651:39-651:64: static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
-
drivers/ptp/ptp_clock.c:36:30-36:60: static inline int queue_free(struct timestamp_event_queue *q)
-
drivers/ptp/ptp_private.h:58:29-58:59: static inline int queue_cnt(struct timestamp_event_queue *q)
-
drivers/scsi/aacraid/comminit.c:259:50-259:69: static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
-
drivers/scsi/aacraid/commsup.c:800:44-800:63: int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
-
drivers/scsi/aacraid/commsup.c:832:46-832:64: void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
-
drivers/scsi/aacraid/dpcsup.c:39:34-39:53: unsigned int aac_response_normal(struct aac_queue * q)
-
drivers/scsi/aacraid/dpcsup.c:158:33-158:51: unsigned int aac_command_normal(struct aac_queue *q)
-
drivers/scsi/be2iscsi/be.h:51:37-51:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:56:35-56:57: static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
-
drivers/scsi/be2iscsi/be.h:61:37-61:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:66:35-66:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:71:35-71:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_cmds.c:900:54-900:76: int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:2978:26-2978:48: static int be_fill_queue(struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:3306:53-3306:75: static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_main.c:3316:53-3316:75: static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
-
drivers/scsi/bfa/bfa_cs.h:157:20-157:38: bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
-
drivers/scsi/csiostor/csio_scsi.c:1159:48-1159:66: csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
-
drivers/scsi/csiostor/csio_scsi.c:1233:46-1233:64: csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
-
drivers/scsi/csiostor/csio_wr.c:1000:24-1000:39: csio_wr_avail_qcredits(struct csio_q *q)
-
drivers/scsi/csiostor/csio_wr.c:1042:40-1042:55: csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/csiostor/csio_wr.c:1112:18-1112:33: csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
-
drivers/scsi/csiostor/csio_wr.c:1129:40-1129:55: csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/hpsa.c:990:53-990:56: static inline u32 next_command(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.c:6936:70-6936:73: static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:489:68-489:71: static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:527:2-527:29: __attribute__((unused)) u8 q)
-
drivers/scsi/hpsa.h:590:71-590:74: static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/libiscsi.c:2601:17-2601:36: iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
-
drivers/scsi/libiscsi.c:2641:22-2641:41: void iscsi_pool_free(struct iscsi_pool *q)
-
drivers/scsi/lpfc/lpfc_attr.c:1169:41-1169:59: lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
-
drivers/scsi/lpfc/lpfc_debugfs.c:4174:28-4174:47: lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
-
drivers/scsi/lpfc/lpfc_debugfs.h:332:20-332:39: lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
-
drivers/scsi/lpfc/lpfc_debugfs.h:380:19-380:38: lpfc_debug_dump_q(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:266:18-266:37: lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
-
drivers/scsi/lpfc/lpfc_sli.c:361:22-361:41: lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
-
drivers/scsi/lpfc/lpfc_sli.c:383:18-383:37: lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
-
drivers/scsi/lpfc/lpfc_sli.c:422:22-422:41: lpfc_sli4_mq_release(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:444:18-444:37: lpfc_sli4_eq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:476:23-476:42: lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:495:27-495:46: lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:516:46-516:65: lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:554:50-554:69: lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:685:18-685:37: lpfc_sli4_cq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:736:46-736:65: lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:769:50-769:69: lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli4.h:1166:34-1166:53: static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
-
drivers/scsi/scsi_dh.c:251:22-251:44: int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-
drivers/scsi/scsi_dh.c:298:24-298:46: int scsi_dh_set_params(struct request_queue *q, const char *params)
-
drivers/scsi/scsi_dh.c:320:20-320:42: int scsi_dh_attach(struct request_queue *q, const char *name)
-
drivers/scsi/scsi_dh.c:359:43-359:65: const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-
drivers/scsi/scsi_lib.c:334:29-334:51: static void scsi_kick_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:479:28-479:50: static void scsi_run_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:654:11-654:33: struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1262:40-1262:62: static inline int scsi_dev_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1349:41-1349:63: static inline int scsi_host_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1409:30-1409:52: static bool scsi_mq_lld_busy(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1608:32-1608:54: static void scsi_mq_put_budget(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1615:32-1615:54: static bool scsi_mq_get_budget(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1801:49-1801:71: void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1944:44-1944:66: struct scsi_device *scsi_device_from_queue(struct request_queue *q)
-
drivers/scsi/scsi_transport_fc.c:4347:15-4347:37: fc_bsg_remove(struct request_queue *q)
-
drivers/scsi/sg.c:847:30-847:52: static int max_sectors_bytes(struct request_queue *q)
-
drivers/scsi/ufs/ufshcd-crypto.c:234:10-234:32: struct request_queue *q)
-
drivers/spi/spi-fsl-qspi.c:278:37-278:54: static inline int needs_swap_endian(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:283:34-283:51: static inline int needs_4x_clock(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:288:37-288:54: static inline int needs_fill_txfifo(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:293:42-293:59: static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:298:42-298:59: static inline int needs_amba_base_offset(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:303:37-303:54: static inline int needs_tdh_setting(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:312:40-312:57: static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-
drivers/spi/spi-fsl-qspi.c:324:25-324:42: static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:332:23-332:40: static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:356:36-356:53: static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
-
drivers/spi/spi-fsl-qspi.c:416:34-416:51: static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:472:37-472:54: static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:492:41-492:58: static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:508:33-508:50: static void fsl_qspi_invalidate(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:526:33-526:50: static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
-
drivers/spi/spi-fsl-qspi.c:552:31-552:48: static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:559:34-559:51: static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:584:34-584:51: static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:605:27-605:44: static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:630:37-630:54: static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
-
drivers/spi/spi-fsl-qspi.c:721:35-721:52: static int fsl_qspi_default_setup(struct fsl_qspi *q)
-
drivers/staging/fieldbus/anybuss/host.c:324:28-324:42: ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd)
-
drivers/staging/fieldbus/anybuss/host.c:336:36-336:50: ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:353:41-353:55: ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:862:48-862:62: static void process_q(struct anybuss_host *cd, struct kfifo *q)
-
drivers/staging/fieldbus/anybuss/host.c:1226:44-1226:58: static int taskq_alloc(struct device *dev, struct kfifo *q)
-
drivers/staging/media/atomisp/pci/atomisp_fops.c:1075:34-1075:57: int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/staging/media/atomisp/pci/atomisp_ioctl.c:902:41-902:64: static void atomisp_videobuf_free_queue(struct videobuf_queue *q)
-
drivers/staging/media/hantro/hantro_v4l2.c:654:32-654:50: static bool hantro_vq_is_coded(struct vb2_queue *q)
-
drivers/staging/media/hantro/hantro_v4l2.c:661:35-661:53: static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/hantro/hantro_v4l2.c:702:20-702:38: hantro_return_bufs(struct vb2_queue *q,
-
drivers/staging/media/hantro/hantro_v4l2.c:719:35-719:53: static void hantro_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/imx/imx-media-csc-scaler.c:501:43-501:61: static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
-
drivers/staging/media/imx/imx-media-csc-scaler.c:550:43-550:61: static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/ipu3/ipu3-css.c:181:36-181:59: static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
-
drivers/staging/media/meson/vdec/vdec.c:164:33-164:51: static void process_num_buffers(struct vb2_queue *q,
-
drivers/staging/media/meson/vdec/vdec.c:189:29-189:47: static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/staging/media/meson/vdec/vdec.c:280:33-280:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/meson/vdec/vdec.c:395:33-395:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/rkvdec/rkvdec.c:514:35-514:53: static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/rkvdec/rkvdec.c:557:35-557:53: static void rkvdec_stop_streaming(struct vb2_queue *q)
-
drivers/target/target_core_device.c:824:12-824:34: struct request_queue *q)
-
drivers/target/target_core_iblock.c:194:2-194:24: struct request_queue *q)
-
drivers/usb/musb/musb_host.h:46:40-46:58: static inline struct musb_qh *first_qh(struct list_head *q)
-
drivers/usb/serial/digi_acceleport.c:343:2-343:21: wait_queue_head_t *q, long timeout,
-
drivers/visorbus/visorchannel.c:148:62-148:66: static int sig_queue_offset(struct channel_header *chan_hdr, int q)
-
drivers/visorbus/visorchannel.c:158:61-158:65: static int sig_data_offset(struct channel_header *chan_hdr, int q,
-
fs/cifs/dir.c:824:54-824:67: static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
-
fs/erofs/zdata.c:1148:10-1148:44: struct z_erofs_decompressqueue *q[],
-
fs/ext2/inode.c:1002:41-1002:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/ext2/inode.c:1103:67-1103:75: static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
-
fs/ext2/inode.c:1143:64-1143:72: static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
-
fs/ext4/indirect.c:743:41-743:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/jffs2/compr_rubin.c:164:4-164:18: unsigned long q)
-
fs/minix/itree_common.c:215:42-215:51: static inline int all_zeroes(block_t *p, block_t *q)
-
fs/minix/itree_common.c:263:63-263:72: static inline void free_data(struct inode *inode, block_t *p, block_t *q)
-
fs/minix/itree_common.c:276:60-276:69: static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
-
fs/sysv/itree.c:269:46-269:59: static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:326:67-326:80: static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:338:64-338:77: static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
-
fs/xfs/xfs_trans_dquot.c:279:2-279:20: struct xfs_dqtrx *q)
-
include/crypto/b128ops.h:64:53-64:65: static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
-
include/crypto/b128ops.h:70:56-70:69: static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-
include/crypto/b128ops.h:75:56-75:69: static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
-
include/linux/blk-cgroup.h:327:11-327:33: struct request_queue *q,
-
include/linux/blk-cgroup.h:351:9-351:31: struct request_queue *q)
-
include/linux/blk-cgroup.h:363:52-363:74: static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-
include/linux/blk-mq.h:545:44-545:66: static inline bool blk_should_fake_timeout(struct request_queue *q)
-
include/linux/blkdev.h:690:32-690:54: static inline bool queue_is_mq(struct request_queue *q)
-
include/linux/blkdev.h:696:48-696:70: static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-
include/linux/blkdev.h:708:23-708:45: blk_queue_zoned_model(struct request_queue *q)
-
include/linux/blkdev.h:715:39-715:61: static inline bool blk_queue_is_zoned(struct request_queue *q)
-
include/linux/blkdev.h:726:47-726:69: static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
-
include/linux/blkdev.h:732:47-732:69: static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-
include/linux/blkdev.h:737:46-737:68: static inline unsigned int blk_queue_zone_no(struct request_queue *q,
-
include/linux/blkdev.h:745:42-745:64: static inline bool blk_queue_zone_is_seq(struct request_queue *q,
-
include/linux/blkdev.h:755:45-755:67: static inline void blk_queue_max_open_zones(struct request_queue *q,
-
include/linux/blkdev.h:761:49-761:77: static inline unsigned int queue_max_open_zones(const struct request_queue *q)
-
include/linux/blkdev.h:766:47-766:69: static inline void blk_queue_max_active_zones(struct request_queue *q,
-
include/linux/blkdev.h:772:51-772:79: static inline unsigned int queue_max_active_zones(const struct request_queue *q)
-
include/linux/blkdev.h:837:44-837:66: static inline unsigned int blk_queue_depth(struct request_queue *q)
-
include/linux/blkdev.h:1062:54-1062:76: static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-
include/linux/blkdev.h:1082:48-1082:70: static inline unsigned int blk_max_size_offset(struct request_queue *q,
-
include/linux/blkdev.h:1218:33-1218:55: static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
include/linux/blkdev.h:1390:52-1390:80: static inline unsigned long queue_segment_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1395:49-1395:77: static inline unsigned long queue_virt_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1400:46-1400:74: static inline unsigned int queue_max_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1405:49-1405:77: static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1410:49-1410:77: static inline unsigned short queue_max_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1415:57-1415:85: static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1420:51-1420:79: static inline unsigned int queue_max_segment_size(const struct request_queue *q)
-
include/linux/blkdev.h:1425:58-1425:86: static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1433:49-1433:77: static inline unsigned queue_logical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1448:54-1448:82: static inline unsigned int queue_physical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1458:41-1458:69: static inline unsigned int queue_io_min(const struct request_queue *q)
-
include/linux/blkdev.h:1468:41-1468:69: static inline unsigned int queue_io_opt(const struct request_queue *q)
-
include/linux/blkdev.h:1479:30-1479:58: queue_zone_write_granularity(const struct request_queue *q)
-
include/linux/blkdev.h:1490:42-1490:70: static inline int queue_alignment_offset(const struct request_queue *q)
-
include/linux/blkdev.h:1519:43-1519:71: static inline int queue_discard_alignment(const struct request_queue *q)
-
include/linux/blkdev.h:1627:39-1627:67: static inline int queue_dma_alignment(const struct request_queue *q)
-
include/linux/blkdev.h:1632:34-1632:56: static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
-
include/linux/blkdev.h:1717:40-1717:62: blk_integrity_queue_supports_integrity(struct request_queue *q)
-
include/linux/blkdev.h:1727:53-1727:75: static inline void blk_queue_max_integrity_segments(struct request_queue *q,
-
include/linux/blkdev.h:1734:30-1734:58: queue_max_integrity_segments(const struct request_queue *q)
-
include/linux/blktrace_api.h:64:51-64:73: static inline bool blk_trace_note_message_enabled(struct request_queue *q)
-
include/linux/fortify-string.h:30:41-30:53: __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-
include/linux/fortify-string.h:41:40-41:52: __FORTIFY_INLINE char *strcat(char *p, const char *q)
-
include/linux/fortify-string.h:80:42-80:54: __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-
include/linux/fortify-string.h:104:43-104:55: __FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
-
include/linux/fortify-string.h:150:41-150:53: __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
-
include/linux/fortify-string.h:178:40-178:52: __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
-
include/linux/fortify-string.h:194:41-194:53: __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-
include/linux/fortify-string.h:222:44-222:56: __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
-
include/linux/fortify-string.h:274:40-274:52: __FORTIFY_INLINE char *strcpy(char *p, const char *q)
-
include/linux/mlx4/qp.h:496:29-496:33: static inline u16 folded_qp(u32 q)
-
include/linux/netdevice.h:639:47-639:74: static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
-
include/linux/netdevice.h:648:49-648:70: static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
-
include/linux/netdevice.h:3558:42-3558:63: static inline void netdev_tx_reset_queue(struct netdev_queue *q)
-
include/linux/sunrpc/sched.h:283:38-283:67: static inline const char * rpc_qname(const struct rpc_wait_queue *q)
-
include/linux/sunrpc/sched.h:288:46-288:69: static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
-
include/media/videobuf-core.h:162:40-162:63: static inline void videobuf_queue_lock(struct videobuf_queue *q)
-
include/media/videobuf-core.h:168:42-168:65: static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-
include/media/videobuf2-core.h:649:49-649:67: static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1096:37-1096:55: static inline bool vb2_is_streaming(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1114:41-1114:59: static inline bool vb2_fileio_is_active(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1125:32-1125:50: static inline bool vb2_is_busy(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1134:38-1134:56: static inline void *vb2_get_drv_priv(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1185:47-1185:65: static inline bool vb2_start_streaming_called(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1194:51-1194:69: static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1209:49-1209:67: static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
-
include/net/inet_frag.h:132:34-132:58: static inline void inet_frag_put(struct inet_frag_queue *q)
-
include/net/ipv6_frag.h:31:33-31:57: static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
-
include/net/pkt_cls.h:164:19-164:33: __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
-
include/net/pkt_cls.h:190:21-190:35: __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
-
include/net/pkt_sched.h:22:32-22:46: static inline void *qdisc_priv(struct Qdisc *q)
-
include/net/pkt_sched.h:128:30-128:44: static inline void qdisc_run(struct Qdisc *q)
-
include/net/pkt_sched.h:149:37-149:51: static inline struct net *qdisc_net(struct Qdisc *q)
-
include/net/sch_generic.h:147:42-147:62: static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-
include/net/sch_generic.h:464:34-464:54: static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-
include/net/sch_generic.h:469:30-469:50: static inline int qdisc_qlen(const struct Qdisc *q)
-
include/net/sch_generic.h:474:34-474:54: static inline int qdisc_qlen_sum(const struct Qdisc *q)
-
include/net/sch_generic.h:556:34-556:48: static inline void sch_tree_lock(struct Qdisc *q)
-
include/net/sch_generic.h:564:36-564:50: static inline void sch_tree_unlock(struct Qdisc *q)
-
include/net/sctp/structs.h:1115:35-1115:53: static inline void sctp_outq_cork(struct sctp_outq *q)
-
include/trace/events/block.h:233:1-233:1: TRACE_EVENT(block_bio_complete,
-
include/trace/events/block.h:356:1-356:1: TRACE_EVENT(block_plug,
-
include/trace/events/block.h:401:1-401:1: DEFINE_EVENT(block_unplug, block_unplug,
-
include/trace/events/kyber.h:14:1-14:1: TRACE_EVENT(kyber_latency,
-
include/trace/events/kyber.h:48:1-48:1: TRACE_EVENT(kyber_adjust,
-
include/trace/events/kyber.h:72:1-72:1: TRACE_EVENT(kyber_throttled,
-
include/trace/events/qdisc.h:49:1-49:1: TRACE_EVENT(qdisc_reset,
-
include/trace/events/qdisc.h:74:1-74:1: TRACE_EVENT(qdisc_destroy,
-
include/trace/events/sunrpc.h:442:1-442:1: DEFINE_RPC_QUEUED_EVENT(sleep);
-
include/trace/events/sunrpc.h:443:1-443:1: DEFINE_RPC_QUEUED_EVENT(wakeup);
-
include/trace/events/v4l2.h:181:1-181:1: DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
-
include/trace/events/v4l2.h:245:1-245:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
-
include/trace/events/v4l2.h:250:1-250:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
-
include/trace/events/v4l2.h:255:1-255:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
-
include/trace/events/v4l2.h:260:1-260:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
-
include/trace/events/vb2.h:11:1-11:1: DECLARE_EVENT_CLASS(vb2_event_class,
-
include/trace/events/vb2.h:46:1-46:1: DEFINE_EVENT(vb2_event_class, vb2_buf_done,
-
include/trace/events/vb2.h:51:1-51:1: DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
-
include/trace/events/vb2.h:56:1-56:1: DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
-
include/trace/events/vb2.h:61:1-61:1: DEFINE_EVENT(vb2_event_class, vb2_qbuf,
-
ipc/sem.c:643:61-643:79: static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:716:56-716:74: static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:784:46-784:64: static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
-
ipc/sem.c:795:49-795:67: static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:812:56-812:74: static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:1068:57-1068:75: static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
-
kernel/auditfilter.c:1069:39-1069:60: static void audit_list_rules(int seq, struct sk_buff_head *q)
-
kernel/cgroup/cpuset.c:448:53-448:74: static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
-
kernel/futex.c:1447:29-1447:45: static void __unqueue_futex(struct futex_q *q)
-
kernel/futex.c:1466:57-1466:73: static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
-
kernel/futex.c:1778:20-1778:36: void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
-
kernel/futex.c:1811:28-1811:44: void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
-
kernel/futex.c:2193:52-2193:68: static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
-
kernel/futex.c:2224:31-2224:47: static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex.c:2255:29-2255:45: static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex.c:2273:23-2273:39: static int unqueue_me(struct futex_q *q)
-
kernel/futex.c:2321:27-2321:43: static void unqueue_me_pi(struct futex_q *q)
-
kernel/futex.c:2333:54-2333:70: static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex.c:2505:52-2505:68: static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex.c:2536:43-2536:59: static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
-
kernel/futex.c:2579:63-2579:79: static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
-
kernel/futex.c:2629:7-2629:23: struct futex_q *q, struct futex_hash_bucket **hb)
-
kernel/futex.c:3097:8-3097:24: struct futex_q *q, union futex_key *key2,
-
kernel/sched/swait.c:7:30-7:55: void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-
kernel/sched/swait.c:22:22-22:47: void swake_up_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:42:26-42:51: void swake_up_all_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:48:19-48:44: void swake_up_one(struct swait_queue_head *q)
-
kernel/sched/swait.c:62:19-62:44: void swake_up_all(struct swait_queue_head *q)
-
kernel/sched/swait.c:85:25-85:50: void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:92:33-92:58: void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:103:29-103:54: long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:126:21-126:46: void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:133:19-133:44: void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/signal.c:452:29-452:46: static void __sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1818:20-1818:37: void sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1843:19-1843:36: int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
-
kernel/trace/blktrace.c:345:31-345:53: static int __blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:360:22-360:44: int blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:472:31-472:53: static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:579:30-579:52: static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:600:21-600:43: int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:615:35-615:57: static int compat_blk_trace_setup(struct request_queue *q, char *name,
-
kernel/trace/blktrace.c:648:34-648:56: static int __blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:690:25-690:47: int blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:761:25-761:47: void blk_trace_shutdown(struct request_queue *q)
-
kernel/trace/blktrace.c:774:35-774:57: static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:883:31-883:53: static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-
kernel/trace/blktrace.c:907:12-907:34: struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:935:46-935:68: static void blk_add_trace_plug(void *ignore, struct request_queue *q)
-
kernel/trace/blktrace.c:946:48-946:70: static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
-
kernel/trace/blktrace.c:1599:35-1599:57: static int blk_trace_remove_queue(struct request_queue *q)
-
kernel/trace/blktrace.c:1617:34-1617:56: static int blk_trace_setup_queue(struct request_queue *q,
-
lib/bch.c:816:29-816:45: const struct gf_poly *b, struct gf_poly *q)
-
lib/crypto/curve25519-hacl64.c:547:12-547:17: u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:559:24-559:29: u64 *nqpq2, u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:569:7-569:12: u64 *q, u8 byt, u32 i)
-
lib/crypto/curve25519-hacl64.c:580:22-580:27: u64 *nqpq2, u64 *q,
-
lib/crypto/curve25519-hacl64.c:590:47-590:52: static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
-
mm/filemap.c:1193:43-1193:62: static inline int wait_on_page_bit_common(wait_queue_head_t *q,
-
mm/kasan/quarantine.c:42:25-42:44: static bool qlist_empty(struct qlist_head *q)
-
mm/kasan/quarantine.c:47:24-47:43: static void qlist_init(struct qlist_head *q)
-
mm/kasan/quarantine.c:53:23-53:42: static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
-
mm/kasan/quarantine.c:152:28-152:47: static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
-
mm/swapfile.c:1199:6-1199:31: struct swap_info_struct *q)
-
net/core/dev.c:3051:32-3051:46: static void __netif_reschedule(struct Qdisc *q)
-
net/core/dev.c:3065:23-3065:37: void __netif_schedule(struct Qdisc *q)
-
net/core/dev.c:3782:55-3782:69: static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
-
net/core/gen_stats.c:287:8-287:48: const struct gnet_stats_queue __percpu *q)
-
net/core/gen_stats.c:304:9-304:40: const struct gnet_stats_queue *q,
-
net/core/gen_stats.c:338:9-338:34: struct gnet_stats_queue *q, __u32 qlen)
-
net/decnet/af_decnet.c:1643:43-1643:64: static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
-
net/decnet/dn_nsp_out.c:369:67-369:88: int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
-
net/ieee802154/6lowpan/reassembly.c:36:30-36:54: static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/inet_fragment.c:54:36-54:60: static void fragrun_append_to_last(struct inet_frag_queue *q,
-
net/ipv4/inet_fragment.c:65:28-65:52: static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
-
net/ipv4/inet_fragment.c:285:24-285:48: void inet_frag_destroy(struct inet_frag_queue *q)
-
net/ipv4/inet_fragment.c:375:28-375:52: int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:439:31-439:55: void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:508:29-508:53: void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-
net/ipv4/inet_fragment.c:576:37-576:61: struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
-
net/ipv4/ip_fragment.c:82:27-82:51: static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/ip_fragment.c:96:27-96:51: static void ip4_frag_free(struct inet_frag_queue *q)
-
net/netfilter/nfnetlink_queue.c:102:17-102:40: instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
-
net/netfilter/nfnetlink_queue.c:116:17-116:40: instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
-
net/netfilter/nfnetlink_queue.c:183:18-183:41: instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
-
net/netfilter/nfnetlink_queue.c:1018:25-1018:48: verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
-
net/rds/message.c:75:33-75:61: void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
-
net/rds/rds.h:382:49-382:77: static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
-
net/rose/rose_in.c:101:101-101:105: static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/rose/rose_subr.c:201:56-201:61: int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
-
net/sched/cls_api.c:701:60-701:74: static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:741:63-741:77: static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:859:60-859:74: static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1024:46-1024:61: static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1099:32-1099:46: static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
-
net/sched/cls_api.c:1119:60-1119:74: static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1156:54-1156:68: static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1193:58-1193:73: static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1227:31-1227:45: static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
-
net/sched/cls_api.c:1249:11-1249:25: struct Qdisc *q,
-
net/sched/cls_api.c:1270:11-1270:25: struct Qdisc *q,
-
net/sched/cls_api.c:1285:5-1285:19: struct Qdisc *q,
-
net/sched/cls_api.c:1300:51-1300:65: int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-
net/sched/cls_api.c:1359:46-1359:60: struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
-
net/sched/cls_api.c:1375:49-1375:63: void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1798:5-1798:19: struct Qdisc *q, u32 parent, void *fh,
-
net/sched/cls_api.c:1852:31-1852:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1884:35-1884:49: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1924:31-1924:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2472:53-2472:67: static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
-
net/sched/cls_basic.c:266:71-266:77: static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_bpf.c:635:11-635:17: void *q, unsigned long base)
-
net/sched/cls_flower.c:3171:68-3171:74: static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_fw.c:422:68-422:74: static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_matchall.c:400:70-400:76: static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_route.c:644:72-644:78: static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_rsvp.h:739:70-739:76: static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_tcindex.c:697:11-697:17: void *q, unsigned long base)
-
net/sched/cls_u32.c:1248:69-1248:75: static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/sch_api.c:278:21-278:35: void qdisc_hash_add(struct Qdisc *q, bool invisible)
-
net/sched/sch_api.c:289:21-289:35: void qdisc_hash_del(struct Qdisc *q)
-
net/sched/sch_api.c:878:47-878:61: static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-
net/sched/sch_api.c:957:34-957:48: static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
-
net/sched/sch_api.c:1372:23-1372:37: static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
-
net/sched/sch_api.c:1388:15-1388:29: check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
-
net/sched/sch_api.c:1796:48-1796:62: static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_api.c:1843:25-1843:39: struct nlmsghdr *n, struct Qdisc *q,
-
net/sched/sch_api.c:1869:9-1869:23: struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1932:33-1932:47: static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1963:28-1963:42: static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
-
net/sched/sch_api.c:2130:29-2130:43: static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2140:33-2140:47: static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
-
net/sched/sch_cake.c:646:22-646:44: static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-
net/sched/sch_cake.c:1147:40-1147:64: static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
-
net/sched/sch_cake.c:1310:31-1310:55: static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
-
net/sched/sch_cake.c:1345:26-1345:50: static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
-
net/sched/sch_cake.c:1392:28-1392:52: static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
-
net/sched/sch_cake.c:1404:34-1404:64: static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1411:26-1411:50: static void cake_heapify(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1448:29-1448:53: static void cake_heapify_up(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1464:32-1464:56: static int cake_advance_shaper(struct cake_sched_data *q,
-
net/sched/sch_cake.c:2960:25-2960:39: static void cake_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_cbq.c:166:18-166:41: cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
-
net/sched/sch_cbq.c:342:19-342:42: cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbq.c:444:40-444:63: static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
-
net/sched/sch_cbq.c:529:21-529:44: cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
-
net/sched/sch_cbq.c:551:12-551:35: cbq_update(struct cbq_sched_data *q)
-
net/sched/sch_cbq.c:884:34-884:57: static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
-
net/sched/sch_cbq.c:1081:24-1081:47: static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbq.c:1088:25-1088:48: static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbs.c:251:5-251:28: struct cbs_sched_data *q)
-
net/sched/sch_cbs.c:276:55-276:78: static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-
net/sched/sch_cbs.c:309:55-309:78: static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
-
net/sched/sch_choke.c:76:31-76:62: static unsigned int choke_len(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:82:20-82:51: static int use_ecn(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:88:25-88:56: static int use_harddrop(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:94:34-94:59: static void choke_zap_head_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:104:34-104:59: static void choke_zap_tail_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:180:42-180:73: static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
-
net/sched/sch_choke.c:200:32-200:63: static bool choke_match_random(const struct choke_sched_data *q,
-
net/sched/sch_etf.c:297:5-297:28: struct etf_sched_data *q)
-
net/sched/sch_etf.c:319:55-319:78: static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
-
net/sched/sch_ets.c:190:33-190:51: static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
-
net/sched/sch_fifo.c:227:20-227:34: int fifo_set_limit(struct Qdisc *q, unsigned int limit)
-
net/sched/sch_fq.c:172:37-172:59: static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:179:35-179:57: static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:217:19-217:41: static void fq_gc(struct fq_sched_data *q,
-
net/sched/sch_fq.c:261:57-261:79: static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
-
net/sched/sch_fq.c:437:9-437:37: const struct fq_sched_data *q)
-
net/sched/sch_fq.c:499:32-499:54: static void fq_check_throttled(struct fq_sched_data *q, u64 now)
-
net/sched/sch_fq.c:697:23-697:45: static void fq_rehash(struct fq_sched_data *q,
-
net/sched/sch_fq_codel.c:70:35-70:69: static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-
net/sched/sch_fq_codel.c:593:29-593:43: static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_fq_pie.c:73:33-73:65: static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
-
net/sched/sch_generic.c:51:53-51:67: static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:86:57-86:71: static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:96:46-96:60: static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
-
net/sched/sch_generic.c:120:57-120:71: static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-
net/sched/sch_generic.c:152:34-152:48: static void try_bulk_dequeue_skb(struct Qdisc *q,
-
net/sched/sch_generic.c:176:39-176:53: static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
-
net/sched/sch_generic.c:202:36-202:50: static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
-
net/sched/sch_generic.c:285:43-285:57: bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_generic.c:357:34-357:48: static inline bool qdisc_restart(struct Qdisc *q, int *packets)
-
net/sched/sch_generic.c:379:18-379:32: void __qdisc_run(struct Qdisc *q)
-
net/sched/sch_gred.c:113:6-113:30: struct gred_sched_data *q,
-
net/sched/sch_gred.c:128:11-128:35: struct gred_sched_data *q)
-
net/sched/sch_gred.c:135:12-135:36: struct gred_sched_data *q)
-
net/sched/sch_gred.c:141:25-141:49: static int gred_use_ecn(struct gred_sched_data *q)
-
net/sched/sch_gred.c:146:30-146:54: static int gred_use_harddrop(struct gred_sched_data *q)
-
net/sched/sch_gred.c:396:36-396:60: static inline void gred_destroy_vq(struct gred_sched_data *q)
-
net/sched/sch_hfsc.c:219:18-219:37: eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
-
net/sched/sch_hfsc.c:236:18-236:37: eltree_get_minel(struct hfsc_sched *q)
-
net/sched/sch_hhf.c:182:12-182:35: struct hhf_sched_data *q)
-
net/sched/sch_hhf.c:213:8-213:31: struct hhf_sched_data *q)
-
net/sched/sch_htb.c:306:34-306:52: static void htb_add_to_wait_tree(struct htb_sched *q,
-
net/sched/sch_htb.c:349:41-349:59: static inline void htb_add_class_to_row(struct htb_sched *q,
-
net/sched/sch_htb.c:378:46-378:64: static inline void htb_remove_class_from_row(struct htb_sched *q,
-
net/sched/sch_htb.c:406:32-406:50: static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:441:34-441:52: static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:532:23-532:41: htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
-
net/sched/sch_htb.c:561:33-561:51: static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:577:35-577:53: static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:661:30-661:48: static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-
net/sched/sch_htb.c:706:26-706:44: static s64 htb_do_events(struct htb_sched *q, const int level,
-
net/sched/sch_htb.c:829:41-829:59: static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
-
net/sched/sch_htb.c:1002:41-1002:55: static void htb_set_lockdep_class_child(struct Qdisc *q)
-
net/sched/sch_htb.c:1269:41-1269:59: static void htb_offload_aggregate_stats(struct htb_sched *q,
-
net/sched/sch_multiq.c:320:27-320:41: static void multiq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_netem.c:200:25-200:50: static bool loss_4state(struct netem_sched_data *q)
-
net/sched/sch_netem.c:265:27-265:52: static bool loss_gilb_ell(struct netem_sched_data *q)
-
net/sched/sch_netem.c:286:24-286:49: static bool loss_event(struct netem_sched_data *q)
-
net/sched/sch_netem.c:345:36-345:67: static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
-
net/sched/sch_netem.c:629:27-629:52: static void get_slot_next(struct netem_sched_data *q, u64 now)
-
net/sched/sch_netem.c:648:35-648:60: static struct sk_buff *netem_peek(struct netem_sched_data *q)
-
net/sched/sch_netem.c:665:30-665:55: static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
-
net/sched/sch_netem.c:806:22-806:47: static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:828:29-828:54: static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:837:25-837:50: static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:845:25-845:50: static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:853:22-853:47: static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:867:25-867:50: static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:1088:28-1088:59: static int dump_loss_model(const struct netem_sched_data *q,
-
net/sched/sch_prio.c:343:25-343:39: static void prio_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_qfq.c:253:26-253:44: static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:263:43-263:61: static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:277:28-277:46: static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:311:28-311:46: static void qfq_add_to_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:328:29-328:47: static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:341:34-341:52: static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:352:29-352:47: static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:365:35-365:53: static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:730:41-730:59: static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
-
net/sched/sch_qfq.c:747:27-747:45: static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
-
net/sched/sch_qfq.c:770:36-770:54: static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
-
net/sched/sch_qfq.c:777:32-777:50: static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
-
net/sched/sch_qfq.c:803:31-803:49: static void qfq_make_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:958:33-958:51: static void qfq_update_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1032:30-1032:48: static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1065:19-1065:37: qfq_update_agg_ts(struct qfq_sched *q,
-
net/sched/sch_qfq.c:1155:50-1155:68: static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1271:30-1271:48: static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1318:30-1318:48: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:1332:29-1332:47: static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-
net/sched/sch_qfq.c:1355:32-1355:50: static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_red.c:55:31-55:54: static inline int red_use_ecn(struct red_sched_data *q)
-
net/sched/sch_red.c:60:36-60:59: static inline int red_use_harddrop(struct red_sched_data *q)
-
net/sched/sch_red.c:65:27-65:50: static int red_use_nodrop(struct red_sched_data *q)
-
net/sched/sch_sfb.c:123:55-123:78: static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:138:55-138:78: static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:152:11-152:34: struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:167:55-167:78: static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:180:50-180:73: static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:185:50-185:73: static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:190:34-190:57: static void sfb_zero_all_buckets(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:198:56-198:85: static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:218:45-218:68: static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:224:27-224:50: static void sfb_swap_slot(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:234:49-234:72: static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfq.c:150:45-150:68: static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
-
net/sched/sch_sfq.c:157:30-157:59: static unsigned int sfq_hash(const struct sfq_sched_data *q,
-
net/sched/sch_sfq.c:203:29-203:52: static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:228:28-228:51: static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:241:28-241:51: static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:329:26-329:55: static int sfq_prob_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:335:26-335:55: static int sfq_hard_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:340:25-340:54: static int sfq_headdrop(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:841:24-841:38: static void sfq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_skbprio.c:40:31-40:64: static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_skbprio.c:53:30-53:63: static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_taprio.c:98:32-98:53: static ktime_t taprio_get_time(struct taprio_sched *q)
-
net/sched/sch_taprio.c:128:30-128:51: static void switch_schedules(struct taprio_sched *q,
-
net/sched/sch_taprio.c:176:31-176:52: static int length_to_duration(struct taprio_sched *q, int len)
-
net/sched/sch_taprio.c:294:31-294:52: static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
-
net/sched/sch_taprio.c:516:31-516:52: static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
-
net/sched/sch_taprio.c:781:29-781:50: static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:813:30-813:51: static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
-
net/sched/sch_taprio.c:832:29-832:50: static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
-
net/sched/sch_taprio.c:872:34-872:55: static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1010:36-1010:57: static void setup_first_close_time(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1052:11-1052:32: struct taprio_sched *q)
-
net/sched/sch_taprio.c:1104:26-1104:47: static void setup_txtime(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1170:43-1170:64: static void taprio_offload_config_changed(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1230:6-1230:27: struct taprio_sched *q,
-
net/sched/sch_taprio.c:1267:7-1267:28: struct taprio_sched *q,
-
net/sched/sch_tbf.c:249:30-249:59: static bool tbf_peak_present(const struct tbf_sched_data *q)
-
net/sctp/inqueue.c:64:20-64:37: void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
-
net/sctp/inqueue.c:234:30-234:47: void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
-
net/sctp/outqueue.c:59:40-59:58: static inline void sctp_outq_head_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:74:57-74:75: static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
-
net/sctp/outqueue.c:80:40-80:58: static inline void sctp_outq_tail_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:191:52-191:70: void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
-
net/sctp/outqueue.c:206:34-206:52: static void __sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:267:25-267:43: void sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:274:21-274:39: void sctp_outq_free(struct sctp_outq *q)
-
net/sctp/outqueue.c:281:21-281:39: void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
-
net/sctp/outqueue.c:447:27-447:45: void sctp_retransmit_mark(struct sctp_outq *q,
-
net/sctp/outqueue.c:534:22-534:40: void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
-
net/sctp/outqueue.c:592:34-592:52: static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
-
net/sctp/outqueue.c:753:23-753:41: void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
-
net/sctp/outqueue.c:1166:29-1166:47: static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
-
net/sctp/outqueue.c:1222:20-1222:38: int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
-
net/sctp/outqueue.c:1386:24-1386:48: int sctp_outq_is_empty(const struct sctp_outq *q)
-
net/sctp/outqueue.c:1406:36-1406:54: static void sctp_check_transmitted(struct sctp_outq *q,
-
net/sctp/outqueue.c:1680:31-1680:49: static void sctp_mark_missing(struct sctp_outq *q,
-
net/sctp/outqueue.c:1793:27-1793:45: void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_interleave.c:1106:33-1106:51: static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_sched.c:53:37-53:55: static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched.c:58:51-58:69: static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched.c:81:42-81:60: static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched.c:228:30-228:48: void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched.c:250:32-250:50: void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched_prio.c:233:37-233:55: static void sctp_sched_prio_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:246:51-246:69: static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_prio.c:274:42-274:60: static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:98:35-98:53: static void sctp_sched_rr_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:111:49-111:67: static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_rr.c:134:40-134:58: static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
-
net/sunrpc/sched.c:132:25-132:43: __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
-
net/sunrpc/sched.c:368:40-368:63: static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:377:37-377:60: static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:386:45-386:68: static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:416:27-416:50: void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:433:19-433:42: void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:451:36-451:59: void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:467:28-467:51: void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:1145:3-1145:28: struct workqueue_struct *q)
-
net/sunrpc/sched.c:1154:52-1154:77: static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
-
net/unix/af_unix.c:358:39-358:59: static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
-
net/x25/x25_in.c:208:100-208:104: static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/x25/x25_subr.c:260:72-260:77: int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
-
net/xdp/xsk_queue.c:13:34-13:52: static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
-
net/xdp/xsk_queue.c:50:19-50:37: void xskq_destroy(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:114:50-114:68: static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:173:44-173:62: static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:184:40-184:58: static inline bool xskq_cons_read_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:202:45-202:63: static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:228:40-228:58: static inline void __xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:234:37-234:55: static inline void __xskq_cons_peek(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:241:42-241:60: static inline void xskq_cons_get_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:247:40-247:58: static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:260:42-260:60: static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:265:50-265:68: static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:272:40-272:58: static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:281:45-281:63: static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
-
net/xdp/xsk_queue.h:293:38-293:56: static inline void xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:298:40-298:58: static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:303:38-303:56: static inline bool xskq_cons_is_full(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:310:45-310:63: static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:318:37-318:55: static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:332:38-332:56: static inline bool xskq_prod_is_full(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:337:37-337:55: static inline void xskq_prod_cancel(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:342:37-342:55: static inline int xskq_prod_reserve(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:352:42-352:60: static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:364:48-364:66: static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
-
net/xdp/xsk_queue.h:381:42-381:60: static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:398:39-398:57: static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
-
net/xdp/xsk_queue.h:405:37-405:55: static inline void xskq_prod_submit(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:410:42-410:60: static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:420:39-420:57: static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
-
net/xdp/xsk_queue.h:425:39-425:57: static inline bool xskq_prod_is_empty(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:433:41-433:59: static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:438:45-438:63: static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
-
sound/core/seq/oss/seq_oss_event.c:42:55-42:68: snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:95:39-95:52: old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:121:44-121:57: extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:175:45-175:58: chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:196:46-196:59: chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:223:42-223:55: timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:258:41-258:54: local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_readq.c:62:26-62:48: snd_seq_oss_readq_delete(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:74:25-74:47: snd_seq_oss_readq_clear(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:89:24-89:46: snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
-
sound/core/seq/oss/seq_oss_readq.c:123:29-123:51: int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
-
sound/core/seq/oss/seq_oss_readq.c:141:29-141:51: snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
-
sound/core/seq/oss/seq_oss_readq.c:169:24-169:46: snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
-
sound/core/seq/oss/seq_oss_readq.c:181:24-181:46: snd_seq_oss_readq_wait(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:193:24-193:46: snd_seq_oss_readq_free(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:206:24-206:46: snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
-
sound/core/seq/oss/seq_oss_readq.c:216:33-216:55: snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
-
sound/core/seq/oss/seq_oss_readq.c:244:29-244:51: snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
-
sound/core/seq/oss/seq_oss_writeq.c:53:27-53:50: snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:66:26-66:49: snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:82:25-82:48: snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:122:27-122:50: snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
-
sound/core/seq/oss/seq_oss_writeq.c:138:34-138:57: snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:151:31-151:54: snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
-
sound/core/seq/seq_queue.c:50:27-50:49: static int queue_list_add(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:129:26-129:48: static void queue_delete(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:237:26-237:48: void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
-
sound/core/seq/seq_queue.c:343:32-343:54: static inline int check_access(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:351:30-351:52: static int queue_access_lock(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:365:40-365:62: static inline void queue_access_unlock(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:613:35-613:57: static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
-
sound/core/seq/seq_queue.c:636:41-636:63: static void snd_seq_queue_process_event(struct snd_seq_queue *q,
-
sound/core/seq/seq_timer.c:258:24-258:46: int snd_seq_timer_open(struct snd_seq_queue *q)
-
sound/core/seq/seq_timer.c:305:25-305:47: int snd_seq_timer_close(struct snd_seq_queue *q)
-
sound/pci/hda/hda_codec.c:1170:7-1170:29: struct hda_cvt_setup *q)
variable
Defined...
-
arch/x86/crypto/curve25519-x86_64.c:35:2-35:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
arch/x86/include/asm/div64.h:83:2-83:6: u64 q;
-
arch/x86/kernel/cpu/common.c:649:2-649:12: char *p, *q, *s;
-
arch/x86/kernel/pci-iommu_table.c:28:2-28:32: struct iommu_table_entry *p, *q, tmp;
-
arch/x86/kvm/svm/sev.c:1289:2-1289:26: struct list_head *pos, *q;
-
arch/x86/kvm/vmx/nested.c:1572:2-1572:9: int i, q;
-
arch/x86/xen/platform-pci-unplug.c:179:2-179:12: char *p, *q;
-
block/bfq-iosched.c:2263:2-2263:34: struct request_queue *q = hctx->queue;
-
block/bfq-iosched.c:5608:2-5608:34: struct request_queue *q = hctx->queue;
-
block/bfq-iosched.c:6215:2-6215:32: struct request_queue *q = rq->q;
-
block/bio-integrity.c:207:2-207:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/bio.c:829:2-829:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/bio.c:1026:2-1026:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/blk-cgroup.c:611:2-611:24: struct request_queue *q;
-
block/blk-cgroup.c:1024:3-1024:35: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:1707:2-1707:37: struct request_queue *q = current->throttle_queue;
-
block/blk-core.c:480:2-480:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/blk-core.c:502:2-503:3: struct request_queue *q =
-
block/blk-core.c:510:2-510:28: struct request_queue *q = from_timer(q, t, timeout);
-
block/blk-core.c:521:2-521:24: struct request_queue *q;
-
block/blk-core.c:798:2-798:43: struct request_queue *q = bdev->bd_disk->queue;
-
block/blk-core.c:957:3-957:52: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/blk-flush.c:167:2-167:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:216:2-216:38: struct request_queue *q = flush_rq->q;
-
block/blk-flush.c:339:2-339:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:372:2-372:32: struct request_queue *q = rq->q;
-
block/blk-ioc.c:63:2-63:33: struct request_queue *q = icq->q;
-
block/blk-ioc.c:104:3-104:34: struct request_queue *q = icq->q;
-
block/blk-lib.c:29:2-29:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-lib.c:169:2-169:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-lib.c:252:2-252:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-lib.c:306:2-306:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-merge.c:307:2-307:54: struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
-
block/blk-mq-cpumap.c:39:2-39:39: unsigned int cpu, first_sibling, q = 0;
-
block/blk-mq-debugfs-zoned.c:11:2-11:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:29:2-29:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:47:2-47:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:55:2-55:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:63:2-63:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:97:2-97:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:138:2-138:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:149:2-149:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:185:2-185:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:197:2-197:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:468:2-468:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:485:2-485:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:502:2-502:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:519:2-519:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:950:2-950:34: struct request_queue *q = rqos->q;
-
block/blk-mq-sched.c:23:2-23:32: struct request_queue *q = rq->q;
-
block/blk-mq-sched.c:119:2-119:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:226:2-226:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:276:2-276:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:329:2-329:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:414:2-414:32: struct request_queue *q = rq->q;
-
block/blk-mq-sched.c:469:2-469:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.h:62:2-62:32: struct request_queue *q = rq->q;
-
block/blk-mq-sysfs.c:65:2-65:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:86:2-86:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:107:2-107:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:129:2-129:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:243:2-243:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:27:3-27:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:59:2-59:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:350:2-350:34: struct request_queue *q = data->q;
-
block/blk-mq.c:490:2-490:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:508:2-508:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:713:2-713:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:738:2-738:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:765:2-766:3: struct request_queue *q =
-
block/blk-mq.c:803:2-803:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:954:2-955:3: struct request_queue *q =
-
block/blk-mq.c:1325:2-1325:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:1970:2-1970:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2010:2-2010:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2158:2-2158:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/blk-mq.c:2920:2-2920:24: struct request_queue *q;
-
block/blk-mq.c:3028:2-3028:35: struct request_queue *uninit_q, *q;
-
block/blk-mq.c:3062:2-3062:24: struct request_queue *q;
-
block/blk-mq.c:3640:2-3640:24: struct request_queue *q;
-
block/blk-mq.c:3736:2-3736:32: struct request_queue *q = cb->data;
-
block/blk-mq.h:304:3-304:35: struct request_queue *q = hctx->queue;
-
block/blk-settings.c:887:2-887:34: struct request_queue *q = disk->queue;
-
block/blk-stat.c:53:2-53:32: struct request_queue *q = rq->q;
-
block/blk-sysfs.c:679:2-680:3: struct request_queue *q =
-
block/blk-sysfs.c:706:2-707:3: struct request_queue *q =
-
block/blk-sysfs.c:723:2-723:24: struct request_queue *q;
-
block/blk-sysfs.c:738:2-738:28: struct request_queue *q = container_of(rcu_head, struct request_queue,
-
block/blk-sysfs.c:793:2-794:3: struct request_queue *q =
-
block/blk-sysfs.c:853:2-853:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:947:2-947:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:1272:2-1272:32: struct request_queue *q = td->queue;
-
block/blk-throttle.c:1340:2-1340:32: struct request_queue *q = td->queue;
-
block/blk-throttle.c:2181:2-2181:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
block/blk-throttle.c:2321:2-2321:32: struct request_queue *q = rq->q;
-
block/blk-timeout.c:55:3-55:35: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:130:2-130:33: struct request_queue *q = req->q;
-
block/blk-wbt.c:686:2-686:34: struct request_queue *q = rqos->q;
-
block/blk-zoned.c:206:2-206:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-zoned.c:285:2-285:24: struct request_queue *q;
-
block/blk-zoned.c:329:2-329:24: struct request_queue *q;
-
block/blk-zoned.c:405:2-405:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:489:2-489:34: struct request_queue *q = disk->queue;
-
block/bsg-lib.c:267:2-267:34: struct request_queue *q = hctx->queue;
-
block/bsg-lib.c:370:2-370:24: struct request_queue *q;
-
block/bsg.c:214:2-214:32: struct request_queue *q = bd->queue;
-
block/elevator.c:62:2-62:32: struct request_queue *q = rq->q;
-
block/genhd.c:1028:2-1028:43: struct request_queue *q = bdev->bd_disk->queue;
-
block/genhd.c:1072:2-1072:43: struct request_queue *q = bdev->bd_disk->queue;
-
block/ioctl.c:112:2-112:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/kyber-iosched.c:948:1-948:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-
block/kyber-iosched.c:949:1-949:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
-
block/kyber-iosched.c:950:1-950:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
-
block/kyber-iosched.c:951:1-951:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
-
block/kyber-iosched.c:956:2-956:28: struct request_queue *q = data;
-
block/mq-deadline.c:467:2-467:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:488:2-488:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:528:2-528:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:566:2-566:32: struct request_queue *q = rq->q;
-
block/mq-deadline.c:706:1-706:1: DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
-
block/mq-deadline.c:707:1-707:1: DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
-
block/mq-deadline.c:712:2-712:28: struct request_queue *q = data;
-
block/mq-deadline.c:721:2-721:28: struct request_queue *q = data;
-
block/mq-deadline.c:731:2-731:31: struct request_queue *q = m->private;
-
block/mq-deadline.c:740:2-740:31: struct request_queue *q = m->private;
-
block/mq-deadline.c:749:2-749:31: struct request_queue *q = m->private;
-
crypto/algapi.c:221:2-221:21: struct crypto_alg *q;
-
crypto/algapi.c:287:2-287:21: struct crypto_alg *q;
-
crypto/algapi.c:498:2-498:26: struct crypto_template *q;
-
crypto/algapi.c:576:2-576:26: struct crypto_template *q, *tmpl = NULL;
-
crypto/api.c:58:2-58:21: struct crypto_alg *q, *alg = NULL;
-
crypto/asymmetric_keys/x509_public_key.c:163:2-163:14: const char *q;
-
crypto/async_tx/async_pq.c:382:3-382:13: void *p, *q, *s;
-
crypto/async_tx/async_raid6_recov.c:158:2-158:19: struct page *p, *q, *a, *b;
-
crypto/async_tx/async_raid6_recov.c:208:2-208:19: struct page *p, *q, *g, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:299:2-299:19: struct page *p, *q, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:476:2-476:19: struct page *p, *q, *dq;
-
crypto/crypto_user_base.c:38:2-38:21: struct crypto_alg *q, *alg = NULL;
-
crypto/ecc.c:554:2-554:22: u64 q[ECC_MAX_DIGITS];
-
crypto/ecc.c:652:2-652:26: u64 q[ECC_MAX_DIGITS * 2];
-
crypto/essiv.c:386:2-386:18: const char *p, *q;
-
drivers/acpi/ec.c:1107:2-1107:24: struct acpi_ec_query *q;
-
drivers/acpi/ec.c:1132:2-1132:28: struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
-
drivers/acpi/ec.c:1148:2-1148:24: struct acpi_ec_query *q;
-
drivers/ata/libata-scsi.c:1028:2-1028:34: struct request_queue *q = sdev->request_queue;
-
drivers/block/aoe/aoeblk.c:348:2-348:24: struct request_queue *q;
-
drivers/block/aoe/aoecmd.c:838:2-838:24: struct request_queue *q;
-
drivers/block/aoe/aoecmd.c:1034:2-1034:24: struct request_queue *q;
-
drivers/block/aoe/aoenet.c:75:2-75:21: register char *p, *q;
-
drivers/block/drbd/drbd_int.h:1916:3-1916:44: struct drbd_work_queue *q = &connection->sender_work;
-
drivers/block/drbd/drbd_main.c:949:3-949:70: struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
-
drivers/block/drbd/drbd_main.c:2708:2-2708:24: struct request_queue *q;
-
drivers/block/drbd/drbd_nl.c:1334:2-1334:43: struct request_queue * const q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1506:2-1506:63: struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
-
drivers/block/drbd/drbd_receiver.c:1517:2-1517:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/block/drbd/drbd_receiver.c:1581:2-1581:69: struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
-
drivers/block/loop.c:439:2-439:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/loop.c:877:2-877:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/loop.c:951:2-951:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/nbd.c:222:2-222:24: struct request_queue *q;
-
drivers/block/nbd.c:1649:2-1649:24: struct request_queue *q;
-
drivers/block/null_blk/main.c:1213:2-1213:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/main.c:1221:2-1221:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/zoned.c:157:2-157:35: struct request_queue *q = nullb->q;
-
drivers/block/paride/pd.c:400:2-400:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:702:2-702:51: struct request_queue *q = bdev_get_queue(pd->bdev);
-
drivers/block/pktcdvd.c:2112:2-2112:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:2432:2-2432:38: struct request_queue *q = pd->disk->queue;
-
drivers/block/rbd.c:4920:2-4920:24: struct request_queue *q;
-
drivers/block/rnbd/rnbd-clt.c:226:2-226:25: struct rnbd_queue *q = NULL;
-
drivers/block/rnbd/rnbd-clt.c:1118:2-1118:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1278:2-1278:21: struct rnbd_queue *q;
-
drivers/block/rnbd/rnbd-srv.c:555:2-555:57: struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
-
drivers/block/sx8.c:690:2-690:43: struct request_queue *q = carm_pop_q(host);
-
drivers/block/sx8.c:705:2-705:34: struct request_queue *q = hctx->queue;
-
drivers/block/sx8.c:1346:2-1346:24: struct request_queue *q;
-
drivers/block/sx8.c:1410:2-1410:24: struct request_queue *q;
-
drivers/block/virtio_blk.c:311:2-311:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:442:2-442:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:702:2-702:24: struct request_queue *q;
-
drivers/block/xen-blkback/xenbus.c:487:2-487:24: struct request_queue *q;
-
drivers/block/xen-blkback/xenbus.c:581:2-581:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/cdrom/cdrom.c:2162:2-2162:39: struct request_queue *q = cdi->disk->queue;
-
drivers/cdrom/cdrom.c:2612:2-2612:23: struct cdrom_subchnl q;
-
drivers/cdrom/cdrom.c:3068:2-3068:23: struct cdrom_subchnl q;
-
drivers/clk/clk-cdce925.c:223:2-223:5: u8 q;
-
drivers/crypto/cavium/zip/zip_main.c:131:2-131:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:309:2-309:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:461:2-461:10: u32 q = 0;
-
drivers/crypto/ccp/ccp-ops.c:222:2-222:10: u8 *p, *q;
-
drivers/crypto/ccp/ccp-ops.c:247:2-247:10: u8 *p, *q;
-
drivers/crypto/hisilicon/zip/zip_crypto.c:491:2-491:34: struct hisi_zip_req *q = req_q->q;
-
drivers/crypto/keembay/ocs-aes.c:1059:2-1059:9: int i, q;
-
drivers/dax/super.c:171:2-171:24: struct request_queue *q;
-
drivers/firewire/core-device.c:1109:2-1109:6: u32 q;
-
drivers/firewire/core-topology.c:42:2-42:6: u32 q;
-
drivers/firewire/core-topology.c:175:2-175:23: u32 *next_sid, *end, q;
-
drivers/firmware/dmi_scan.c:653:2-653:20: char __iomem *p, *q;
-
drivers/firmware/efi/libstub/vsprintf.c:43:3-43:35: unsigned int q = (r * 0xccd) >> 15;
-
drivers/firmware/efi/libstub/vsprintf.c:62:2-62:42: unsigned int q = (x * 0x346DC5D7ULL) >> 43;
-
drivers/firmware/efi/libstub/vsprintf.c:76:2-76:27: unsigned int d3, d2, d1, q, h;
-
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c:1608:2-1608:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:642:2-642:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:694:2-694:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:732:2-732:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:816:2-816:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1622:2-1622:16: struct queue *q, *next;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1696:2-1696:16: struct queue *q, *next;
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c:130:2-130:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:102:2-102:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:379:2-379:20: struct queue *q = container_of(kobj, struct queue, kobj);
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:200:2-200:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:516:2-516:16: struct queue *q;
-
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:2239:2-2239:34: u32 max_cll, min_cll, max, min, q, r;
-
drivers/gpu/drm/drm_debugfs.c:239:2-239:26: struct list_head *pos, *q;
-
drivers/gpu/drm/i915/display/intel_quirks.c:167:3-167:42: struct intel_quirk *q = &intel_quirks[i];
-
drivers/gpu/drm/i915/gvt/handlers.c:2079:2-2079:2: MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2090:2-2090:2: MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2101:2-2101:2: MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2116:2-2116:2: MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2131:2-2131:2: MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2146:2-2146:2: MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/scheduler.c:1633:2-1633:24: struct list_head *q = workload_q_head(vgpu, engine);
-
drivers/gpu/drm/v3d/v3d_drv.c:133:2-133:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:265:2-265:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:466:2-466:17: enum v3d_queue q;
-
drivers/gpu/drm/xen/xen_drm_front.c:54:2-54:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:65:2-65:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:79:2-79:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/hid/hid-quirks.c:1043:2-1043:29: struct quirks_list_struct *q;
-
drivers/hid/hid-quirks.c:1078:2-1078:37: struct quirks_list_struct *q_new, *q;
-
drivers/hid/hid-quirks.c:1134:2-1134:29: struct quirks_list_struct *q, *temp;
-
drivers/i2c/i2c-core-base.c:1952:2-1952:45: const struct i2c_adapter_quirks *q = adap->quirks;
-
drivers/ide/ide-cd.c:804:2-804:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-cd.c:856:3-856:36: struct request_queue *q = drive->queue;
-
drivers/ide/ide-cd.c:1517:2-1517:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-devsets.c:162:2-162:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-disk.c:655:2-655:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-io.c:449:2-449:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-park.c:13:2-13:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-pm.c:45:2-45:32: struct request_queue *q = rq->q;
-
drivers/ide/ide-pm.c:202:2-202:35: struct request_queue *q = drive->queue;
-
drivers/ide/ide-pm.c:245:3-245:36: struct request_queue *q = drive->queue;
-
drivers/ide/ide-probe.c:765:2-765:24: struct request_queue *q;
-
drivers/ide/ide-proc.c:328:4-328:14: char *q = p;
-
drivers/iio/common/st_sensors/st_sensors_core.c:665:2-665:18: int i, len = 0, q, r;
-
drivers/iio/industrialio-buffer.c:802:2-802:30: struct iio_demux_table *p, *q;
-
drivers/infiniband/hw/hfi1/affinity.c:233:2-233:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/hfi1/mad.c:127:2-127:27: struct trap_node *node, *q;
-
drivers/infiniband/hw/hfi1/mad.c:1029:2-1029:7: u16 *q;
-
drivers/infiniband/hw/hfi1/mad.c:1728:2-1728:24: __be16 *q = (__be16 *)data;
-
drivers/infiniband/hw/hfi1/verbs.c:1665:2-1665:25: char *names_out, *p, **q;
-
drivers/infiniband/hw/mlx4/mad.c:1024:2-1024:9: int p, q;
-
drivers/infiniband/hw/mlx4/mad.c:1060:2-1060:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:286:2-286:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:328:2-328:9: int p, q;
-
drivers/infiniband/hw/qib/qib_mad.c:601:2-601:30: __be16 *q = (__be16 *) smp->data;
-
drivers/infiniband/hw/qib/qib_mad.c:1044:2-1044:24: u16 *q = (u16 *) smp->data;
-
drivers/infiniband/sw/rdmavt/qp.c:789:3-789:18: struct rvt_qp *q;
-
drivers/infiniband/sw/rxe/rxe_queue.c:59:2-59:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_resp.c:293:2-293:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_srq.c:80:2-80:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_srq.c:127:2-127:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/ulp/srp/ib_srp.c:2883:2-2883:34: struct request_queue *q = sdev->request_queue;
-
drivers/isdn/mISDN/dsp_cmx.c:1314:2-1314:14: u8 *d, *p, *q, *o_q;
-
drivers/isdn/mISDN/dsp_cmx.c:1636:2-1636:10: u8 *p, *q;
-
drivers/lightnvm/core.c:448:2-448:35: struct request_queue *q = tdisk->queue;
-
drivers/lightnvm/pblk-core.c:342:2-342:39: struct request_queue *q = pblk->dev->q;
-
drivers/lightnvm/pblk-rb.c:555:2-555:39: struct request_queue *q = pblk->dev->q;
-
drivers/md/bcache/super.c:912:2-912:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1019:2-1019:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1412:2-1412:51: struct request_queue *q = bdev_get_queue(dc->bdev);
-
drivers/md/bcache/sysfs.c:1066:3-1066:16: uint16_t q[31], *p, *cached;
-
drivers/md/bcache/util.c:97:2-97:11: uint64_t q;
-
drivers/md/dm-cache-policy-smq.c:880:2-880:25: struct queue *q = &mq->dirty;
-
drivers/md/dm-cache-policy-smq.c:893:2-893:25: struct queue *q = &mq->clean;
-
drivers/md/dm-cache-target.c:3388:2-3388:54: struct request_queue *q = bdev_get_queue(origin_bdev);
-
drivers/md/dm-clone-target.c:2017:2-2017:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-io.c:306:2-306:54: struct request_queue *q = bdev_get_queue(where->bdev);
-
drivers/md/dm-log-writes.c:890:2-890:56: struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
drivers/md/dm-mpath.c:509:2-509:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:873:2-873:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-mpath.c:932:2-932:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:1618:2-1618:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-mpath.c:2053:2-2053:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-raid.c:2945:3-2945:25: struct request_queue *q;
-
drivers/md/dm-rq.c:533:2-533:24: struct request_queue *q;
-
drivers/md/dm-stats.c:909:2-909:14: const char *q;
-
drivers/md/dm-table.c:420:2-420:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:872:2-872:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:1591:2-1591:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1621:2-1621:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1771:2-1771:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1821:2-1821:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1829:2-1829:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1837:2-1837:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1864:2-1864:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1891:2-1891:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1918:2-1918:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1952:2-1952:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1980:2-1980:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-thin.c:2817:2-2817:61: struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
drivers/md/dm-zoned-target.c:770:2-770:24: struct request_queue *q;
-
drivers/md/dm.c:1000:2-1000:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
drivers/md/dm.c:1020:3-1020:57: struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
-
drivers/md/dm.c:2024:2-2024:32: struct request_queue *q = md->queue;
-
drivers/md/raid5-cache.c:3068:2-3068:53: struct request_queue *q = bdev_get_queue(rdev->bdev);
-
drivers/md/raid5-ppl.c:1312:2-1312:24: struct request_queue *q;
-
drivers/md/raid5.c:6818:3-6818:36: struct request_queue *q = mddev->queue;
-
drivers/media/common/saa7146/saa7146_fops.c:165:2-165:31: struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/common/saa7146/saa7146_fops.c:291:2-291:25: struct videobuf_queue *q;
-
drivers/media/common/saa7146/saa7146_fops.c:325:2-325:25: struct videobuf_queue *q;
-
drivers/media/common/saa7146/saa7146_video.c:383:2-383:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/saa7146/saa7146_video.c:1212:2-1212:34: struct videobuf_queue *q = &fh->video_q;
-
drivers/media/common/saa7146/saa7146_video.c:1227:2-1227:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/videobuf2/videobuf2-core.c:216:2-216:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:362:2-362:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:994:2-994:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1078:2-1078:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1193:2-1193:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1327:2-1327:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1339:2-1339:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1853:2-1853:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:2846:2-2846:24: struct vb2_queue *q = data;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:200:2-200:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:254:2-254:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:276:2-276:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:317:2-317:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:145:2-145:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:178:2-178:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:521:2-521:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:1180:2-1180:30: struct vb2_queue *q = vdev->queue;
-
drivers/media/dvb-core/dvb_demux.c:551:2-551:12: const u8 *q;
-
drivers/media/dvb-core/dvb_vb2.c:169:2-169:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:207:2-207:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:220:2-220:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:237:2-237:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:368:2-368:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-frontends/rtl2832_sdr.c:1143:2-1143:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/dvb-frontends/sp887x.c:289:2-289:15: unsigned int q, r;
-
drivers/media/i2c/adv7511-v4l2.c:1310:2-1310:9: u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
-
drivers/media/i2c/cx25840/cx25840-core.c:697:2-697:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:775:2-775:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:1034:2-1034:27: struct workqueue_struct *q;
-
drivers/media/pci/bt8xx/bttv-driver.c:2196:2-2196:29: struct videobuf_queue* q = NULL;
-
drivers/media/pci/bt8xx/bttv-driver.c:2230:2-2230:42: struct videobuf_queue *q = bttv_queue(fh);
-
drivers/media/pci/cobalt/cobalt-v4l2.c:125:2-125:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/pci/cobalt/cobalt-v4l2.c:1205:2-1205:28: struct vb2_queue *q = &s->q;
-
drivers/media/pci/cx18/cx18-fileops.c:291:3-291:13: const u8 *q;
-
drivers/media/pci/cx18/cx18-ioctl.c:810:2-810:29: struct videobuf_queue *q = NULL;
-
drivers/media/pci/cx18/cx18-streams.c:678:2-678:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-streams.c:700:2-700:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-vbi.c:99:2-99:10: u8 *q = buf;
-
drivers/media/pci/cx23885/cx23885-417.c:1496:2-1496:20: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-core.c:1644:2-1644:38: struct cx23885_dmaqueue *q = &port->mpegq;
-
drivers/media/pci/cx23885/cx23885-dvb.c:2656:3-2656:21: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-vbi.c:189:2-189:37: struct cx23885_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx23885/cx23885-video.c:461:2-461:40: struct cx23885_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx23885/cx23885-video.c:1238:2-1238:20: struct vb2_queue *q;
-
drivers/media/pci/cx25821/cx25821-video.c:243:2-243:56: struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
-
drivers/media/pci/cx25821/cx25821-video.c:681:3-681:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-blackbird.c:1157:2-1157:20: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-dvb.c:1765:3-1765:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-mpeg.c:271:2-271:34: struct cx88_dmaqueue *q = &dev->mpegq;
-
drivers/media/pci/cx88/cx88-vbi.c:172:2-172:38: struct cx88_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx88/cx88-video.c:505:2-505:38: struct cx88_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx88/cx88-video.c:1261:2-1261:20: struct vb2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:531:2-531:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:798:2-798:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:882:2-883:3: struct cio2_queue *q =
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:971:2-971:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1016:2-1016:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1071:2-1071:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1111:2-1111:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1224:2-1224:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1249:2-1249:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1316:2-1316:25: struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1383:2-1383:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1417:2-1417:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1975:2-1975:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:2001:2-2001:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/ivtv/ivtv-fileops.c:297:3-297:13: const u8 *q;
-
drivers/media/pci/ivtv/ivtv-fileops.c:543:2-543:20: struct ivtv_queue q;
-
drivers/media/pci/ivtv/ivtv-vbi.c:305:2-305:10: u8 *q = buf;
-
drivers/media/pci/saa7134/saa7134-core.c:334:2-334:31: struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/pci/saa7134/saa7134-dvb.c:1202:2-1202:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-empress.c:242:2-242:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-video.c:2044:2-2044:20: struct vb2_queue *q;
-
drivers/media/pci/saa7164/saa7164-cmd.c:73:2-73:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:125:2-125:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:247:2-247:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-dvb.c:195:2-195:24: struct list_head *p, *q;
-
drivers/media/pci/saa7164/saa7164-encoder.c:61:2-61:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/pci/saa7164/saa7164-vbi.c:30:2-30:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/platform/allegro-dvt/allegro-core.c:2666:2-2666:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/am437x/am437x-vpfe.c:2209:2-2209:20: struct vb2_queue *q;
-
drivers/media/platform/atmel/atmel-isc-base.c:2230:2-2230:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/media/platform/atmel/atmel-isi.c:1176:2-1176:20: struct vb2_queue *q;
-
drivers/media/platform/davinci/vpbe_display.c:195:2-195:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/davinci/vpbe_display.c:1367:2-1367:20: struct vb2_queue *q;
-
drivers/media/platform/davinci/vpif_capture.c:71:2-71:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/davinci/vpif_capture.c:1414:2-1414:20: struct vb2_queue *q;
-
drivers/media/platform/davinci/vpif_display.c:1125:2-1125:20: struct vb2_queue *q;
-
drivers/media/platform/exynos4-is/fimc-capture.c:1716:2-1716:39: struct vb2_queue *q = &fimc->vid_cap.vbq;
-
drivers/media/platform/exynos4-is/fimc-isp-video.c:569:2-569:44: struct vb2_queue *q = &isp->video_capture.vb_queue;
-
drivers/media/platform/exynos4-is/fimc-lite.c:1243:2-1243:31: struct vb2_queue *q = &fimc->vb_queue;
-
drivers/media/platform/fsl-viu.c:1245:2-1245:34: struct videobuf_queue *q = &fh->vb_vidq;
-
drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c:1172:2-1172:36: struct mtk_jpeg_q_data *q = &ctx->out_q;
-
drivers/media/platform/qcom/camss/camss-video.c:913:2-913:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/vdec.c:279:2-279:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/venc.c:236:2-236:20: struct vb2_queue *q;
-
drivers/media/platform/rcar-vin/rcar-dma.c:1394:2-1394:30: struct vb2_queue *q = &vin->queue;
-
drivers/media/platform/rcar_drif.c:929:2-929:30: struct vb2_queue *q = &sdr->vb_queue;
-
drivers/media/platform/renesas-ceu.c:1401:2-1401:33: struct vb2_queue *q = &ceudev->vb2_vq;
-
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c:1319:2-1319:20: struct vb2_queue *q;
-
drivers/media/platform/s3c-camif/camif-capture.c:1102:2-1102:29: struct vb2_queue *q = &vp->vb_queue;
-
drivers/media/platform/s5p-mfc/s5p_mfc.c:756:2-756:20: struct vb2_queue *q;
-
drivers/media/platform/sh_vou.c:1224:2-1224:20: struct vb2_queue *q;
-
drivers/media/platform/sti/delta/delta-v4l2.c:1119:2-1119:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/sti/delta/delta-v4l2.c:1299:2-1299:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/sti/delta/delta-v4l2.c:1465:2-1465:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/sti/delta/delta-v4l2.c:1511:2-1511:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/sti/delta/delta-v4l2.c:1588:2-1588:20: struct vb2_queue *q;
-
drivers/media/platform/stm32/stm32-dcmi.c:1851:2-1851:20: struct vb2_queue *q;
-
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c:403:2-403:30: struct vb2_queue *q = &csi->queue;
-
drivers/media/platform/ti-vpe/cal-video.c:251:2-251:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti-vpe/cal-video.c:703:2-703:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/radio/radio-gemtek.c:153:2-153:14: int i, bit, q, mute;
-
drivers/media/radio/radio-gemtek.c:257:2-257:9: int i, q;
-
drivers/media/test-drivers/vimc/vimc-capture.c:403:2-403:20: struct vb2_queue *q;
-
drivers/media/test-drivers/vivid/vivid-sdr-cap.c:465:2-465:30: struct vb2_queue *q = &dev->vb_sdr_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-cap.c:672:2-672:30: struct vb2_queue *q = &dev->vb_vid_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-out.c:454:2-454:30: struct vb2_queue *q = &dev->vb_vid_out_q;
-
drivers/media/tuners/max2165.c:153:2-153:6: u32 q, f = 0;
-
drivers/media/usb/airspy/airspy.c:644:2-644:28: struct vb2_queue *q = &s->vb_queue;
-
drivers/media/usb/au0828/au0828-video.c:290:2-290:36: struct vb2_queue *q = vb->vb2_buf.vb2_queue;
-
drivers/media/usb/au0828/au0828-video.c:1806:2-1806:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-417.c:1739:2-1739:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-video.c:1757:2-1757:20: struct vb2_queue *q;
-
drivers/media/usb/em28xx/em28xx-video.c:1249:2-1249:20: struct vb2_queue *q;
-
drivers/media/usb/go7007/go7007-fw.c:930:2-930:10: int q = 0;
-
drivers/media/usb/gspca/gspca.c:1450:2-1450:20: struct vb2_queue *q;
-
drivers/media/usb/hackrf/hackrf.c:918:2-918:20: struct vb2_queue *q;
-
drivers/media/usb/msi2500/msi2500.c:924:2-924:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/usb/s2255/s2255drv.c:815:2-815:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1100:2-1100:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1592:2-1592:20: struct vb2_queue *q;
-
drivers/media/usb/stk1160/stk1160-v4l.c:483:2-483:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:518:2-518:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:779:2-779:20: struct vb2_queue *q;
-
drivers/media/usb/zr364xx/zr364xx.c:812:2-812:35: struct videobuf_queue *q = &cam->vb_vidq;
-
drivers/media/usb/zr364xx/zr364xx.c:1274:2-1274:35: struct videobuf_queue *q = &cam->vb_vidq;
-
drivers/media/v4l2-core/videobuf-dma-contig.c:76:2-76:34: struct videobuf_queue *q = map->q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:397:2-397:34: struct videobuf_queue *q = map->q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:661:2-661:24: struct videobuf_queue q;
-
drivers/media/v4l2-core/videobuf-vmalloc.c:64:2-64:34: struct videobuf_queue *q = map->q;
-
drivers/misc/habanalabs/common/hw_queue.c:44:2-44:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:231:2-231:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:281:2-281:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:345:2-345:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:381:2-381:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:536:2-536:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:693:2-693:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:970:2-970:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:1010:2-1010:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:1022:2-1022:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:898:2-898:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:1498:2-1498:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:1514:2-1514:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:2583:2-2583:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:2725:2-2725:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:2843:2-2843:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:2984:2-2984:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:3113:2-3113:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:4612:2-4612:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/goya/goya.c:1031:2-1031:22: struct hl_hw_queue *q;
-
drivers/misc/uacce/uacce.c:66:2-66:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:128:2-128:22: struct uacce_queue *q;
-
drivers/misc/uacce/uacce.c:171:2-171:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:185:2-185:31: struct uacce_queue *q = vma->vm_private_data;
-
drivers/misc/uacce/uacce.c:200:2-200:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:263:2-263:32: struct uacce_queue *q = file->private_data;
-
drivers/misc/uacce/uacce.c:470:2-470:22: struct uacce_queue *q, *next_q;
-
drivers/mmc/core/block.c:1371:2-1371:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:1440:2-1440:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:1903:2-1903:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2053:2-2053:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2650:2-2650:26: struct list_head *pos, *q;
-
drivers/mmc/core/queue.c:85:2-85:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:122:2-122:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:140:2-140:32: struct request_queue *q = mq->queue;
-
drivers/mmc/core/queue.c:248:2-248:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:509:2-509:32: struct request_queue *q = mq->queue;
-
drivers/net/appletalk/ltpc.c:504:2-504:22: struct xmitQel *q = NULL;
-
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:102:2-102:19: unsigned int tc, q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:455:2-455:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:1005:2-1005:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2283:2-2283:15: unsigned int q, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2321:2-2321:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2376:2-2376:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:5378:2-5378:6: int q, rc;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:14426:2-14426:26: struct list_head *pos, *q;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:2063:3-2063:43: struct bnx2x_vf_queue *q = vfq_get(vf, i);
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1537:3-1537:57: struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3489:2-3489:15: unsigned int q;
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3604:2-3604:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:443:2-443:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:706:2-706:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:731:2-731:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1600:2-1600:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1823:2-1823:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2237:2-2237:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2279:2-2279:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2305:2-2305:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2341:2-2341:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2384:2-2384:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2425:2-2425:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2524:2-2524:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2721:2-2721:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2771:2-2771:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2809:2-2809:18: unsigned int i, q, idx;
-
drivers/net/ethernet/cadence/macb_main.c:2902:2-2902:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:3805:2-3805:21: unsigned int hw_q, q;
-
drivers/net/ethernet/cadence/macb_main.c:3967:2-3967:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:3994:2-3994:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4016:2-4016:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4175:2-4175:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4826:2-4826:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:4913:2-4913:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_ptp.c:353:2-353:15: unsigned int q;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:461:2-461:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:814:2-814:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_main.c:472:2-472:6: int q, iq;
-
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c:369:2-369:23: int mbox, key, stat, q;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:483:3-483:32: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:539:3-539:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:563:3-563:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:662:3-662:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:686:3-686:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1320:2-1320:31: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1481:2-1481:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1571:2-1571:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1710:2-1710:33: struct cmdQ *q = &sge->cmdQ[qid];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1937:3-1937:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1172:3-1172:41: struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1955:2-1955:72: const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1971:2-1971:22: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2024:2-2024:46: struct qset_params *q = adapter->params.sge.qset;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2151:3-2151:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2252:3-2252:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1273:2-1273:18: struct sge_txq *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1529:2-1529:39: struct sge_txq *q = &qs->txq[TXQ_CTRL];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1747:2-1747:39: struct sge_txq *q = &qs->txq[TXQ_OFLD];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1901:2-1901:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2327:2-2327:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2531:2-2531:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2610:2-2610:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2628:2-2628:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2648:2-2648:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2677:2-2677:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2698:2-2698:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3042:2-3042:42: struct sge_qset *q = &adapter->sge.qs[id];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3215:3-3215:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3238:3-3238:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3368:3-3368:37: struct qset_params *q = p->qset + i;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:948:2-948:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:963:2-963:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:975:2-975:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:928:3-928:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:974:3-974:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1062:3-1062:52: struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:112:2-112:37: struct sge_ofld_rxq *q = rxq_info->uldrxq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:376:3-376:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:391:3-391:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1422:2-1422:27: struct sge_txq *q = &eq->q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1516:2-1516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2664:2-2664:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2784:2-2784:27: struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3088:2-3088:26: struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3981:2-3981:23: struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4139:2-4139:23: struct sge_rspq *q = cookie;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4153:2-4153:34: struct sge_rspq *q = &adap->sge.intrq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4290:3-4290:31: struct sge_eth_txq *q = &s->ptptxq;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:414:2-414:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:465:2-465:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:510:2-510:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:574:2-574:65: struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:713:2-713:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1971:2-1971:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/dec/tulip/de4x5.c:5181:5-5181:15: char *p, *q, t;
-
drivers/net/ethernet/emulex/benet/be_main.c:2993:2-2993:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3009:2-3009:24: struct be_queue_info *q, *cq;
-
drivers/net/ethernet/emulex/benet/be_main.c:3041:2-3041:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3105:2-3105:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3566:2-3566:24: struct be_queue_info *q;
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:3472:2-3472:32: struct dpni_queue q = { { 0 } };
-
drivers/net/ethernet/freescale/fec_main.c:829:2-829:15: unsigned int q;
-
drivers/net/ethernet/freescale/fec_main.c:2778:2-2778:15: unsigned int q;
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:298:2-298:21: struct hnae_queue *q;
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:249:2-249:37: struct hnae_queue *q = &ring_pair->q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:1995:3-1995:24: struct netdev_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4016:2-4016:32: struct hnae3_queue *q = ring->tqp;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4052:4-4052:24: struct hnae3_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:1724:3-1725:4: struct hclge_tqp *q =
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:916:4-916:58: struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:612:3-612:53: struct fm10k_hw_stats_q *q = &interface->stats.q[i];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:1333:2-1333:6: int q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:765:2-765:6: u16 q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:3568:2-3568:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:1797:2-1797:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2491:2-2491:9: int i, q;
-
drivers/net/ethernet/intel/igb/e1000_nvm.c:690:2-690:5: u8 q, hval, rem, result;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:773:2-773:31: int val, cm3_state, host_id, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:834:2-834:22: int val, cm3_state, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1949:2-1949:9: int i, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:2034:2-2034:9: int i, q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:278:2-278:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:334:2-334:6: int q, b;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:370:2-370:6: int q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:625:2-625:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:738:2-738:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1333:2-1333:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1600:2-1600:67: struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1609:2-1609:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/mscc/ocelot_vcap.c:1169:2-1169:26: struct list_head *pos, *q;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:77:2-77:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:87:2-87:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:122:2-122:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:438:2-438:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:193:2-193:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:234:2-234:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:270:2-270:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:697:2-697:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:758:2-758:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:819:2-819:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:2679:2-2679:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:2721:2-2721:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:190:2-190:42: struct ionic_queue *q = &lif->adminqcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:257:2-257:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:231:2-231:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:643:2-643:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1127:2-1127:22: struct ionic_queue *q;
-
drivers/net/ethernet/renesas/ravb_main.c:813:3-813:7: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:911:2-911:23: int q = napi - priv->napi;
-
drivers/net/ethernet/renesas/ravb_main.c:1162:2-1162:6: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1501:2-1501:35: u16 q = skb_get_queue_mapping(skb);
-
drivers/net/ethernet/renesas/ravb_main.c:2049:2-2049:18: int error, irq, q;
-
drivers/net/ethernet/sfc/tx.c:298:2-298:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/ti/davinci_emac.c:1399:2-1399:6: int q, m, ret;
-
drivers/net/phy/sfp-bus.c:107:2-107:26: const struct sfp_quirk *q;
-
drivers/net/ppp/ppp_generic.c:1883:2-1883:21: unsigned char *p, *q;
-
drivers/net/tap.c:300:2-300:20: struct tap_queue *q, *tmp;
-
drivers/net/tap.c:323:2-323:20: struct tap_queue *q;
-
drivers/net/tap.c:494:2-494:24: struct tap_queue *q = container_of(sk, struct tap_queue, sk);
-
drivers/net/tap.c:503:2-503:20: struct tap_queue *q;
-
drivers/net/tap.c:565:2-565:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:572:2-572:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:756:2-756:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:872:2-872:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:901:2-901:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:974:2-974:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1196:2-1196:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1216:2-1216:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1233:2-1233:24: struct tap_queue *q = container_of(sock, struct tap_queue,
-
drivers/net/tap.c:1251:2-1251:20: struct tap_queue *q;
-
drivers/net/tap.c:1263:2-1263:20: struct tap_queue *q;
-
drivers/net/tap.c:1277:2-1277:20: struct tap_queue *q;
-
drivers/net/usb/catc.c:472:2-472:50: struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
-
drivers/net/usb/catc.c:501:2-501:21: struct ctrl_queue *q;
-
drivers/net/usb/catc.c:536:2-536:21: struct ctrl_queue *q;
-
drivers/net/wireless/ath/ath10k/mac.c:3933:2-3933:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath11k/mac.c:4199:2-4199:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath9k/mac.c:137:2-137:9: int i, q;
-
drivers/net/wireless/ath/ath9k/mac.c:298:2-298:6: int q;
-
drivers/net/wireless/ath/ath9k/xmit.c:105:2-105:22: struct sk_buff_head q;
-
drivers/net/wireless/ath/ath9k/xmit.c:168:2-168:14: int q = fi->txq;
-
drivers/net/wireless/ath/ath9k/xmit.c:198:2-198:6: int q, ret;
-
drivers/net/wireless/ath/ath9k/xmit.c:760:2-760:20: int q = tid->txq->mac80211_qnum;
-
drivers/net/wireless/ath/ath9k/xmit.c:2281:2-2281:6: int q, ret;
-
drivers/net/wireless/ath/carl9170/tx.c:663:2-663:21: unsigned int r, t, q;
-
drivers/net/wireless/ath/carl9170/tx.c:1277:2-1277:14: uint8_t q = 0;
-
drivers/net/wireless/ath/carl9170/tx.c:1343:2-1343:18: unsigned int i, q;
-
drivers/net/wireless/ath/wil6210/netdev.c:232:2-232:7: bool q;
-
drivers/net/wireless/ath/wil6210/txrx.c:838:2-838:11: bool q = false;
-
drivers/net/wireless/ath/wil6210/wmi.c:1931:3-1931:8: bool q;
-
drivers/net/wireless/broadcom/b43/phy_g.c:2336:2-2336:23: s32 m1, m2, f = 256, q, delta;
-
drivers/net/wireless/broadcom/b43/pio.c:49:2-49:30: struct b43_pio_txqueue *q = NULL;
-
drivers/net/wireless/broadcom/b43/pio.c:126:2-126:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:162:2-162:26: struct b43_pio_rxqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:290:2-290:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:352:2-352:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:422:2-422:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:491:2-491:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:566:2-566:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/sdio.c:39:2-39:31: const struct b43_sdio_quirk *q;
-
drivers/net/wireless/broadcom/b43legacy/phy.c:1947:2-1947:6: s32 q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:50:2-50:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:69:2-69:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:87:2-87:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:110:2-110:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:127:2-127:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:144:2-144:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:223:2-223:23: struct sk_buff_head *q;
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4315:2-4315:42: struct ipw2100_status_queue *q = &priv->status_queue;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3842:2-3842:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:5011:2-5011:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:5040:2-5040:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:10099:2-10099:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:11762:2-11762:24: struct list_head *p, *q;
-
drivers/net/wireless/intel/iwlegacy/3945-mac.c:453:2-453:23: struct il_queue *q = NULL;
-
drivers/net/wireless/intel/iwlegacy/3945.c:275:2-275:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/3945.c:601:2-601:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:1650:2-1650:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2390:2-2390:40: struct il_queue *q = &il->txq[txq_id].q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2456:2-2456:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:3958:2-3958:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2752:2-2752:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2814:2-2814:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3119:2-3119:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3241:2-3241:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4491:2-4491:6: int q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4761:3-4761:20: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4791:2-4791:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/debug.c:818:2-818:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c:1167:2-1167:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:467:2-467:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:686:2-686:6: int q, fifo;
-
drivers/net/wireless/intel/iwlwifi/iwl-io.c:255:2-255:9: int i, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c:3507:4-3507:13: int tid, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/sta.c:1639:3-1639:7: int q;
-
drivers/net/wireless/marvell/mwl8k.c:5380:4-5380:38: int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
-
drivers/net/wireless/mediatek/mt76/debugfs.c:34:3-34:41: struct mt76_queue *q = dev->phy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/debugfs.c:54:3-54:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/dma.c:495:2-495:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/mac80211.c:534:2-534:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:821:2-821:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:836:2-836:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c:73:2-73:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:448:4-448:43: struct mt76_queue *q = dev->mphy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:899:2-899:43: struct mt76_queue *q = dev->mphy.q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:1513:2-1513:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c:360:3-360:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c:84:2-84:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:18:2-18:50: struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:171:2-171:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:345:2-345:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c:299:3-299:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:166:4-166:37: u8 q = mt7915_lmac_mapping(dev, i);
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:920:2-920:21: struct mt76_queue *q, *q2 = NULL;
-
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c:2965:3-2965:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c:148:3-148:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:128:4-128:37: u8 q = mt7921_lmac_mapping(dev, i);
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:764:2-764:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:999:3-999:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/sdio.c:23:2-23:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio.c:41:2-41:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:61:2-61:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:328:3-328:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/testmode.c:32:2-32:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:239:2-239:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:266:2-266:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:456:2-456:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:606:2-606:30: struct mt76_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt76/usb.c:691:2-691:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:711:2-711:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:778:3-778:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:792:3-792:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:814:2-814:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:977:2-977:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:1020:3-1020:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:1044:3-1044:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:172:2-172:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:193:2-193:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:240:2-240:36: struct mt7601u_tx_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:311:2-311:44: struct mt7601u_tx_queue *q = &dev->tx_q[ep];
-
drivers/net/wireless/microchip/wilc1000/wlan.c:289:2-289:40: struct wilc_tx_queue_status *q = &wl->tx_q_limit;
-
drivers/net/wireless/realtek/rtw88/mac.c:955:2-955:6: u32 q;
-
drivers/net/wireless/ti/wlcore/main.c:1220:2-1220:6: int q, mapping;
-
drivers/net/wireless/ti/wlcore/main.c:1287:2-1287:6: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:485:2-485:14: int i, q = -1, ac;
-
drivers/net/wireless/ti/wlcore/tx.c:658:3-658:7: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:676:2-676:56: int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:489:2-489:33: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:582:3-582:34: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:965:2-965:23: struct sk_buff_head *q;
-
drivers/net/wireless/zydas/zd1211rw/zd_usb.c:1060:2-1060:32: struct sk_buff_head *q = &tx->submitted_skbs;
-
drivers/nvdimm/blk.c:246:2-246:24: struct request_queue *q;
-
drivers/nvdimm/pmem.c:336:2-337:3: struct request_queue *q =
-
drivers/nvdimm/pmem.c:349:2-350:3: struct request_queue *q =
-
drivers/nvdimm/pmem.c:382:2-382:24: struct request_queue *q;
-
drivers/nvme/host/lightnvm.c:478:2-478:36: struct request_queue *q = nvmdev->q;
-
drivers/nvme/host/lightnvm.c:674:2-674:33: struct request_queue *q = dev->q;
-
drivers/nvme/host/lightnvm.c:891:2-891:24: struct request_queue *q;
-
drivers/nvme/host/lightnvm.c:949:2-949:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/multipath.c:362:2-362:24: struct request_queue *q;
-
drivers/nvme/host/pci.c:2269:2-2269:45: struct request_queue *q = nvmeq->dev->ctrl.admin_q;
-
drivers/nvme/host/zns.c:46:2-46:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:119:2-119:38: struct request_queue *q = ns->disk->queue;
-
drivers/nvme/target/passthru.c:223:2-223:34: struct request_queue *q = ctrl->admin_q;
-
drivers/of/fdt.c:907:2-907:18: const char *p, *q, *options = NULL;
-
drivers/parport/probe.c:56:2-56:18: char *p = txt, *q;
-
drivers/pcmcia/cistpl.c:663:2-663:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:795:2-795:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:812:2-812:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:824:2-824:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1081:2-1081:14: u_char *p, *q, features;
-
drivers/pcmcia/cistpl.c:1204:2-1204:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1228:2-1228:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1249:2-1249:14: u_char *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:110:2-110:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:134:2-134:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:1036:2-1036:27: struct resource_map *p, *q;
-
drivers/platform/chrome/wilco_ec/event.c:108:2-108:25: struct ec_event_queue *q;
-
drivers/platform/surface/aggregator/ssh_packet_layer.c:700:2-700:21: struct ssh_packet *q;
-
drivers/scsi/aacraid/commsup.c:361:2-361:21: struct aac_queue * q;
-
drivers/scsi/aacraid/commsup.c:652:6-652:65: struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/commsup.c:875:2-875:21: struct aac_queue * q;
-
drivers/scsi/aacraid/dpcsup.c:278:3-278:61: struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:400:2-400:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:423:2-423:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/src.c:486:2-486:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/be2iscsi/be_main.c:3450:2-3450:24: struct be_queue_info *q;
-
drivers/scsi/be2iscsi/be_main.c:3510:2-3510:24: struct be_queue_info *q, *cq;
-
drivers/scsi/be2iscsi/be_main.c:3620:2-3620:24: struct be_queue_info *q;
-
drivers/scsi/bfa/bfa_core.c:1318:2-1318:7: int q;
-
drivers/scsi/bfa/bfa_core.c:1474:2-1474:6: int q, per_reqq_sz, per_rspq_sz;
-
drivers/scsi/csiostor/csio_isr.c:428:4-428:50: struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
-
drivers/scsi/csiostor/csio_wr.c:191:2-191:17: struct csio_q *q, *flq;
-
drivers/scsi/csiostor/csio_wr.c:747:2-747:51: struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:765:2-765:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:789:2-789:17: struct csio_q *q;
-
drivers/scsi/csiostor/csio_wr.c:867:2-867:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:985:2-985:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:1691:2-1691:17: struct csio_q *q;
-
drivers/scsi/esas2r/esas2r_flash.c:331:2-331:10: u8 *p, *q;
-
drivers/scsi/fnic/fnic_scsi.c:2297:2-2297:41: struct request_queue *q = sc->request->q;
-
drivers/scsi/hpsa.c:7018:2-7018:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7039:2-7039:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7055:2-7055:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7074:2-7074:17: u8 q = *(u8 *) queue;
-
drivers/scsi/ips.c:2532:2-2532:20: struct scsi_cmnd *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:544:2-544:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:591:2-591:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_init.c:5042:2-5042:10: __be32 *q;
-
drivers/scsi/qla2xxx/qla_os.c:4932:2-4932:11: bool q = false;
-
drivers/scsi/qla2xxx/qla_os.c:7204:3-7204:12: bool q = false;
-
drivers/scsi/scsi_lib.c:494:2-494:24: struct request_queue *q;
-
drivers/scsi/scsi_lib.c:568:2-568:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:680:2-680:41: struct request_queue *q = cmd->device->request_queue;
-
drivers/scsi/scsi_lib.c:943:2-943:41: struct request_queue *q = cmd->device->request_queue;
-
drivers/scsi/scsi_lib.c:1649:2-1649:33: struct request_queue *q = req->q;
-
drivers/scsi/scsi_lib.c:1864:2-1864:34: struct request_queue *q = hctx->queue;
-
drivers/scsi/scsi_lib.c:2538:2-2538:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:2643:2-2643:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:2681:2-2681:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:2695:2-2695:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_transport_fc.c:4169:2-4169:35: struct request_queue *q = rport->rqst_q;
-
drivers/scsi/scsi_transport_fc.c:4282:2-4282:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4317:2-4317:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_iscsi.c:1553:2-1553:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:192:2-192:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:246:2-246:54: struct request_queue *q = to_sas_host_attrs(shost)->q;
-
drivers/scsi/sd.c:819:2-819:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:1000:2-1000:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:2990:2-2990:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3174:2-3174:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3549:2-3549:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sd_zbc.c:152:2-152:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:702:2-702:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sd_zbc.c:780:2-780:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sg.c:287:2-287:24: struct request_queue *q;
-
drivers/scsi/sg.c:1458:2-1458:36: struct request_queue *q = scsidp->request_queue;
-
drivers/scsi/sg.c:1732:2-1732:51: struct request_queue *q = sfp->parentdp->device->request_queue;
-
drivers/scsi/sym53c8xx_2/sym_malloc.c:97:2-97:11: m_link_p q;
-
drivers/scsi/ufs/ufs_bsg.c:199:2-199:24: struct request_queue *q;
-
drivers/scsi/ufs/ufshcd.c:1382:2-1382:33: struct request_queue *q = hba->cmd_queue;
-
drivers/scsi/ufs/ufshcd.c:2862:2-2862:33: struct request_queue *q = hba->cmd_queue;
-
drivers/scsi/ufs/ufshcd.c:4869:2-4869:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/ufs/ufshcd.c:5810:2-5810:24: struct request_queue *q;
-
drivers/scsi/ufs/ufshcd.c:6264:2-6264:33: struct request_queue *q = hba->tmf_queue;
-
drivers/scsi/ufs/ufshcd.c:6382:2-6382:33: struct request_queue *q = hba->tmf_queue;
-
drivers/scsi/ufs/ufshcd.c:6528:2-6528:33: struct request_queue *q = hba->cmd_queue;
-
drivers/spi/spi-fsl-qspi.c:342:2-342:23: struct fsl_qspi *q = dev_id;
-
drivers/spi/spi-fsl-qspi.c:371:2-371:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:644:2-644:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:706:2-706:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:812:2-812:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:849:2-849:19: struct fsl_qspi *q;
-
drivers/spi/spi-fsl-qspi.c:950:2-950:48: struct fsl_qspi *q = platform_get_drvdata(pdev);
-
drivers/spi/spi-fsl-qspi.c:970:2-970:42: struct fsl_qspi *q = dev_get_drvdata(dev);
-
drivers/spi/spi-pxa2xx.c:830:2-830:16: unsigned long q, q1, q2;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:212:2-212:22: ia_css_queue_t *q = NULL;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:331:2-331:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:359:2-359:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:389:2-389:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:409:2-409:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:431:2-431:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:451:2-451:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:470:2-470:18: ia_css_queue_t *q;
-
drivers/staging/media/hantro/hantro_drv.c:48:2-48:59: struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
-
drivers/staging/media/hantro/hantro_g1_vp8_dec.c:180:2-180:55: const struct v4l2_vp8_quantization_header *q = &hdr->quant_header;
-
drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c:318:2-318:55: const struct v4l2_vp8_quantization_header *q = &hdr->quant_header;
-
drivers/staging/media/ipu3/ipu3-css.c:1075:2-1076:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1089:2-1090:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1127:2-1128:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1370:2-1370:6: int q, r, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1407:2-1407:6: int q;
-
drivers/staging/media/ipu3/ipu3-css.c:1438:2-1438:18: unsigned int p, q, i;
-
drivers/staging/media/ipu3/ipu3-css.c:1482:2-1482:18: unsigned int p, q, i, abi_buf_num;
-
drivers/staging/media/ipu3/ipu3-css.c:1519:2-1519:9: int r, q, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1715:2-1715:25: struct imgu_css_queue *q;
-
drivers/staging/media/omap4iss/iss_video.c:1098:2-1098:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c:1853:2-1853:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1303:2-1303:20: struct list_head *q, *buf_head;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1353:2-1353:20: struct list_head *q, *buf_head;
-
drivers/target/target_core_file.c:136:3-136:57: struct request_queue *q = bdev_get_queue(I_BDEV(inode));
-
drivers/target/target_core_iblock.c:72:2-72:24: struct request_queue *q;
-
drivers/target/target_core_iblock.c:694:3-694:58: struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
-
drivers/target/target_core_iblock.c:793:2-793:45: struct request_queue *q = bdev_get_queue(bd);
-
drivers/target/target_core_iblock.c:854:2-854:45: struct request_queue *q = bdev_get_queue(bd);
-
drivers/target/target_core_pscsi.c:291:2-291:32: struct request_queue *q = sd->request_queue;
-
drivers/thunderbolt/quirks.c:37:3-37:42: const struct tb_quirk *q = &tb_quirks[i];
-
drivers/tty/vt/consolemap.c:203:2-203:17: unsigned char *q;
-
drivers/tty/vt/consolemap.c:228:2-228:7: u16 *q;
-
drivers/tty/vt/consolemap.c:300:2-300:30: struct uni_pagedir *p, *q = NULL;
-
drivers/tty/vt/consolemap.c:437:2-437:22: struct uni_pagedir *q;
-
drivers/tty/vt/consolemap.c:506:2-506:26: struct uni_pagedir *p, *q;
-
drivers/tty/vt/consolemap.c:539:2-539:26: struct uni_pagedir *p, *q;
-
drivers/tty/vt/consolemap.c:662:2-662:7: u16 *q;
-
drivers/tty/vt/consolemap.c:717:2-717:22: struct uni_pagedir *q;
-
drivers/tty/vt/vt.c:662:3-662:12: u16 *q = p;
-
drivers/tty/vt/vt.c:767:3-767:12: u16 *q = p;
-
drivers/usb/core/devio.c:665:2-665:24: struct list_head *p, *q, hitlist;
-
drivers/usb/host/ehci-sched.c:2360:2-2360:20: union ehci_shadow q, *q_p;
-
drivers/usb/host/fotg210-hcd.c:3340:2-3340:51: union fotg210_shadow *q = &fotg210->pshadow[frame];
-
drivers/usb/host/fotg210-hcd.c:4585:2-4585:23: union fotg210_shadow q, *q_p;
-
drivers/usb/host/oxu210hp-hcd.c:2270:2-2270:44: union ehci_shadow *q = &oxu->pshadow[frame];
-
drivers/usb/host/oxu210hp-hcd.c:2692:3-2692:21: union ehci_shadow q, *q_p;
-
drivers/vhost/scsi.c:581:4-581:33: struct vhost_scsi_virtqueue *q;
-
drivers/video/fbdev/aty/mach64_ct.c:211:2-211:6: u32 q;
-
drivers/video/fbdev/aty/mach64_ct.c:407:2-407:6: u32 q, memcntl, trp;
-
drivers/video/fbdev/hgafb.c:282:2-282:20: void __iomem *p, *q;
-
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c:416:2-416:20: unsigned itc, ec, q, sc;
-
drivers/xen/events/events_fifo.c:105:2-105:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:285:2-285:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:330:2-330:11: unsigned q;
-
drivers/xen/gntdev-dmabuf.c:684:2-684:24: struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
-
drivers/xen/gntdev-dmabuf.c:729:2-729:24: struct gntdev_dmabuf *q, *gntdev_dmabuf;
-
fs/afs/addr_list.c:136:3-136:15: const char *q, *stop;
-
fs/autofs/expire.c:101:2-101:17: struct dentry *q;
-
fs/block_dev.c:314:2-314:47: struct request_queue *q = bdev_get_queue(bdev);
-
fs/btrfs/disk-io.c:3863:2-3863:55: struct request_queue *q = bdev_get_queue(device->bdev);
-
fs/btrfs/ioctl.c:521:2-521:24: struct request_queue *q;
-
fs/btrfs/volumes.c:622:2-622:24: struct request_queue *q;
-
fs/btrfs/volumes.c:2571:2-2571:24: struct request_queue *q;
-
fs/ceph/caps.c:919:5-919:21: struct rb_node *q;
-
fs/configfs/dir.c:1631:2-1631:37: struct list_head *p, *q = &cursor->s_sibling;
-
fs/dcache.c:1879:2-1879:14: struct qstr q;
-
fs/efivarfs/super.c:89:2-89:14: struct qstr q;
-
fs/erofs/zdata.c:735:2-735:38: struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-
fs/erofs/zdata.c:1120:2-1120:34: struct z_erofs_decompressqueue *q;
-
fs/erofs/zdata.c:1188:2-1188:48: struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-
fs/exfat/super.c:629:3-629:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/ext4/ioctl.c:1103:3-1103:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/ext4/namei.c:773:2-773:38: struct dx_entry *at, *entries, *p, *q, *m;
-
fs/ext4/namei.c:1238:2-1238:27: struct dx_map_entry *p, *q, *top = map + count - 1;
-
fs/ext4/super.c:5075:3-5075:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/f2fs/checkpoint.c:1761:2-1761:32: wait_queue_head_t *q = &cprc->ckpt_wait_queue;
-
fs/f2fs/file.c:2314:2-2314:53: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/f2fs/file.c:3790:2-3790:47: struct request_queue *q = bdev_get_queue(bdev);
-
fs/f2fs/segment.c:595:2-595:31: wait_queue_head_t *q = &fcc->flush_wait_queue;
-
fs/f2fs/segment.c:1144:2-1144:47: struct request_queue *q = bdev_get_queue(bdev);
-
fs/f2fs/segment.c:1323:2-1323:47: struct request_queue *q = bdev_get_queue(bdev);
-
fs/f2fs/segment.c:1725:2-1725:31: wait_queue_head_t *q = &dcc->discard_wait_queue;
-
fs/fat/file.c:130:2-130:53: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/fat/inode.c:1877:3-1877:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/fs_context.c:386:3-388:31: char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
-
fs/fs_pin.c:88:3-88:22: struct hlist_node *q;
-
fs/gfs2/quota.c:823:2-823:20: struct gfs2_quota q;
-
fs/gfs2/quota.c:977:2-977:20: struct gfs2_quota q;
-
fs/gfs2/rgrp.c:1399:2-1399:62: struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
-
fs/hpfs/alloc.c:122:2-122:14: unsigned i, q;
-
fs/hpfs/ea.c:289:4-289:44: secno q = hpfs_alloc_sector(s, fno, 1, 0);
-
fs/inode.c:2205:2-2205:2: DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
fs/iomap/direct-io.c:54:2-54:28: struct request_queue *q = READ_ONCE(kiocb->private);
-
fs/jffs2/compr_rubin.c:202:2-202:35: unsigned long p = rs->p, q = rs->q;
-
fs/jfs/ioctl.c:125:3-125:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/jfs/super.c:377:4-377:55: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/jfs/super.c:396:4-396:55: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/namespace.c:1831:2-1831:26: struct mount *res, *p, *q, *r, *parent;
-
fs/namespace.c:2176:3-2176:17: struct mount *q;
-
fs/namespace.c:3320:2-3320:20: struct mount *p, *q;
-
fs/nfs/nfs4proc.c:7296:2-7296:31: wait_queue_head_t *q = &clp->cl_lock_waitq;
-
fs/nfsd/blocklayout.c:217:2-217:43: struct request_queue *q = bdev->bd_disk->queue;
-
fs/nilfs2/ioctl.c:1070:2-1070:57: struct request_queue *q = bdev_get_queue(nilfs->ns_bdev);
-
fs/ocfs2/ioctl.c:921:3-921:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/proc/base.c:500:4-500:8: int q;
-
fs/proc/bootconfig.c:31:2-31:7: char q;
-
fs/ufs/inode.c:131:2-131:26: Indirect chain[4], *q = chain;
-
fs/xfs/xfs_discard.c:155:2-155:68: struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
-
fs/xfs/xfs_dquot.c:73:2-73:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_dquot.c:186:2-186:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_qm_syscalls.c:101:2-101:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_qm_syscalls.c:507:2-507:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_quotaops.c:60:2-60:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_super.c:1561:3-1561:54: struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
fs/xfs/xfs_trans_dquot.c:629:2-629:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_trans_dquot.c:855:2-855:27: struct xfs_qoff_logitem *q;
-
include/linux/blkdev.h:1104:2-1104:32: struct request_queue *q = rq->q;
-
include/linux/blkdev.h:1509:2-1509:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1552:2-1552:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1562:2-1562:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1572:2-1572:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1582:2-1582:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1592:2-1592:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1602:2-1602:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1611:2-1611:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1620:2-1620:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/net/pkt_cls.h:177:2-177:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/pkt_cls.h:201:2-201:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/sch_generic.h:501:2-501:20: struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
-
include/net/sch_generic.h:726:3-726:27: const struct Qdisc *q = rcu_dereference(txq->qdisc);
-
init/initramfs.c:80:2-80:20: struct hash **p, *q;
-
init/initramfs.c:107:2-107:20: struct hash **p, *q;
-
ipc/sem.c:283:2-283:20: struct sem_queue *q, *tq;
-
ipc/sem.c:853:2-853:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:947:2-947:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:1106:2-1106:20: struct sem_queue *q;
-
ipc/sem.c:1142:2-1142:20: struct sem_queue *q, *tq;
-
kernel/audit_tree.c:611:2-611:24: struct list_head *p, *q;
-
kernel/auditsc.c:278:2-278:26: struct audit_tree_refs *q;
-
kernel/auditsc.c:305:2-305:30: struct audit_tree_refs *p, *q;
-
kernel/bpf/cpumap.c:622:2-622:19: struct ptr_ring *q;
-
kernel/cgroup/pids.c:146:2-146:26: struct pids_cgroup *p, *q;
-
kernel/crash_core.c:198:3-198:9: char *q;
-
kernel/events/uprobes.c:324:2-324:26: struct list_head *pos, *q;
-
kernel/events/uprobes.c:1328:2-1328:26: struct list_head *pos, *q;
-
kernel/futex.c:2689:2-2689:21: struct futex_q q = futex_q_init;
-
kernel/futex.c:2781:2-2781:21: struct futex_q q = futex_q_init;
-
kernel/futex.c:3176:2-3176:21: struct futex_q q = futex_q_init;
-
kernel/latencytop.c:97:3-97:7: int q, same = 1;
-
kernel/latencytop.c:154:2-154:9: int i, q;
-
kernel/latencytop.c:227:4-227:8: int q;
-
kernel/ptrace.c:706:2-706:19: struct sigqueue *q;
-
kernel/signal.c:415:2-415:23: struct sigqueue *q = NULL;
-
kernel/signal.c:463:2-463:19: struct sigqueue *q;
-
kernel/signal.c:492:2-492:19: struct sigqueue *q, *n;
-
kernel/signal.c:571:2-571:19: struct sigqueue *q, *first = NULL;
-
kernel/signal.c:711:2-711:19: struct sigqueue *q, *sync = NULL;
-
kernel/signal.c:782:2-782:19: struct sigqueue *q, *n;
-
kernel/signal.c:1075:2-1075:19: struct sigqueue *q;
-
kernel/signal.c:1810:2-1810:66: struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
-
kernel/trace/blktrace.c:717:2-717:24: struct request_queue *q;
-
kernel/trace/blktrace.c:969:2-969:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:999:2-999:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1758:2-1758:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:1792:2-1792:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/trace.c:3557:2-3557:8: char *q;
-
kernel/trace/trace_boot.c:248:2-248:8: char *q;
-
kernel/trace/trace_events_filter.c:1161:2-1161:7: char q;
-
kernel/trace/trace_events_filter.c:1270:3-1270:17: char q = str[i];
-
kernel/trace/trace_events_inject.c:105:3-105:17: char q = str[i];
-
kernel/watch_queue.c:291:2-291:28: struct watch_type_filter *q;
-
lib/bch.c:909:2-909:37: struct gf_poly *q = bch->poly_2t[1];
-
lib/bootconfig.c:776:2-776:12: char *p, *q;
-
lib/crc32.c:82:2-82:6: u32 q;
-
lib/crypto/curve25519-hacl64.c:36:2-36:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
lib/crypto/curve25519-hacl64.c:766:2-766:7: u64 *q;
-
lib/mpi/mpih-div.c:248:5-248:16: mpi_limb_t q;
-
lib/mpi/mpih-div.c:315:5-315:16: mpi_limb_t q;
-
lib/raid6/avx2.c:37:2-37:10: u8 *p, *q;
-
lib/raid6/avx2.c:86:2-86:10: u8 *p, *q;
-
lib/raid6/avx2.c:144:2-144:10: u8 *p, *q;
-
lib/raid6/avx2.c:196:2-196:10: u8 *p, *q;
-
lib/raid6/avx2.c:276:2-276:10: u8 *p, *q;
-
lib/raid6/avx2.c:357:2-357:10: u8 *p, *q;
-
lib/raid6/avx512.c:47:2-47:10: u8 *p, *q;
-
lib/raid6/avx512.c:105:2-105:10: u8 *p, *q;
-
lib/raid6/avx512.c:174:2-174:10: u8 *p, *q;
-
lib/raid6/avx512.c:237:2-237:10: u8 *p, *q;
-
lib/raid6/avx512.c:333:2-333:10: u8 *p, *q;
-
lib/raid6/avx512.c:427:2-427:10: u8 *p, *q;
-
lib/raid6/recov.c:23:2-23:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov.c:67:2-67:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx2.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx2.c:189:2-189:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx512.c:27:2-27:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx512.c:230:2-230:10: u8 *p, *q, *dq;
-
lib/raid6/recov_ssse3.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_ssse3.c:194:2-194:10: u8 *p, *q, *dq;
-
lib/raid6/sse2.c:39:2-39:10: u8 *p, *q;
-
lib/raid6/sse2.c:91:2-91:10: u8 *p, *q;
-
lib/raid6/sse2.c:149:2-149:10: u8 *p, *q;
-
lib/raid6/sse2.c:202:2-202:10: u8 *p, *q;
-
lib/raid6/sse2.c:281:2-281:10: u8 *p, *q;
-
lib/raid6/sse2.c:368:2-368:10: u8 *p, *q;
-
lib/reed_solomon/decode_rs.c:23:2-23:14: uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
-
lib/string_helpers.c:135:2-135:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:163:2-163:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:182:2-182:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:206:2-206:24: char *p = *dst, *q = *src;
-
lib/test_hexdump.c:99:3-99:26: const char *q = *result++;
-
lib/ts_kmp.c:45:2-45:22: unsigned int i, q = 0, text_len, consumed = state->offset;
-
lib/ts_kmp.c:77:2-77:18: unsigned int k, q;
-
lib/vsprintf.c:190:2-190:11: unsigned q;
-
lib/vsprintf.c:232:2-232:11: unsigned q;
-
mm/filemap.c:1099:2-1099:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1334:2-1334:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1341:2-1341:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1361:2-1361:21: wait_queue_head_t *q;
-
mm/filemap.c:1377:2-1377:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1504:2-1504:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1513:2-1513:44: wait_queue_head_t *q = page_waitqueue(page);
-
mm/filemap.c:1521:2-1521:49: struct wait_queue_head *q = page_waitqueue(page);
-
mm/kasan/quarantine.c:174:2-174:21: struct qlist_head *q;
-
mm/kasan/quarantine.c:304:2-304:21: struct qlist_head *q;
-
mm/kasan/quarantine.c:351:2-351:21: struct qlist_head *q;
-
mm/swapfile.c:3111:2-3111:51: struct request_queue *q = bdev_get_queue(si->bdev);
-
mm/z3fold.c:706:3-706:9: void *q;
-
net/atm/lec.c:872:2-872:6: int q;
-
net/bluetooth/hci_core.c:4439:3-4439:12: int cnt, q;
-
net/bluetooth/hci_core.c:4494:2-4494:11: int cnt, q, conn_num = 0;
-
net/core/dev.c:2422:3-2422:40: int q = netdev_get_prio_tc_map(dev, i);
-
net/core/dev.c:3085:3-3085:21: struct Qdisc *q = rcu_dereference(txq->qdisc);
-
net/core/dev.c:3096:3-3096:17: struct Qdisc *q;
-
net/core/dev.c:4110:2-4110:16: struct Qdisc *q;
-
net/core/dev.c:4955:4-4955:22: struct Qdisc *q = head;
-
net/core/pktgen.c:3231:2-3231:20: struct list_head *q, *n;
-
net/core/pktgen.c:3253:2-3253:20: struct list_head *q, *n;
-
net/core/pktgen.c:3735:2-3735:20: struct list_head *q, *n;
-
net/core/pktgen.c:3831:2-3831:20: struct list_head *q, *n;
-
net/core/skbuff.c:1253:2-1253:23: struct sk_buff_head *q;
-
net/core/skbuff.c:4650:2-4650:32: struct sk_buff_head *q = &sk->sk_error_queue;
-
net/ieee802154/6lowpan/reassembly.c:70:2-70:26: struct inet_frag_queue *q;
-
net/ipv4/af_inet.c:1942:2-1942:23: struct inet_protosw *q;
-
net/ipv4/inet_fragment.c:254:2-254:30: struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
-
net/ipv4/inet_fragment.c:310:2-310:26: struct inet_frag_queue *q;
-
net/ipv4/inet_fragment.c:332:2-332:26: struct inet_frag_queue *q;
-
net/ipv4/ip_fragment.c:214:2-214:26: struct inet_frag_queue *q;
-
net/ipv4/tcp_fastopen.c:73:2-73:25: struct fastopen_queue *q;
-
net/ipv4/tcp_output.c:1047:2-1047:20: struct list_head *q, *n;
-
net/ipv6/netfilter/nf_conntrack_reasm.c:159:2-159:26: struct inet_frag_queue *q;
-
net/ipv6/reassembly.c:93:2-93:26: struct inet_frag_queue *q;
-
net/mac80211/debugfs.c:529:2-529:6: int q, res = 0;
-
net/mac80211/ethtool.c:75:2-75:9: int i, q;
-
net/mac80211/mlme.c:1858:2-1858:6: int q;
-
net/mac80211/rx.c:2748:2-2748:10: u16 ac, q, hdrlen;
-
net/mac80211/tx.c:1615:3-1615:17: int q = info->hw_queue;
-
net/mac80211/tx.c:4151:2-4151:16: int q = info->hw_queue;
-
net/netfilter/nfnetlink_queue.c:780:2-780:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:922:2-922:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:955:2-955:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:972:2-972:53: struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
-
net/netfilter/nfnetlink_queue.c:1066:2-1066:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1180:2-1180:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1261:2-1261:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1410:2-1410:25: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1431:3-1431:26: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1496:2-1496:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1514:2-1514:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/xt_quota.c:29:2-29:41: struct xt_quota_info *q = (void *)par->matchinfo;
-
net/netfilter/xt_quota.c:48:2-48:33: struct xt_quota_info *q = par->matchinfo;
-
net/netfilter/xt_quota.c:64:2-64:39: const struct xt_quota_info *q = par->matchinfo;
-
net/rds/message.c:96:2-96:30: struct rds_msg_zcopy_queue *q;
-
net/rds/recv.c:600:2-600:39: struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
-
net/rose/rose_in.c:265:2-265:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/rxrpc/rxkad.c:999:2-999:10: u8 *p, *q, *name, *end;
-
net/sched/cls_api.c:1954:2-1954:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2180:2-2180:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2340:2-2340:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2543:2-2543:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2804:2-2804:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2935:2-2935:20: struct Qdisc *q = NULL;
-
net/sched/cls_flow.c:503:4-503:50: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_fw.c:75:3-75:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_tcindex.c:114:3-114:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:132:2-132:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:176:2-176:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:203:2-203:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:261:2-261:16: struct Qdisc *q;
-
net/sched/sch_api.c:300:2-300:16: struct Qdisc *q;
-
net/sched/sch_api.c:319:2-319:16: struct Qdisc *q;
-
net/sched/sch_api.c:352:2-352:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:1036:2-1036:20: struct Qdisc *q = old;
-
net/sched/sch_api.c:1426:2-1426:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:1511:2-1511:16: struct Qdisc *q, *p;
-
net/sched/sch_api.c:1690:2-1690:16: struct Qdisc *q;
-
net/sched/sch_api.c:1916:3-1916:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:1994:2-1994:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:2173:2-2173:16: struct Qdisc *q;
-
net/sched/sch_cake.c:1499:2-1499:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1613:2-1613:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1653:2-1653:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1693:2-1693:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1908:2-1908:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1932:2-1932:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1943:2-1943:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2290:2-2290:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2310:2-2310:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2399:2-2399:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2443:2-2443:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2480:2-2480:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2509:2-2509:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2564:2-2564:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2688:2-2688:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2698:2-2698:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2773:2-2773:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2848:2-2848:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2967:2-2967:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2984:2-2984:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:3057:2-3057:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:207:2-207:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:293:2-293:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:317:2-317:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:362:2-362:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:396:2-396:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:481:2-481:29: struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
-
net/sched/sch_cbq.c:642:2-642:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:681:2-681:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:784:2-784:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:803:2-803:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:913:2-913:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:981:2-981:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:1008:2-1008:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:1028:2-1028:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1097:2-1097:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:1160:2-1160:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1322:2-1322:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1340:2-1340:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1376:2-1376:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1428:2-1428:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1435:2-1435:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1449:2-1449:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1481:2-1481:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1681:2-1681:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1720:2-1720:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1732:2-1732:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1754:2-1754:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:108:2-108:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:117:2-117:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:134:2-134:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:178:2-178:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:233:2-233:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:241:2-241:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:336:2-336:25: struct cbs_sched_data *q;
-
net/sched/sch_cbs.c:364:2-364:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:404:2-404:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:435:2-435:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:454:2-454:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:481:2-481:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:495:2-495:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:510:2-510:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:117:2-117:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:216:2-216:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:286:2-286:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:307:2-307:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:341:2-341:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:435:2-435:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:463:2-463:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:477:2-477:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:484:2-484:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:91:2-91:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:114:2-114:27: struct codel_sched_data *q;
-
net/sched/sch_codel.c:136:2-136:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:192:2-192:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:218:2-218:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:247:2-247:51: const struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:273:2-273:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:41:2-41:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:58:2-58:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:152:2-152:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:177:2-177:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:281:2-281:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:306:2-306:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:345:2-345:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:380:2-380:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:421:2-421:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:436:2-436:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:453:2-453:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:77:2-77:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:110:2-110:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:122:2-122:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:165:2-165:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:203:2-203:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:233:2-233:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:255:2-255:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:349:2-349:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:422:2-422:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:438:2-438:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:456:2-456:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:468:2-468:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:92:2-92:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:99:2-99:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:108:2-108:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:202:2-202:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:277:2-277:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:287:2-287:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:301:2-301:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:338:2-338:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:361:2-361:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:384:2-384:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:422:2-422:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:466:2-466:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:589:2-589:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:691:2-691:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:707:2-707:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:722:2-722:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:733:2-733:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_fifo.c:253:2-253:16: struct Qdisc *q;
-
net/sched/sch_fq.c:445:2-445:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:528:2-528:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:664:2-664:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:749:2-749:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:805:2-805:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:919:2-919:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:929:2-929:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:968:2-968:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:1009:2-1009:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:79:2-79:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:140:2-140:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:187:2-187:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:258:2-258:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:283:2-283:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:337:2-337:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:370:2-370:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:442:2-442:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:452:2-452:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:514:2-514:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:552:2-552:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:600:2-600:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:617:2-617:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:663:2-663:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:82:2-82:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:131:2-131:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:223:2-223:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:273:2-273:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:367:2-367:32: struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_fq_pie.c:389:2-389:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:442:2-442:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:477:2-477:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:502:2-502:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:524:2-524:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_generic.c:622:2-622:44: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:646:3-646:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:669:3-669:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:683:3-683:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:698:4-698:29: struct gnet_stats_queue *q;
-
net/sched/sch_generic.c:732:3-732:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:751:3-751:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:773:3-773:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:936:2-936:20: struct Qdisc *q = container_of(head, struct Qdisc, rcu);
-
net/sched/sch_generic.c:1172:3-1172:17: struct Qdisc *q;
-
net/sched/sch_gred.c:98:3-98:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:167:2-167:30: struct gred_sched_data *q = NULL;
-
net/sched/sch_gred.c:268:3-268:27: struct gred_sched_data *q;
-
net/sched/sch_gred.c:300:3-300:39: struct gred_sched_data *q = t->tab[i];
-
net/sched/sch_gred.c:332:4-332:44: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:481:2-481:43: struct gred_sched_data *q = table->tab[dp];
-
net/sched/sch_gred.c:780:3-780:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:796:3-796:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:847:3-847:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_hfsc.c:866:2-866:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:917:2-917:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1083:2-1083:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1096:2-1096:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1119:2-1119:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1245:2-1245:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1344:2-1344:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1370:2-1370:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1389:2-1389:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1431:2-1431:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1478:2-1478:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1495:2-1495:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1518:2-1518:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1582:2-1582:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:249:2-249:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:351:2-351:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:374:2-374:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:420:2-420:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:474:2-474:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:511:2-511:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:580:2-580:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:656:2-656:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:682:2-682:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:188:2-188:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:218:2-218:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:590:2-590:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:898:2-898:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:957:2-957:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:994:2-994:24: struct htb_sched *q = container_of(work, struct htb_sched, work);
-
net/sched/sch_htb.c:1019:2-1019:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1135:2-1135:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1174:2-1174:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1184:2-1184:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1224:2-1224:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1301:2-1301:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1397:2-1397:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1459:2-1459:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1494:2-1494:29: struct Qdisc *q = cl->leaf.q;
-
net/sched/sch_htb.c:1562:2-1562:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1640:2-1640:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1708:2-1708:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1999:2-1999:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2034:2-2034:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:50:2-50:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:64:2-64:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:71:2-71:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:79:2-79:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:102:2-102:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:175:2-175:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:189:2-189:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:196:2-196:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:203:2-203:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:210:2-210:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:218:2-218:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:249:2-249:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_mqprio.c:530:4-530:55: struct netdev_queue *q = netdev_get_tx_queue(dev, i);
-
net/sched/sch_multiq.c:32:2-32:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:89:2-89:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:120:2-120:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:151:2-151:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:163:2-163:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:175:2-175:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:239:2-239:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:264:2-264:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:284:2-284:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:297:2-297:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:305:2-305:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:327:2-327:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:337:2-337:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:351:2-351:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:373:2-373:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:362:2-362:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:380:2-380:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:437:2-437:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:678:2-678:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:757:2-757:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:957:2-957:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1062:2-1062:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1079:2-1079:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1140:2-1140:51: const struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1223:2-1223:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1237:2-1237:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1245:2-1245:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:88:2-88:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:141:2-141:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:425:2-425:29: struct pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_pie.c:441:2-441:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:463:2-463:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:494:2-494:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:519:2-519:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:531:2-531:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:539:2-539:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:90:2-90:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:103:2-103:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:125:2-125:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:161:2-161:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:33:2-33:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:99:2-99:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:113:2-113:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:134:2-134:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:170:2-170:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:181:2-181:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:234:2-234:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:266:2-266:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:291:2-291:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:321:2-321:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:329:2-329:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:350:2-350:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:360:2-360:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:374:2-374:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:396:2-396:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:208:2-208:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:377:2-377:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:396:2-396:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:524:2-524:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:535:2-535:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:560:2-560:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:655:2-655:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:680:2-680:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1080:2-1080:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1202:2-1202:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1405:2-1405:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1414:2-1414:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1454:2-1454:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1472:2-1472:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_red.c:73:2-73:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:151:2-151:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:168:2-168:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:176:2-176:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:186:2-186:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:215:2-215:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:238:2-238:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:319:2-319:29: struct red_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_red.c:332:2-332:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:366:2-366:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:408:2-408:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:445:2-445:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:472:2-472:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:497:2-497:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:510:2-510:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:283:2-283:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:425:2-425:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:443:2-443:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:453:2-453:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:466:2-466:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:491:2-491:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:554:2-554:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:567:2-567:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:596:2-596:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:620:2-620:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:631:2-631:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:673:2-673:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:166:2-166:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:295:2-295:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:348:2-348:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:482:2-482:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:537:2-537:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:607:2-607:29: struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
-
net/sched/sch_sfq.c:625:2-625:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:721:2-721:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:734:2-734:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:790:2-790:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:848:2-848:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:865:2-865:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:884:2-884:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:72:2-72:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:141:2-141:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:182:2-182:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:213:2-213:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:229:2-229:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:256:2-256:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:197:2-197:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:264:2-264:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:346:2-346:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:417:2-417:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:444:2-444:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:488:2-488:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:511:2-511:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:525:2-525:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:613:2-613:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:640:2-640:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:693:2-693:27: struct taprio_sched *q = container_of(timer, struct taprio_sched,
-
net/sched/sch_taprio.c:980:2-980:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1032:2-1032:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1080:2-1080:23: struct taprio_sched *q;
-
net/sched/sch_taprio.c:1311:2-1311:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1435:2-1435:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1602:2-1602:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1618:2-1618:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1649:2-1649:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1728:2-1728:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1818:2-1818:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:143:2-143:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:193:2-193:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:227:2-227:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:256:2-256:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:316:2-316:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:341:2-341:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:466:2-466:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:481:2-481:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:490:2-490:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:533:2-533:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:544:2-544:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:555:2-555:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:79:2-79:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:95:2-95:16: struct Qdisc *q;
-
net/sched/sch_teql.c:133:2-133:16: struct Qdisc *q, *prev;
-
net/sched/sch_teql.c:171:2-171:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:278:2-278:24: struct Qdisc *start, *q;
-
net/sched/sch_teql.c:355:2-355:16: struct Qdisc *q;
-
net/sched/sch_teql.c:415:2-415:16: struct Qdisc *q;
-
net/sctp/output.c:652:2-652:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/outqueue.c:385:2-385:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/socket.c:169:2-169:31: struct sctp_outq *q = &asoc->outqueue;
-
net/smc/smc_llc.c:1495:2-1495:34: struct smc_llc_qentry *qentry, *q;
-
net/sunrpc/auth_gss/auth_gss.c:162:2-162:14: const void *q;
-
net/sunrpc/auth_gss/auth_gss_internal.h:18:2-18:54: const void *q = (const void *)((const char *)p + len);
-
net/sunrpc/auth_gss/auth_gss_internal.h:28:2-28:14: const void *q;
-
net/sunrpc/auth_gss/gss_krb5_wrap.c:120:2-120:18: u64 *q = (u64 *)p;
-
net/sunrpc/rpc_pipe.c:634:2-634:18: struct qstr q = QSTR_INIT(name, strlen(name));
-
net/sunrpc/rpc_pipe.c:1304:2-1304:18: struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
-
net/sunrpc/sched.c:156:2-156:20: struct list_head *q;
-
net/sunrpc/sched.c:591:2-591:20: struct list_head *q;
-
net/sunrpc/xdr.c:1003:2-1003:10: __be32 *q;
-
net/sunrpc/xdr.c:1336:2-1336:18: __be32 *q = p + nwords;
-
net/x25/x25_in.c:418:2-418:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/xdp/xsk.c:643:2-643:20: struct xsk_queue *q;
-
net/xdp/xsk.c:949:3-949:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1005:3-1005:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1187:2-1187:24: struct xsk_queue *q = NULL;
-
net/xdp/xsk_queue.c:25:2-25:20: struct xsk_queue *q;
-
samples/v4l/v4l2-pci-skeleton.c:762:2-762:20: struct vb2_queue *q;
-
scripts/dtc/libfdt/fdt_ro.c:260:3-260:44: const char *q = memchr(path, '/', end - p);
-
scripts/dtc/libfdt/fdt_ro.c:274:3-274:15: const char *q;
-
security/integrity/evm/evm_main.c:569:2-569:26: struct list_head *pos, *q;
-
security/keys/keyctl_pkey.c:42:2-42:31: char *c = params->info, *p, *q;
-
security/selinux/hooks.c:2642:4-2642:14: char *p, *q;
-
security/selinux/hooks.c:3591:3-3591:15: struct qstr q;
-
sound/core/misc.c:113:2-113:30: const struct snd_pci_quirk *q;
-
sound/core/pcm_lib.c:534:2-534:15: unsigned int q;
-
sound/core/pcm_lib.c:801:3-801:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:840:3-840:23: unsigned int q = i->max;
-
sound/core/pcm_lib.c:917:3-917:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:949:3-949:23: unsigned int q = i->max;
-
sound/core/seq/oss/seq_oss_readq.c:35:2-35:24: struct seq_oss_readq *q;
-
sound/core/seq/oss/seq_oss_writeq.c:27:2-27:25: struct seq_oss_writeq *q;
-
sound/core/seq/seq_clientmgr.c:574:2-574:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1545:2-1545:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1577:2-1577:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1598:2-1598:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1631:2-1631:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1759:3-1759:25: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:71:2-71:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:98:2-98:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:170:2-170:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:189:2-189:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:205:2-205:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:222:2-222:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:292:2-292:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:377:2-377:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:397:2-397:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:463:2-463:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:526:2-526:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:547:2-547:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:578:2-578:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:593:2-593:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:691:2-691:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:721:2-721:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_timer.c:125:2-125:36: struct snd_seq_queue *q = timeri->callback_data;
-
sound/core/seq/seq_timer.c:472:2-472:24: struct snd_seq_queue *q;
-
sound/pci/ac97/ac97_codec.c:2860:2-2860:28: const struct quirk_table *q;
-
sound/pci/atiixp.c:552:2-552:30: const struct snd_pci_quirk *q;
-
sound/pci/emu10k1/memory.c:169:2-169:29: struct snd_emu10k1_memblk *q;
-
sound/pci/emu10k1/memory.c:454:2-454:29: struct snd_emu10k1_memblk *q;
-
sound/pci/hda/hda_auto_parser.c:982:2-982:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1551:2-1551:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1647:2-1647:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1688:2-1688:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:2250:3-2250:31: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_hdmi.c:1948:2-1948:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_realtek.c:1029:2-1029:43: const struct alc_codec_rename_pci_table *q;
-
sound/pci/hda/patch_realtek.c:1102:2-1102:30: const struct snd_pci_quirk *q;
-
sound/pci/nm256/nm256.c:1654:2-1654:30: const struct snd_pci_quirk *q;
-
sound/soc/codecs/tas2552.c:187:3-187:19: unsigned int d, q, t;
-
sound/usb/quirks.c:1884:2-1884:35: const struct registration_quirk *q;