Symbol: q
function parameter
Defined in the following locations:
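
Note: in the block-layer entries below, q is the conventional name for the struct request_queue * parameter. As a minimal, hypothetical sketch of that convention (example_disable_merging() is not part of this listing; blk_queue_flag_test_and_set() is, see block/blk-core.c:106 below):

    #include <linux/blkdev.h>

    /*
     * Illustrative sketch only: a hypothetical helper that follows the
     * block-layer convention of calling the request queue parameter "q".
     * It returns true if merging was already disabled on this queue.
     */
    static bool example_disable_merging(struct request_queue *q)
    {
            return blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q);
    }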
-
arch/x86/crypto/curve25519-x86_64.c:975:34-975:39: static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
-
arch/x86/lib/msr-smp.c:52:49-52:54: int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
arch/x86/lib/msr-smp.c:83:49-83:53: int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:209:54-209:58: int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
-
arch/x86/lib/msr-smp.c:225:54-225:59: int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-
block/bfq-cgroup.c:350:34-350:56: void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
-
block/bfq-cgroup.c:523:57-523:79: static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
-
block/bfq-iosched.c:438:41-438:63: static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
-
block/bfq-iosched.c:2357:8-2357:30: struct request_queue *q)
-
block/bfq-iosched.c:2392:32-2392:54: static void bfq_remove_request(struct request_queue *q,
-
block/bfq-iosched.c:2457:27-2457:49: static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
-
block/bfq-iosched.c:2496:30-2496:52: static int bfq_request_merge(struct request_queue *q, struct request **req,
-
block/bfq-iosched.c:2514:32-2514:54: static void bfq_request_merged(struct request_queue *q, struct request *req,
-
block/bfq-iosched.c:2571:33-2571:55: static void bfq_requests_merged(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3207:33-3207:55: static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3633:33-3633:55: static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:5168:39-5168:61: static void bfq_update_dispatch_stats(struct request_queue *q,
-
block/bfq-iosched.c:6104:37-6104:59: static void bfq_update_insert_stats(struct request_queue *q,
-
block/bfq-iosched.c:7071:27-7071:49: static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-
block/bio.c:918:34-918:56: static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
-
block/bio.c:947:21-947:43: int bio_add_hw_page(struct request_queue *q, struct bio *bio,
-
block/bio.c:1002:21-1002:43: int bio_add_pc_page(struct request_queue *q, struct bio *bio,
-
block/blk-cgroup.c:79:34-79:56: static bool blkcg_policy_enabled(struct request_queue *q,
-
block/blk-cgroup.c:210:57-210:79: static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
-
block/blk-cgroup.c:267:11-267:33: struct request_queue *q, bool update_hint)
-
block/blk-cgroup.c:295:9-295:31: struct request_queue *q,
-
block/blk-cgroup.c:388:3-388:25: struct request_queue *q)
-
block/blk-cgroup.c:485:30-485:52: static void blkg_destroy_all(struct request_queue *q)
-
block/blk-cgroup.c:622:8-622:30: struct request_queue *q)
-
block/blk-cgroup.c:1265:22-1265:44: int blkcg_init_queue(struct request_queue *q)
-
block/blk-cgroup.c:1323:23-1323:45: void blkcg_exit_queue(struct request_queue *q)
-
block/blk-cgroup.c:1394:27-1394:49: int blkcg_activate_policy(struct request_queue *q,
-
block/blk-cgroup.c:1497:30-1497:52: void blkcg_deactivate_policy(struct request_queue *q,
-
block/blk-cgroup.c:1850:30-1850:52: void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
-
block/blk-cgroup.h:241:11-241:33: struct request_queue *q,
-
block/blk-cgroup.h:265:9-265:31: struct request_queue *q)
-
block/blk-cgroup.h:277:52-277:74: static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-
block/blk-core.c:81:44-81:66: void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:92:46-92:68: void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:106:53-106:75: bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
-
block/blk-core.c:229:21-229:43: void blk_sync_queue(struct request_queue *q)
-
block/blk-core.c:240:22-240:44: void blk_set_pm_only(struct request_queue *q)
-
block/blk-core.c:246:24-246:46: void blk_clear_pm_only(struct request_queue *q)
-
block/blk-core.c:267:20-267:42: void blk_put_queue(struct request_queue *q)
-
block/blk-core.c:273:28-273:50: void blk_queue_start_drain(struct request_queue *q)
-
block/blk-core.c:296:24-296:46: void blk_cleanup_queue(struct request_queue *q)
-
block/blk-core.c:335:21-335:43: int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
-
block/blk-core.c:362:23-362:45: int __bio_queue_enter(struct request_queue *q, struct bio *bio)
-
block/blk-core.c:396:21-396:43: void blk_queue_exit(struct request_queue *q)
-
block/blk-core.c:505:20-505:42: bool blk_get_queue(struct request_queue *q)
-
block/blk-core.c:608:50-608:72: static inline blk_status_t blk_check_zone_append(struct request_queue *q,
-
block/blk-core.c:1082:18-1082:40: int blk_lld_busy(struct request_queue *q)
-
block/blk-crypto-profile.c:455:5-455:27: struct request_queue *q)
-
block/blk-crypto-sysfs.c:129:31-129:53: int blk_crypto_sysfs_register(struct request_queue *q)
-
block/blk-crypto-sysfs.c:152:34-152:56: void blk_crypto_sysfs_unregister(struct request_queue *q)
-
block/blk-crypto.c:360:34-360:56: bool blk_crypto_config_supported(struct request_queue *q,
-
block/blk-crypto.c:383:11-383:33: struct request_queue *q)
-
block/blk-crypto.c:403:26-403:48: int blk_crypto_evict_key(struct request_queue *q,
-
block/blk-flush.c:100:21-100:43: blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-
block/blk-flush.c:292:28-292:50: static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-
block/blk-integrity.c:27:31-27:53: int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
-
block/blk-integrity.c:68:29-68:51: int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-
block/blk-integrity.c:164:29-164:51: bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
-
block/blk-integrity.c:187:30-187:52: bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
-
block/blk-ioc.c:172:22-172:44: void ioc_clear_queue(struct request_queue *q)
-
block/blk-ioc.c:326:30-326:52: struct io_cq *ioc_lookup_icq(struct request_queue *q)
-
block/blk-ioc.c:365:37-365:59: static struct io_cq *ioc_create_icq(struct request_queue *q)
-
block/blk-ioc.c:409:32-409:54: struct io_cq *ioc_find_get_icq(struct request_queue *q)
-
block/blk-iocost.c:662:29-662:51: static struct ioc *q_to_ioc(struct request_queue *q)
-
block/blk-iocost.c:667:27-667:49: static const char *q_name(struct request_queue *q)
-
block/blk-iocost.c:2835:28-2835:50: static int blk_iocost_init(struct request_queue *q)
-
block/blk-iocost.c:2917:57-2917:79: static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
-
block/blk-iolatency.c:761:24-761:46: int blk_iolatency_init(struct request_queue *q)
-
block/blk-iolatency.c:954:10-954:32: struct request_queue *q,
-
block/blk-ioprio.c:121:28-121:50: ioprio_alloc_pd(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)
-
block/blk-ioprio.c:227:21-227:43: int blk_ioprio_init(struct request_queue *q)
-
block/blk-map.c:342:33-342:55: static struct bio *bio_map_kern(struct request_queue *q, void *data,
-
block/blk-map.c:427:34-427:56: static struct bio *bio_copy_kern(struct request_queue *q, void *data,
-
block/blk-map.c:529:25-529:47: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-
block/blk-map.c:573:21-573:43: int blk_rq_map_user(struct request_queue *q, struct request *rq,
-
block/blk-map.c:634:21-634:43: int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-
block/blk-merge.c:52:33-52:55: static inline bool bio_will_gap(struct request_queue *q,
-
block/blk-merge.c:98:42-98:64: static struct bio *blk_bio_discard_split(struct request_queue *q,
-
block/blk-merge.c:142:47-142:69: static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
-
block/blk-merge.c:164:40-164:62: static inline unsigned get_max_io_size(struct request_queue *q,
-
block/blk-merge.c:181:45-181:73: static inline unsigned get_max_segment_size(const struct request_queue *q,
-
block/blk-merge.c:217:29-217:57: static bool bvec_split_segs(const struct request_queue *q,
-
block/blk-merge.c:265:42-265:64: static struct bio *blk_bio_segment_split(struct request_queue *q,
-
block/blk-merge.c:325:24-325:46: void __blk_queue_split(struct request_queue *q, struct bio **bio,
-
block/blk-merge.c:423:33-423:55: static unsigned blk_bvec_map_sg(struct request_queue *q,
-
block/blk-merge.c:468:28-468:50: __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
-
block/blk-merge.c:488:30-488:52: static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:528:21-528:43: int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:640:39-640:61: static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
-
block/blk-merge.c:658:33-658:55: static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-
block/blk-merge.c:745:38-745:60: static struct request *attempt_merge(struct request_queue *q,
-
block/blk-merge.c:825:43-825:65: static struct request *attempt_back_merge(struct request_queue *q,
-
block/blk-merge.c:836:44-836:66: static struct request *attempt_front_merge(struct request_queue *q,
-
block/blk-merge.c:852:28-852:50: bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:965:56-965:78: static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
-
block/blk-merge.c:990:52-990:74: static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
-
block/blk-merge.c:1037:29-1037:51: bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-
block/blk-merge.c:1069:25-1069:47: bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
-
block/blk-merge.c:1094:29-1094:51: bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-cpumap.c:19:34-19:44: unsigned int nr_queues, const int q)
-
block/blk-mq-debugfs.c:681:30-681:52: void blk_mq_debugfs_register(struct request_queue *q)
-
block/blk-mq-debugfs.c:726:35-726:57: void blk_mq_debugfs_register_hctx(struct request_queue *q,
-
block/blk-mq-debugfs.c:751:36-751:58: void blk_mq_debugfs_register_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:760:38-760:60: void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
-
block/blk-mq-debugfs.c:769:36-769:58: void blk_mq_debugfs_register_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:790:38-790:60: void blk_mq_debugfs_unregister_sched(struct request_queue *q)
-
block/blk-mq-debugfs.c:843:41-843:63: void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-
block/blk-mq-sched.c:345:29-345:51: bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-
block/blk-mq-sched.c:381:36-381:58: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sched.c:498:43-498:65: static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
-
block/blk-mq-sched.c:522:40-522:62: static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
-
block/blk-mq-sched.c:558:23-558:45: int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/blk-mq-sched.c:631:28-631:50: void blk_mq_sched_free_rqs(struct request_queue *q)
-
block/blk-mq-sched.c:648:24-648:46: void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
-
block/blk-mq-sched.h:44:26-44:48: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sysfs.c:206:48-206:70: void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
-
block/blk-mq-sysfs.c:228:26-228:48: void blk_mq_sysfs_deinit(struct request_queue *q)
-
block/blk-mq-sysfs.c:240:24-240:46: void blk_mq_sysfs_init(struct request_queue *q)
-
block/blk-mq-sysfs.c:255:47-255:69: int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
-
block/blk-mq-sysfs.c:293:30-293:52: void blk_mq_sysfs_unregister(struct request_queue *q)
-
block/blk-mq-sysfs.c:309:27-309:49: int blk_mq_sysfs_register(struct request_queue *q)
-
block/blk-mq-tag.c:310:53-310:75: static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
-
block/blk-mq-tag.c:496:33-496:55: void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
-
block/blk-mq-tag.c:660:42-660:64: void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
-
block/blk-mq.c:71:52-71:74: static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
-
block/blk-mq.c:144:31-144:53: unsigned int blk_mq_in_flight(struct request_queue *q,
-
block/blk-mq.c:154:26-154:48: void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
-
block/blk-mq.c:164:29-164:51: void blk_freeze_queue_start(struct request_queue *q)
-
block/blk-mq.c:178:31-178:53: void blk_mq_freeze_queue_wait(struct request_queue *q)
-
block/blk-mq.c:184:38-184:60: int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
-
block/blk-mq.c:197:23-197:45: void blk_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:210:26-210:48: void blk_mq_freeze_queue(struct request_queue *q)
-
block/blk-mq.c:220:30-220:52: void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
-
block/blk-mq.c:234:28-234:50: void blk_mq_unfreeze_queue(struct request_queue *q)
-
block/blk-mq.c:244:34-244:56: void blk_mq_quiesce_queue_nowait(struct request_queue *q)
-
block/blk-mq.c:262:31-262:53: void blk_mq_wait_quiesce_done(struct request_queue *q)
-
block/blk-mq.c:280:27-280:49: void blk_mq_quiesce_queue(struct request_queue *q)
-
block/blk-mq.c:294:29-294:51: void blk_mq_unquiesce_queue(struct request_queue *q)
-
block/blk-mq.c:314:26-314:48: void blk_mq_wake_waiters(struct request_queue *q)
-
block/blk-mq.c:324:18-324:40: void blk_rq_init(struct request_queue *q, struct request *rq)
-
block/blk-mq.c:510:38-510:60: struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
-
block/blk-mq.c:539:43-539:65: struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-
block/blk-mq.c:1382:31-1382:53: void blk_mq_kick_requeue_list(struct request_queue *q)
-
block/blk-mq.c:1388:37-1388:59: void blk_mq_delay_kick_requeue_list(struct request_queue *q,
-
block/blk-mq.c:1414:28-1414:50: bool blk_mq_queue_inflight(struct request_queue *q)
-
block/blk-mq.c:1831:36-1831:58: static void blk_mq_release_budgets(struct request_queue *q,
-
block/blk-mq.c:2149:49-2149:71: static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
-
block/blk-mq.c:2171:27-2171:49: void blk_mq_run_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2199:33-2199:55: void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
-
block/blk-mq.c:2237:27-2237:49: bool blk_mq_queue_stopped(struct request_queue *q)
-
block/blk-mq.c:2276:28-2276:50: void blk_mq_stop_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2294:29-2294:51: void blk_mq_start_hw_queues(struct request_queue *q)
-
block/blk-mq.c:2314:37-2314:59: void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
-
block/blk-mq.c:2589:38-2589:60: static void __blk_mq_flush_plug_list(struct request_queue *q,
-
block/blk-mq.c:2703:38-2703:60: static bool blk_mq_attempt_bio_merge(struct request_queue *q,
-
block/blk-mq.c:2715:48-2715:70: static struct request *blk_mq_get_new_requests(struct request_queue *q,
-
block/blk-mq.c:2752:57-2752:79: static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-
block/blk-mq.c:3430:30-3430:52: static void blk_mq_exit_hctx(struct request_queue *q,
-
block/blk-mq.c:3457:35-3457:57: static void blk_mq_exit_hw_queues(struct request_queue *q,
-
block/blk-mq.c:3470:29-3470:51: static int blk_mq_init_hctx(struct request_queue *q,
-
block/blk-mq.c:3508:19-3508:41: blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
-
block/blk-mq.c:3572:36-3572:58: static void blk_mq_init_cpu_queues(struct request_queue *q,
-
block/blk-mq.c:3656:32-3656:54: static void blk_mq_map_swqueue(struct request_queue *q)
-
block/blk-mq.c:3763:35-3763:57: static void queue_set_hctx_shared(struct request_queue *q, bool shared)
-
block/blk-mq.c:3792:38-3792:60: static void blk_mq_del_queue_tag_set(struct request_queue *q)
-
block/blk-mq.c:3809:10-3809:32: struct request_queue *q)
-
block/blk-mq.c:3830:30-3830:52: static int blk_mq_alloc_ctxs(struct request_queue *q)
-
block/blk-mq.c:3863:21-3863:43: void blk_mq_release(struct request_queue *q)
-
block/blk-mq.c:3930:31-3930:53: struct blk_mq_tag_set *set, struct request_queue *q,
-
block/blk-mq.c:3964:7-3964:29: struct request_queue *q)
-
block/blk-mq.c:4006:37-4006:59: static void blk_mq_update_poll_flag(struct request_queue *q)
-
block/blk-mq.c:4018:3-4018:25: struct request_queue *q)
-
block/blk-mq.c:4085:24-4085:46: void blk_mq_exit_queue(struct request_queue *q)
-
block/blk-mq.c:4356:31-4356:53: int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
-
block/blk-mq.c:4424:3-4424:25: struct request_queue *q)
-
block/blk-mq.c:4458:7-4458:29: struct request_queue *q)
-
block/blk-mq.c:4470:7-4470:29: struct request_queue *q)
-
block/blk-mq.c:4568:35-4568:57: static bool blk_poll_stats_enable(struct request_queue *q)
-
block/blk-mq.c:4576:37-4576:59: static void blk_mq_poll_stats_start(struct request_queue *q)
-
block/blk-mq.c:4599:40-4599:62: static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
-
block/blk-mq.c:4631:32-4631:54: static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
-
block/blk-mq.c:4697:32-4697:54: static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-
block/blk-mq.c:4725:17-4725:39: int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
-
block/blk-mq.c:4742:30-4742:52: void blk_mq_cancel_work_sync(struct request_queue *q)
-
block/blk-mq.h:82:59-82:81: static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-
block/blk-mq.h:109:54-109:76: static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-
block/blk-mq.h:132:51-132:73: static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-
block/blk-mq.h:144:49-144:71: static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-
block/blk-mq.h:193:47-193:69: static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
-
block/blk-mq.h:200:46-200:68: static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
-
block/blk-mq.h:312:44-312:66: static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
-
block/blk-pm.c:31:26-31:48: void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-
block/blk-pm.c:61:29-61:51: int blk_pre_runtime_suspend(struct request_queue *q)
-
block/blk-pm.c:122:31-122:53: void blk_post_runtime_suspend(struct request_queue *q, int err)
-
block/blk-pm.c:152:29-152:51: void blk_pre_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:176:30-176:52: void blk_post_runtime_resume(struct request_queue *q)
-
block/blk-pm.c:199:29-199:51: void blk_set_runtime_active(struct request_queue *q)
-
block/blk-pm.h:9:54-9:76: static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
-
block/blk-rq-qos.c:295:18-295:40: void rq_qos_exit(struct request_queue *q)
-
block/blk-rq-qos.h:62:40-62:62: static inline struct rq_qos *rq_qos_id(struct request_queue *q,
-
block/blk-rq-qos.h:73:41-73:63: static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:78:43-78:65: static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
-
block/blk-rq-qos.h:89:31-89:53: static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-
block/blk-rq-qos.h:114:31-114:53: static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-
block/blk-rq-qos.h:161:35-161:57: static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:167:32-167:54: static inline void rq_qos_done(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:173:33-173:55: static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:179:35-179:57: static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:195:36-195:58: static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
-
block/blk-rq-qos.h:203:33-203:55: static inline void rq_qos_track(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:210:33-210:55: static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:219:47-219:69: static inline void rq_qos_queue_depth_changed(struct request_queue *q)
-
block/blk-settings.c:21:27-21:49: void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
-
block/blk-settings.c:97:29-97:51: void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-
block/blk-settings.c:122:31-122:53: void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-
block/blk-settings.c:161:30-161:52: void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-
block/blk-settings.c:172:36-172:58: void blk_queue_max_discard_sectors(struct request_queue *q,
-
block/blk-settings.c:185:41-185:63: void blk_queue_max_secure_erase_sectors(struct request_queue *q,
-
block/blk-settings.c:198:41-198:63: void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
-
block/blk-settings.c:210:40-210:62: void blk_queue_max_zone_append_sectors(struct request_queue *q,
-
block/blk-settings.c:241:29-241:51: void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-
block/blk-settings.c:262:37-262:59: void blk_queue_max_discard_segments(struct request_queue *q,
-
block/blk-settings.c:278:33-278:55: void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-
block/blk-settings.c:303:35-303:57: void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:332:36-332:58: void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-
block/blk-settings.c:353:39-353:61: void blk_queue_zone_write_granularity(struct request_queue *q,
-
block/blk-settings.c:377:33-377:55: void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-
block/blk-settings.c:436:23-436:45: void blk_queue_io_min(struct request_queue *q, unsigned int min)
-
block/blk-settings.c:474:23-474:45: void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-
block/blk-settings.c:720:31-720:53: void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-
block/blk-settings.c:732:33-732:55: void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:749:30-749:52: void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-
block/blk-settings.c:774:30-774:52: void blk_queue_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:794:37-794:59: void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-
block/blk-settings.c:809:26-809:48: void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-
block/blk-settings.c:824:28-824:50: void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-
block/blk-settings.c:848:43-848:65: void blk_queue_required_elevator_features(struct request_queue *q,
-
block/blk-settings.c:862:40-862:62: bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-
block/blk-stat.c:136:28-136:50: void blk_stat_add_callback(struct request_queue *q,
-
block/blk-stat.c:157:31-157:53: void blk_stat_remove_callback(struct request_queue *q,
-
block/blk-stat.c:187:34-187:56: void blk_stat_disable_accounting(struct request_queue *q)
-
block/blk-stat.c:198:33-198:55: void blk_stat_enable_accounting(struct request_queue *q)
-
block/blk-stat.c:234:29-234:51: bool blk_stats_alloc_enable(struct request_queue *q)
-
block/blk-sysfs.c:63:36-63:58: static ssize_t queue_requests_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:69:22-69:44: queue_requests_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:91:30-91:52: static ssize_t queue_ra_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:102:16-102:38: queue_ra_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:116:39-116:61: static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:123:40-123:62: static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:128:48-128:70: static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-
block/blk-sysfs.c:134:50-134:72: static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:139:44-139:66: static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:144:46-144:68: static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:149:47-149:69: static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:154:41-154:63: static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:159:34-159:56: static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:164:34-164:56: static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:169:47-169:69: static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:174:42-174:64: static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:181:39-181:61: static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:187:40-187:62: static ssize_t queue_discard_max_store(struct request_queue *q,
-
block/blk-sysfs.c:210:47-210:69: static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:215:42-215:64: static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:220:44-220:66: static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:226:50-226:72: static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-
block/blk-sysfs.c:232:43-232:65: static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:240:25-240:47: queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:265:42-265:64: static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:272:46-272:68: static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:303:1-303:1: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-
block/blk-sysfs.c:304:1-304:1: QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-
block/blk-sysfs.c:305:1-305:1: QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-
block/blk-sysfs.c:306:1-306:1: QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-
block/blk-sysfs.c:309:33-309:55: static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:321:36-321:58: static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:326:42-326:64: static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:331:44-331:66: static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:336:36-336:58: static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:342:37-342:59: static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:361:39-361:61: static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:370:25-370:47: queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
-
block/blk-sysfs.c:394:38-394:60: static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:406:39-406:61: static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:428:32-428:54: static ssize_t queue_poll_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:433:33-433:55: static ssize_t queue_poll_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:443:38-443:60: static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:448:39-448:61: static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:463:34-463:56: static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:471:35-471:57: static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:515:30-515:52: static ssize_t queue_wc_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:523:31-523:53: static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-
block/blk-sysfs.c:545:31-545:53: static ssize_t queue_fua_show(struct request_queue *q, char *page)
-
block/blk-sysfs.c:550:31-550:53: static ssize_t queue_dax_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:339:7-339:29: struct request_queue *q,
-
block/blk-throttle.c:1658:32-1658:54: static void throtl_shutdown_wq(struct request_queue *q)
-
block/blk-throttle.c:1780:29-1780:51: void blk_throtl_cancel_bios(struct request_queue *q)
-
block/blk-throttle.c:2288:21-2288:43: int blk_throtl_init(struct request_queue *q)
-
block/blk-throttle.c:2331:22-2331:44: void blk_throtl_exit(struct request_queue *q)
-
block/blk-throttle.c:2342:32-2342:54: void blk_throtl_register_queue(struct request_queue *q)
-
block/blk-throttle.c:2372:37-2372:59: ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
-
block/blk-throttle.c:2379:38-2379:60: ssize_t blk_throtl_sample_time_store(struct request_queue *q,
-
block/blk-timeout.c:23:32-23:54: bool __blk_should_fake_timeout(struct request_queue *q)
-
block/blk-wbt.c:425:21-425:43: u64 wbt_get_min_lat(struct request_queue *q)
-
block/blk-wbt.c:433:22-433:44: void wbt_set_min_lat(struct request_queue *q, u64 val)
-
block/blk-wbt.c:629:26-629:48: void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-
block/blk-wbt.c:639:25-639:47: void wbt_enable_default(struct request_queue *q)
-
block/blk-wbt.c:659:30-659:52: u64 wbt_default_latency_nsec(struct request_queue *q)
-
block/blk-wbt.c:703:26-703:48: void wbt_disable_default(struct request_queue *q)
-
block/blk-wbt.c:819:14-819:36: int wbt_init(struct request_queue *q)
-
block/blk-zoned.c:453:34-453:56: void blk_queue_free_zone_bitmaps(struct request_queue *q)
-
block/blk-zoned.c:626:36-626:58: void blk_queue_clear_zone_settings(struct request_queue *q)
-
block/blk.h:34:36-34:58: static inline void __blk_get_queue(struct request_queue *q)
-
block/blk.h:51:40-51:62: static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
-
block/blk.h:89:42-89:64: static inline bool biovec_phys_mergeable(struct request_queue *q,
-
block/blk.h:105:39-105:61: static inline bool __bvec_gap_to_prev(struct request_queue *q,
-
block/blk.h:116:37-116:59: static inline bool bvec_gap_to_prev(struct request_queue *q,
-
block/blk.h:283:34-283:56: static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
-
block/blk.h:332:36-332:58: static inline void req_set_nomerge(struct request_queue *q, struct request *req)
-
block/blk.h:344:52-344:74: static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
-
block/blk.h:375:41-375:63: static inline bool blk_queue_may_bounce(struct request_queue *q)
-
block/blk.h:382:37-382:59: static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
-
block/bsg-lib.c:28:35-28:57: static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
block/bsg-lib.c:320:23-320:45: void bsg_remove_queue(struct request_queue *q)
-
block/bsg.c:185:39-185:61: struct bsg_device *bsg_register_queue(struct request_queue *q,
-
block/elevator.c:140:43-140:65: static struct elevator_type *elevator_get(struct request_queue *q,
-
block/elevator.c:164:39-164:61: struct elevator_queue *elevator_alloc(struct request_queue *q,
-
block/elevator.c:191:20-191:42: void elevator_exit(struct request_queue *q)
-
block/elevator.c:211:21-211:43: void elv_rqhash_del(struct request_queue *q, struct request *rq)
-
block/elevator.c:218:21-218:43: void elv_rqhash_add(struct request_queue *q, struct request *rq)
-
block/elevator.c:228:28-228:50: void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
-
block/elevator.c:234:33-234:55: struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
-
block/elevator.c:308:26-308:48: enum elv_merge elv_merge(struct request_queue *q, struct request **req,
-
block/elevator.c:364:31-364:53: bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
-
block/elevator.c:402:25-402:47: void elv_merged_request(struct request_queue *q, struct request *rq,
-
block/elevator.c:416:25-416:47: void elv_merge_requests(struct request_queue *q, struct request *rq,
-
block/elevator.c:428:36-428:58: struct request *elv_latter_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:438:36-438:58: struct request *elv_former_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:495:24-495:46: int elv_register_queue(struct request_queue *q, bool uevent)
-
block/elevator.c:520:27-520:49: void elv_unregister_queue(struct request_queue *q)
-
block/elevator.c:591:24-591:46: int elevator_switch_mq(struct request_queue *q,
-
block/elevator.c:624:40-624:62: static inline bool elv_support_iosched(struct request_queue *q)
-
block/elevator.c:636:51-636:73: static struct elevator_type *elevator_get_default(struct request_queue *q)
-
block/elevator.c:652:55-652:77: static struct elevator_type *elevator_get_by_features(struct request_queue *q)
-
block/elevator.c:679:23-679:45: void elevator_init_mq(struct request_queue *q)
-
block/elevator.c:726:28-726:50: static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
-
block/elevator.c:746:30-746:52: static int __elevator_change(struct request_queue *q, const char *name)
-
block/elevator.c:778:27-778:49: ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-
block/elevator.c:793:26-793:48: ssize_t elv_iosched_show(struct request_queue *q, char *name)
-
block/elevator.c:828:39-828:61: struct request *elv_rb_former_request(struct request_queue *q,
-
block/elevator.c:840:39-840:61: struct request *elv_rb_latter_request(struct request_queue *q,
-
block/genhd.c:1336:35-1336:57: struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
-
block/kyber-iosched.c:359:56-359:78: static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
-
block/kyber-iosched.c:407:29-407:51: static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/kyber-iosched.c:569:29-569:51: static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
-
block/mq-deadline.c:169:37-169:59: static void deadline_remove_request(struct request_queue *q,
-
block/mq-deadline.c:186:31-186:53: static void dd_request_merged(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:206:32-206:54: static void dd_merged_requests(struct request_queue *q, struct request *req,
-
block/mq-deadline.c:609:26-609:48: static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-
block/mq-deadline.c:660:29-660:51: static int dd_request_merge(struct request_queue *q, struct request **rq,
-
block/mq-deadline.c:692:26-692:48: static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
-
crypto/ecc.c:1341:33-1341:57: const struct ecc_point *p, const struct ecc_point *q,
-
crypto/ecc.c:1364:22-1364:46: const u64 *u2, const struct ecc_point *q,
-
drivers/ata/libata-pata-timings.c:61:5-61:24: struct ata_timing *q, int T, int UT)
-
drivers/block/drbd/drbd_int.h:1885:17-1885:41: drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_int.h:1895:29-1895:53: drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
-
drivers/block/drbd/drbd_nl.c:1192:43-1192:65: static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-
drivers/block/drbd/drbd_nl.c:1244:60-1244:82: static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-
drivers/block/null_blk/zoned.c:61:51-61:73: int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
-
drivers/block/pktcdvd.c:910:63-910:85: static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-
drivers/block/pktcdvd.c:2305:36-2305:58: static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
-
drivers/block/rnbd/rnbd-clt.c:161:41-161:60: static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1069:7-1069:26: struct rnbd_queue *q)
-
drivers/block/rnbd/rnbd-clt.c:1332:12-1332:31: struct rnbd_queue *q,
-
drivers/block/sx8.c:664:57-664:79: static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-
drivers/char/ipmi/ipmi_msghandler.c:681:32-681:50: static void free_recv_msg_list(struct list_head *q)
-
drivers/char/ipmi/ipmi_msghandler.c:691:31-691:49: static void free_smi_msg_list(struct list_head *q)
-
drivers/clk/clk.c:2922:40-2922:58: bool clk_is_match(const struct clk *p, const struct clk *q)
-
drivers/crypto/cavium/cpt/cptpf_mbox.c:59:55-59:58: static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/cavium/cpt/cptvf_reqmanager.c:15:53-15:75: static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
-
drivers/crypto/cavium/zip/zip_mem.c:57:48-57:52: int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-
drivers/crypto/cavium/zip/zip_mem.c:76:48-76:52: void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-
drivers/crypto/hisilicon/qm.c:3252:8-3252:28: struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:3272:37-3272:57: static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:3281:31-3281:51: static int hisi_qm_uacce_mmap(struct uacce_queue *q,
-
drivers/crypto/hisilicon/qm.c:3334:38-3334:58: static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:3341:38-3341:58: static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:3346:33-3346:53: static int hisi_qm_is_q_updated(struct uacce_queue *q)
-
drivers/crypto/hisilicon/qm.c:3363:28-3363:48: static void qm_set_sqctype(struct uacce_queue *q, u16 type)
-
drivers/crypto/hisilicon/qm.c:3373:33-3373:53: static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
-
drivers/crypto/hisilicon/sec/sec_drv.c:673:47-673:53: static irqreturn_t sec_isr_handle_th(int irq, void *q)
-
drivers/crypto/hisilicon/sec/sec_drv.c:679:44-679:50: static irqreturn_t sec_isr_handle(int irq, void *q)
-
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c:135:63-135:66: static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
-
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c:58:7-58:37: struct otx_cpt_pending_queue *q,
-
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c:49:6-49:37: struct otx2_cpt_pending_queue *q,
-
drivers/cxl/core/mbox.c:429:5-429:43: struct cxl_mem_query_commands __user *q)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:229:39-229:64: static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:549:10-549:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:580:10-580:35: struct amdgpu_mes_queue *q,
-
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4606:30-4606:34: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:1124:30-1124:34: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:3019:25-3019:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:4182:25-4182:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:3447:25-3447:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:2059:25-2059:29: u32 me, u32 pipe, u32 q, u32 vm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:173:60-173:74: static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:230:63-230:77: static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:286:7-286:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:301:7-301:21: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:319:9-319:23: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:380:5-380:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:405:4-405:18: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:472:5-472:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:490:5-490:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:610:59-610:73: static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:645:5-645:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:719:5-719:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:778:5-778:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:807:59-807:73: static int update_queue(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1316:5-1316:19: struct queue *q, const uint32_t *restore_sdma_id)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1383:5-1383:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1605:65-1605:79: static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1844:5-1844:19: struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2041:6-2041:20: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2071:4-2071:24: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2091:6-2091:26: const struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:181:60-181:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c:198:5-198:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c:76:64-76:78: static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c:76:64-76:78: static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c:90:63-90:77: static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:229:60-229:74: static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c:246:4-246:18: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:49:59-49:84: struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:65:6-65:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:71:45-71:70: static void set_priority(struct cik_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:78:6-78:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:91:3-91:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:145:4-145:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:174:4-174:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:211:4-211:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:225:4-225:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:232:4-232:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:338:3-338:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:344:4-344:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:71:53-71:78: static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:78:3-78:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:91:4-91:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:159:4-159:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:294:4-294:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:308:3-308:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:326:4-326:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:78:53-78:78: static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:85:3-85:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:107:4-107:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:189:10-189:35: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:323:4-323:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:337:3-337:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:364:3-364:28: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:80:44-80:69: static void set_priority(struct v9_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:87:3-87:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:134:4-134:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:209:4-209:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:357:4-357:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:371:3-371:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:389:4-389:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:74:44-74:69: static void set_priority(struct vi_mqd *m, struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:81:6-81:31: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:94:4-94:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:172:4-172:29: struct queue_properties *q, struct mqd_update_info *minfo,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:243:4-243:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:257:4-257:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:329:4-329:29: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:342:4-342:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:350:3-350:28: struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:366:4-366:29: struct queue_properties *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c:188:3-188:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c:143:3-143:17: struct queue *q, bool is_static)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:494:26-494:40: int kfd_procfs_add_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:654:27-654:41: void kfd_procfs_del_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:181:26-181:41: struct kfd_dev *dev, struct queue **q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:547:5-547:19: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:626:7-626:21: struct queue *q,
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:28:29-28:54: void print_queue_properties(struct queue_properties *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:46:18-46:32: void print_queue(struct queue *q)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:67:16-67:31: int init_queue(struct queue **q, const struct queue_properties *properties)
-
drivers/gpu/drm/amd/amdkfd/kfd_queue.c:81:19-81:33: void uninit_queue(struct queue *q)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:596:30-596:52: static void throttle_release(struct i915_request **q, int count)
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:609:7-609:29: struct i915_request **q, int count)
-
drivers/gpu/drm/v3d/v3d_sched.c:291:54-291:69: v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
-
drivers/gpu/ipu-v3/ipu-image-convert.c:1252:5-1252:23: struct list_head *q)
-
drivers/infiniband/hw/hfi1/ipoib_tx.c:839:52-839:65: void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
-
drivers/infiniband/hw/irdma/uk.c:1461:24-1461:30: void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:371:51-371:77: static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:377:6-377:32: struct ocrdma_queue_info *q, u16 len, u16 entry_size)
-
drivers/infiniband/hw/ocrdma/ocrdma_hw.c:403:11-403:37: struct ocrdma_queue_info *q, int queue_type)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1551:32-1551:59: static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1566:30-1566:57: static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1571:39-1571:66: static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1577:33-1577:60: static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:1582:33-1582:60: static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
-
drivers/infiniband/hw/qedr/verbs.c:744:28-744:47: struct qedr_dev *dev, struct qedr_userq *q,
-
drivers/infiniband/hw/qedr/verbs.c:792:12-792:31: struct qedr_userq *q, u64 buf_addr,
-
drivers/infiniband/sw/rxe/rxe_queue.c:46:29-46:47: inline void rxe_queue_reset(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.c:111:26-111:44: static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
-
drivers/infiniband/sw/rxe/rxe_queue.c:146:22-146:40: int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
-
drivers/infiniband/sw/rxe/rxe_queue.c:192:24-192:42: void rxe_queue_cleanup(struct rxe_queue *q)
-
drivers/infiniband/sw/rxe/rxe_queue.h:89:36-89:54: static inline u32 queue_next_index(struct rxe_queue *q, int index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:94:38-94:62: static inline u32 queue_get_producer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:119:38-119:62: static inline u32 queue_get_consumer(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:144:31-144:49: static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:152:30-152:48: static inline int queue_full(struct rxe_queue *q, enum queue_type type)
-
drivers/infiniband/sw/rxe/rxe_queue.h:160:31-160:55: static inline u32 queue_count(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:169:43-169:61: static inline void queue_advance_producer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:198:43-198:61: static inline void queue_advance_consumer(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:227:41-227:59: static inline void *queue_producer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:235:41-235:59: static inline void *queue_consumer_addr(struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:243:43-243:61: static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
-
drivers/infiniband/sw/rxe/rxe_queue.h:249:41-249:65: static inline u32 queue_index_from_addr(const struct rxe_queue *q,
-
drivers/infiniband/sw/rxe/rxe_queue.h:256:32-256:50: static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
-
drivers/input/misc/hisi_powerkey.c:29:52-29:58: static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:40:54-40:60: static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
-
drivers/input/misc/hisi_powerkey.c:51:55-51:61: static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
-
drivers/input/rmi4/rmi_f54.c:283:32-283:50: static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
-
drivers/input/rmi4/rmi_f54.c:363:36-363:54: static void rmi_f54_stop_streaming(struct vb2_queue *q)
-
drivers/input/touchscreen/atmel_mxt_ts.c:2425:28-2425:46: static int mxt_queue_setup(struct vb2_queue *q,
-
drivers/input/touchscreen/sur40.c:845:30-845:48: static int sur40_queue_setup(struct vb2_queue *q,
-
drivers/md/dm-cache-policy-smq.c:269:20-269:34: static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
-
drivers/md/dm-cache-policy-smq.c:287:24-287:38: static unsigned q_size(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:295:20-295:34: static void q_push(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:305:26-305:40: static void q_push_front(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:315:27-315:41: static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:325:19-325:33: static void q_del(struct queue *q, struct entry *e)
-
drivers/md/dm-cache-policy-smq.c:335:29-335:43: static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
-
drivers/md/dm-cache-policy-smq.c:357:28-357:42: static struct entry *q_pop(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:372:40-372:54: static struct entry *__redist_pop_from(struct queue *q, unsigned level)
-
drivers/md/dm-cache-policy-smq.c:386:37-386:51: static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
-
drivers/md/dm-cache-policy-smq.c:405:27-405:41: static void q_set_targets(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:427:28-427:42: static void q_redistribute(struct queue *q)
-
drivers/md/dm-cache-policy-smq.c:470:23-470:37: static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
-
drivers/md/dm-rq.c:64:21-64:43: void dm_start_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:70:20-70:42: void dm_stop_queue(struct request_queue *q)
-
drivers/md/dm-rq.c:170:39-170:61: static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
-
drivers/md/dm-table.c:1344:38-1344:60: static void dm_update_crypto_profile(struct request_queue *q,
-
drivers/md/dm-table.c:1960:51-1960:73: int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
-
drivers/md/dm-zone.c:295:51-295:73: int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
-
drivers/md/dm-zone.c:469:33-469:55: static inline void dm_zone_lock(struct request_queue *q,
-
drivers/md/dm-zone.c:479:35-479:57: static inline void dm_zone_unlock(struct request_queue *q,
-
drivers/md/dm.c:1867:45-1867:67: static void dm_queue_destroy_crypto_profile(struct request_queue *q)
-
drivers/media/common/saa7146/saa7146_fops.c:52:47-52:70: void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:69:5-69:30: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:91:7-91:32: struct saa7146_dmaqueue *q,
-
drivers/media/common/saa7146/saa7146_fops.c:112:5-112:30: struct saa7146_dmaqueue *q, int vbi)
-
drivers/media/common/saa7146/saa7146_vbi.c:219:27-219:50: static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,enum v4l2_field field)
-
drivers/media/common/saa7146/saa7146_vbi.c:274:25-274:48: static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/common/saa7146/saa7146_vbi.c:289:26-289:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_vbi.c:301:28-301:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_video.c:1036:27-1036:50: static int buffer_prepare(struct videobuf_queue *q,
-
drivers/media/common/saa7146/saa7146_video.c:1118:25-1118:48: static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/common/saa7146/saa7146_video.c:1139:26-1139:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/saa7146/saa7146_video.c:1151:28-1151:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:379:37-379:55: static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:407:30-407:48: static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:479:28-479:46: static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:505:29-505:47: static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
-
drivers/media/common/videobuf2/videobuf2-core.c:617:24-617:42: bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
-
drivers/media/common/videobuf2/videobuf2-core.c:639:30-639:48: static bool __buffers_in_use(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:649:24-649:42: void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:659:33-659:51: static int __verify_userptr_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:672:30-672:48: static int __verify_mmap_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:685:32-685:50: static int __verify_dmabuf_ops(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:695:28-695:46: int vb2_verify_memory_type(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-core.c:741:33-741:51: static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:750:36-750:54: static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
-
drivers/media/common/videobuf2/videobuf2-core.c:759:22-759:40: int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:901:26-901:44: int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
-
drivers/media/common/videobuf2/videobuf2-core.c:1079:23-1079:41: void vb2_discard_done(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1524:26-1524:44: int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
-
drivers/media/common/videobuf2/videobuf2-core.c:1564:32-1564:50: static int vb2_start_streaming(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1616:19-1616:37: int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1776:35-1776:53: static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-
drivers/media/common/videobuf2/videobuf2-core.c:1857:30-1857:48: static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1890:30-1890:48: int vb2_wait_for_all_buffers(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:1919:20-1919:38: int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
-
drivers/media/common/videobuf2/videobuf2-core.c:1983:32-1983:50: static void __vb2_queue_cancel(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2080:23-2080:41: int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2125:22-2125:40: void vb2_queue_error(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2133:24-2133:42: int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-core.c:2161:35-2161:53: static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
-
drivers/media/common/videobuf2/videobuf2-core.c:2187:21-2187:39: int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
-
drivers/media/common/videobuf2/videobuf2-core.c:2260:14-2260:32: int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
-
drivers/media/common/videobuf2/videobuf2-core.c:2374:25-2374:43: int vb2_core_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2428:29-2428:47: void vb2_core_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2438:24-2438:42: __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
-
drivers/media/common/videobuf2/videobuf2-core.c:2587:30-2587:48: static int __vb2_init_fileio(struct vb2_queue *q, int read)
-
drivers/media/common/videobuf2/videobuf2-core.c:2705:33-2705:51: static int __vb2_cleanup_fileio(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-core.c:2729:36-2729:54: static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2890:17-2890:35: size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2897:18-2897:36: size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
-
drivers/media/common/videobuf2/videobuf2-core.c:2976:22-2976:40: int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
-
drivers/media/common/videobuf2/videobuf2-core.c:3016:21-3016:39: int vb2_thread_stop(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:344:36-344:54: static void set_buffer_cache_hints(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:366:37-366:55: static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:628:24-628:48: int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:654:18-654:36: int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:676:27-676:45: static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:695:35-695:53: static void validate_memory_flags(struct vb2_queue *q,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:711:17-711:35: int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:724:21-724:39: int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:743:21-743:39: int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:803:14-803:32: int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:824:15-824:33: int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:855:18-855:36: int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:865:19-865:37: int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:875:16-875:34: int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:882:25-882:43: int vb2_queue_init_name(struct vb2_queue *q, const char *name)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:927:20-927:38: int vb2_queue_init(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:933:24-933:42: void vb2_queue_release(struct vb2_queue *q)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:939:27-939:45: int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:953:19-953:37: __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-
drivers/media/pci/bt8xx/bttv-driver.c:1528:32-1528:55: static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv,
-
drivers/media/pci/bt8xx/bttv-driver.c:1629:14-1629:37: buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-
drivers/media/pci/bt8xx/bttv-driver.c:1642:16-1642:39: buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/pci/bt8xx/bttv-driver.c:1653:14-1653:37: buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-driver.c:1667:28-1667:51: static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-risc.c:571:15-571:38: bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf)
-
drivers/media/pci/bt8xx/bttv-vbi.c:70:29-70:52: static int vbi_buffer_setup(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:91:31-91:54: static int vbi_buffer_prepare(struct videobuf_queue *q,
-
drivers/media/pci/bt8xx/bttv-vbi.c:199:18-199:41: vbi_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/bt8xx/bttv-vbi.c:214:32-214:55: static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:34:31-34:49: static int cobalt_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cobalt/cobalt-v4l2.c:279:35-279:53: static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cobalt/cobalt-v4l2.c:388:35-388:53: static void cobalt_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:36:22-36:41: void cx18_queue_init(struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.c:44:6-44:25: struct cx18_queue *q, int to_front)
-
drivers/media/pci/cx18/cx18-queue.c:73:54-73:73: struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:60:5-60:24: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-queue.h:67:9-67:28: struct cx18_queue *q)
-
drivers/media/pci/cx18/cx18-streams.c:95:27-95:50: static void cx18_dma_free(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:103:32-103:55: static int cx18_prepare_buffer(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:181:25-181:48: static int buffer_setup(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:200:27-200:50: static int buffer_prepare(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:213:28-213:51: static void buffer_release(struct videobuf_queue *q,
-
drivers/media/pci/cx18/cx18-streams.c:223:26-223:49: static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
-
drivers/media/pci/cx23885/cx23885-417.c:1123:24-1123:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-417.c:1167:36-1167:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-417.c:1194:36-1194:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-core.c:425:7-425:32: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-core.c:1396:9-1396:34: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:88:24-88:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-dvb.c:150:36-150:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-dvb.c:161:36-161:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-vbi.c:87:5-87:30: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:114:24-114:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-vbi.c:217:36-217:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-vbi.c:228:36-228:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx23885/cx23885-video.c:89:2-89:27: struct cx23885_dmaqueue *q, u32 count)
-
drivers/media/pci/cx23885/cx23885-video.c:305:7-305:32: struct cx23885_dmaqueue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:332:24-332:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx23885/cx23885-video.c:487:36-487:54: static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx23885/cx23885-video.c:498:36-498:54: static void cx23885_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx25821/cx25821-video.c:59:8-59:33: struct cx25821_dmaqueue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:127:32-127:50: static int cx25821_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx25821/cx25821-video.c:261:36-261:54: static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx25821/cx25821-video.c:274:36-274:54: static void cx25821_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-blackbird.c:658:24-658:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-blackbird.c:702:28-702:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-blackbird.c:752:28-752:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-core.c:521:4-521:26: struct cx88_dmaqueue *q, u32 count)
-
drivers/media/pci/cx88/cx88-dvb.c:75:24-75:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-dvb.c:120:28-120:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-dvb.c:131:28-131:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:73:8-73:30: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-mpeg.c:199:5-199:27: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-mpeg.c:216:24-216:42: int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-
drivers/media/pci/cx88/cx88-vbi.c:52:5-52:27: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:99:9-99:31: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-vbi.c:115:24-115:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-vbi.c:195:28-195:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-vbi.c:206:28-206:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/cx88/cx88-video.c:350:7-350:29: struct cx88_dmaqueue *q,
-
drivers/media/pci/cx88/cx88-video.c:405:12-405:34: struct cx88_dmaqueue *q)
-
drivers/media/pci/cx88/cx88-video.c:420:24-420:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/cx88/cx88-video.c:528:28-528:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/cx88/cx88-video.c:539:28-539:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/dt3155/dt3155.c:148:35-148:53: static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
-
drivers/media/pci/dt3155/dt3155.c:176:35-176:53: static void dt3155_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:227:53-227:72: static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:239:28-239:47: static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:304:60-304:79: static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:345:51-345:70: static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:507:52-507:71: static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:586:60-586:79: static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:779:41-779:60: static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1520:54-1520:73: static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1653:55-1653:74: static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1933:59-1933:78: static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:32:22-32:41: void ivtv_queue_init(struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:40:67-40:86: void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
-
drivers/media/pci/ivtv/ivtv-queue.c:59:57-59:76: struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:335:41-335:59: static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/netup_unidvb/netup_unidvb_core.c:344:41-344:59: static void netup_unidvb_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:265:5-265:30: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:293:7-293:32: struct saa7134_dmaqueue *q,
-
drivers/media/pci/saa7134/saa7134-core.c:306:5-306:30: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:356:54-356:79: void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-core.c:1377:8-1377:33: struct saa7134_dmaqueue *q)
-
drivers/media/pci/saa7134/saa7134-ts.c:106:28-106:46: int saa7134_ts_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-vbi.c:128:24-128:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/saa7134/saa7134-video.c:937:24-937:42: static int queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:655:33-655:51: static int solo_enc_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:708:37-708:55: static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c:715:37-715:55: static void solo_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:307:29-307:47: static int solo_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:322:33-322:51: static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/solo6x10/solo6x10-v4l2.c:330:33-330:51: static void solo_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw5864/tw5864-video.c:182:31-182:49: static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/media/pci/tw5864/tw5864-video.c:427:35-427:53: static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw5864/tw5864-video.c:446:35-446:53: static void tw5864_stop_streaming(struct vb2_queue *q)
-
drivers/media/pci/tw68/tw68-video.c:358:29-358:47: static int tw68_queue_setup(struct vb2_queue *q,
-
drivers/media/pci/tw68/tw68-video.c:491:33-491:51: static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/pci/tw68/tw68-video.c:502:33-502:51: static void tw68_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2831:36-2831:54: static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/allegro-dvt/allegro-core.c:2850:36-2850:54: static void allegro_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/amphion/vpu_v4l2.c:478:36-478:54: static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/amphion/vpu_v4l2.c:509:36-509:54: static void vpu_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/aspeed/aspeed-video.c:1624:37-1624:55: static int aspeed_video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1655:41-1655:59: static int aspeed_video_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/aspeed/aspeed-video.c:1675:41-1675:59: static void aspeed_video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/chips-media/coda-common.c:1967:33-1967:51: static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/chips-media/coda-common.c:2111:33-2111:51: static void coda_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:642:33-642:51: static int mtk_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:818:41-818:59: static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:827:41-827:59: static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:389:40-389:58: static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c:411:40-411:58: static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c:784:33-784:51: int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c:794:33-794:51: void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c:848:40-848:58: static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c:928:40-928:58: static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1149:33-1149:51: static int mxc_jpeg_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1182:37-1182:55: static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1202:37-1202:55: static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1291:35-1291:59: static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1318:32-1318:56: static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
-
drivers/media/platform/nxp/imx-pxp.c:1457:32-1457:50: static int pxp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/nxp/imx-pxp.c:1466:32-1466:50: static void pxp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/camss/camss-video.c:377:30-377:48: static int video_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/camss/camss-video.c:487:34-487:52: static int video_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/camss/camss-video.c:532:34-532:52: static void video_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/helpers.c:1544:38-1544:56: void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/vdec.c:869:29-869:47: static int vdec_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/vdec.c:1126:33-1126:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/qcom/venus/vdec.c:1223:33-1223:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/qcom/venus/venc.c:968:29-968:47: static int venc_queue_setup(struct vb2_queue *q,
-
drivers/media/platform/qcom/venus/venc.c:1135:33-1135:51: static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1922:33-1922:51: static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/renesas/rcar_fdp1.c:1961:33-1961:51: static void fdp1_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rga/rga-buf.c:59:36-59:54: static void rga_buf_return_buffers(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rga/rga-buf.c:76:36-76:54: static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/rockchip/rga/rga-buf.c:91:36-91:54: static void rga_buf_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c:1781:41-1781:59: static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
-
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c:159:29-159:47: rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:56:36-56:54: static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c:78:36-78:54: static void gsc_m2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:259:28-259:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:290:28-290:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:76:46-76:64: static int isp_video_capture_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:119:46-119:64: static void isp_video_capture_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:305:28-305:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:339:28-339:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:73:28-73:46: static int start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c:80:28-80:46: static void stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2564:37-2564:55: static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c:2571:37-2571:55: static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1027:36-1027:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c:1043:36-1043:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2494:36-2494:54: static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c:2524:36-2524:54: static void s5p_mfc_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:498:34-498:52: static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c:521:34-521:52: static void bdisp_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1303:41-1303:59: static int delta_vb2_au_start_streaming(struct vb2_queue *q,
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1397:41-1397:59: static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1527:44-1527:62: static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:157:34-157:52: static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/st/stm32/dma2d/dma2d.c:166:34-166:52: static void dma2d_stop_streaming(struct vb2_queue *q)
-
drivers/media/platform/ti/vpe/vpe.c:2124:58-2124:76: static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
-
drivers/media/platform/ti/vpe/vpe.c:2177:32-2177:50: static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/platform/ti/vpe/vpe.c:2199:32-2199:50: static void vpe_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1513:33-1513:51: static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1552:36-1552:54: static int vicodec_start_streaming(struct vb2_queue *q,
-
drivers/media/test-drivers/vicodec/vicodec-core.c:1640:36-1640:54: static void vicodec_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vim2m.c:1054:34-1054:52: static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/test-drivers/vim2m.c:1069:34-1069:52: static void vim2m_stop_streaming(struct vb2_queue *q)
-
drivers/media/test-drivers/vivid/vivid-core.c:870:10-870:28: struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:39:39-39:57: static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:772:43-772:61: static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
-
drivers/media/usb/dvb-usb/cxusb-analog.c:898:43-898:61: static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/go7007/go7007-fw.c:290:70-290:74: static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
-
drivers/media/usb/go7007/go7007-v4l2.c:343:31-343:49: static int go7007_queue_setup(struct vb2_queue *q,
-
drivers/media/usb/go7007/go7007-v4l2.c:397:35-397:53: static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/media/usb/go7007/go7007-v4l2.c:428:35-428:53: static void go7007_stop_streaming(struct vb2_queue *q)
-
drivers/media/usb/gspca/topro.c:1439:50-1439:53: static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
-
drivers/media/usb/gspca/topro.c:1456:53-1456:57: static void setquality(struct gspca_dev *gspca_dev, s32 q)
-
drivers/media/usb/hdpvr/hdpvr-video.c:97:29-97:47: static int hdpvr_free_queue(struct list_head *q)
-
drivers/media/v4l2-core/v4l2-mc.c:302:34-302:52: int v4l_vb2q_enable_media_source(struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:689:9-689:27: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:702:8-702:26: struct vb2_queue *q)
-
drivers/media/v4l2-core/v4l2-mem2mem.c:730:7-730:25: struct vb2_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:55:43-55:66: struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:76:44-76:67: static int state_neither_active_nor_queued(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:88:21-88:44: int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:121:21-121:44: int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
-
drivers/media/v4l2-core/videobuf-core.c:131:31-131:54: void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:143:31-143:54: void videobuf_queue_core_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:185:28-185:51: int videobuf_queue_is_busy(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:230:28-230:51: static int __videobuf_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:263:28-263:51: void videobuf_queue_cancel(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:298:37-298:60: enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:318:29-318:52: static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
-
drivers/media/v4l2-core/videobuf-core.c:373:24-373:47: int videobuf_mmap_free(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:384:27-384:50: int __videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:428:25-428:48: int videobuf_mmap_setup(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:440:22-440:45: int videobuf_reqbufs(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:501:23-501:46: int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:528:19-528:42: int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-
drivers/media/v4l2-core/videobuf-core.c:632:43-632:66: static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-
drivers/media/v4l2-core/videobuf-core.c:675:31-675:54: static int stream_next_buffer(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:695:20-695:43: int videobuf_dqbuf(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:735:23-735:46: int videobuf_streamon(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:763:33-763:56: static int __videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:773:24-773:47: int videobuf_streamoff(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:786:39-786:62: static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:831:36-831:59: static int __videobuf_copy_to_user(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:848:35-848:58: static int __videobuf_copy_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:874:27-874:50: ssize_t videobuf_read_one(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:961:34-961:57: static int __videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:996:34-996:57: static void __videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1012:25-1012:48: int videobuf_read_start(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1024:25-1024:48: void videobuf_read_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1032:20-1032:43: void videobuf_stop(struct videobuf_queue *q)
-
drivers/media/v4l2-core/videobuf-core.c:1046:30-1046:53: ssize_t videobuf_read_stream(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1120:10-1120:33: struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-core.c:1172:26-1172:49: int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-
drivers/media/v4l2-core/videobuf-dma-contig.c:234:30-234:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:274:35-274:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:347:37-347:60: void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-contig.c:373:31-373:54: void videobuf_dma_contig_free(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:498:30-498:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:573:28-573:51: static int __videobuf_sync(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:588:35-588:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-dma-sg.c:671:29-671:52: void videobuf_queue_sg_init(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:154:30-154:53: static int __videobuf_iolock(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:209:35-209:58: static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/media/v4l2-core/videobuf-vmalloc.c:277:34-277:57: void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:31:36-31:56: static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
-
drivers/misc/habanalabs/common/hw_queue.c:83:52-83:72: void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:117:5-117:25: struct hl_hw_queue *q, int num_of_entries,
-
drivers/misc/habanalabs/common/hw_queue.c:166:6-166:26: struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:200:59-200:79: static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:802:59-802:79: static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:853:51-853:71: static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:873:51-873:71: static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:878:51-878:71: static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:883:50-883:70: static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/hw_queue.c:986:47-986:67: static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
-
drivers/misc/habanalabs/common/hw_queue.c:1034:48-1034:68: static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
-
drivers/misc/habanalabs/common/irq.c:402:40-402:54: int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
-
drivers/misc/habanalabs/common/irq.c:430:41-430:55: void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
-
drivers/misc/habanalabs/common/irq.c:437:42-437:56: void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
-
drivers/misc/habanalabs/common/irq.c:463:40-463:54: int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/habanalabs/common/irq.c:489:41-489:55: void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/habanalabs/common/irq.c:498:42-498:56: void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
-
drivers/misc/uacce/uacce.c:15:30-15:50: static int uacce_start_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:40:28-40:48: static int uacce_put_queue(struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:94:57-94:77: static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
-
drivers/misc/uacce/uacce.c:117:32-117:52: static void uacce_unbind_queue(struct uacce_queue *q)
-
drivers/misc/vmw_vmci/vmci_queue_pair.c:248:27-248:33: static void qp_free_queue(void *q, u64 size)
-
drivers/mmc/core/crypto.c:22:29-22:51: void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
-
drivers/mmc/core/queue.c:177:37-177:59: static void mmc_queue_setup_discard(struct request_queue *q,
-
drivers/net/ethernet/asix/ax88796c_main.c:244:44-244:65: ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:168:7-168:30: struct bnx2x_vf_queue *q,
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1411:7-1411:30: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:385:45-385:68: static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:390:54-390:77: static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h:398:55-398:78: static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:542:8-542:31: struct bnx2x_vf_queue *q)
-
drivers/net/ethernet/brocade/bna/bna.h:238:44-238:62: static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:508:55-508:70: static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:621:48-621:61: static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:830:47-830:62: static void refill_free_list(struct sge *sge, struct freelQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1176:12-1176:25: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1212:7-1212:20: struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1299:58-1299:71: static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1408:40-1408:59: static inline int enough_free_Tx_descs(const struct cmdQ *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:169:43-169:64: static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:174:45-174:68: static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:179:44-179:66: static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:194:11-194:34: const struct sge_rspq *q, unsigned int credits)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:238:51-238:67: static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:287:51-287:67: static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:327:7-327:23: struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:347:37-347:59: static inline int should_restart_tx(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:354:49-354:70: static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:381:48-381:63: static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:443:52-443:67: static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:486:53-486:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:506:44-506:59: static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:579:50-579:65: static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:648:27-648:44: static void t3_reset_qset(struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:677:51-677:68: static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:845:10-845:27: struct sge_rspq *q, unsigned int len,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1050:59-1050:75: static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1094:9-1094:31: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1184:8-1184:24: struct sge_txq *q, unsigned int ndesc,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1251:30-1251:46: struct sge_qset *qs, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1423:58-1423:74: static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1455:45-1455:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1478:44-1478:60: static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1635:6-1635:22: struct sge_txq *q, unsigned int pidx,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1699:44-1699:60: static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1858:36-1858:53: static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1881:8-1881:25: struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2293:7-2293:30: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2298:40-2298:64: static inline void clear_rspq_bufstate(struct sge_rspq * const q)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2589:58-2589:75: static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1599:11-1599:34: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1756:52-1756:69: static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:556:27-556:44: static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:914:23-914:40: void cxgb4_quiesce_rx(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:955:44-955:61: void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1241:32-1241:49: int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2425:28-2425:44: static void disable_txq_db(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2434:49-2434:65: static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:2520:49-2520:65: static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:57:33-57:50: static void uldrx_flush_handler(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:74:26-74:43: static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:203:9-203:30: struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:208:38-208:60: static inline unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:313:41-313:57: void free_tx_desc(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:341:31-341:53: static inline int reclaimable(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:359:62-359:78: static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:391:55-391:71: void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:438:48-438:63: static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:466:48-466:63: static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:479:53-479:68: static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:535:53-535:68: static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:837:49-837:65: void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:906:57-906:73: void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1025:52-1025:68: inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1106:5-1106:27: const struct sge_txq *q, void *pos)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1131:7-1131:29: const struct sge_txq *q, void *pos,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1232:26-1232:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1238:32-1238:48: static inline void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2108:45-2108:61: static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2653:30-2653:51: static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2749:22-2749:43: static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2907:29-2907:49: static void txq_stop_maperr(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2923:26-2923:46: static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2948:27-2948:47: static void service_ofldq(struct sge_uld_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3069:22-3069:42: static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3189:10-3189:32: const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3220:29-3220:49: static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3675:22-3675:39: int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3829:54-3829:69: static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3855:8-3855:31: const struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3866:30-3866:47: static inline void rspq_next(struct sge_rspq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3889:30-3889:47: static int process_responses(struct sge_rspq *q, int budget)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4069:30-4069:47: int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4566:44-4566:60: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4753:56-4753:72: static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4852:37-4852:53: void free_txq(struct adapter *adap, struct sge_txq *q)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4899:53-4899:74: void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:682:31-682:53: static unsigned int txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:687:26-687:46: static void eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:693:25-693:41: static void txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:58:55-58:77: static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:81:43-81:65: static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:86:37-86:53: static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h:94:38-94:58: static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:747:6-747:26: struct sge_eth_txq *q, u64 mask,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:802:6-802:26: struct sge_eth_txq *q, u32 tid,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:843:8-843:28: struct sge_eth_txq *q, u64 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:993:8-993:28: struct sge_eth_txq *q, uint32_t tx_chan)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1102:11-1102:31: struct sge_eth_txq *q, u32 tcp_seq,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1276:8-1276:28: struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1463:21-1463:41: bool tcp_push, struct sge_eth_txq *q,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1571:5-1571:25: struct sge_eth_txq *q)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1699:6-1699:26: struct sge_eth_txq *q, u32 skb_offset,
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1777:10-1777:30: struct sge_eth_txq *q, u32 tls_end_offset)
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1898:6-1898:26: struct sge_eth_txq *q)
-
drivers/net/ethernet/emulex/benet/be.h:150:37-150:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:155:37-155:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:160:38-160:60: static inline void *queue_index_node(struct be_queue_info *q, u16 index)
-
drivers/net/ethernet/emulex/benet/be.h:165:35-165:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be.h:175:35-175:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1453:50-1453:72: int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/emulex/benet/be_cmds.c:1504:52-1504:74: int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:145:55-145:77: static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
-
drivers/net/ethernet/emulex/benet/be_main.c:156:55-156:77: static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:50:25-50:44: static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:67:23-67:42: static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:98:30-98:49: static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:127:30-127:49: static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:141:26-141:45: static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:253:9-253:28: get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:293:27-293:46: static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:346:24-346:43: static void advance_cq(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:363:32-363:51: static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:477:29-477:48: static int fun_process_cqes(struct funeth_rxq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:534:31-534:50: static void fun_rxq_free_bufs(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:547:31-547:50: static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:573:32-573:51: static void fun_rxq_free_cache(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:585:21-585:40: int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:675:29-675:48: static void fun_rxq_free_sw(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:695:24-695:43: int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:763:30-763:49: static void fun_rxq_free_dev(struct funeth_rxq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:815:36-815:55: struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:55:22-55:47: static void *txq_end(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:63:32-63:57: static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:79:56-79:75: static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:121:57-121:76: static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:270:35-270:60: static unsigned int fun_txq_avail(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:276:31-276:50: static void fun_tx_check_stop(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:297:33-297:52: static bool fun_txq_may_restart(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:345:24-345:49: static u16 txq_hw_head(const struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:353:31-353:56: static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:383:29-383:48: static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:443:27-443:52: static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:454:36-454:55: static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:482:17-482:36: bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:563:27-563:46: static void fun_txq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:574:28-574:47: static void fun_xdpq_purge(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:628:29-628:48: static void fun_txq_free_sw(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:643:24-643:43: int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:694:30-694:49: static void fun_txq_free_dev(struct funeth_txq *q)
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:752:36-752:55: struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:227:38-227:63: static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
-
drivers/net/ethernet/fungible/funeth/funeth_txrx.h:233:34-233:59: static inline void fun_txq_wr_db(const struct funeth_txq *q)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:193:16-193:35: hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
-
drivers/net/ethernet/hisilicon/hns/hnae.c:237:51-237:70: static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
-
drivers/net/ethernet/hisilicon/hns/hnae.c:264:29-264:48: static void hnae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:62:50-62:69: static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:193:31-193:50: static void hns_ae_init_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:201:31-201:50: static void hns_ae_fini_queue(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:92:28-92:47: void hns_rcb_reset_ring_hw(struct hnae_queue *q)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:142:26-142:45: void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:159:25-159:44: void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:172:28-172:47: void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:183:27-183:46: void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:197:29-197:48: void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:202:20-202:39: void hns_rcb_start(struct hnae_queue *q, u32 val)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:222:29-222:48: void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:234:29-234:48: void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:441:34-441:53: static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4794:31-4794:51: static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:329:12-329:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:380:12-380:37: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:436:51-436:76: void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/intel/fm10k/fm10k_common.c:456:30-456:55: void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
-
drivers/net/ethernet/intel/fm10k/fm10k_pf.c:1134:11-1134:36: struct fm10k_hw_stats_q *q,
-
drivers/net/ethernet/marvell/octeontx2/af/common.h:47:50-47:64: static inline int qmem_alloc(struct device *dev, struct qmem **q,
-
drivers/net/ethernet/marvell/skge.c:2486:45-2486:49: static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
-
drivers/net/ethernet/marvell/skge.c:2517:47-2517:51: static void skge_qset(struct skge_port *skge, u16 q,
-
drivers/net/ethernet/marvell/sky2.c:1036:45-1036:49: static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
-
drivers/net/ethernet/marvell/sky2.c:1076:43-1076:47: static void sky2_qset(struct sky2_hw *hw, u16 q)
-
drivers/net/ethernet/marvell/sky2.c:1125:53-1125:62: static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
-
drivers/net/ethernet/marvell/sky2.c:2916:62-2916:66: static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:131:46-131:70: static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:136:41-136:65: static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:143:31-143:55: mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:149:40-149:64: mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:159:40-159:64: mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:166:39-166:63: static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:171:37-171:61: static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:236:9-236:33: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:246:13-246:37: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:256:10-256:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:263:10-263:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:272:9-272:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:278:46-278:70: static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:285:10-285:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:318:11-318:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:391:10-391:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:442:11-442:35: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:455:7-455:31: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:466:9-466:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:503:10-503:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:509:10-509:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:594:10-594:34: struct mlxsw_pci_queue *q,
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:663:38-663:62: static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:718:36-718:66: static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:724:34-724:64: static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:731:9-731:33: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:761:10-761:34: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:776:38-776:62: static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:895:5-895:29: struct mlxsw_pci_queue *q, u8 q_num)
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:956:6-956:30: struct mlxsw_pci_queue *q)
-
drivers/net/ethernet/netronome/nfp/flower/cmsg.h:687:15-687:18: u8 vnic, u8 q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:780:39-780:51: static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:791:39-791:51: static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:796:33-796:45: static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:820:39-820:51: static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/netronome/nfp/nfp_net.h:831:39-831:51: static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:595:41-595:61: void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:628:4-628:24: struct ionic_queue *q, unsigned int index, const char *name,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:656:18-656:38: void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:668:21-668:41: void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:680:19-680:39: void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:702:31-702:51: static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:713:22-713:42: void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:283:48-283:68: static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_dev.h:295:38-295:58: static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:271:29-271:49: static void ionic_adminq_cb(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:13:35-13:55: static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:19:35-19:55: static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:25:45-25:65: static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:30:32-30:52: static int ionic_rx_page_alloc(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:71:32-71:52: static void ionic_rx_page_free(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:91:34-91:54: static bool ionic_rx_buf_recycle(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:114:39-114:59: static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:174:43-174:63: static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:215:28-215:48: static void ionic_rx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:344:20-344:40: void ionic_rx_fill(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:420:21-420:41: void ionic_rx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:588:39-588:59: static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:605:37-605:57: static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:622:29-622:49: static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:671:38-671:58: static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:691:28-691:48: static void ionic_tx_clean(struct ionic_queue *q,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:793:21-793:41: void ionic_tx_empty(struct ionic_queue *q)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:862:31-862:51: static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:895:25-895:45: static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1018:32-1018:52: static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1055:35-1055:55: static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1089:32-1089:52: static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1106:21-1106:41: static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1134:34-1134:54: static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1160:32-1160:52: static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-
drivers/net/ethernet/renesas/ravb_main.c:195:50-195:54: static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
-
drivers/net/ethernet/renesas/ravb_main.c:238:62-238:66: static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:263:61-263:65: static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:290:53-290:57: static void ravb_ring_free(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:329:64-329:68: static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:360:63-360:67: static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:391:55-391:59: static void ravb_ring_format(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:433:64-433:68: static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:446:63-446:67: static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:460:52-460:56: static int ravb_ring_init(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:753:64-753:68: static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:882:63-882:67: static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:997:58-997:62: static bool ravb_rx(struct net_device *ndev, int *quota, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1115:59-1115:63: static bool ravb_queue_interrupt(struct net_device *ndev, int q)
-
drivers/net/ethernet/renesas/ravb_main.c:1246:62-1246:66: static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
-
drivers/net/ethernet/sfc/ptp.c:842:38-842:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/ptp.c:1233:57-1233:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:835:38-835:59: static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
-
drivers/net/ethernet/sfc/siena/ptp.c:1226:57-1226:78: static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
-
drivers/net/ethernet/via/via-velocity.c:1759:9-1759:13: int q, int n)
-
drivers/net/hyperv/netvsc_trace.h:65:1-65:1: DEFINE_EVENT(rndis_msg_class, rndis_send,
-
drivers/net/hyperv/netvsc_trace.h:71:1-71:1: DEFINE_EVENT(rndis_msg_class, rndis_recv,
-
drivers/net/tap.c:33:48-33:66: static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:39:29-39:47: static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:49:29-49:47: static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-
drivers/net/tap.c:80:41-80:59: static inline bool tap_is_little_endian(struct tap_queue *q)
-
drivers/net/tap.c:86:32-86:50: static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-
drivers/net/tap.c:91:39-91:57: static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-
drivers/net/tap.c:145:8-145:26: struct tap_queue *q)
-
drivers/net/tap.c:166:5-166:23: struct tap_queue *q)
-
drivers/net/tap.c:187:30-187:48: static int tap_disable_queue(struct tap_queue *q)
-
drivers/net/tap.c:222:27-222:45: static void tap_put_queue(struct tap_queue *q)
-
drivers/net/tap.c:630:29-630:47: static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
-
drivers/net/tap.c:777:29-777:47: static ssize_t tap_put_user(struct tap_queue *q,
-
drivers/net/tap.c:835:28-835:46: static ssize_t tap_do_read(struct tap_queue *q,
-
drivers/net/tap.c:897:40-897:58: static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
-
drivers/net/tap.c:935:24-935:42: static int set_offload(struct tap_queue *q, unsigned long arg)
-
drivers/net/tap.c:1147:29-1147:47: static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
-
drivers/net/usb/catc.c:572:48-572:67: static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
-
drivers/net/usb/lan78xx.c:2493:49-2493:70: static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:720:45-720:66: static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
-
drivers/net/usb/usbnet.c:779:34-779:55: static void wait_skb_queue_empty(struct sk_buff_head *q)
-
drivers/net/virtio_net.c:1583:62-1583:66: static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-
drivers/net/wireless/ath/ath5k/trace.h:39:1-39:1: TRACE_EVENT(ath5k_tx,
-
drivers/net/wireless/ath/ath5k/trace.h:65:1-65:1: TRACE_EVENT(ath5k_tx_complete,
-
drivers/net/wireless/ath/ath6kl/txrx.c:845:34-845:55: static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
-
drivers/net/wireless/ath/ath9k/mac.c:46:42-46:46: u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:52:43-52:47: void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
-
drivers/net/wireless/ath/ath9k/mac.c:58:42-58:46: void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:65:46-65:50: u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:170:49-170:53: bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:196:48-196:52: bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:261:48-261:52: bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
-
drivers/net/wireless/ath/ath9k/mac.c:337:64-337:68: static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:346:49-346:53: bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/ath/ath9k/mac.c:367:47-367:51: bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
-
drivers/net/wireless/broadcom/b43/pio.c:24:28-24:52: static u16 generate_cookie(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:178:39-178:63: static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:192:37-192:61: static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:201:37-201:61: static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:317:33-317:57: static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:370:33-370:57: static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:440:25-440:49: static int pio_tx_frame(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.c:596:26-596:50: static bool pio_rx_frame(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:762:17-762:41: void b43_pio_rx(struct b43_pio_rxqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:777:38-777:62: static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.c:790:37-790:61: static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
-
drivers/net/wireless/broadcom/b43/pio.h:109:36-109:60: static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:114:36-114:60: static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:119:38-119:62: static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:125:38-125:62: static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:132:36-132:60: static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:137:36-137:60: static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
-
drivers/net/wireless/broadcom/b43/pio.h:142:38-142:62: static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/b43/pio.h:148:38-148:62: static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c:621:61-621:74: static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c:2754:33-2754:46: static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:402:8-402:26: struct list_head *q, int *counter)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:421:6-421:24: struct list_head *q, struct brcmf_usbreq *req,
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:433:20-433:38: brcmf_usbdev_qinit(struct list_head *q, int qsize)
-
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:467:30-467:48: static void brcmf_usb_free_q(struct list_head *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4348:9-4348:34: struct ipw2100_bd_queue *q, int entries)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4369:54-4369:79: static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4386:5-4386:30: struct ipw2100_bd_queue *q, u32 base, u32 size,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3696:31-3696:58: static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3708:38-3708:63: static inline int ipw_tx_queue_space(const struct clx2_queue *q)
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3738:51-3738:70: static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3764:9-3764:31: struct clx2_tx_queue *q,
-
drivers/net/wireless/intel/iwlegacy/common.c:2535:19-2535:45: il_rx_queue_space(const struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2552:50-2552:70: il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2905:16-2905:39: il_queue_space(const struct il_queue *q)
-
drivers/net/wireless/intel/iwlegacy/common.c:2927:35-2927:52: il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
-
drivers/net/wireless/intel/iwlegacy/common.h:848:15-848:38: il_queue_used(const struct il_queue *q, int i)
-
drivers/net/wireless/intel/iwlegacy/common.h:859:16-859:33: il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:475:59-475:63: static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:677:44-677:66: int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.c:899:27-899:43: static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:22:41-22:63: static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-
drivers/net/wireless/intel/iwlwifi/queue/tx.h:92:33-92:55: static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-
drivers/net/wireless/mediatek/mt76/dma.c:116:41-116:60: mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:125:44-125:63: mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:142:40-142:59: mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:203:47-203:66: mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:227:43-227:62: mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:234:43-234:62: mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
-
drivers/net/wireless/mediatek/mt76/dma.c:274:40-274:59: mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-
drivers/net/wireless/mediatek/mt76/dma.c:300:40-300:59: mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
-
drivers/net/wireless/mediatek/mt76/dma.c:321:49-321:68: mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:351:45-351:64: mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:445:40-445:59: mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:487:42-487:61: mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:531:44-531:63: mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/dma.c:566:43-566:62: mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/dma.c:617:41-617:60: mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-
drivers/net/wireless/mediatek/mt76/dma.c:644:43-644:62: mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:672:57-672:74: static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:708:57-708:74: static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mac80211.c:731:36-731:53: void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
-
drivers/net/wireless/mediatek/mt76/mac80211.c:1270:50-1270:67: void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/core.c:6:53-6:70: void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:71:49-71:66: void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7603/dma.c:111:46-111:65: mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mac.c:1702:49-1702:66: void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c:83:48-83:65: mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:107:48-107:67: mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:241:54-241:71: void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c:35:50-35:67: void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:1788:49-1788:66: void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c:545:9-545:26: enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:1206:49-1206:66: void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/mt7921/pci.c:28:48-28:65: mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-
drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c:245:50-245:67: void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:369:25-369:44: mt76s_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:385:46-385:65: mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:431:57-431:76: static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio.c:519:42-519:61: mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:550:46-550:65: mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/sdio.c:580:49-580:68: static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:230:53-230:72: static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:430:18-430:37: mt76_txq_stopped(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/tx.c:437:43-437:62: mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/tx.c:685:51-685:70: void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:321:40-321:59: mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-
drivers/net/wireless/mediatek/mt76/usb.c:356:39-356:58: mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:392:42-392:61: mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:440:25-440:44: mt76u_get_next_rx_entry(struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:597:46-597:65: mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:687:43-687:62: mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt76/usb.c:847:42-847:61: mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-
drivers/net/wireless/mediatek/mt76/usb.c:881:49-881:68: static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:460:35-460:60: static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/dma.c:484:7-484:32: struct mt7601u_tx_queue *q)
-
drivers/net/wireless/mediatek/mt7601u/tx.c:21:17-21:20: static u8 q2hwq(u8 q)
-
drivers/net/wireless/st/cw1200/debug.c:70:10-70:31: struct cw1200_queue *q)
-
drivers/net/wireless/ti/wlcore/tx.c:508:33-508:36: struct wl1271_link *lnk, u8 q)
-
drivers/nvme/host/apple.c:208:54-208:79: static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:216:44-216:69: static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:273:31-273:56: static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
-
drivers/nvme/host/apple.c:283:35-283:60: static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:569:43-569:68: static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:577:49-577:74: apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:585:42-585:67: static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:607:46-607:71: static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:619:32-619:57: static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
-
drivers/nvme/host/apple.c:642:34-642:59: static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
-
drivers/nvme/host/apple.c:958:35-958:60: static void apple_nvme_init_queue(struct apple_nvme_queue *q)
-
drivers/nvme/host/apple.c:1279:7-1279:32: struct apple_nvme_queue *q)
-
drivers/nvme/host/core.c:991:28-991:50: int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1028:26-1028:48: int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-
drivers/nvme/host/core.c:1791:3-1791:25: struct request_queue *q)
-
drivers/nvme/host/core.c:2588:6-2588:42: const struct nvme_core_quirk_entry *q)
-
drivers/nvme/host/ioctl.c:67:48-67:70: static struct request *nvme_alloc_user_request(struct request_queue *q,
-
drivers/nvme/host/ioctl.c:134:33-134:55: static int nvme_submit_user_cmd(struct request_queue *q,
-
drivers/nvme/target/fc.c:2116:22-2116:49: queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
-
drivers/pcmcia/cistpl.c:761:37-761:45: static int parse_strings(u_char *p, u_char *q, int max,
-
drivers/pcmcia/cistpl.c:906:39-906:47: static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
-
drivers/pcmcia/cistpl.c:943:40-943:48: static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
-
drivers/pcmcia/cistpl.c:978:36-978:44: static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-
drivers/pcmcia/cistpl.c:1022:37-1022:45: static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
-
drivers/pcmcia/cistpl.c:1063:37-1063:45: static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
-
drivers/platform/chrome/wilco_ec/event.c:119:38-119:61: static inline bool event_queue_empty(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:125:37-125:60: static inline bool event_queue_full(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:131:41-131:64: static struct ec_event *event_queue_pop(struct ec_event_queue *q)
-
drivers/platform/chrome/wilco_ec/event.c:149:42-149:65: static struct ec_event *event_queue_push(struct ec_event_queue *q,
-
drivers/platform/chrome/wilco_ec/event.c:162:30-162:53: static void event_queue_free(struct ec_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:644:35-644:60: static void ssam_event_queue_push(struct ssam_event_queue *q,
-
drivers/platform/surface/aggregator/controller.c:659:53-659:78: static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
-
drivers/platform/surface/aggregator/controller.c:676:39-676:64: static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
-
drivers/ptp/ptp_clock.c:37:30-37:60: static inline int queue_free(struct timestamp_event_queue *q)
-
drivers/ptp/ptp_private.h:79:29-79:59: static inline int queue_cnt(struct timestamp_event_queue *q)
-
drivers/scsi/aacraid/comminit.c:259:50-259:69: static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
-
drivers/scsi/aacraid/commsup.c:800:44-800:63: int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
-
drivers/scsi/aacraid/commsup.c:832:46-832:64: void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
-
drivers/scsi/aacraid/dpcsup.c:39:34-39:53: unsigned int aac_response_normal(struct aac_queue * q)
-
drivers/scsi/aacraid/dpcsup.c:158:33-158:51: unsigned int aac_command_normal(struct aac_queue *q)
-
drivers/scsi/be2iscsi/be.h:51:37-51:59: static inline void *queue_head_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:56:35-56:57: static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
-
drivers/scsi/be2iscsi/be.h:61:37-61:59: static inline void *queue_tail_node(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:66:35-66:57: static inline void queue_head_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be.h:71:35-71:57: static inline void queue_tail_inc(struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_cmds.c:900:54-900:76: int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:2979:26-2979:48: static int be_fill_queue(struct be_queue_info *q,
-
drivers/scsi/be2iscsi/be_main.c:3304:53-3304:75: static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
-
drivers/scsi/be2iscsi/be_main.c:3314:53-3314:75: static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
-
drivers/scsi/bfa/bfa_cs.h:157:20-157:38: bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
-
drivers/scsi/csiostor/csio_scsi.c:1159:48-1159:66: csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
-
drivers/scsi/csiostor/csio_scsi.c:1233:46-1233:64: csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
-
drivers/scsi/csiostor/csio_wr.c:1000:24-1000:39: csio_wr_avail_qcredits(struct csio_q *q)
-
drivers/scsi/csiostor/csio_wr.c:1042:40-1042:55: csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/csiostor/csio_wr.c:1112:18-1112:33: csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
-
drivers/scsi/csiostor/csio_wr.c:1129:40-1129:55: csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:488:40-488:59: __sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:499:37-499:56: __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
-
drivers/scsi/elx/libefc_sli/sli4.c:545:36-545:55: sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:672:39-672:58: __sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
-
drivers/scsi/elx/libefc_sli/sli4.c:764:3-764:22: struct sli4_queue *q, u32 n_entries,
-
drivers/scsi/elx/libefc_sli/sli4.c:995:35-995:54: sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
-
drivers/scsi/elx/libefc_sli/sli4.c:1067:37-1067:56: sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1087:34-1087:53: sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
-
drivers/scsi/elx/libefc_sli/sli4.c:1125:33-1125:52: sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1147:33-1147:52: sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1168:33-1168:52: sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1197:32-1197:51: sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1239:32-1239:51: sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:1283:32-1283:51: sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
-
drivers/scsi/elx/libefc_sli/sli4.c:3632:11-3632:30: struct sli4_queue *q, int num_q, u32 shift,
-
drivers/scsi/hpsa.c:993:53-993:56: static inline u32 next_command(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.c:6937:70-6937:73: static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:489:68-489:71: static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/hpsa.h:527:2-527:29: __attribute__((unused)) u8 q)
-
drivers/scsi/hpsa.h:590:71-590:74: static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
-
drivers/scsi/libiscsi.c:2663:17-2663:36: iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
-
drivers/scsi/libiscsi.c:2703:22-2703:41: void iscsi_pool_free(struct iscsi_pool *q)
-
drivers/scsi/lpfc/lpfc_attr.c:1359:41-1359:59: lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
-
drivers/scsi/lpfc/lpfc_debugfs.c:4174:28-4174:47: lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
-
drivers/scsi/lpfc/lpfc_debugfs.h:341:20-341:39: lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
-
drivers/scsi/lpfc/lpfc_debugfs.h:389:19-389:38: lpfc_debug_dump_q(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:264:18-264:37: lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
-
drivers/scsi/lpfc/lpfc_sli.c:359:22-359:41: lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
-
drivers/scsi/lpfc/lpfc_sli.c:381:18-381:37: lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
-
drivers/scsi/lpfc/lpfc_sli.c:420:22-420:41: lpfc_sli4_mq_release(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:442:18-442:37: lpfc_sli4_eq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:474:23-474:42: lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:493:27-493:46: lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:514:46-514:65: lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:552:50-552:69: lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:683:18-683:37: lpfc_sli4_cq_get(struct lpfc_queue *q)
-
drivers/scsi/lpfc/lpfc_sli.c:734:46-734:65: lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli.c:767:50-767:69: lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
-
drivers/scsi/lpfc/lpfc_sli4.h:1174:34-1174:53: static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
-
drivers/scsi/scsi_bsg.c:12:30-12:52: static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
-
drivers/scsi/scsi_dh.c:251:22-251:44: int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-
drivers/scsi/scsi_dh.c:298:24-298:46: int scsi_dh_set_params(struct request_queue *q, const char *params)
-
drivers/scsi/scsi_dh.c:320:20-320:42: int scsi_dh_attach(struct request_queue *q, const char *name)
-
drivers/scsi/scsi_dh.c:359:43-359:65: const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-
drivers/scsi/scsi_ioctl.c:213:29-213:51: static int sg_emulated_host(struct request_queue *q, int __user *p)
-
drivers/scsi/scsi_ioctl.c:517:26-517:48: static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
-
drivers/scsi/scsi_lib.c:307:29-307:51: static void scsi_kick_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:452:28-452:50: static void scsi_run_queue(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:663:11-663:33: struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1128:36-1128:58: struct request *scsi_alloc_request(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1245:40-1245:62: static inline int scsi_dev_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1334:41-1334:63: static inline int scsi_host_queue_ready(struct request_queue *q,
-
drivers/scsi/scsi_lib.c:1394:30-1394:52: static bool scsi_mq_lld_busy(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1644:32-1644:54: static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
-
drivers/scsi/scsi_lib.c:1651:31-1651:53: static int scsi_mq_get_budget(struct request_queue *q)
-
drivers/scsi/scsi_lib.c:1869:49-1869:71: void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
-
drivers/scsi/scsi_lib.c:2003:44-2003:66: struct scsi_device *scsi_device_from_queue(struct request_queue *q)
-
drivers/scsi/scsi_transport_fc.c:4340:15-4340:37: fc_bsg_remove(struct request_queue *q)
-
drivers/scsi/sg.c:850:30-850:52: static int max_sectors_bytes(struct request_queue *q)
-
drivers/spi/spi-fsl-qspi.c:278:37-278:54: static inline int needs_swap_endian(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:283:34-283:51: static inline int needs_4x_clock(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:288:37-288:54: static inline int needs_fill_txfifo(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:293:42-293:59: static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:298:42-298:59: static inline int needs_amba_base_offset(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:303:37-303:54: static inline int needs_tdh_setting(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:312:40-312:57: static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-
drivers/spi/spi-fsl-qspi.c:324:25-324:42: static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:332:23-332:40: static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-
drivers/spi/spi-fsl-qspi.c:356:36-356:53: static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
-
drivers/spi/spi-fsl-qspi.c:416:34-416:51: static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:472:37-472:54: static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:492:41-492:58: static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:508:33-508:50: static void fsl_qspi_invalidate(struct fsl_qspi *q)
-
drivers/spi/spi-fsl-qspi.c:526:33-526:50: static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
-
drivers/spi/spi-fsl-qspi.c:552:31-552:48: static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:559:34-559:51: static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:584:34-584:51: static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
-
drivers/spi/spi-fsl-qspi.c:605:27-605:44: static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
-
drivers/spi/spi-fsl-qspi.c:630:37-630:54: static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
-
drivers/spi/spi-fsl-qspi.c:721:35-721:52: static int fsl_qspi_default_setup(struct fsl_qspi *q)
-
drivers/staging/fieldbus/anybuss/host.c:324:28-324:42: ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd)
-
drivers/staging/fieldbus/anybuss/host.c:336:36-336:50: ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:353:41-353:55: ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock,
-
drivers/staging/fieldbus/anybuss/host.c:862:48-862:62: static void process_q(struct anybuss_host *cd, struct kfifo *q)
-
drivers/staging/fieldbus/anybuss/host.c:1226:44-1226:58: static int taskq_alloc(struct device *dev, struct kfifo *q)
-
drivers/staging/media/atomisp/pci/atomisp_fops.c:1105:34-1105:57: int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
-
drivers/staging/media/atomisp/pci/atomisp_ioctl.c:1060:41-1060:64: static void atomisp_videobuf_free_queue(struct videobuf_queue *q)
-
drivers/staging/media/hantro/hantro_v4l2.c:829:32-829:50: static bool hantro_vq_is_coded(struct vb2_queue *q)
-
drivers/staging/media/hantro/hantro_v4l2.c:836:35-836:53: static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/hantro/hantro_v4l2.c:879:20-879:38: hantro_return_bufs(struct vb2_queue *q,
-
drivers/staging/media/hantro/hantro_v4l2.c:896:35-896:53: static void hantro_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/imx/imx-media-csc-scaler.c:501:43-501:61: static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
-
drivers/staging/media/imx/imx-media-csc-scaler.c:550:43-550:61: static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/ipu3/ipu3-css.c:168:36-168:59: static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
-
drivers/staging/media/meson/vdec/vdec.c:164:33-164:51: static void process_num_buffers(struct vb2_queue *q,
-
drivers/staging/media/meson/vdec/vdec.c:189:29-189:47: static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
-
drivers/staging/media/meson/vdec/vdec.c:280:33-280:51: static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/meson/vdec/vdec.c:395:33-395:51: static void vdec_stop_streaming(struct vb2_queue *q)
-
drivers/staging/media/rkvdec/rkvdec.c:549:35-549:53: static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
-
drivers/staging/media/rkvdec/rkvdec.c:592:35-592:53: static void rkvdec_stop_streaming(struct vb2_queue *q)
-
drivers/target/target_core_iblock.c:232:2-232:24: struct request_queue *q)
-
drivers/ufs/core/ufshcd-crypto.c:236:50-236:72: void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
-
drivers/usb/musb/musb_host.h:46:40-46:58: static inline struct musb_qh *first_qh(struct list_head *q)
-
drivers/usb/serial/digi_acceleport.c:343:2-343:21: wait_queue_head_t *q, long timeout,
-
fs/cifs/dir.c:792:54-792:67: static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
-
fs/erofs/zdata.c:1203:10-1203:44: struct z_erofs_decompressqueue *q[],
-
fs/ext2/inode.c:1007:41-1007:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/ext2/inode.c:1108:67-1108:75: static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
-
fs/ext2/inode.c:1148:64-1148:72: static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
-
fs/ext4/indirect.c:746:41-746:49: static inline int all_zeroes(__le32 *p, __le32 *q)
-
fs/jffs2/compr_rubin.c:164:4-164:18: unsigned long q)
-
fs/minix/itree_common.c:215:42-215:51: static inline int all_zeroes(block_t *p, block_t *q)
-
fs/minix/itree_common.c:263:63-263:72: static inline void free_data(struct inode *inode, block_t *p, block_t *q)
-
fs/minix/itree_common.c:276:60-276:69: static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
-
fs/sysv/itree.c:269:46-269:59: static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:326:67-326:80: static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
-
fs/sysv/itree.c:338:64-338:77: static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
-
fs/xfs/xfs_trans_dquot.c:278:2-278:20: struct xfs_dqtrx *q)
-
include/crypto/b128ops.h:64:53-64:65: static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
-
include/crypto/b128ops.h:70:56-70:69: static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-
include/crypto/b128ops.h:75:56-75:69: static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
-
include/linux/blk-integrity.h:63:40-63:62: blk_integrity_queue_supports_integrity(struct request_queue *q)
-
include/linux/blk-integrity.h:68:53-68:75: static inline void blk_queue_max_integrity_segments(struct request_queue *q,
-
include/linux/blk-integrity.h:75:30-75:58: queue_max_integrity_segments(const struct request_queue *q)
-
include/linux/blk-mq.h:882:44-882:66: static inline bool blk_should_fake_timeout(struct request_queue *q)
-
include/linux/blk-mq.h:1112:33-1112:55: static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
include/linux/blkdev.h:630:32-630:54: static inline bool queue_is_mq(struct request_queue *q)
-
include/linux/blkdev.h:636:48-636:70: static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-
include/linux/blkdev.h:648:23-648:45: blk_queue_zoned_model(struct request_queue *q)
-
include/linux/blkdev.h:655:39-655:61: static inline bool blk_queue_is_zoned(struct request_queue *q)
-
include/linux/blkdev.h:666:47-666:69: static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
-
include/linux/blkdev.h:672:47-672:69: static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-
include/linux/blkdev.h:677:46-677:68: static inline unsigned int blk_queue_zone_no(struct request_queue *q,
-
include/linux/blkdev.h:685:42-685:64: static inline bool blk_queue_zone_is_seq(struct request_queue *q,
-
include/linux/blkdev.h:695:45-695:67: static inline void blk_queue_max_open_zones(struct request_queue *q,
-
include/linux/blkdev.h:701:49-701:77: static inline unsigned int queue_max_open_zones(const struct request_queue *q)
-
include/linux/blkdev.h:706:47-706:69: static inline void blk_queue_max_active_zones(struct request_queue *q,
-
include/linux/blkdev.h:712:51-712:79: static inline unsigned int queue_max_active_zones(const struct request_queue *q)
-
include/linux/blkdev.h:741:44-741:66: static inline unsigned int blk_queue_depth(struct request_queue *q)
-
include/linux/blkdev.h:919:54-919:76: static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-
include/linux/blkdev.h:936:48-936:70: static inline unsigned int blk_max_size_offset(struct request_queue *q,
-
include/linux/blkdev.h:1155:52-1155:80: static inline unsigned long queue_segment_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1160:49-1160:77: static inline unsigned long queue_virt_boundary(const struct request_queue *q)
-
include/linux/blkdev.h:1165:46-1165:74: static inline unsigned int queue_max_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1170:44-1170:66: static inline unsigned int queue_max_bytes(struct request_queue *q)
-
include/linux/blkdev.h:1175:49-1175:77: static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1180:49-1180:77: static inline unsigned short queue_max_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1185:57-1185:85: static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
-
include/linux/blkdev.h:1190:51-1190:79: static inline unsigned int queue_max_segment_size(const struct request_queue *q)
-
include/linux/blkdev.h:1195:58-1195:86: static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
-
include/linux/blkdev.h:1209:49-1209:77: static inline unsigned queue_logical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1224:54-1224:82: static inline unsigned int queue_physical_block_size(const struct request_queue *q)
-
include/linux/blkdev.h:1234:41-1234:69: static inline unsigned int queue_io_min(const struct request_queue *q)
-
include/linux/blkdev.h:1244:41-1244:69: static inline unsigned int queue_io_opt(const struct request_queue *q)
-
include/linux/blkdev.h:1255:30-1255:58: queue_zone_write_granularity(const struct request_queue *q)
-
include/linux/blkdev.h:1363:39-1363:67: static inline int queue_dma_alignment(const struct request_queue *q)
-
include/linux/blkdev.h:1368:34-1368:56: static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
-
include/linux/blktrace_api.h:62:51-62:73: static inline bool blk_trace_note_message_enabled(struct request_queue *q)
-
include/linux/fortify-string.h:81:35-81:47: char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
-
include/linux/fortify-string.h:93:34-93:46: char *strcat(char * const POS p, const char *q)
-
include/linux/fortify-string.h:150:53-150:76: __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:177:54-177:77: __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
-
include/linux/fortify-string.h:224:35-224:58: char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
-
include/linux/fortify-string.h:407:39-407:63: int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
-
include/linux/fortify-string.h:461:34-461:57: char *strcpy(char * const POS p, const char * const POS q)
-
include/linux/mlx4/qp.h:496:29-496:33: static inline u16 folded_qp(u32 q)
-
include/linux/netdevice.h:648:47-648:74: static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
-
include/linux/netdevice.h:657:49-657:70: static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
-
include/linux/netdevice.h:3452:42-3452:63: static inline void netdev_tx_reset_queue(struct netdev_queue *q)
-
include/linux/sunrpc/sched.h:273:38-273:67: static inline const char * rpc_qname(const struct rpc_wait_queue *q)
-
include/linux/sunrpc/sched.h:278:46-278:69: static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
-
include/media/videobuf-core.h:162:40-162:63: static inline void videobuf_queue_lock(struct videobuf_queue *q)
-
include/media/videobuf-core.h:168:42-168:65: static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-
include/media/videobuf2-core.h:655:49-655:67: static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1105:37-1105:55: static inline bool vb2_is_streaming(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1123:41-1123:59: static inline bool vb2_fileio_is_active(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1134:32-1134:50: static inline bool vb2_is_busy(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1143:38-1143:56: static inline void *vb2_get_drv_priv(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1201:47-1201:65: static inline bool vb2_start_streaming_called(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1210:51-1210:69: static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
-
include/media/videobuf2-core.h:1225:49-1225:67: static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
-
include/media/videobuf2-v4l2.h:323:38-323:56: static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
-
include/net/inet_frag.h:141:34-141:58: static inline void inet_frag_put(struct inet_frag_queue *q)
-
include/net/ipv6_frag.h:32:33-32:57: static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
-
include/net/pkt_cls.h:156:19-156:33: __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
-
include/net/pkt_cls.h:182:21-182:35: __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
-
include/net/pkt_sched.h:23:32-23:46: static inline void *qdisc_priv(struct Qdisc *q)
-
include/net/pkt_sched.h:123:30-123:44: static inline void qdisc_run(struct Qdisc *q)
-
include/net/pkt_sched.h:139:37-139:51: static inline struct net *qdisc_net(struct Qdisc *q)
-
include/net/sch_generic.h:169:42-169:62: static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-
include/net/sch_generic.h:507:30-507:50: static inline int qdisc_qlen(const struct Qdisc *q)
-
include/net/sch_generic.h:512:34-512:54: static inline int qdisc_qlen_sum(const struct Qdisc *q)
-
include/net/sch_generic.h:586:34-586:48: static inline void sch_tree_lock(struct Qdisc *q)
-
include/net/sch_generic.h:594:36-594:50: static inline void sch_tree_unlock(struct Qdisc *q)
-
include/net/sctp/structs.h:1134:35-1134:53: static inline void sctp_outq_cork(struct sctp_outq *q)
-
include/trace/events/block.h:256:1-256:1: TRACE_EVENT(block_bio_complete,
-
include/trace/events/block.h:379:1-379:1: TRACE_EVENT(block_plug,
-
include/trace/events/block.h:424:1-424:1: DEFINE_EVENT(block_unplug, block_unplug,
-
include/trace/events/qdisc.h:77:1-77:1: TRACE_EVENT(qdisc_reset,
-
include/trace/events/qdisc.h:102:1-102:1: TRACE_EVENT(qdisc_destroy,
-
include/trace/events/sunrpc.h:431:1-431:1: DEFINE_RPC_QUEUED_EVENT(sleep);
-
include/trace/events/sunrpc.h:432:1-432:1: DEFINE_RPC_QUEUED_EVENT(wakeup);
-
include/trace/events/v4l2.h:181:1-181:1: DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
-
include/trace/events/v4l2.h:245:1-245:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
-
include/trace/events/v4l2.h:250:1-250:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
-
include/trace/events/v4l2.h:255:1-255:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
-
include/trace/events/v4l2.h:260:1-260:1: DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
-
include/trace/events/vb2.h:11:1-11:1: DECLARE_EVENT_CLASS(vb2_event_class,
-
include/trace/events/vb2.h:46:1-46:1: DEFINE_EVENT(vb2_event_class, vb2_buf_done,
-
include/trace/events/vb2.h:51:1-51:1: DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
-
include/trace/events/vb2.h:56:1-56:1: DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
-
include/trace/events/vb2.h:61:1-61:1: DEFINE_EVENT(vb2_event_class, vb2_qbuf,
-
ipc/sem.c:646:61-646:79: static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:719:56-719:74: static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:786:46-786:64: static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
-
ipc/sem.c:799:49-799:67: static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:816:56-816:74: static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
-
ipc/sem.c:1072:57-1072:75: static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
-
kernel/auditfilter.c:1078:39-1078:60: static void audit_list_rules(int seq, struct sk_buff_head *q)
-
kernel/cgroup/cpuset.c:494:53-494:74: static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
-
kernel/futex/core.c:499:22-499:38: void __futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:513:40-513:56: struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
-
kernel/futex/core.c:543:20-543:36: void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/core.c:573:19-573:35: int futex_unqueue(struct futex_q *q)
-
kernel/futex/core.c:620:23-620:39: void futex_unqueue_pi(struct futex_q *q)
-
kernel/futex/futex.h:170:32-170:48: static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
-
kernel/futex/pi.c:683:54-683:70: static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:855:52-855:68: static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-
kernel/futex/pi.c:884:39-884:55: int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)
-
kernel/futex/requeue.c:74:20-74:36: void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
-
kernel/futex/requeue.c:92:45-92:61: static inline bool futex_requeue_pi_prepare(struct futex_q *q,
-
kernel/futex/requeue.c:125:46-125:62: static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
-
kernel/futex/requeue.c:156:48-156:64: static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
-
kernel/futex/requeue.c:223:28-223:44: void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
-
kernel/futex/requeue.c:692:8-692:24: struct futex_q *q,
-
kernel/futex/waitwake.c:115:50-115:66: void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q)
-
kernel/futex/waitwake.c:328:53-328:69: void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
-
kernel/futex/waitwake.c:578:8-578:24: struct futex_q *q, struct futex_hash_bucket **hb)
-
kernel/sched/swait.c:6:30-6:55: void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-
kernel/sched/swait.c:21:22-21:47: void swake_up_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:41:26-41:51: void swake_up_all_locked(struct swait_queue_head *q)
-
kernel/sched/swait.c:47:19-47:44: void swake_up_one(struct swait_queue_head *q)
-
kernel/sched/swait.c:61:19-61:44: void swake_up_all(struct swait_queue_head *q)
-
kernel/sched/swait.c:84:25-84:50: void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:91:33-91:58: void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:102:29-102:54: long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
-
kernel/sched/swait.c:125:21-125:46: void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/sched/swait.c:132:19-132:44: void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-
kernel/signal.c:450:29-450:46: static void __sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1935:20-1935:37: void sigqueue_free(struct sigqueue *q)
-
kernel/signal.c:1960:19-1960:36: int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
-
kernel/trace/blktrace.c:313:28-313:50: static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:348:31-348:53: static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
-
kernel/trace/blktrace.c:355:31-355:53: static int __blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:370:22-370:44: int blk_trace_remove(struct request_queue *q)
-
kernel/trace/blktrace.c:482:31-482:53: static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:589:30-589:52: static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:610:21-610:43: int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
kernel/trace/blktrace.c:625:35-625:57: static int compat_blk_trace_setup(struct request_queue *q, char *name,
-
kernel/trace/blktrace.c:658:34-658:56: static int __blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:700:25-700:47: int blk_trace_startstop(struct request_queue *q, int start)
-
kernel/trace/blktrace.c:771:25-771:47: void blk_trace_shutdown(struct request_queue *q)
-
kernel/trace/blktrace.c:781:35-781:57: static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:893:31-893:53: static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-
kernel/trace/blktrace.c:917:12-917:34: struct request_queue *q, struct bio *bio)
-
kernel/trace/blktrace.c:945:46-945:68: static void blk_add_trace_plug(void *ignore, struct request_queue *q)
-
kernel/trace/blktrace.c:956:48-956:70: static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
-
kernel/trace/blktrace.c:1609:35-1609:57: static int blk_trace_remove_queue(struct request_queue *q)
-
kernel/trace/blktrace.c:1635:34-1635:56: static int blk_trace_setup_queue(struct request_queue *q,
-
lib/bch.c:816:29-816:45: const struct gf_poly *b, struct gf_poly *q)
-
lib/crypto/curve25519-hacl64.c:547:12-547:17: u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:559:24-559:29: u64 *nqpq2, u64 *q, u8 byt)
-
lib/crypto/curve25519-hacl64.c:569:7-569:12: u64 *q, u8 byt, u32 i)
-
lib/crypto/curve25519-hacl64.c:580:22-580:27: u64 *nqpq2, u64 *q,
-
lib/crypto/curve25519-hacl64.c:590:47-590:52: static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
-
mm/kasan/quarantine.c:42:25-42:44: static bool qlist_empty(struct qlist_head *q)
-
mm/kasan/quarantine.c:47:24-47:43: static void qlist_init(struct qlist_head *q)
-
mm/kasan/quarantine.c:53:23-53:42: static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
-
mm/kasan/quarantine.c:174:28-174:47: static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
-
mm/kasan/quarantine.c:323:36-323:55: static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
-
mm/swapfile.c:1163:6-1163:31: struct swap_info_struct *q)
-
net/core/dev.c:3068:32-3068:46: static void __netif_reschedule(struct Qdisc *q)
-
net/core/dev.c:3082:23-3082:37: void __netif_schedule(struct Qdisc *q)
-
net/core/dev.c:3779:51-3779:65: static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
-
net/core/dev.c:3791:55-3791:69: static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
-
net/core/gen_stats.c:341:10-341:50: const struct gnet_stats_queue __percpu *q)
-
net/core/gen_stats.c:358:6-358:37: const struct gnet_stats_queue *q)
-
net/core/gen_stats.c:389:9-389:34: struct gnet_stats_queue *q, __u32 qlen)
-
net/decnet/af_decnet.c:1640:43-1640:64: static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
-
net/decnet/dn_nsp_out.c:370:67-370:88: int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
-
net/ieee802154/6lowpan/reassembly.c:36:30-36:54: static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/inet_fragment.c:54:36-54:60: static void fragrun_append_to_last(struct inet_frag_queue *q,
-
net/ipv4/inet_fragment.c:65:28-65:52: static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
-
net/ipv4/inet_fragment.c:285:24-285:48: void inet_frag_destroy(struct inet_frag_queue *q)
-
net/ipv4/inet_fragment.c:377:28-377:52: int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:441:31-441:55: void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
-
net/ipv4/inet_fragment.c:510:29-510:53: void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-
net/ipv4/inet_fragment.c:579:37-579:61: struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
-
net/ipv4/ip_fragment.c:82:27-82:51: static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
-
net/ipv4/ip_fragment.c:96:27-96:51: static void ip4_frag_free(struct inet_frag_queue *q)
-
net/netfilter/nfnetlink_queue.c:102:17-102:40: instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
-
net/netfilter/nfnetlink_queue.c:116:17-116:40: instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
-
net/netfilter/nfnetlink_queue.c:183:18-183:41: instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
-
net/netfilter/nfnetlink_queue.c:1040:25-1040:48: verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
-
net/rds/message.c:75:33-75:61: void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
-
net/rds/rds.h:382:49-382:77: static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
-
net/rose/rose_in.c:102:101-102:105: static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/rose/rose_subr.c:201:56-201:61: int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
-
net/sched/cls_api.c:719:60-719:74: static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:759:63-759:77: static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:877:60-877:74: static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1042:46-1042:61: static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1117:32-1117:46: static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
-
net/sched/cls_api.c:1137:60-1137:74: static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
-
net/sched/cls_api.c:1174:54-1174:68: static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1211:58-1211:73: static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
-
net/sched/cls_api.c:1245:31-1245:45: static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
-
net/sched/cls_api.c:1267:11-1267:25: struct Qdisc *q,
-
net/sched/cls_api.c:1288:11-1288:25: struct Qdisc *q,
-
net/sched/cls_api.c:1303:5-1303:19: struct Qdisc *q,
-
net/sched/cls_api.c:1318:51-1318:65: int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-
net/sched/cls_api.c:1377:46-1377:60: struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
-
net/sched/cls_api.c:1393:49-1393:63: void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1816:5-1816:19: struct Qdisc *q, u32 parent, void *fh,
-
net/sched/cls_api.c:1870:31-1870:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1899:35-1899:49: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:1937:31-1937:45: struct tcf_block *block, struct Qdisc *q,
-
net/sched/cls_api.c:2492:53-2492:67: static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
-
net/sched/cls_basic.c:266:71-266:77: static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_bpf.c:634:11-634:17: void *q, unsigned long base)
-
net/sched/cls_flower.c:3329:68-3329:74: static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_fw.c:421:68-421:74: static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_matchall.c:395:70-395:76: static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_route.c:644:72-644:78: static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_rsvp.h:738:70-738:76: static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/cls_tcindex.c:700:11-700:17: void *q, unsigned long base)
-
net/sched/cls_u32.c:1254:69-1254:75: static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
-
net/sched/sch_api.c:278:21-278:35: void qdisc_hash_add(struct Qdisc *q, bool invisible)
-
net/sched/sch_api.c:289:21-289:35: void qdisc_hash_del(struct Qdisc *q)
-
net/sched/sch_api.c:885:47-885:61: static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-
net/sched/sch_api.c:963:34-963:48: static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
-
net/sched/sch_api.c:1369:23-1369:37: static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
-
net/sched/sch_api.c:1385:15-1385:29: check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
-
net/sched/sch_api.c:1794:48-1794:62: static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_api.c:1841:25-1841:39: struct nlmsghdr *n, struct Qdisc *q,
-
net/sched/sch_api.c:1863:9-1863:23: struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1924:33-1924:47: static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:1955:28-1955:42: static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
-
net/sched/sch_api.c:2122:29-2122:43: static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
-
net/sched/sch_api.c:2132:33-2132:47: static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
-
net/sched/sch_cake.c:646:22-646:44: static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-
net/sched/sch_cake.c:1151:40-1151:64: static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
-
net/sched/sch_cake.c:1314:31-1314:55: static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
-
net/sched/sch_cake.c:1349:26-1349:50: static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
-
net/sched/sch_cake.c:1396:28-1396:52: static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
-
net/sched/sch_cake.c:1408:34-1408:64: static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1415:26-1415:50: static void cake_heapify(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1452:29-1452:53: static void cake_heapify_up(struct cake_sched_data *q, u16 i)
-
net/sched/sch_cake.c:1468:32-1468:56: static int cake_advance_shaper(struct cake_sched_data *q,
-
net/sched/sch_cake.c:2960:25-2960:39: static void cake_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_cbq.c:166:18-166:41: cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
-
net/sched/sch_cbq.c:342:19-342:42: cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbq.c:444:40-444:63: static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
-
net/sched/sch_cbq.c:529:21-529:44: cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
-
net/sched/sch_cbq.c:551:12-551:35: cbq_update(struct cbq_sched_data *q)
-
net/sched/sch_cbq.c:883:34-883:57: static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
-
net/sched/sch_cbq.c:1080:24-1080:47: static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbq.c:1087:25-1087:48: static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
-
net/sched/sch_cbs.c:251:5-251:28: struct cbs_sched_data *q)
-
net/sched/sch_cbs.c:276:55-276:78: static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-
net/sched/sch_cbs.c:309:55-309:78: static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
-
net/sched/sch_choke.c:76:31-76:62: static unsigned int choke_len(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:82:20-82:51: static int use_ecn(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:88:25-88:56: static int use_harddrop(const struct choke_sched_data *q)
-
net/sched/sch_choke.c:94:34-94:59: static void choke_zap_head_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:104:34-104:59: static void choke_zap_tail_holes(struct choke_sched_data *q)
-
net/sched/sch_choke.c:180:42-180:73: static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
-
net/sched/sch_choke.c:200:32-200:63: static bool choke_match_random(const struct choke_sched_data *q,
-
net/sched/sch_etf.c:297:5-297:28: struct etf_sched_data *q)
-
net/sched/sch_etf.c:319:55-319:78: static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
-
net/sched/sch_ets.c:190:33-190:51: static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
-
net/sched/sch_fifo.c:227:20-227:34: int fifo_set_limit(struct Qdisc *q, unsigned int limit)
-
net/sched/sch_fq.c:172:37-172:59: static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:179:35-179:57: static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
-
net/sched/sch_fq.c:217:19-217:41: static void fq_gc(struct fq_sched_data *q,
-
net/sched/sch_fq.c:261:57-261:79: static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
-
net/sched/sch_fq.c:437:9-437:37: const struct fq_sched_data *q)
-
net/sched/sch_fq.c:499:32-499:54: static void fq_check_throttled(struct fq_sched_data *q, u64 now)
-
net/sched/sch_fq.c:697:23-697:45: static void fq_rehash(struct fq_sched_data *q,
-
net/sched/sch_fq_codel.c:70:35-70:69: static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-
net/sched/sch_fq_codel.c:613:29-613:43: static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_fq_pie.c:73:33-73:65: static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
-
net/sched/sch_generic.c:38:38-38:52: static void qdisc_maybe_clear_missed(struct Qdisc *q,
-
net/sched/sch_generic.c:72:53-72:67: static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:108:57-108:71: static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
-
net/sched/sch_generic.c:118:46-118:60: static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
-
net/sched/sch_generic.c:142:57-142:71: static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-
net/sched/sch_generic.c:178:34-178:48: static void try_bulk_dequeue_skb(struct Qdisc *q,
-
net/sched/sch_generic.c:202:39-202:53: static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
-
net/sched/sch_generic.c:228:36-228:50: static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
-
net/sched/sch_generic.c:314:43-314:57: bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-
net/sched/sch_generic.c:388:34-388:48: static inline bool qdisc_restart(struct Qdisc *q, int *packets)
-
net/sched/sch_generic.c:410:18-410:32: void __qdisc_run(struct Qdisc *q)
-
net/sched/sch_gred.c:114:6-114:30: struct gred_sched_data *q,
-
net/sched/sch_gred.c:129:11-129:35: struct gred_sched_data *q)
-
net/sched/sch_gred.c:136:12-136:36: struct gred_sched_data *q)
-
net/sched/sch_gred.c:142:25-142:49: static int gred_use_ecn(struct gred_sched_data *q)
-
net/sched/sch_gred.c:147:30-147:54: static int gred_use_harddrop(struct gred_sched_data *q)
-
net/sched/sch_gred.c:401:36-401:60: static inline void gred_destroy_vq(struct gred_sched_data *q)
-
net/sched/sch_hfsc.c:219:18-219:37: eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
-
net/sched/sch_hfsc.c:236:18-236:37: eltree_get_minel(struct hfsc_sched *q)
-
net/sched/sch_hhf.c:182:12-182:35: struct hhf_sched_data *q)
-
net/sched/sch_hhf.c:213:8-213:31: struct hhf_sched_data *q)
-
net/sched/sch_htb.c:313:34-313:52: static void htb_add_to_wait_tree(struct htb_sched *q,
-
net/sched/sch_htb.c:360:41-360:59: static inline void htb_add_class_to_row(struct htb_sched *q,
-
net/sched/sch_htb.c:392:46-392:64: static inline void htb_remove_class_from_row(struct htb_sched *q,
-
net/sched/sch_htb.c:422:32-422:50: static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:459:34-459:52: static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:555:23-555:41: htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
-
net/sched/sch_htb.c:586:33-586:51: static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:604:35-604:53: static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
-
net/sched/sch_htb.c:692:30-692:48: static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-
net/sched/sch_htb.c:740:26-740:44: static s64 htb_do_events(struct htb_sched *q, const int level,
-
net/sched/sch_htb.c:865:41-865:59: static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
-
net/sched/sch_htb.c:1038:41-1038:55: static void htb_set_lockdep_class_child(struct Qdisc *q)
-
net/sched/sch_htb.c:1312:41-1312:59: static void htb_offload_aggregate_stats(struct htb_sched *q,
-
net/sched/sch_multiq.c:320:27-320:41: static void multiq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_netem.c:200:25-200:50: static bool loss_4state(struct netem_sched_data *q)
-
net/sched/sch_netem.c:265:27-265:52: static bool loss_gilb_ell(struct netem_sched_data *q)
-
net/sched/sch_netem.c:286:24-286:49: static bool loss_event(struct netem_sched_data *q)
-
net/sched/sch_netem.c:345:36-345:67: static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
-
net/sched/sch_netem.c:629:27-629:52: static void get_slot_next(struct netem_sched_data *q, u64 now)
-
net/sched/sch_netem.c:648:35-648:60: static struct sk_buff *netem_peek(struct netem_sched_data *q)
-
net/sched/sch_netem.c:665:30-665:55: static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
-
net/sched/sch_netem.c:806:22-806:47: static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:828:29-828:54: static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:837:25-837:50: static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:845:25-845:50: static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:853:22-853:47: static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:867:25-867:50: static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
-
net/sched/sch_netem.c:1088:28-1088:59: static int dump_loss_model(const struct netem_sched_data *q,
-
net/sched/sch_prio.c:343:25-343:39: static void prio_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_qfq.c:253:26-253:44: static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:263:43-263:61: static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:277:28-277:46: static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:311:28-311:46: static void qfq_add_to_agg(struct qfq_sched *q,
-
net/sched/sch_qfq.c:328:29-328:47: static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:341:34-341:52: static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:352:29-352:47: static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:365:35-365:53: static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
-
net/sched/sch_qfq.c:728:41-728:59: static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
-
net/sched/sch_qfq.c:745:27-745:45: static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
-
net/sched/sch_qfq.c:768:36-768:54: static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
-
net/sched/sch_qfq.c:775:32-775:50: static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
-
net/sched/sch_qfq.c:801:31-801:49: static void qfq_make_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:956:33-956:51: static void qfq_update_eligible(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1030:30-1030:48: static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1063:19-1063:37: qfq_update_agg_ts(struct qfq_sched *q,
-
net/sched/sch_qfq.c:1153:50-1153:68: static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
-
net/sched/sch_qfq.c:1268:30-1268:48: static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_qfq.c:1315:30-1315:48: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
-
net/sched/sch_qfq.c:1329:29-1329:47: static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-
net/sched/sch_qfq.c:1352:32-1352:50: static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
-
net/sched/sch_red.c:55:31-55:54: static inline int red_use_ecn(struct red_sched_data *q)
-
net/sched/sch_red.c:60:36-60:59: static inline int red_use_harddrop(struct red_sched_data *q)
-
net/sched/sch_red.c:65:27-65:50: static int red_use_nodrop(struct red_sched_data *q)
-
net/sched/sch_sfb.c:123:55-123:78: static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:138:55-138:78: static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:152:11-152:34: struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:167:55-167:78: static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:180:50-180:73: static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:185:50-185:73: static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:190:34-190:57: static void sfb_zero_all_buckets(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:198:56-198:85: static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:218:45-218:68: static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:224:27-224:50: static void sfb_swap_slot(struct sfb_sched_data *q)
-
net/sched/sch_sfb.c:234:49-234:72: static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
-
net/sched/sch_sfq.c:150:45-150:68: static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
-
net/sched/sch_sfq.c:157:30-157:59: static unsigned int sfq_hash(const struct sfq_sched_data *q,
-
net/sched/sch_sfq.c:203:29-203:52: static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:228:28-228:51: static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:241:28-241:51: static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
-
net/sched/sch_sfq.c:329:26-329:55: static int sfq_prob_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:335:26-335:55: static int sfq_hard_mark(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:340:25-340:54: static int sfq_headdrop(const struct sfq_sched_data *q)
-
net/sched/sch_sfq.c:841:24-841:38: static void sfq_unbind(struct Qdisc *q, unsigned long cl)
-
net/sched/sch_skbprio.c:40:31-40:64: static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_skbprio.c:53:30-53:63: static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
-
net/sched/sch_taprio.c:98:35-98:62: static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
-
net/sched/sch_taprio.c:111:32-111:59: static ktime_t taprio_get_time(const struct taprio_sched *q)
-
net/sched/sch_taprio.c:129:30-129:51: static void switch_schedules(struct taprio_sched *q,
-
net/sched/sch_taprio.c:177:31-177:52: static int length_to_duration(struct taprio_sched *q, int len)
-
net/sched/sch_taprio.c:295:31-295:52: static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
-
net/sched/sch_taprio.c:551:31-551:52: static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
-
net/sched/sch_taprio.c:796:29-796:50: static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:828:30-828:51: static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
-
net/sched/sch_taprio.c:847:29-847:50: static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
-
net/sched/sch_taprio.c:887:34-887:55: static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
-
net/sched/sch_taprio.c:1031:36-1031:57: static void setup_first_close_time(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1073:11-1073:32: struct taprio_sched *q)
-
net/sched/sch_taprio.c:1125:26-1125:47: static void setup_txtime(struct taprio_sched *q,
-
net/sched/sch_taprio.c:1191:43-1191:64: static void taprio_offload_config_changed(struct taprio_sched *q)
-
net/sched/sch_taprio.c:1251:6-1251:27: struct taprio_sched *q,
-
net/sched/sch_taprio.c:1288:7-1288:28: struct taprio_sched *q,
-
net/sched/sch_tbf.c:263:30-263:59: static bool tbf_peak_present(const struct tbf_sched_data *q)
-
net/sctp/inqueue.c:64:20-64:37: void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
-
net/sctp/inqueue.c:234:30-234:47: void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
-
net/sctp/outqueue.c:59:40-59:58: static inline void sctp_outq_head_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:74:57-74:75: static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
-
net/sctp/outqueue.c:80:40-80:58: static inline void sctp_outq_tail_data(struct sctp_outq *q,
-
net/sctp/outqueue.c:191:52-191:70: void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
-
net/sctp/outqueue.c:206:34-206:52: static void __sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:267:25-267:43: void sctp_outq_teardown(struct sctp_outq *q)
-
net/sctp/outqueue.c:274:21-274:39: void sctp_outq_free(struct sctp_outq *q)
-
net/sctp/outqueue.c:281:21-281:39: void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
-
net/sctp/outqueue.c:447:27-447:45: void sctp_retransmit_mark(struct sctp_outq *q,
-
net/sctp/outqueue.c:534:22-534:40: void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
-
net/sctp/outqueue.c:595:34-595:52: static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
-
net/sctp/outqueue.c:756:23-756:41: void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
-
net/sctp/outqueue.c:1189:29-1189:47: static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
-
net/sctp/outqueue.c:1245:20-1245:38: int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
-
net/sctp/outqueue.c:1409:24-1409:48: int sctp_outq_is_empty(const struct sctp_outq *q)
-
net/sctp/outqueue.c:1429:36-1429:54: static void sctp_check_transmitted(struct sctp_outq *q,
-
net/sctp/outqueue.c:1703:31-1703:49: static void sctp_mark_missing(struct sctp_outq *q,
-
net/sctp/outqueue.c:1816:27-1816:45: void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_interleave.c:1106:33-1106:51: static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
-
net/sctp/stream_sched.c:53:37-53:55: static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched.c:58:51-58:69: static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched.c:81:42-81:60: static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched.c:225:30-225:48: void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched.c:247:32-247:50: void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
-
net/sctp/stream_sched_prio.c:233:37-233:55: static void sctp_sched_prio_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_prio.c:246:51-246:69: static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_prio.c:274:42-274:60: static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:98:35-98:53: static void sctp_sched_rr_enqueue(struct sctp_outq *q,
-
net/sctp/stream_sched_rr.c:111:49-111:67: static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
-
net/sctp/stream_sched_rr.c:134:40-134:58: static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
-
net/sunrpc/sched.c:139:25-139:43: __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
-
net/sunrpc/sched.c:376:40-376:63: static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:385:37-385:60: static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:394:45-394:68: static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:424:27-424:50: void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:441:19-441:42: void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:459:36-459:59: void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
-
net/sunrpc/sched.c:475:28-475:51: void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
-
net/sunrpc/sched.c:1194:3-1194:28: struct workqueue_struct *q)
-
net/sunrpc/sched.c:1203:52-1203:77: static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
-
net/unix/af_unix.c:410:39-410:59: static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
-
net/x25/x25_in.c:208:100-208:104: static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
-
net/x25/x25_subr.c:260:72-260:77: int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
-
net/xdp/xsk_queue.c:13:34-13:52: static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
-
net/xdp/xsk_queue.c:50:19-50:37: void xskq_destroy(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:114:52-114:70: static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
-
net/xdp/xsk_queue.h:122:50-122:68: static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:179:44-179:62: static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:190:40-190:58: static inline bool xskq_cons_read_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:208:45-208:63: static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-
net/xdp/xsk_queue.h:234:40-234:58: static inline void __xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:239:37-239:55: static inline void __xskq_cons_peek(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:245:42-245:60: static inline void xskq_cons_get_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:251:40-251:58: static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:264:42-264:60: static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:269:50-269:68: static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
-
net/xdp/xsk_queue.h:276:40-276:58: static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:289:38-289:56: static inline void xskq_cons_release(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:294:40-294:58: static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-
net/xdp/xsk_queue.h:299:45-299:63: static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:307:37-307:55: static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
-
net/xdp/xsk_queue.h:321:38-321:56: static inline bool xskq_prod_is_full(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:326:37-326:55: static inline void xskq_prod_cancel(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:331:37-331:55: static inline int xskq_prod_reserve(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:341:42-341:60: static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:353:48-353:66: static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
-
net/xdp/xsk_queue.h:370:42-370:60: static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-
net/xdp/xsk_queue.h:387:39-387:57: static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
-
net/xdp/xsk_queue.h:392:37-392:55: static inline void xskq_prod_submit(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:397:42-397:60: static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
-
net/xdp/xsk_queue.h:407:39-407:57: static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
-
net/xdp/xsk_queue.h:412:39-412:57: static inline bool xskq_prod_is_empty(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:420:41-420:59: static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-
net/xdp/xsk_queue.h:425:45-425:63: static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
-
sound/core/seq/oss/seq_oss_event.c:42:55-42:68: snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:95:39-95:52: old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:121:44-121:57: extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:175:45-175:58: chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:196:46-196:59: chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:223:42-223:55: timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_event.c:258:41-258:54: local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
-
sound/core/seq/oss/seq_oss_readq.c:62:26-62:48: snd_seq_oss_readq_delete(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:74:25-74:47: snd_seq_oss_readq_clear(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:89:24-89:46: snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
-
sound/core/seq/oss/seq_oss_readq.c:123:29-123:51: int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
-
sound/core/seq/oss/seq_oss_readq.c:141:29-141:51: snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
-
sound/core/seq/oss/seq_oss_readq.c:169:24-169:46: snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
-
sound/core/seq/oss/seq_oss_readq.c:181:24-181:46: snd_seq_oss_readq_wait(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:193:24-193:46: snd_seq_oss_readq_free(struct seq_oss_readq *q)
-
sound/core/seq/oss/seq_oss_readq.c:206:24-206:46: snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
-
sound/core/seq/oss/seq_oss_readq.c:216:33-216:55: snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
-
sound/core/seq/oss/seq_oss_readq.c:244:29-244:51: snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
-
sound/core/seq/oss/seq_oss_writeq.c:54:27-54:50: snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:67:26-67:49: snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:83:25-83:48: snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:123:27-123:50: snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
-
sound/core/seq/oss/seq_oss_writeq.c:139:34-139:57: snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
-
sound/core/seq/oss/seq_oss_writeq.c:152:31-152:54: snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
-
sound/core/seq/seq_queue.c:50:27-50:49: static int queue_list_add(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:129:26-129:48: static void queue_delete(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:240:26-240:48: void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
-
sound/core/seq/seq_queue.c:354:32-354:54: static inline int check_access(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:362:30-362:52: static int queue_access_lock(struct snd_seq_queue *q, int client)
-
sound/core/seq/seq_queue.c:376:40-376:62: static inline void queue_access_unlock(struct snd_seq_queue *q)
-
sound/core/seq/seq_queue.c:629:35-629:57: static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
-
sound/core/seq/seq_queue.c:652:41-652:63: static void snd_seq_queue_process_event(struct snd_seq_queue *q,
-
sound/core/seq/seq_timer.c:258:24-258:46: int snd_seq_timer_open(struct snd_seq_queue *q)
-
sound/core/seq/seq_timer.c:313:25-313:47: int snd_seq_timer_close(struct snd_seq_queue *q)
-
sound/pci/hda/hda_codec.c:1215:7-1215:29: struct hda_cvt_setup *q)
variable
Defined...
-
arch/x86/crypto/curve25519-x86_64.c:35:2-35:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
arch/x86/include/asm/div64.h:83:2-83:6: u64 q;
-
arch/x86/kernel/cpu/common.c:758:2-758:12: char *p, *q, *s;
-
arch/x86/kvm/svm/sev.c:2113:2-2113:26: struct list_head *pos, *q;
-
arch/x86/kvm/vmx/nested.c:1570:2-1570:9: int i, q;
-
arch/x86/xen/platform-pci-unplug.c:181:2-181:12: char *p, *q;
-
block/bfq-iosched.c:6140:2-6140:34: struct request_queue *q = hctx->queue;
-
block/bfq-iosched.c:6741:2-6741:32: struct request_queue *q = rq->q;
-
block/bio.c:1030:2-1030:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1140:3-1140:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bio.c:1223:2-1223:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-cgroup.c:687:2-687:24: struct request_queue *q;
-
block/blk-cgroup.c:1083:3-1083:35: struct request_queue *q = blkg->q;
-
block/blk-cgroup.c:1802:2-1802:37: struct request_queue *q = current->throttle_queue;
-
block/blk-core.c:403:2-404:3: struct request_queue *q =
-
block/blk-core.c:411:2-411:28: struct request_queue *q = from_timer(q, t, timeout);
-
block/blk-core.c:422:2-422:24: struct request_queue *q;
-
block/blk-core.c:684:3-684:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-core.c:759:2-759:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-core.c:919:2-919:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-flush.c:174:2-174:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:223:2-223:38: struct request_queue *q = flush_rq->q;
-
block/blk-flush.c:359:2-359:32: struct request_queue *q = rq->q;
-
block/blk-flush.c:392:2-392:32: struct request_queue *q = rq->q;
-
block/blk-ia-ranges.c:115:2-115:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:166:2-166:34: struct request_queue *q = disk->queue;
-
block/blk-ia-ranges.c:299:2-299:34: struct request_queue *q = disk->queue;
-
block/blk-ioc.c:76:2-76:33: struct request_queue *q = icq->q;
-
block/blk-ioc.c:117:3-117:34: struct request_queue *q = icq->q;
-
block/blk-merge.c:367:2-367:58: struct request_queue *q = bdev_get_queue((*bio)->bi_bdev);
-
block/blk-merge.c:561:2-561:32: struct request_queue *q = rq->q;
-
block/blk-mq-cpumap.c:39:2-39:39: unsigned int cpu, first_sibling, q = 0;
-
block/blk-mq-debugfs-zoned.c:11:2-11:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:30:2-30:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:51:2-51:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:59:2-59:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:67:2-67:31: struct request_queue *q = m->private;
-
block/blk-mq-debugfs.c:101:2-101:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:139:2-139:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:150:2-150:28: struct request_queue *q = data;
-
block/blk-mq-debugfs.c:444:2-444:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:461:2-461:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:478:2-478:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:495:2-495:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-debugfs.c:825:2-825:34: struct request_queue *q = rqos->q;
-
block/blk-mq-sched.c:92:2-92:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:220:2-220:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:275:2-275:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:327:2-327:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.c:411:2-411:32: struct request_queue *q = rq->q;
-
block/blk-mq-sched.c:466:2-466:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-sched.h:69:3-69:33: struct request_queue *q = rq->q;
-
block/blk-mq-sysfs.c:57:2-57:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:79:2-79:24: struct request_queue *q;
-
block/blk-mq-sysfs.c:186:2-186:34: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:45:3-45:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:85:3-85:35: struct request_queue *q = hctx->queue;
-
block/blk-mq-tag.c:267:2-267:39: struct request_queue *q = iter_data->q;
-
block/blk-mq.c:346:2-346:34: struct request_queue *q = data->q;
-
block/blk-mq.c:442:2-442:34: struct request_queue *q = data->q;
-
block/blk-mq.c:606:2-606:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:624:2-624:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:955:2-955:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:1132:2-1132:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1296:2-1296:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1322:2-1323:3: struct request_queue *q =
-
block/blk-mq.c:1360:2-1360:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:1484:2-1485:3: struct request_queue *q =
-
block/blk-mq.c:1851:2-1851:34: struct request_queue *q = hctx->queue;
-
block/blk-mq.c:2445:2-2445:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2479:2-2479:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:2633:3-2633:25: struct request_queue *q;
-
block/blk-mq.c:2801:2-2801:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-mq.c:2861:2-2861:32: struct request_queue *q = rq->q;
-
block/blk-mq.c:3781:2-3781:24: struct request_queue *q;
-
block/blk-mq.c:3889:2-3889:24: struct request_queue *q;
-
block/blk-mq.c:3913:2-3913:24: struct request_queue *q;
-
block/blk-mq.c:4490:2-4490:24: struct request_queue *q;
-
block/blk-mq.c:4590:2-4590:32: struct request_queue *q = cb->data;
-
block/blk-mq.h:356:3-356:35: struct request_queue *q = hctx->queue;
-
block/blk-rq-qos.h:189:3-189:56: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-settings.c:387:2-387:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:909:2-909:34: struct request_queue *q = disk->queue;
-
block/blk-settings.c:955:2-955:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-settings.c:968:2-968:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-stat.c:53:2-53:32: struct request_queue *q = rq->q;
-
block/blk-sysfs.c:676:2-677:3: struct request_queue *q =
-
block/blk-sysfs.c:703:2-704:3: struct request_queue *q =
-
block/blk-sysfs.c:720:2-720:24: struct request_queue *q;
-
block/blk-sysfs.c:735:2-735:28: struct request_queue *q = container_of(rcu_head, struct request_queue,
-
block/blk-sysfs.c:763:2-764:3: struct request_queue *q =
-
block/blk-sysfs.c:809:2-809:34: struct request_queue *q = disk->queue;
-
block/blk-sysfs.c:906:2-906:34: struct request_queue *q = disk->queue;
-
block/blk-throttle.c:1142:2-1142:24: struct request_queue *q;
-
block/blk-throttle.c:1219:2-1219:32: struct request_queue *q = td->queue;
-
block/blk-throttle.c:2093:2-2093:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/blk-throttle.c:2226:2-2226:32: struct request_queue *q = rq->q;
-
block/blk-timeout.c:55:3-55:35: struct request_queue *q = disk->queue;
-
block/blk-timeout.c:130:2-130:33: struct request_queue *q = req->q;
-
block/blk-wbt.c:693:2-693:34: struct request_queue *q = rqos->q;
-
block/blk-zoned.c:192:2-192:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-zoned.c:264:2-264:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/blk-zoned.c:342:2-342:24: struct request_queue *q;
-
block/blk-zoned.c:399:2-399:24: struct request_queue *q;
-
block/blk-zoned.c:478:2-478:34: struct request_queue *q = disk->queue;
-
block/blk-zoned.c:562:2-562:34: struct request_queue *q = disk->queue;
-
block/blk.h:77:2-77:55: struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
block/bsg-lib.c:275:2-275:34: struct request_queue *q = hctx->queue;
-
block/bsg-lib.c:365:2-365:24: struct request_queue *q;
-
block/bsg.c:105:2-105:32: struct request_queue *q = bd->queue;
-
block/elevator.c:62:2-62:32: struct request_queue *q = rq->q;
-
block/genhd.c:598:2-598:34: struct request_queue *q = disk->queue;
-
block/genhd.c:951:2-951:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:1000:2-1000:47: struct request_queue *q = bdev_get_queue(bdev);
-
block/genhd.c:1398:2-1398:24: struct request_queue *q;
-
block/kyber-iosched.c:955:1-955:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-
block/kyber-iosched.c:956:1-956:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
-
block/kyber-iosched.c:957:1-957:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
-
block/kyber-iosched.c:958:1-958:1: KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
-
block/kyber-iosched.c:963:2-963:28: struct request_queue *q = data;
-
block/mq-deadline.c:564:2-564:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:715:2-715:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:772:2-772:34: struct request_queue *q = hctx->queue;
-
block/mq-deadline.c:810:2-810:32: struct request_queue *q = rq->q;
-
block/mq-deadline.c:975:1-975:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-
block/mq-deadline.c:976:1-976:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-
block/mq-deadline.c:977:1-977:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-
block/mq-deadline.c:978:1-978:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-
block/mq-deadline.c:979:1-979:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-
block/mq-deadline.c:980:1-980:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-
block/mq-deadline.c:985:2-985:28: struct request_queue *q = data;
-
block/mq-deadline.c:994:2-994:28: struct request_queue *q = data;
-
block/mq-deadline.c:1003:2-1003:28: struct request_queue *q = data;
-
block/mq-deadline.c:1012:2-1012:28: struct request_queue *q = data;
-
block/mq-deadline.c:1040:2-1040:28: struct request_queue *q = data;
-
block/mq-deadline.c:1094:1-1094:1: DEADLINE_DISPATCH_ATTR(0);
-
block/mq-deadline.c:1095:1-1095:1: DEADLINE_DISPATCH_ATTR(1);
-
block/mq-deadline.c:1096:1-1096:1: DEADLINE_DISPATCH_ATTR(2);
-
crypto/algapi.c:253:2-253:21: struct crypto_alg *q;
-
crypto/algapi.c:310:2-310:21: struct crypto_alg *q;
-
crypto/algapi.c:514:2-514:26: struct crypto_template *q;
-
crypto/algapi.c:592:2-592:26: struct crypto_template *q, *tmpl = NULL;
-
crypto/algapi.c:1310:3-1310:22: struct crypto_alg *q;
-
crypto/api.c:57:2-57:21: struct crypto_alg *q, *alg = NULL;
-
crypto/asymmetric_keys/x509_public_key.c:148:2-148:14: const char *q;
-
crypto/async_tx/async_pq.c:382:3-382:13: void *p, *q, *s;
-
crypto/async_tx/async_raid6_recov.c:158:2-158:19: struct page *p, *q, *a, *b;
-
crypto/async_tx/async_raid6_recov.c:208:2-208:19: struct page *p, *q, *g, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:299:2-299:19: struct page *p, *q, *dp, *dq;
-
crypto/async_tx/async_raid6_recov.c:476:2-476:19: struct page *p, *q, *dq;
-
crypto/crypto_user_base.c:38:2-38:21: struct crypto_alg *q, *alg = NULL;
-
crypto/dh.c:129:3-129:12: MPI val, q;
-
crypto/ecc.c:568:2-568:22: u64 q[ECC_MAX_DIGITS];
-
crypto/ecc.c:666:2-666:26: u64 q[ECC_MAX_DIGITS * 2];
-
crypto/essiv.c:386:2-386:18: const char *p, *q;
-
drivers/acpi/ec.c:1134:2-1134:28: struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
-
drivers/acpi/ec.c:1157:2-1157:24: struct acpi_ec_query *q;
-
drivers/acpi/ec.c:1175:2-1175:24: struct acpi_ec_query *q;
-
drivers/ata/libata-scsi.c:1057:2-1057:34: struct request_queue *q = sdev->request_queue;
-
drivers/block/aoe/aoecmd.c:837:2-837:24: struct request_queue *q;
-
drivers/block/aoe/aoecmd.c:1033:2-1033:24: struct request_queue *q;
-
drivers/block/aoe/aoenet.c:75:2-75:21: register char *p, *q;
-
drivers/block/drbd/drbd_int.h:1911:3-1911:44: struct drbd_work_queue *q = &connection->sender_work;
-
drivers/block/drbd/drbd_main.c:928:3-928:48: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/block/drbd/drbd_main.c:948:3-948:37: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1212:2-1212:36: struct request_queue *q = device->rq_queue;
-
drivers/block/drbd/drbd_nl.c:1262:2-1262:43: struct request_queue * const q = device->rq_queue;
-
drivers/block/loop.c:761:2-761:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/loop.c:939:2-939:32: struct request_queue *q = lo->lo_queue;
-
drivers/block/nbd.c:829:2-829:39: struct request_queue *q = nbd->disk->queue;
-
drivers/block/null_blk/main.c:1263:2-1263:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/main.c:1271:2-1271:35: struct request_queue *q = nullb->q;
-
drivers/block/null_blk/zoned.c:160:2-160:35: struct request_queue *q = nullb->q;
-
drivers/block/paride/pd.c:400:2-400:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:686:2-686:51: struct request_queue *q = bdev_get_queue(pd->bdev);
-
drivers/block/pktcdvd.c:2132:2-2132:24: struct request_queue *q;
-
drivers/block/pktcdvd.c:2452:2-2452:38: struct request_queue *q = pd->disk->queue;
-
drivers/block/rbd.c:4898:2-4898:24: struct request_queue *q;
-
drivers/block/rnbd/rnbd-clt.c:225:2-225:25: struct rnbd_queue *q = NULL;
-
drivers/block/rnbd/rnbd-clt.c:1116:2-1116:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1177:2-1177:31: struct rnbd_queue *q = hctx->driver_data;
-
drivers/block/rnbd/rnbd-clt.c:1344:2-1344:21: struct rnbd_queue *q;
-
drivers/block/sx8.c:691:2-691:43: struct request_queue *q = carm_pop_q(host);
-
drivers/block/sx8.c:706:2-706:34: struct request_queue *q = hctx->queue;
-
drivers/block/sx8.c:1403:2-1403:24: struct request_queue *q;
-
drivers/block/virtio_blk.c:449:2-449:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:551:2-551:40: struct request_queue *q = vblk->disk->queue;
-
drivers/block/virtio_blk.c:892:2-892:24: struct request_queue *q;
-
drivers/cdrom/cdrom.c:2596:2-2596:23: struct cdrom_subchnl q;
-
drivers/cdrom/cdrom.c:3053:2-3053:23: struct cdrom_subchnl q;
-
drivers/clk/clk-cdce925.c:223:2-223:5: u8 q;
-
drivers/counter/counter-chrdev.c:115:2-115:28: struct counter_comp_node *q, *o;
-
drivers/crypto/cavium/zip/zip_main.c:136:2-136:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:335:2-335:10: int q = 0;
-
drivers/crypto/cavium/zip/zip_main.c:489:2-489:10: u32 q = 0;
-
drivers/crypto/ccp/ccp-ops.c:222:2-222:10: u8 *p, *q;
-
drivers/crypto/ccp/ccp-ops.c:247:2-247:10: u8 *p, *q;
-
drivers/crypto/hisilicon/zip/zip_crypto.c:218:2-218:34: struct hisi_zip_req *q = req_q->q;
-
drivers/crypto/keembay/ocs-aes.c:1059:2-1059:9: int i, q;
-
drivers/firewire/core-device.c:1105:2-1105:6: u32 q;
-
drivers/firewire/core-topology.c:42:2-42:6: u32 q;
-
drivers/firewire/core-topology.c:176:2-176:23: u32 *next_sid, *end, q;
-
drivers/firmware/dmi_scan.c:654:2-654:20: char __iomem *p, *q;
-
drivers/firmware/efi/libstub/vsprintf.c:43:3-43:35: unsigned int q = (r * 0xccd) >> 15;
-
drivers/firmware/efi/libstub/vsprintf.c:62:2-62:42: unsigned int q = (x * 0x346DC5D7ULL) >> 43;
-
drivers/firmware/efi/libstub/vsprintf.c:76:2-76:27: unsigned int d3, d2, d1, q, h;
-
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c:1343:2-1343:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:262:2-262:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:913:2-913:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:963:2-963:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1011:2-1011:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1091:2-1091:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1995:2-1995:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2124:2-2124:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c:131:2-131:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:106:2-106:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:380:2-380:20: struct queue *q = container_of(kobj, struct queue, kobj);
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:236:2-236:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:568:2-568:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:679:2-679:16: struct queue *q;
-
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c:898:2-898:16: struct queue *q;
-
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:2815:2-2815:34: u32 max_avg, min_cll, max, min, q, r;
-
drivers/gpu/drm/drm_debugfs.c:240:2-240:26: struct list_head *pos, *q;
-
drivers/gpu/drm/i915/display/intel_quirks.c:202:3-202:42: struct intel_quirk *q = &intel_quirks[i];
-
drivers/gpu/drm/i915/gvt/handlers.c:2269:2-2269:2: MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2272:2-2272:2: MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2275:2-2275:2: MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2278:2-2278:2: MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2281:2-2281:2: MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/handlers.c:2284:2-2284:2: MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
-
drivers/gpu/drm/i915/gvt/scheduler.c:1631:2-1631:24: struct list_head *q = workload_q_head(vgpu, engine);
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:158:2-158:2: MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:167:2-167:2: MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:176:2-176:2: MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:189:2-189:2: MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:202:2-202:2: MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
-
drivers/gpu/drm/i915/intel_gvt_mmio_table.c:215:2-215:2: MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
-
drivers/gpu/drm/v3d/v3d_drv.c:139:2-139:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:258:2-258:17: enum v3d_queue q;
-
drivers/gpu/drm/v3d/v3d_sched.c:442:2-442:17: enum v3d_queue q;
-
drivers/gpu/drm/xen/xen_drm_front.c:54:2-54:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:65:2-65:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/gpu/drm/xen/xen_drm_front.c:79:2-79:35: struct xen_drm_front_dbuf *buf, *q;
-
drivers/hid/hid-quirks.c:1066:2-1066:29: struct quirks_list_struct *q;
-
drivers/hid/hid-quirks.c:1101:2-1101:37: struct quirks_list_struct *q_new, *q;
-
drivers/hid/hid-quirks.c:1157:2-1157:29: struct quirks_list_struct *q, *temp;
-
drivers/i2c/i2c-core-base.c:2016:2-2016:45: const struct i2c_adapter_quirks *q = adap->quirks;
-
drivers/iio/common/st_sensors/st_sensors_core.c:668:2-668:18: int i, len = 0, q, r;
-
drivers/iio/industrialio-buffer.c:945:2-945:30: struct iio_demux_table *p, *q;
-
drivers/infiniband/hw/hfi1/affinity.c:191:2-191:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/hfi1/mad.c:85:2-85:27: struct trap_node *node, *q;
-
drivers/infiniband/hw/hfi1/mad.c:987:2-987:7: u16 *q;
-
drivers/infiniband/hw/hfi1/mad.c:1686:2-1686:24: __be16 *q = (__be16 *)data;
-
drivers/infiniband/hw/hfi1/verbs.c:1620:2-1620:25: struct rdma_stat_desc *q;
-
drivers/infiniband/hw/irdma/verbs.c:3801:2-3801:26: struct list_head *pos, *q;
-
drivers/infiniband/hw/mlx4/mad.c:1026:2-1026:9: int p, q;
-
drivers/infiniband/hw/mlx4/mad.c:1062:2-1062:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:286:2-286:9: int p, q;
-
drivers/infiniband/hw/mthca/mthca_mad.c:328:2-328:9: int p, q;
-
drivers/infiniband/hw/qib/qib_mad.c:601:2-601:30: __be16 *q = (__be16 *) smp->data;
-
drivers/infiniband/hw/qib/qib_mad.c:1044:2-1044:24: u16 *q = (u16 *) smp->data;
-
drivers/infiniband/sw/rdmavt/qp.c:747:3-747:18: struct rvt_qp *q;
-
drivers/infiniband/sw/rxe/rxe_comp.c:526:2-526:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_queue.c:58:2-58:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_req.c:46:2-46:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:111:2-111:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_req.c:605:2-605:31: struct rxe_queue *q = qp->sq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:294:2-294:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_resp.c:1242:2-1242:31: struct rxe_queue *q = qp->rq.queue;
-
drivers/infiniband/sw/rxe/rxe_srq.c:50:2-50:20: struct rxe_queue *q;
-
drivers/infiniband/sw/rxe/rxe_srq.c:149:2-149:32: struct rxe_queue *q = srq->rq.queue;
-
drivers/infiniband/ulp/srp/ib_srp.c:2868:2-2868:34: struct request_queue *q = sdev->request_queue;
-
drivers/isdn/mISDN/dsp_cmx.c:1314:2-1314:14: u8 *d, *p, *q, *o_q;
-
drivers/isdn/mISDN/dsp_cmx.c:1636:2-1636:10: u8 *p, *q;
-
drivers/md/bcache/super.c:901:2-901:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1010:2-1010:24: struct request_queue *q;
-
drivers/md/bcache/super.c:1400:2-1400:51: struct request_queue *q = bdev_get_queue(dc->bdev);
-
drivers/md/bcache/sysfs.c:1066:3-1066:16: uint16_t q[31], *p, *cached;
-
drivers/md/bcache/util.c:97:2-97:11: uint64_t q;
-
drivers/md/dm-cache-policy-smq.c:880:2-880:25: struct queue *q = &mq->dirty;
-
drivers/md/dm-cache-policy-smq.c:893:2-893:25: struct queue *q = &mq->clean;
-
drivers/md/dm-io.c:306:2-306:54: struct request_queue *q = bdev_get_queue(where->bdev);
-
drivers/md/dm-mpath.c:511:2-511:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:877:2-877:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-mpath.c:933:2-933:24: struct request_queue *q;
-
drivers/md/dm-mpath.c:1618:2-1618:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-mpath.c:2092:2-2092:65: struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
-
drivers/md/dm-stats.c:944:2-944:14: const char *q;
-
drivers/md/dm-table.c:401:2-401:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:852:2-852:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/md/dm-table.c:1484:2-1484:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1584:2-1584:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1623:2-1623:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1781:2-1781:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1837:2-1837:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1845:2-1845:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-table.c:1872:2-1872:52: struct request_queue *q = bdev_get_queue(dev->bdev);
-
drivers/md/dm-zone.c:126:2-126:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zone.c:142:2-142:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zone.c:182:2-182:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zone.c:231:2-231:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zone.c:523:2-523:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zone.c:595:2-595:32: struct request_queue *q = md->queue;
-
drivers/md/dm-zoned-target.c:768:2-768:24: struct request_queue *q;
-
drivers/md/raid5-cache.c:3065:2-3065:53: struct request_queue *q = bdev_get_queue(rdev->bdev);
-
drivers/md/raid5-ppl.c:1304:2-1304:24: struct request_queue *q;
-
drivers/md/raid5.c:6869:3-6869:36: struct request_queue *q = mddev->queue;
-
drivers/media/common/saa7146/saa7146_fops.c:165:2-165:31: struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/common/saa7146/saa7146_fops.c:291:2-291:25: struct videobuf_queue *q;
-
drivers/media/common/saa7146/saa7146_fops.c:325:2-325:25: struct videobuf_queue *q;
-
drivers/media/common/saa7146/saa7146_video.c:382:2-382:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/saa7146/saa7146_video.c:1210:2-1210:34: struct videobuf_queue *q = &fh->video_q;
-
drivers/media/common/saa7146/saa7146_video.c:1225:2-1225:36: struct saa7146_dmaqueue *q = &vv->video_dmaq;
-
drivers/media/common/videobuf2/videobuf2-core.c:216:2-216:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:357:2-357:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1025:2-1025:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1109:2-1109:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1225:2-1225:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1361:2-1361:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1373:2-1373:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:1908:2-1908:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-core.c:2914:2-2914:24: struct vb2_queue *q = data;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:193:2-193:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dma-contig.c:212:2-212:33: struct vb2_queue *q = buf->vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:200:2-200:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:254:2-254:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:276:2-276:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-dvb.c:317:2-317:27: struct list_head *list, *q;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:145:2-145:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:178:2-178:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:496:2-496:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/common/videobuf2/videobuf2-v4l2.c:1189:2-1189:30: struct vb2_queue *q = vdev->queue;
-
drivers/media/dvb-core/dvb_demux.c:541:2-541:12: const u8 *q;
-
drivers/media/dvb-core/dvb_vb2.c:165:2-165:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:203:2-203:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:216:2-216:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:233:2-233:50: struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
-
drivers/media/dvb-core/dvb_vb2.c:364:2-364:30: struct vb2_queue *q = &ctx->vb_q;
-
drivers/media/dvb-frontends/rtl2832_sdr.c:1144:2-1144:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/dvb-frontends/sp887x.c:287:2-287:15: unsigned int q, r;
-
drivers/media/i2c/adv7511-v4l2.c:1276:2-1276:9: u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
-
drivers/media/i2c/cx25840/cx25840-core.c:697:2-697:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:775:2-775:27: struct workqueue_struct *q;
-
drivers/media/i2c/cx25840/cx25840-core.c:1034:2-1034:27: struct workqueue_struct *q;
-
drivers/media/pci/bt8xx/bttv-driver.c:2196:2-2196:29: struct videobuf_queue* q = NULL;
-
drivers/media/pci/bt8xx/bttv-driver.c:2230:2-2230:42: struct videobuf_queue *q = bttv_queue(fh);
-
drivers/media/pci/cobalt/cobalt-v4l2.c:125:2-125:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/pci/cobalt/cobalt-v4l2.c:1205:2-1205:28: struct vb2_queue *q = &s->q;
-
drivers/media/pci/cx18/cx18-fileops.c:291:3-291:13: const u8 *q;
-
drivers/media/pci/cx18/cx18-ioctl.c:808:2-808:29: struct videobuf_queue *q = NULL;
-
drivers/media/pci/cx18/cx18-streams.c:678:2-678:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-streams.c:700:2-700:21: struct cx18_queue *q;
-
drivers/media/pci/cx18/cx18-vbi.c:99:2-99:10: u8 *q = buf;
-
drivers/media/pci/cx23885/cx23885-417.c:1496:2-1496:20: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-core.c:1647:2-1647:38: struct cx23885_dmaqueue *q = &port->mpegq;
-
drivers/media/pci/cx23885/cx23885-dvb.c:2656:3-2656:21: struct vb2_queue *q;
-
drivers/media/pci/cx23885/cx23885-vbi.c:189:2-189:37: struct cx23885_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx23885/cx23885-video.c:461:2-461:40: struct cx23885_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx23885/cx23885-video.c:1238:2-1238:20: struct vb2_queue *q;
-
drivers/media/pci/cx25821/cx25821-video.c:243:2-243:56: struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
-
drivers/media/pci/cx25821/cx25821-video.c:681:3-681:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-blackbird.c:1157:2-1157:20: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-dvb.c:1766:3-1766:21: struct vb2_queue *q;
-
drivers/media/pci/cx88/cx88-mpeg.c:274:2-274:34: struct cx88_dmaqueue *q = &dev->mpegq;
-
drivers/media/pci/cx88/cx88-vbi.c:173:2-173:38: struct cx88_dmaqueue *q = &dev->vbiq;
-
drivers/media/pci/cx88/cx88-video.c:506:2-506:38: struct cx88_dmaqueue *q = &dev->vidq;
-
drivers/media/pci/cx88/cx88-video.c:1261:2-1261:20: struct vb2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:540:2-540:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:802:2-802:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:890:2-891:3: struct cio2_queue *q =
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:978:2-978:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1023:2-1023:46: struct cio2_queue *q = vb2q_to_cio2_queue(vq);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1074:2-1074:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1114:2-1114:48: struct cio2_queue *q = file_to_cio2_queue(file);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1227:2-1227:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1253:2-1253:25: struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1320:2-1320:25: struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1386:2-1386:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1418:2-1418:21: struct cio2_queue *q;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:1967:2-1967:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c:2000:2-2000:31: struct cio2_queue *q = cio2->cur_queue;
-
drivers/media/pci/ivtv/ivtv-fileops.c:297:3-297:13: const u8 *q;
-
drivers/media/pci/ivtv/ivtv-fileops.c:543:2-543:20: struct ivtv_queue q;
-
drivers/media/pci/ivtv/ivtv-vbi.c:305:2-305:10: u8 *q = buf;
-
drivers/media/pci/saa7134/saa7134-core.c:335:2-335:31: struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
-
drivers/media/pci/saa7134/saa7134-dvb.c:1218:2-1218:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-empress.c:245:2-245:20: struct vb2_queue *q;
-
drivers/media/pci/saa7134/saa7134-video.c:2037:2-2037:20: struct vb2_queue *q;
-
drivers/media/pci/saa7164/saa7164-cmd.c:73:2-73:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:125:2-125:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-cmd.c:246:2-246:25: wait_queue_head_t *q = NULL;
-
drivers/media/pci/saa7164/saa7164-dvb.c:195:2-195:24: struct list_head *p, *q;
-
drivers/media/pci/saa7164/saa7164-encoder.c:61:2-61:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/pci/saa7164/saa7164-vbi.c:30:2-30:32: struct list_head *c, *n, *p, *q, *l, *v;
-
drivers/media/platform/allegro-dvt/allegro-core.c:2810:2-2810:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/amphion/vdec.c:194:2-194:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vdec.c:334:2-334:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/venc.c:208:2-208:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:112:2-112:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:349:2-349:20: struct vb2_queue *q;
-
drivers/media/platform/amphion/vpu_v4l2.c:452:2-452:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/atmel/atmel-isc-base.c:1846:2-1846:30: struct vb2_queue *q = &isc->vb2_vidq;
-
drivers/media/platform/atmel/atmel-isi.c:1189:2-1189:20: struct vb2_queue *q;
-
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c:1164:2-1164:36: struct mtk_jpeg_q_data *q = &ctx->out_q;
-
drivers/media/platform/nxp/fsl-viu.c:1245:2-1245:34: struct videobuf_queue *q = &fh->vb_vidq;
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1082:2-1082:20: struct vb2_queue *q;
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1482:2-1482:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c:1545:2-1545:46: struct mxc_jpeg_q_data *q[2] = {out_q, cap_q};
-
drivers/media/platform/qcom/camss/camss-video.c:960:2-960:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/helpers.c:1592:2-1592:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/vdec.c:304:2-304:20: struct vb2_queue *q;
-
drivers/media/platform/qcom/venus/venc.c:238:2-238:20: struct vb2_queue *q;
-
drivers/media/platform/renesas/rcar-vin/rcar-dma.c:1454:2-1454:30: struct vb2_queue *q = &vin->queue;
-
drivers/media/platform/renesas/rcar_drif.c:928:2-928:30: struct vb2_queue *q = &sdr->vb_queue;
-
drivers/media/platform/renesas/renesas-ceu.c:1404:2-1404:33: struct vb2_queue *q = &ceudev->vb2_vq;
-
drivers/media/platform/renesas/sh_vou.c:1228:2-1228:20: struct vb2_queue *q;
-
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c:1349:2-1349:20: struct vb2_queue *q;
-
drivers/media/platform/samsung/exynos4-is/fimc-capture.c:1714:2-1714:39: struct vb2_queue *q = &fimc->vid_cap.vbq;
-
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c:571:2-571:44: struct vb2_queue *q = &isp->video_capture.vb_queue;
-
drivers/media/platform/samsung/exynos4-is/fimc-lite.c:1243:2-1243:31: struct vb2_queue *q = &fimc->vb_queue;
-
drivers/media/platform/samsung/s3c-camif/camif-capture.c:1102:2-1102:29: struct vb2_queue *q = &vp->vb_queue;
-
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c:756:2-756:20: struct vb2_queue *q;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1116:2-1116:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1296:2-1296:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1462:2-1462:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1508:2-1508:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/st/sti/delta/delta-v4l2.c:1585:2-1585:20: struct vb2_queue *q;
-
drivers/media/platform/st/stm32/stm32-dcmi.c:1888:2-1888:20: struct vb2_queue *q;
-
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c:403:2-403:30: struct vb2_queue *q = &csi->queue;
-
drivers/media/platform/ti/am437x/am437x-vpfe.c:2211:2-2211:20: struct vb2_queue *q;
-
drivers/media/platform/ti/cal/cal-video.c:252:2-252:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/cal/cal-video.c:978:2-978:30: struct vb2_queue *q = &ctx->vb_vidq;
-
drivers/media/platform/ti/davinci/vpbe_display.c:195:2-195:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/ti/davinci/vpbe_display.c:1365:2-1365:20: struct vb2_queue *q;
-
drivers/media/platform/ti/davinci/vpif_capture.c:71:2-71:28: struct vb2_queue *q = vb->vb2_queue;
-
drivers/media/platform/ti/davinci/vpif_capture.c:1401:2-1401:20: struct vb2_queue *q;
-
drivers/media/platform/ti/davinci/vpif_display.c:1123:2-1123:20: struct vb2_queue *q;
-
drivers/media/radio/radio-gemtek.c:153:2-153:14: int i, bit, q, mute;
-
drivers/media/radio/radio-gemtek.c:257:2-257:9: int i, q;
-
drivers/media/test-drivers/vimc/vimc-capture.c:404:2-404:20: struct vb2_queue *q;
-
drivers/media/test-drivers/vivid/vivid-sdr-cap.c:469:2-469:30: struct vb2_queue *q = &dev->vb_sdr_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-cap.c:672:2-672:30: struct vb2_queue *q = &dev->vb_vid_cap_q;
-
drivers/media/test-drivers/vivid/vivid-vid-out.c:454:2-454:30: struct vb2_queue *q = &dev->vb_vid_out_q;
-
drivers/media/tuners/max2165.c:153:2-153:6: u32 q, f = 0;
-
drivers/media/usb/airspy/airspy.c:646:2-646:28: struct vb2_queue *q = &s->vb_queue;
-
drivers/media/usb/au0828/au0828-video.c:290:2-290:36: struct vb2_queue *q = vb->vb2_buf.vb2_queue;
-
drivers/media/usb/au0828/au0828-video.c:1806:2-1806:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-417.c:1739:2-1739:20: struct vb2_queue *q;
-
drivers/media/usb/cx231xx/cx231xx-video.c:1757:2-1757:20: struct vb2_queue *q;
-
drivers/media/usb/em28xx/em28xx-video.c:1249:2-1249:20: struct vb2_queue *q;
-
drivers/media/usb/go7007/go7007-fw.c:930:2-930:10: int q = 0;
-
drivers/media/usb/gspca/gspca.c:1452:2-1452:20: struct vb2_queue *q;
-
drivers/media/usb/hackrf/hackrf.c:918:2-918:20: struct vb2_queue *q;
-
drivers/media/usb/msi2500/msi2500.c:923:2-923:30: struct vb2_queue *q = &dev->vb_queue;
-
drivers/media/usb/s2255/s2255drv.c:813:2-813:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1098:2-1098:29: struct vb2_queue *q = &vc->vb_vidq;
-
drivers/media/usb/s2255/s2255drv.c:1590:2-1590:20: struct vb2_queue *q;
-
drivers/media/usb/stk1160/stk1160-v4l.c:487:2-487:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:522:2-522:30: struct vb2_queue *q = &dev->vb_vidq;
-
drivers/media/usb/stk1160/stk1160-v4l.c:783:2-783:20: struct vb2_queue *q;
-
drivers/media/usb/zr364xx/zr364xx.c:812:2-812:35: struct videobuf_queue *q = &cam->vb_vidq;
-
drivers/media/usb/zr364xx/zr364xx.c:1275:2-1275:35: struct videobuf_queue *q = &cam->vb_vidq;
-
drivers/media/v4l2-core/videobuf-dma-contig.c:76:2-76:34: struct videobuf_queue *q = map->q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:397:2-397:34: struct videobuf_queue *q = map->q;
-
drivers/media/v4l2-core/videobuf-dma-sg.c:660:2-660:24: struct videobuf_queue q;
-
drivers/media/v4l2-core/videobuf-vmalloc.c:64:2-64:34: struct videobuf_queue *q = map->q;
-
drivers/misc/habanalabs/common/hw_queue.c:44:2-44:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:231:2-231:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:271:2-271:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:335:2-335:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:371:2-371:63: struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:631:2-631:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:797:2-797:58: struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
-
drivers/misc/habanalabs/common/hw_queue.c:1076:2-1076:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:1116:2-1116:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/common/hw_queue.c:1128:2-1128:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:1075:2-1075:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:1739:2-1739:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:1755:2-1755:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:2875:2-2875:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:3021:2-3021:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:3145:2-3145:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:3290:2-3290:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:3423:2-3423:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:4856:2-4856:35: struct gaudi_internal_qman_info *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:7027:2-7027:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/gaudi/gaudi.c:7427:2-7427:68: struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
-
drivers/misc/habanalabs/goya/goya.c:1195:2-1195:22: struct hl_hw_queue *q;
-
drivers/misc/habanalabs/goya/goya.c:4497:2-4497:67: struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
-
drivers/misc/uacce/uacce.c:66:2-66:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:128:2-128:22: struct uacce_queue *q;
-
drivers/misc/uacce/uacce.c:171:2-171:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:185:2-185:31: struct uacce_queue *q = vma->vm_private_data;
-
drivers/misc/uacce/uacce.c:200:2-200:33: struct uacce_queue *q = filep->private_data;
-
drivers/misc/uacce/uacce.c:263:2-263:32: struct uacce_queue *q = file->private_data;
-
drivers/misc/uacce/uacce.c:499:2-499:22: struct uacce_queue *q, *next_q;
-
drivers/mmc/core/block.c:1444:2-1444:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:1512:2-1512:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2027:2-2027:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2183:2-2183:33: struct request_queue *q = req->q;
-
drivers/mmc/core/block.c:2770:2-2770:26: struct list_head *pos, *q;
-
drivers/mmc/core/queue.c:85:2-85:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:122:2-122:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:140:2-140:32: struct request_queue *q = mq->queue;
-
drivers/mmc/core/queue.c:231:2-231:33: struct request_queue *q = req->q;
-
drivers/mmc/core/queue.c:488:2-488:32: struct request_queue *q = mq->queue;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:1644:2-1644:26: struct list_head *pos, *q, *last;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:1679:2-1679:26: struct list_head *pos, *q, *last;
-
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:116:2-116:19: unsigned int tc, q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:455:2-455:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:1009:2-1009:15: unsigned int q;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2286:2-2286:15: unsigned int q, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2324:2-2324:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bcmsysport.c:2379:2-2379:15: unsigned int q, qp, port;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:5388:2-5388:6: int q, rc;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:14392:2-14392:26: struct list_head *pos, *q;
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:2067:3-2067:43: struct bnx2x_vf_queue *q = vfq_get(vf, i);
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1536:3-1536:57: struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3542:2-3542:15: unsigned int q;
-
drivers/net/ethernet/broadcom/genet/bcmgenet.c:3657:2-3657:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:442:2-442:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:650:2-650:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:675:2-675:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1728:2-1728:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:1948:2-1948:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2364:2-2364:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2406:2-2406:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2432:2-2432:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2468:2-2468:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2511:2-2511:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2552:2-2552:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2651:2-2651:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2848:2-2848:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2909:2-2909:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:2951:2-2951:18: unsigned int i, q, idx;
-
drivers/net/ethernet/cadence/macb_main.c:3047:2-3047:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:3959:2-3959:21: unsigned int hw_q, q;
-
drivers/net/ethernet/cadence/macb_main.c:4130:2-4130:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4157:2-4157:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4179:2-4179:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:4336:2-4336:38: struct macb_queue *q = &lp->queues[0];
-
drivers/net/ethernet/cadence/macb_main.c:5032:2-5032:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_main.c:5121:2-5121:15: unsigned int q;
-
drivers/net/ethernet/cadence/macb_ptp.c:367:2-367:15: unsigned int q;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:461:2-461:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_core.c:813:2-813:6: int q, q_no;
-
drivers/net/ethernet/cavium/liquidio/lio_main.c:472:2-472:6: int q, iq;
-
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c:369:2-369:23: int mbox, key, stat, q;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:483:3-483:32: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:539:3-539:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:563:3-563:36: struct freelQ *q = &sge->freelQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:662:3-662:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:686:3-686:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1320:2-1320:31: struct cmdQ *q = &sge->cmdQ[0];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1481:2-1481:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1571:2-1571:26: struct respQ *q = &sge->respQ;
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1710:2-1710:33: struct cmdQ *q = &sge->cmdQ[qid];
-
drivers/net/ethernet/chelsio/cxgb/sge.c:1937:3-1937:32: struct cmdQ *q = &sge->cmdQ[i];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1172:3-1172:41: struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1957:2-1957:72: const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:1975:2-1975:22: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2032:2-2032:46: struct qset_params *q = adapter->params.sge.qset;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2152:3-2152:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c:2253:3-2253:23: struct qset_params *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1273:2-1273:18: struct sge_txq *q;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1530:2-1530:39: struct sge_txq *q = &qs->txq[TXQ_CTRL];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1750:2-1750:39: struct sge_txq *q = &qs->txq[TXQ_OFLD];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1904:2-1904:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2331:2-2331:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2535:2-2535:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2614:2-2614:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2632:2-2632:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2652:2-2652:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2681:2-2681:28: struct sge_rspq *q = &qs->rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2702:2-2702:40: struct sge_rspq *q = &adap->sge.qs[0].rspq;
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3046:2-3046:42: struct sge_qset *q = &adapter->sge.qs[id];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3219:3-3219:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3242:3-3242:39: struct sge_qset *q = &adap->sge.qs[i];
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:3375:3-3375:37: struct qset_params *q = p->qset + i;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:952:2-952:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:967:2-967:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:979:2-979:58: struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:928:3-928:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:974:3-974:44: struct sge_rspq *q = adap->sge.ingr_map[i];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:1062:3-1062:52: struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:112:2-112:37: struct sge_ofld_rxq *q = rxq_info->uldrxq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:376:3-376:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:391:3-391:47: struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1422:2-1422:27: struct sge_txq *q = &eq->q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:1516:2-1516:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2674:2-2674:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:2794:2-2794:27: struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3098:2-3098:26: struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:3991:2-3991:23: struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4149:2-4149:23: struct sge_rspq *q = cookie;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4163:2-4163:34: struct sge_rspq *q = &adap->sge.intrq;
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4300:3-4300:31: struct sge_eth_txq *q = &s->ptptxq;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:414:2-414:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:465:2-465:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:510:2-510:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:574:2-574:65: struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c:713:2-713:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:1936:2-1936:22: struct sge_eth_txq *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:2993:2-2993:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3009:2-3009:24: struct be_queue_info *q, *cq;
-
drivers/net/ethernet/emulex/benet/be_main.c:3041:2-3041:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3105:2-3105:24: struct be_queue_info *q;
-
drivers/net/ethernet/emulex/benet/be_main.c:3566:2-3566:24: struct be_queue_info *q;
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:3782:2-3782:32: struct dpni_queue q = { { 0 } };
-
drivers/net/ethernet/freescale/fec_main.c:863:2-863:15: unsigned int q;
-
drivers/net/ethernet/freescale/fec_main.c:2965:2-2965:15: unsigned int q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:511:2-511:30: struct funeth_rxq *q = irq->rxq;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:618:2-618:21: struct funeth_rxq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_rx.c:790:2-790:26: struct funeth_rxq *q = *qp;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:306:2-306:37: struct funeth_txq *q = fp->txqs[qid];
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:431:2-431:30: struct funeth_txq *q = irq->txq;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:534:2-534:21: struct funeth_txq *q, **xdpqs;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:592:2-592:21: struct funeth_txq *q;
-
drivers/net/ethernet/fungible/funeth/funeth_tx.c:726:2-726:26: struct funeth_txq *q = *qp;
-
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:297:2-297:21: struct hnae_queue *q;
-
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:249:2-249:37: struct hnae_queue *q = &ring_pair->q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:2764:3-2764:24: struct netdev_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:4981:2-4981:32: struct hnae3_queue *q = ring->tqp;
-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c:5013:4-5013:24: struct hnae3_queue *q;
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:1775:3-1776:4: struct hclge_comm_tqp *q =
-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:918:4-918:58: struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:612:3-612:53: struct fm10k_hw_stats_q *q = &interface->stats.q[i];
-
drivers/net/ethernet/intel/fm10k/fm10k_pci.c:1333:2-1333:6: int q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:792:2-792:6: u16 q;
-
drivers/net/ethernet/intel/i40e/i40e_main.c:3648:2-3648:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2108:2-2108:9: int i, q;
-
drivers/net/ethernet/intel/ice/ice_lib.c:2663:2-2663:9: int i, q;
-
drivers/net/ethernet/intel/igb/e1000_nvm.c:690:2-690:5: u8 q, hval, rem, result;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:773:2-773:31: int val, cm3_state, host_id, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:834:2-834:22: int val, cm3_state, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1951:2-1951:9: int i, q;
-
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:2036:2-2036:9: int i, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:153:2-153:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:180:2-180:6: int q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:637:2-637:5: u8 q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:671:2-671:10: int q = 0;
-
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c:682:2-682:21: u8 srn, num_rings, q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:128:2-128:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:183:2-183:28: struct octep_ctrl_mbox_q *q;
-
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c:140:2-140:6: int q, i;
-
drivers/net/ethernet/marvell/octeon_ep/octep_main.c:757:2-757:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:283:2-283:6: int q;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:339:2-339:6: int q, b;
-
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:375:2-375:6: int q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:681:2-681:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:794:2-794:30: struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1389:2-1389:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1651:2-1651:67: struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
-
drivers/net/ethernet/mellanox/mlxsw/pci.c:1660:2-1660:26: struct mlxsw_pci_queue *q;
-
drivers/net/ethernet/microsoft/mana/mana_en.c:306:2-306:6: int q;
-
drivers/net/ethernet/microsoft/mana/mana_ethtool.c:75:2-75:6: int q, i = 0;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:77:2-77:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:87:2-87:31: struct ionic_queue *q = seq->private;
-
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c:122:2-122:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_dev.c:534:2-534:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:216:2-216:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:257:2-257:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:293:2-293:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:728:2-728:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:788:2-788:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:1090:2-1090:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3074:2-3074:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_lif.c:3116:2-3116:32: struct ionic_queue *q = &qcq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:217:2-217:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_main.c:296:2-296:22: struct ionic_queue *q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:316:2-316:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:746:2-746:30: struct ionic_queue *q = cq->bound_q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1183:2-1183:45: struct ionic_queue *q = &lif->hwstamp_txq->q;
-
drivers/net/ethernet/pensando/ionic/ionic_txrx.c:1219:2-1219:22: struct ionic_queue *q;
-
drivers/net/ethernet/renesas/ravb_main.c:1174:3-1174:7: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1280:2-1280:23: int q = napi - priv->napi;
-
drivers/net/ethernet/renesas/ravb_main.c:1563:2-1563:6: int q;
-
drivers/net/ethernet/renesas/ravb_main.c:1938:2-1938:35: u16 q = skb_get_queue_mapping(skb);
-
drivers/net/ethernet/renesas/ravb_main.c:2620:2-2620:18: int error, irq, q;
-
drivers/net/ethernet/sfc/siena/tx.c:115:2-115:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/sfc/tx.c:298:2-298:23: struct efx_tx_queue *q;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:547:2-547:6: int q, stat;
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c:659:2-659:6: int q, stat;
-
drivers/net/ethernet/ti/davinci_emac.c:1417:2-1417:6: int q, m, ret;
-
drivers/net/phy/sfp-bus.c:113:2-113:26: const struct sfp_quirk *q;
-
drivers/net/ppp/ppp_generic.c:1922:2-1922:21: unsigned char *p, *q;
-
drivers/net/tap.c:300:2-300:20: struct tap_queue *q, *tmp;
-
drivers/net/tap.c:323:2-323:20: struct tap_queue *q;
-
drivers/net/tap.c:504:2-504:24: struct tap_queue *q = container_of(sk, struct tap_queue, sk);
-
drivers/net/tap.c:513:2-513:20: struct tap_queue *q;
-
drivers/net/tap.c:575:2-575:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:582:2-582:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:771:2-771:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:887:2-887:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:916:2-916:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:989:2-989:30: struct tap_queue *q = file->private_data;
-
drivers/net/tap.c:1211:2-1211:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1232:2-1232:24: struct tap_queue *q = container_of(sock, struct tap_queue, sock);
-
drivers/net/tap.c:1249:2-1249:24: struct tap_queue *q = container_of(sock, struct tap_queue,
-
drivers/net/tap.c:1267:2-1267:20: struct tap_queue *q;
-
drivers/net/tap.c:1279:2-1279:20: struct tap_queue *q;
-
drivers/net/tap.c:1293:2-1293:20: struct tap_queue *q;
-
drivers/net/usb/catc.c:472:2-472:50: struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
-
drivers/net/usb/catc.c:501:2-501:21: struct ctrl_queue *q;
-
drivers/net/usb/catc.c:536:2-536:21: struct ctrl_queue *q;
-
drivers/net/wireless/ath/ath10k/mac.c:3940:2-3940:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath11k/mac.c:5580:2-5580:32: struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
-
drivers/net/wireless/ath/ath9k/mac.c:137:2-137:9: int i, q;
-
drivers/net/wireless/ath/ath9k/mac.c:298:2-298:6: int q;
-
drivers/net/wireless/ath/ath9k/xmit.c:105:2-105:22: struct sk_buff_head q;
-
drivers/net/wireless/ath/ath9k/xmit.c:209:2-209:14: int q = fi->txq;
-
drivers/net/wireless/ath/ath9k/xmit.c:239:2-239:6: int q, ret;
-
drivers/net/wireless/ath/ath9k/xmit.c:801:2-801:20: int q = tid->txq->mac80211_qnum;
-
drivers/net/wireless/ath/ath9k/xmit.c:2328:2-2328:6: int q, ret;
-
drivers/net/wireless/ath/carl9170/tx.c:663:2-663:21: unsigned int r, t, q;
-
drivers/net/wireless/ath/carl9170/tx.c:1278:2-1278:14: uint8_t q = 0;
-
drivers/net/wireless/ath/carl9170/tx.c:1344:2-1344:18: unsigned int i, q;
-
drivers/net/wireless/ath/wil6210/netdev.c:232:2-232:7: bool q;
-
drivers/net/wireless/ath/wil6210/txrx.c:838:2-838:11: bool q = false;
-
drivers/net/wireless/ath/wil6210/wmi.c:1931:3-1931:8: bool q;
-
drivers/net/wireless/broadcom/b43/phy_g.c:2336:2-2336:23: s32 m1, m2, f = 256, q, delta;
-
drivers/net/wireless/broadcom/b43/pio.c:49:2-49:30: struct b43_pio_txqueue *q = NULL;
-
drivers/net/wireless/broadcom/b43/pio.c:126:2-126:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:162:2-162:26: struct b43_pio_rxqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:290:2-290:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:352:2-352:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:422:2-422:36: struct b43_pio_txqueue *q = pack->queue;
-
drivers/net/wireless/broadcom/b43/pio.c:491:2-491:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/pio.c:566:2-566:26: struct b43_pio_txqueue *q;
-
drivers/net/wireless/broadcom/b43/sdio.c:39:2-39:31: const struct b43_sdio_quirk *q;
-
drivers/net/wireless/broadcom/b43legacy/phy.c:1947:2-1947:6: s32 q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:49:2-49:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:68:2-68:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:86:2-86:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:109:2-109:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:126:2-126:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:143:2-143:23: struct sk_buff_head *q;
-
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:222:2-222:23: struct sk_buff_head *q;
-
drivers/net/wireless/intel/ipw2x00/ipw2100.c:4315:2-4315:42: struct ipw2100_status_queue *q = &priv->status_queue;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:3839:2-3839:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:5008:2-5008:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:5037:2-5037:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:10096:2-10096:31: struct clx2_queue *q = &txq->q;
-
drivers/net/wireless/intel/ipw2x00/ipw2200.c:11759:2-11759:24: struct list_head *p, *q;
-
drivers/net/wireless/intel/iwlegacy/3945-mac.c:453:2-453:23: struct il_queue *q = NULL;
-
drivers/net/wireless/intel/iwlegacy/3945.c:275:2-275:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/3945.c:601:2-601:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:1651:2-1651:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2389:2-2389:40: struct il_queue *q = &il->txq[txq_id].q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:2455:2-2455:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/4965-mac.c:3957:2-3957:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2750:2-2750:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:2812:2-2812:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3117:2-3117:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:3238:2-3238:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4488:2-4488:6: int q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4758:3-4758:20: struct il_queue *q;
-
drivers/net/wireless/intel/iwlegacy/common.c:4788:2-4788:29: struct il_queue *q = &txq->q;
-
drivers/net/wireless/intel/iwlegacy/debug.c:818:2-818:19: struct il_queue *q;
-
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c:1162:2-1162:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:462:2-462:6: int q;
-
drivers/net/wireless/intel/iwlwifi/dvm/tx.c:681:2-681:6: int q, fifo;
-
drivers/net/wireless/intel/iwlwifi/iwl-io.c:260:2-260:9: int i, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c:3542:4-3542:13: int tid, q;
-
drivers/net/wireless/intel/iwlwifi/mvm/sta.c:1712:3-1712:7: int q;
-
drivers/net/wireless/marvell/mwl8k.c:5383:4-5383:38: int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
-
drivers/net/wireless/mediatek/mt76/debugfs.c:61:3-61:41: struct mt76_queue *q = dev->phy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/debugfs.c:81:3-81:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/dma.c:596:2-596:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/mac80211.c:755:2-755:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:974:2-974:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76.h:989:2-989:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c:73:2-73:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:451:4-451:43: struct mt76_queue *q = dev->mphy.q_tx[i];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:917:2-917:43: struct mt76_queue *q = dev->mphy.q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/mt7603/mac.c:1533:2-1533:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c:407:3-407:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:18:2-18:50: struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:171:2-171:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c:345:2-345:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c:894:3-894:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7915/mac.c:168:4-168:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c:2684:3-2684:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c:725:2-725:19: enum mt76_rxq_id q;
-
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c:170:3-170:39: struct mt76_queue *q = queue_map[i].q;
-
drivers/net/wireless/mediatek/mt76/mt7921/mac.c:119:4-119:37: u8 q = mt76_connac_lmac_mapping(i);
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:829:3-829:63: struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
-
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c:855:3-855:45: struct ieee80211_he_mu_edca_param_ac_rec *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:306:2-306:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio.c:325:2-325:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:345:2-345:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/sdio.c:612:3-612:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:84:2-84:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/sdio_txrx.c:356:2-356:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/testmode.c:34:2-34:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:280:2-280:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:311:2-311:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/tx.c:505:2-505:38: struct mt76_queue *q = phy->q_tx[qid];
-
drivers/net/wireless/mediatek/mt76/tx.c:701:2-701:21: struct mt76_queue *q, *q2 = NULL;
-
drivers/net/wireless/mediatek/mt76/usb.c:553:2-553:30: struct mt76_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt76/usb.c:638:2-638:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:658:2-658:39: struct mt76_queue *q = &dev->q_rx[qid];
-
drivers/net/wireless/mediatek/mt76/usb.c:725:3-725:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:739:3-739:38: struct mt76_queue *q = &dev->q_rx[i];
-
drivers/net/wireless/mediatek/mt76/usb.c:761:2-761:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:925:2-925:21: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:968:3-968:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt76/usb.c:992:3-992:22: struct mt76_queue *q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:172:2-172:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:193:2-193:37: struct mt7601u_rx_queue *q = &dev->rx_q;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:240:2-240:36: struct mt7601u_tx_queue *q = urb->context;
-
drivers/net/wireless/mediatek/mt7601u/dma.c:311:2-311:44: struct mt7601u_tx_queue *q = &dev->tx_q[ep];
-
drivers/net/wireless/microchip/wilc1000/wlan.c:291:2-291:40: struct wilc_tx_queue_status *q = &wl->tx_q_limit;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:182:2-182:27: struct sk_buff_head *q = NULL;
-
drivers/net/wireless/purelifi/plfxlc/mac.c:349:2-349:23: struct sk_buff_head *q;
-
drivers/net/wireless/realtek/rtw88/mac.c:974:2-974:6: u32 q;
-
drivers/net/wireless/realtek/rtw88/pci.c:775:2-775:5: u8 q;
-
drivers/net/wireless/ti/wlcore/main.c:1208:2-1208:6: int q, mapping;
-
drivers/net/wireless/ti/wlcore/main.c:1275:2-1275:6: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:485:2-485:14: int i, q = -1, ac;
-
drivers/net/wireless/ti/wlcore/tx.c:658:3-658:7: int q;
-
drivers/net/wireless/ti/wlcore/tx.c:676:2-676:56: int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:489:2-489:33: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:582:3-582:34: struct sk_buff_head *q = &mac->ack_wait_queue;
-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c:965:2-965:23: struct sk_buff_head *q;
-
drivers/net/wireless/zydas/zd1211rw/zd_usb.c:1059:2-1059:32: struct sk_buff_head *q = &tx->submitted_skbs;
-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c:81:2-81:30: struct dpmaif_rx_queue *q = arg;
-
drivers/nvdimm/pmem.c:468:2-468:24: struct request_queue *q;
-
drivers/nvme/host/apple.c:736:2-736:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/apple.c:786:2-786:36: struct apple_nvme_queue *q = set->driver_data;
-
drivers/nvme/host/apple.c:869:2-869:36: struct apple_nvme_queue *q = iod->q;
-
drivers/nvme/host/apple.c:929:2-929:37: struct apple_nvme_queue *q = hctx->driver_data;
-
drivers/nvme/host/fc.c:2483:2-2483:6: int q;
-
drivers/nvme/host/ioctl.c:406:2-406:51: struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
-
drivers/nvme/host/pci.c:2485:2-2485:45: struct request_queue *q = nvmeq->dev->ctrl.admin_q;
-
drivers/nvme/host/zns.c:12:2-12:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:52:2-52:32: struct request_queue *q = ns->queue;
-
drivers/nvme/host/zns.c:124:2-124:38: struct request_queue *q = ns->disk->queue;
-
drivers/nvme/target/passthru.c:233:2-233:34: struct request_queue *q = ctrl->admin_q;
-
drivers/nvme/target/zns.c:390:2-390:47: struct request_queue *q = bdev_get_queue(bdev);
-
drivers/of/fdt.c:1024:2-1024:18: const char *p, *q, *options = NULL;
-
drivers/parport/probe.c:56:2-56:18: char *p = txt, *q;
-
drivers/pcmcia/cistpl.c:663:2-663:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:795:2-795:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:812:2-812:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:824:2-824:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1081:2-1081:14: u_char *p, *q, features;
-
drivers/pcmcia/cistpl.c:1204:2-1204:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1228:2-1228:14: u_char *p, *q;
-
drivers/pcmcia/cistpl.c:1249:2-1249:14: u_char *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:110:2-110:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:134:2-134:27: struct resource_map *p, *q;
-
drivers/pcmcia/rsrc_nonstatic.c:1042:2-1042:27: struct resource_map *p, *q;
-
drivers/platform/chrome/wilco_ec/event.c:108:2-108:25: struct ec_event_queue *q;
-
drivers/platform/surface/aggregator/ssh_packet_layer.c:700:2-700:21: struct ssh_packet *q;
-
drivers/scsi/aacraid/commsup.c:361:2-361:21: struct aac_queue * q;
-
drivers/scsi/aacraid/commsup.c:652:6-652:65: struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/commsup.c:875:2-875:21: struct aac_queue * q;
-
drivers/scsi/aacraid/dpcsup.c:278:3-278:61: struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:400:2-400:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/rx.c:423:2-423:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/aacraid/src.c:486:2-486:60: struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-
drivers/scsi/be2iscsi/be_main.c:3447:2-3447:24: struct be_queue_info *q;
-
drivers/scsi/be2iscsi/be_main.c:3507:2-3507:24: struct be_queue_info *q, *cq;
-
drivers/scsi/be2iscsi/be_main.c:3617:2-3617:24: struct be_queue_info *q;
-
drivers/scsi/bfa/bfa_core.c:1318:2-1318:7: int q;
-
drivers/scsi/bfa/bfa_core.c:1474:2-1474:6: int q, per_reqq_sz, per_rspq_sz;
-
drivers/scsi/csiostor/csio_isr.c:428:4-428:50: struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
-
drivers/scsi/csiostor/csio_wr.c:191:2-191:17: struct csio_q *q, *flq;
-
drivers/scsi/csiostor/csio_wr.c:747:2-747:51: struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:765:2-765:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:789:2-789:17: struct csio_q *q;
-
drivers/scsi/csiostor/csio_wr.c:867:2-867:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:985:2-985:36: struct csio_q *q = wrm->q_arr[qidx];
-
drivers/scsi/csiostor/csio_wr.c:1691:2-1691:17: struct csio_q *q;
-
drivers/scsi/elx/efct/efct_hw_queues.c:406:2-406:15: struct hw_q *q;
-
drivers/scsi/elx/libefc_sli/sli4.c:4121:2-4121:18: enum sli4_qtype q;
-
drivers/scsi/esas2r/esas2r_flash.c:331:2-331:10: u8 *p, *q;
-
drivers/scsi/fnic/fnic_scsi.c:2193:2-2193:32: struct request_queue *q = rq->q;
-
drivers/scsi/hpsa.c:7019:2-7019:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7040:2-7040:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7056:2-7056:17: u8 q = *(u8 *) queue;
-
drivers/scsi/hpsa.c:7075:2-7075:17: u8 q = *(u8 *) queue;
-
drivers/scsi/ips.c:2531:2-2531:20: struct scsi_cmnd *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:557:2-557:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_dbg.c:604:2-604:31: struct qla2xxx_mqueue_chain *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1583:2-1583:23: struct enode *node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1686:2-1686:29: struct enode *list_node, *q;
-
drivers/scsi/qla2xxx/qla_edif.c:1883:2-1883:26: struct edb_node *node, *q;
-
drivers/scsi/qla2xxx/qla_init.c:5310:2-5310:10: __be32 *q;
-
drivers/scsi/qla2xxx/qla_os.c:5043:2-5043:11: bool q = false;
-
drivers/scsi/qla2xxx/qla_os.c:7391:3-7391:12: bool q = false;
-
drivers/scsi/scsi_ioctl.c:879:2-879:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:467:2-467:24: struct request_queue *q;
-
drivers/scsi/scsi_lib.c:541:2-541:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_lib.c:689:2-689:41: struct request_queue *q = cmd->device->request_queue;
-
drivers/scsi/scsi_lib.c:952:2-952:41: struct request_queue *q = cmd->device->request_queue;
-
drivers/scsi/scsi_lib.c:1700:2-1700:33: struct request_queue *q = req->q;
-
drivers/scsi/scsi_lib.c:2612:2-2612:34: struct request_queue *q = sdev->request_queue;
-
drivers/scsi/scsi_scan.c:283:2-283:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4162:2-4162:35: struct request_queue *q = rport->rqst_q;
-
drivers/scsi/scsi_transport_fc.c:4275:2-4275:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_fc.c:4310:2-4310:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_iscsi.c:1537:2-1537:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:192:2-192:24: struct request_queue *q;
-
drivers/scsi/scsi_transport_sas.c:246:2-246:54: struct request_queue *q = to_sas_host_attrs(shost)->q;
-
drivers/scsi/sd.c:784:2-784:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:963:2-963:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:2913:2-2913:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd.c:3213:2-3213:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:199:2-199:40: struct request_queue *q = sdkp->disk->queue;
-
drivers/scsi/sd_zbc.c:823:2-823:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sd_zbc.c:909:2-909:34: struct request_queue *q = disk->queue;
-
drivers/scsi/sg.c:287:2-287:24: struct request_queue *q;
-
drivers/scsi/sg.c:1437:2-1437:36: struct request_queue *q = scsidp->request_queue;
-
drivers/scsi/sg.c:1731:2-1731:51: struct request_queue *q = sfp->parentdp->device->request_queue;
-
drivers/scsi/sym53c8xx_2/sym_malloc.c:97:2-97:11: m_link_p q;
-
drivers/spi/spi-fsl-qspi.c:342:2-342:23: struct fsl_qspi *q = dev_id;
-
drivers/spi/spi-fsl-qspi.c:371:2-371:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:644:2-644:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:706:2-706:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:812:2-812:66: struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
-
drivers/spi/spi-fsl-qspi.c:849:2-849:19: struct fsl_qspi *q;
-
drivers/spi/spi-fsl-qspi.c:954:2-954:48: struct fsl_qspi *q = platform_get_drvdata(pdev);
-
drivers/spi/spi-fsl-qspi.c:974:2-974:42: struct fsl_qspi *q = dev_get_drvdata(dev);
-
drivers/spi/spi-pxa2xx.c:817:2-817:16: unsigned long q, q1, q2;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:212:2-212:22: ia_css_queue_t *q = NULL;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:331:2-331:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:359:2-359:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:389:2-389:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:409:2-409:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:431:2-431:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:451:2-451:18: ia_css_queue_t *q;
-
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c:470:2-470:18: ia_css_queue_t *q;
-
drivers/staging/media/hantro/hantro_drv.c:48:2-48:59: struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
-
drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c:84:2-84:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/staging/media/hantro/hantro_g1_vp8_dec.c:179:2-179:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c:86:2-86:39: struct v4l2_ctrl_mpeg2_quantisation *q;
-
drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c:317:2-317:48: const struct v4l2_vp8_quantization *q = &hdr->quant;
-
drivers/staging/media/ipu3/ipu3-css.c:1062:2-1063:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1076:2-1077:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1114:2-1115:19: struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
-
drivers/staging/media/ipu3/ipu3-css.c:1357:2-1357:6: int q, r, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1394:2-1394:6: int q;
-
drivers/staging/media/ipu3/ipu3-css.c:1425:2-1425:18: unsigned int p, q, i;
-
drivers/staging/media/ipu3/ipu3-css.c:1469:2-1469:18: unsigned int p, q, i, abi_buf_num;
-
drivers/staging/media/ipu3/ipu3-css.c:1506:2-1506:9: int r, q, pipe;
-
drivers/staging/media/ipu3/ipu3-css.c:1702:2-1702:25: struct imgu_css_queue *q;
-
drivers/staging/media/omap4iss/iss_video.c:1101:2-1101:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c:1848:2-1848:20: struct vb2_queue *q;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1300:2-1300:20: struct list_head *q, *buf_head;
-
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c:1350:2-1350:20: struct list_head *q, *buf_head;
-
drivers/target/target_core_device.c:738:3-738:27: struct se_device_queue *q;
-
drivers/target/target_core_iblock.c:82:2-82:24: struct request_queue *q;
-
drivers/target/target_core_iblock.c:828:2-828:45: struct request_queue *q = bdev_get_queue(bd);
-
drivers/target/target_core_pscsi.c:288:2-288:32: struct request_queue *q = sd->request_queue;
-
drivers/thunderbolt/quirks.c:53:3-53:42: const struct tb_quirk *q = &tb_quirks[i];
-
drivers/tty/vt/consolemap.c:203:2-203:17: unsigned char *q;
-
drivers/tty/vt/consolemap.c:228:2-228:7: u16 *q;
-
drivers/tty/vt/consolemap.c:300:2-300:30: struct uni_pagedir *p, *q = NULL;
-
drivers/tty/vt/consolemap.c:437:2-437:22: struct uni_pagedir *q;
-
drivers/tty/vt/consolemap.c:506:2-506:26: struct uni_pagedir *p, *q;
-
drivers/tty/vt/consolemap.c:539:2-539:26: struct uni_pagedir *p, *q;
-
drivers/tty/vt/consolemap.c:662:2-662:7: u16 *q;
-
drivers/tty/vt/consolemap.c:717:2-717:22: struct uni_pagedir *q;
-
drivers/tty/vt/vt.c:662:3-662:12: u16 *q = p;
-
drivers/tty/vt/vt.c:767:3-767:12: u16 *q = p;
-
drivers/ufs/core/ufs_bsg.c:205:2-205:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:5037:2-5037:34: struct request_queue *q = sdev->request_queue;
-
drivers/ufs/core/ufshcd.c:6120:2-6120:24: struct request_queue *q;
-
drivers/ufs/core/ufshcd.c:6680:2-6680:33: struct request_queue *q = hba->tmf_queue;
-
drivers/ufs/core/ufshpb.c:687:2-687:24: struct request_queue *q;
-
drivers/usb/core/devio.c:678:2-678:24: struct list_head *p, *q, hitlist;
-
drivers/usb/host/ehci-sched.c:2358:2-2358:20: union ehci_shadow q, *q_p;
-
drivers/usb/host/fotg210-hcd.c:3339:2-3339:51: union fotg210_shadow *q = &fotg210->pshadow[frame];
-
drivers/usb/host/fotg210-hcd.c:4583:2-4583:23: union fotg210_shadow q, *q_p;
-
drivers/usb/host/oxu210hp-hcd.c:2270:2-2270:44: union ehci_shadow *q = &oxu->pshadow[frame];
-
drivers/usb/host/oxu210hp-hcd.c:2692:3-2692:21: union ehci_shadow q, *q_p;
-
drivers/vhost/scsi.c:565:4-565:33: struct vhost_scsi_virtqueue *q;
-
drivers/video/fbdev/aty/mach64_ct.c:209:2-209:6: u32 q;
-
drivers/video/fbdev/aty/mach64_ct.c:405:2-405:6: u32 q, memcntl, trp;
-
drivers/video/fbdev/hgafb.c:282:2-282:20: void __iomem *p, *q;
-
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c:416:2-416:20: unsigned itc, ec, q, sc;
-
drivers/xen/events/events_fifo.c:105:2-105:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:279:2-279:33: struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
-
drivers/xen/events/events_fifo.c:324:2-324:11: unsigned q;
-
drivers/xen/gntdev-dmabuf.c:678:2-678:24: struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
-
drivers/xen/gntdev-dmabuf.c:723:2-723:24: struct gntdev_dmabuf *q, *gntdev_dmabuf;
-
fs/afs/addr_list.c:136:3-136:15: const char *q, *stop;
-
fs/autofs/expire.c:101:2-101:17: struct dentry *q;
-
fs/ceph/caps.c:898:5-898:21: struct rb_node *q;
-
fs/cifs/smb2ops.c:709:2-709:33: struct cached_dirent *dirent, *q;
-
fs/configfs/dir.c:1612:2-1612:37: struct list_head *p, *q = &cursor->s_sibling;
-
fs/dcache.c:1907:2-1907:14: struct qstr q;
-
fs/efivarfs/super.c:89:2-89:14: struct qstr q;
-
fs/erofs/zdata.c:1175:2-1175:34: struct z_erofs_decompressqueue *q;
-
fs/erofs/zdata.c:1238:2-1238:38: struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-
fs/erofs/zdata.c:1270:2-1270:48: struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-
fs/ext4/namei.c:781:2-781:38: struct dx_entry *at, *entries, *p, *q, *m;
-
fs/ext4/namei.c:1305:2-1305:27: struct dx_map_entry *p, *q, *top = map + count - 1;
-
fs/f2fs/checkpoint.c:1796:2-1796:32: wait_queue_head_t *q = &cprc->ckpt_wait_queue;
-
fs/f2fs/segment.c:513:2-513:31: wait_queue_head_t *q = &fcc->flush_wait_queue;
-
fs/f2fs/segment.c:1658:2-1658:31: wait_queue_head_t *q = &dcc->discard_wait_queue;
-
fs/fs_context.c:409:3-411:31: char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
-
fs/fs_pin.c:88:3-88:22: struct hlist_node *q;
-
fs/gfs2/quota.c:835:2-835:20: struct gfs2_quota q;
-
fs/gfs2/quota.c:989:2-989:20: struct gfs2_quota q;
-
fs/hpfs/alloc.c:122:2-122:14: unsigned i, q;
-
fs/hpfs/ea.c:289:4-289:44: secno q = hpfs_alloc_sector(s, fno, 1, 0);
-
fs/inode.c:2296:2-2296:2: DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
fs/jffs2/compr_rubin.c:202:2-202:35: unsigned long p = rs->p, q = rs->q;
-
fs/namespace.c:1878:2-1878:26: struct mount *res, *p, *q, *r, *parent;
-
fs/namespace.c:2252:3-2252:17: struct mount *q;
-
fs/namespace.c:3457:2-3457:20: struct mount *p, *q;
-
fs/nfs/nfs4proc.c:7448:2-7448:31: wait_queue_head_t *q = &clp->cl_lock_waitq;
-
fs/proc/base.c:503:4-503:8: int q;
-
fs/proc/bootconfig.c:31:2-31:7: char q;
-
fs/ufs/inode.c:131:2-131:26: Indirect chain[4], *q = chain;
-
fs/xfs/xfs_dquot.c:73:2-73:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_dquot.c:183:2-183:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_qm_syscalls.c:279:2-279:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_quotaops.c:60:2-60:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
fs/xfs/xfs_trans_dquot.c:626:2-626:32: struct xfs_quotainfo *q = mp->m_quotainfo;
-
include/linux/blkdev.h:1287:2-1287:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1318:2-1318:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1328:2-1328:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1338:2-1338:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1347:2-1347:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/linux/blkdev.h:1356:2-1356:47: struct request_queue *q = bdev_get_queue(bdev);
-
include/net/pkt_cls.h:169:2-169:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/pkt_cls.h:193:2-193:38: struct Qdisc *q = tp->chain->block->q;
-
include/net/sch_generic.h:539:2-539:20: struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
-
include/net/sch_generic.h:758:3-758:27: const struct Qdisc *q = rcu_dereference(txq->qdisc);
-
init/initramfs.c:93:2-93:20: struct hash **p, *q;
-
init/initramfs.c:120:2-120:20: struct hash **p, *q;
-
ipc/sem.c:285:2-285:20: struct sem_queue *q, *tq;
-
ipc/sem.c:857:2-857:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:951:2-951:20: struct sem_queue *q, *tmp;
-
ipc/sem.c:1110:2-1110:20: struct sem_queue *q;
-
ipc/sem.c:1146:2-1146:20: struct sem_queue *q, *tq;
-
kernel/audit_tree.c:611:2-611:24: struct list_head *p, *q;
-
kernel/auditsc.c:271:2-271:26: struct audit_tree_refs *q;
-
kernel/auditsc.c:299:2-299:30: struct audit_tree_refs *p, *q;
-
kernel/bpf/cpumap.c:696:2-696:19: struct ptr_ring *q;
-
kernel/cgroup/pids.c:146:2-146:26: struct pids_cgroup *p, *q;
-
kernel/crash_core.c:200:3-200:9: char *q;
-
kernel/events/uprobes.c:318:2-318:26: struct list_head *pos, *q;
-
kernel/events/uprobes.c:1324:2-1324:26: struct list_head *pos, *q;
-
kernel/futex/pi.c:936:2-936:21: struct futex_q q = futex_q_init;
-
kernel/futex/requeue.c:770:2-770:21: struct futex_q q = futex_q_init;
-
kernel/futex/waitwake.c:437:3-437:30: struct futex_q *q = &vs[i].q;
-
kernel/futex/waitwake.c:637:2-637:21: struct futex_q q = futex_q_init;
-
kernel/latencytop.c:123:3-123:7: int q, same = 1;
-
kernel/latencytop.c:180:2-180:9: int i, q;
-
kernel/latencytop.c:253:4-253:8: int q;
-
kernel/ptrace.c:734:2-734:19: struct sigqueue *q;
-
kernel/signal.c:415:2-415:23: struct sigqueue *q = NULL;
-
kernel/signal.c:463:2-463:19: struct sigqueue *q;
-
kernel/signal.c:492:2-492:19: struct sigqueue *q, *n;
-
kernel/signal.c:571:2-571:19: struct sigqueue *q, *first = NULL;
-
kernel/signal.c:714:2-714:19: struct sigqueue *q, *sync = NULL;
-
kernel/signal.c:788:2-788:19: struct sigqueue *q, *n;
-
kernel/signal.c:1082:2-1082:19: struct sigqueue *q;
-
kernel/trace/blktrace.c:727:2-727:24: struct request_queue *q;
-
kernel/trace/blktrace.c:979:2-979:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1009:2-1009:51: struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-
kernel/trace/blktrace.c:1776:2-1776:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/blktrace.c:1810:2-1810:47: struct request_queue *q = bdev_get_queue(bdev);
-
kernel/trace/trace.c:3919:2-3919:8: char *q;
-
kernel/trace/trace_boot.c:564:2-564:8: char *q;
-
kernel/trace/trace_events_filter.c:1259:2-1259:7: char q;
-
kernel/trace/trace_events_filter.c:1374:3-1374:17: char q = str[i];
-
kernel/trace/trace_events_inject.c:105:3-105:17: char q = str[i];
-
kernel/watch_queue.c:290:2-290:28: struct watch_type_filter *q;
-
lib/bch.c:909:2-909:37: struct gf_poly *q = bch->poly_2t[1];
-
lib/bootconfig.c:851:2-851:12: char *p, *q;
-
lib/crc32.c:82:2-82:6: u32 q;
-
lib/crypto/curve25519-hacl64.c:36:2-36:20: u64 q = x_xor_y | x_sub_y_xor_y;
-
lib/crypto/curve25519-hacl64.c:766:2-766:7: u64 *q;
-
lib/mpi/mpih-div.c:248:5-248:16: mpi_limb_t q;
-
lib/mpi/mpih-div.c:315:5-315:16: mpi_limb_t q;
-
lib/raid6/avx2.c:37:2-37:10: u8 *p, *q;
-
lib/raid6/avx2.c:86:2-86:10: u8 *p, *q;
-
lib/raid6/avx2.c:144:2-144:10: u8 *p, *q;
-
lib/raid6/avx2.c:196:2-196:10: u8 *p, *q;
-
lib/raid6/avx2.c:276:2-276:10: u8 *p, *q;
-
lib/raid6/avx2.c:357:2-357:10: u8 *p, *q;
-
lib/raid6/avx512.c:47:2-47:10: u8 *p, *q;
-
lib/raid6/avx512.c:105:2-105:10: u8 *p, *q;
-
lib/raid6/avx512.c:174:2-174:10: u8 *p, *q;
-
lib/raid6/avx512.c:237:2-237:10: u8 *p, *q;
-
lib/raid6/avx512.c:333:2-333:10: u8 *p, *q;
-
lib/raid6/avx512.c:427:2-427:10: u8 *p, *q;
-
lib/raid6/recov.c:23:2-23:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov.c:67:2-67:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx2.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx2.c:189:2-189:10: u8 *p, *q, *dq;
-
lib/raid6/recov_avx512.c:27:2-27:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_avx512.c:230:2-230:10: u8 *p, *q, *dq;
-
lib/raid6/recov_ssse3.c:19:2-19:10: u8 *p, *q, *dp, *dq;
-
lib/raid6/recov_ssse3.c:194:2-194:10: u8 *p, *q, *dq;
-
lib/raid6/sse2.c:39:2-39:10: u8 *p, *q;
-
lib/raid6/sse2.c:91:2-91:10: u8 *p, *q;
-
lib/raid6/sse2.c:149:2-149:10: u8 *p, *q;
-
lib/raid6/sse2.c:202:2-202:10: u8 *p, *q;
-
lib/raid6/sse2.c:281:2-281:10: u8 *p, *q;
-
lib/raid6/sse2.c:368:2-368:10: u8 *p, *q;
-
lib/reed_solomon/decode_rs.c:23:2-23:14: uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
-
lib/string_helpers.c:136:2-136:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:164:2-164:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:183:2-183:24: char *p = *dst, *q = *src;
-
lib/string_helpers.c:207:2-207:24: char *p = *dst, *q = *src;
-
lib/test_hexdump.c:99:3-99:26: const char *q = *result++;
-
lib/ts_kmp.c:45:2-45:22: unsigned int i, q = 0, text_len, consumed = state->offset;
-
lib/ts_kmp.c:77:2-77:18: unsigned int k, q;
-
lib/vsprintf.c:221:2-221:11: unsigned q;
-
lib/vsprintf.c:263:2-263:11: unsigned q;
-
mm/filemap.c:1155:2-1155:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1245:2-1245:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1409:2-1409:21: wait_queue_head_t *q;
-
mm/filemap.c:1510:2-1510:46: wait_queue_head_t *q = folio_waitqueue(folio);
-
mm/filemap.c:1704:2-1704:51: struct wait_queue_head *q = folio_waitqueue(folio);
-
mm/kasan/quarantine.c:196:2-196:21: struct qlist_head *q;
-
mm/kasan/quarantine.c:347:2-347:21: struct qlist_head *q;
-
mm/kasan/quarantine.c:415:2-415:21: struct qlist_head *q;
-
mm/z3fold.c:690:3-690:9: void *q;
-
net/atm/lec.c:870:2-870:6: int q;
-
net/bluetooth/hci_core.c:3168:3-3168:12: int cnt, q;
-
net/bluetooth/hci_core.c:3223:2-3223:11: int cnt, q, conn_num = 0;
-
net/core/dev.c:2332:3-2332:40: int q = netdev_get_prio_tc_map(dev, i);
-
net/core/dev.c:3102:3-3102:21: struct Qdisc *q = rcu_dereference(txq->qdisc);
-
net/core/dev.c:3113:3-3113:17: struct Qdisc *q;
-
net/core/dev.c:4166:2-4166:16: struct Qdisc *q;
-
net/core/dev.c:5053:4-5053:22: struct Qdisc *q = head;
-
net/core/pktgen.c:3376:2-3376:20: struct list_head *q, *n;
-
net/core/pktgen.c:3398:2-3398:20: struct list_head *q, *n;
-
net/core/pktgen.c:3879:2-3879:20: struct list_head *q, *n;
-
net/core/pktgen.c:3975:2-3975:20: struct list_head *q, *n;
-
net/core/skbuff.c:1277:2-1277:23: struct sk_buff_head *q;
-
net/core/skbuff.c:4684:2-4684:32: struct sk_buff_head *q = &sk->sk_error_queue;
-
net/ieee802154/6lowpan/reassembly.c:70:2-70:26: struct inet_frag_queue *q;
-
net/ipv4/af_inet.c:1926:2-1926:23: struct inet_protosw *q;
-
net/ipv4/inet_fragment.c:254:2-254:30: struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
-
net/ipv4/inet_fragment.c:310:2-310:26: struct inet_frag_queue *q;
-
net/ipv4/inet_fragment.c:332:2-332:26: struct inet_frag_queue *q;
-
net/ipv4/ip_fragment.c:215:2-215:26: struct inet_frag_queue *q;
-
net/ipv4/tcp_fastopen.c:62:2-62:25: struct fastopen_queue *q;
-
net/ipv4/tcp_output.c:1049:2-1049:20: struct list_head *q, *n;
-
net/ipv6/mcast.c:1512:2-1512:22: struct sk_buff_head q;
-
net/ipv6/mcast.c:1615:2-1615:22: struct sk_buff_head q;
-
net/ipv6/netfilter/nf_conntrack_reasm.c:156:2-156:26: struct inet_frag_queue *q;
-
net/ipv6/reassembly.c:93:2-93:26: struct inet_frag_queue *q;
-
net/mac80211/debugfs.c:578:2-578:6: int q, res = 0;
-
net/mac80211/ethtool.c:79:2-79:9: int i, q;
-
net/mac80211/mlme.c:1865:2-1865:6: int q;
-
net/mac80211/rx.c:2854:2-2854:10: u16 ac, q, hdrlen;
-
net/mac80211/tx.c:1659:3-1659:17: int q = info->hw_queue;
-
net/mac80211/tx.c:4400:2-4400:16: int q = info->hw_queue;
-
net/netfilter/nfnetlink_queue.c:790:2-790:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:932:2-932:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:965:2-965:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:992:2-992:53: struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
-
net/netfilter/nfnetlink_queue.c:1079:2-1079:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1190:2-1190:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1275:2-1275:56: struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
-
net/netfilter/nfnetlink_queue.c:1439:2-1439:25: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1460:3-1460:26: struct nfnl_queue_net *q;
-
net/netfilter/nfnetlink_queue.c:1525:2-1525:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/nfnetlink_queue.c:1542:2-1542:50: struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-
net/netfilter/xt_quota.c:29:2-29:41: struct xt_quota_info *q = (void *)par->matchinfo;
-
net/netfilter/xt_quota.c:48:2-48:33: struct xt_quota_info *q = par->matchinfo;
-
net/netfilter/xt_quota.c:64:2-64:39: const struct xt_quota_info *q = par->matchinfo;
-
net/rds/message.c:96:2-96:30: struct rds_msg_zcopy_queue *q;
-
net/rds/recv.c:600:2-600:39: struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
-
net/rose/rose_in.c:266:2-266:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/rxrpc/rxkad.c:997:2-997:10: u8 *p, *q, *name, *end;
-
net/sched/cls_api.c:1967:2-1967:16: struct Qdisc *q;
-
net/sched/cls_api.c:2200:2-2200:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2360:2-2360:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2563:2-2563:20: struct Qdisc *q = NULL;
-
net/sched/cls_api.c:2822:2-2822:16: struct Qdisc *q;
-
net/sched/cls_api.c:2954:2-2954:20: struct Qdisc *q = NULL;
-
net/sched/cls_flow.c:503:4-503:50: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_fw.c:75:3-75:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/cls_tcindex.c:114:3-114:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:132:2-132:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:176:2-176:20: struct Qdisc_ops *q, **qp;
-
net/sched/sch_api.c:203:2-203:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:261:2-261:16: struct Qdisc *q;
-
net/sched/sch_api.c:300:2-300:16: struct Qdisc *q;
-
net/sched/sch_api.c:319:2-319:16: struct Qdisc *q;
-
net/sched/sch_api.c:352:2-352:24: struct Qdisc_ops *q = NULL;
-
net/sched/sch_api.c:1042:2-1042:20: struct Qdisc *q = old;
-
net/sched/sch_api.c:1423:2-1423:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:1508:2-1508:16: struct Qdisc *q, *p;
-
net/sched/sch_api.c:1687:2-1687:16: struct Qdisc *q;
-
net/sched/sch_api.c:1908:3-1908:49: struct Qdisc *q = tcf_block_q(tp->chain->block);
-
net/sched/sch_api.c:1986:2-1986:20: struct Qdisc *q = NULL;
-
net/sched/sch_api.c:2165:2-2165:16: struct Qdisc *q;
-
net/sched/sch_cake.c:1503:2-1503:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1617:2-1617:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1657:2-1657:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1697:2-1697:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1912:2-1912:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1936:2-1936:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:1947:2-1947:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2294:2-2294:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2314:2-2314:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2403:2-2403:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2447:2-2447:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2484:2-2484:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2513:2-2513:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2568:2-2568:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2692:2-2692:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2702:2-2702:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2773:2-2773:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2848:2-2848:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2967:2-2967:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:2984:2-2984:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cake.c:3057:2-3057:44: struct cake_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:207:2-207:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:293:2-293:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:317:2-317:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:362:2-362:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:396:2-396:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:481:2-481:29: struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
-
net/sched/sch_cbq.c:641:2-641:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:680:2-680:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:783:2-783:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:802:2-802:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:912:2-912:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:980:2-980:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:1007:2-1007:51: struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
net/sched/sch_cbq.c:1027:2-1027:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1096:2-1096:49: struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
net/sched/sch_cbq.c:1159:2-1159:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1321:2-1321:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1339:2-1339:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1375:2-1375:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1426:2-1426:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1433:2-1433:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1447:2-1447:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1479:2-1479:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1678:2-1678:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1717:2-1717:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1729:2-1729:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbq.c:1751:2-1751:43: struct cbq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:108:2-108:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:117:2-117:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:134:2-134:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:178:2-178:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:233:2-233:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:241:2-241:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:336:2-336:25: struct cbs_sched_data *q;
-
net/sched/sch_cbs.c:364:2-364:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:404:2-404:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:435:2-435:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:454:2-454:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:481:2-481:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:495:2-495:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_cbs.c:510:2-510:43: struct cbs_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:117:2-117:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:216:2-216:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:286:2-286:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:307:2-307:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:341:2-341:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:436:2-436:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:464:2-464:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:478:2-478:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_choke.c:485:2-485:45: struct choke_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:91:2-91:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:114:2-114:27: struct codel_sched_data *q;
-
net/sched/sch_codel.c:136:2-136:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:192:2-192:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:218:2-218:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:247:2-247:51: const struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_codel.c:273:2-273:45: struct codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:41:2-41:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:58:2-58:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:150:2-150:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:175:2-175:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:278:2-278:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:303:2-303:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:342:2-342:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:377:2-377:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:418:2-418:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:433:2-433:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_drr.c:450:2-450:38: struct drr_sched *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:77:2-77:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:110:2-110:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:122:2-122:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:165:2-165:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:203:2-203:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:233:2-233:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:255:2-255:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:349:2-349:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:422:2-422:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:438:2-438:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:456:2-456:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_etf.c:468:2-468:43: struct etf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:92:2-92:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:99:2-99:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:108:2-108:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:202:2-202:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:277:2-277:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:287:2-287:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:301:2-301:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:337:2-337:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:360:2-360:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:383:2-383:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:421:2-421:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:465:2-465:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:588:2-588:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:702:2-702:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:721:2-721:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:736:2-736:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_ets.c:747:2-747:38: struct ets_sched *q = qdisc_priv(sch);
-
net/sched/sch_fifo.c:256:2-256:16: struct Qdisc *q;
-
net/sched/sch_fq.c:445:2-445:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:528:2-528:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:664:2-664:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:749:2-749:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:805:2-805:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:919:2-919:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:929:2-929:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:968:2-968:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq.c:1009:2-1009:42: struct fq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:79:2-79:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:140:2-140:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:187:2-187:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:258:2-258:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:283:2-283:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:337:2-337:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:372:2-372:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:457:2-457:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:467:2-467:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:529:2-529:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:572:2-572:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:620:2-620:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:637:2-637:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_codel.c:683:2-683:48: struct fq_codel_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:82:2-82:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:131:2-131:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:230:2-230:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:280:2-280:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:374:2-374:32: struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_fq_pie.c:396:2-396:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:449:2-449:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:484:2-484:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:509:2-509:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_fq_pie.c:531:2-531:46: struct fq_pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_generic.c:729:2-729:44: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:755:3-755:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:794:3-794:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:808:3-808:45: struct skb_array *q = band2list(priv, band);
-
net/sched/sch_generic.c:823:4-823:29: struct gnet_stats_queue *q;
-
net/sched/sch_generic.c:857:3-857:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:876:3-876:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:898:3-898:45: struct skb_array *q = band2list(priv, prio);
-
net/sched/sch_generic.c:1048:2-1048:20: struct Qdisc *q = container_of(head, struct Qdisc, rcu);
-
net/sched/sch_generic.c:1290:3-1290:17: struct Qdisc *q;
-
net/sched/sch_gred.c:99:3-99:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:168:2-168:30: struct gred_sched_data *q = NULL;
-
net/sched/sch_gred.c:269:3-269:27: struct gred_sched_data *q;
-
net/sched/sch_gred.c:301:3-301:39: struct gred_sched_data *q = t->tab[i];
-
net/sched/sch_gred.c:334:4-334:44: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:486:2-486:43: struct gred_sched_data *q = table->tab[dp];
-
net/sched/sch_gred.c:792:3-792:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:808:3-808:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_gred.c:859:3-859:43: struct gred_sched_data *q = table->tab[i];
-
net/sched/sch_hfsc.c:866:2-866:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:917:2-917:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1081:2-1081:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1094:2-1094:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1117:2-1117:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1243:2-1243:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1342:2-1342:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1368:2-1368:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1387:2-1387:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1430:2-1430:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1477:2-1477:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1494:2-1494:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1517:2-1517:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hfsc.c:1581:2-1581:39: struct hfsc_sched *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:249:2-249:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:351:2-351:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:374:2-374:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:420:2-420:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:474:2-474:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:511:2-511:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:580:2-580:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:656:2-656:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_hhf.c:682:2-682:43: struct hhf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:189:2-189:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:219:2-219:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:617:2-617:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:934:2-934:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:993:2-993:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1030:2-1030:24: struct htb_sched *q = container_of(work, struct htb_sched, work);
-
net/sched/sch_htb.c:1055:2-1055:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1178:2-1178:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1217:2-1217:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1227:2-1227:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1267:2-1267:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1346:2-1346:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1388:2-1388:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1469:2-1469:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1529:2-1529:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1568:2-1568:29: struct Qdisc *q = cl->leaf.q;
-
net/sched/sch_htb.c:1635:2-1635:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1713:2-1713:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:1783:2-1783:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2100:2-2100:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_htb.c:2135:2-2135:38: struct htb_sched *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:50:2-50:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:64:2-64:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:71:2-71:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:79:2-79:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:102:2-102:47: struct ingress_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:175:2-175:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:189:2-189:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:196:2-196:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:203:2-203:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:210:2-210:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:218:2-218:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_ingress.c:249:2-249:46: struct clsact_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_mqprio.c:517:4-517:55: struct netdev_queue *q = netdev_get_tx_queue(dev, i);
-
net/sched/sch_multiq.c:32:2-32:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:89:2-89:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:120:2-120:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:151:2-151:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:163:2-163:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:175:2-175:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:239:2-239:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:264:2-264:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:284:2-284:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:297:2-297:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:305:2-305:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:327:2-327:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:337:2-337:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:350:2-350:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_multiq.c:372:2-372:46: struct multiq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:362:2-362:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:380:2-380:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:437:2-437:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:678:2-678:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:757:2-757:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:957:2-957:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1062:2-1062:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1079:2-1079:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1140:2-1140:51: const struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1223:2-1223:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1237:2-1237:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_netem.c:1245:2-1245:45: struct netem_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:88:2-88:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:141:2-141:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:425:2-425:29: struct pie_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_pie.c:441:2-441:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:463:2-463:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:494:2-494:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:519:2-519:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:531:2-531:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_pie.c:539:2-539:43: struct pie_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:90:2-90:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:103:2-103:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:125:2-125:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_plug.c:161:2-161:44: struct plug_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:33:2-33:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:99:2-99:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:113:2-113:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:134:2-134:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:170:2-170:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:181:2-181:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:234:2-234:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:266:2-266:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:291:2-291:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:321:2-321:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:329:2-329:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:350:2-350:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:360:2-360:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:374:2-374:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_prio.c:396:2-396:44: struct prio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:208:2-208:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:377:2-377:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:396:2-396:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:523:2-523:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:534:2-534:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:559:2-559:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:653:2-653:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:678:2-678:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1078:2-1078:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1200:2-1200:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1402:2-1402:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1411:2-1411:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1449:2-1449:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_qfq.c:1467:2-1467:38: struct qfq_sched *q = qdisc_priv(sch);
-
net/sched/sch_red.c:73:2-73:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:151:2-151:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:168:2-168:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:176:2-176:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:186:2-186:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:215:2-215:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:238:2-238:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:322:2-322:29: struct red_sched_data *q = from_timer(q, t, adapt_timer);
-
net/sched/sch_red.c:335:2-335:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:369:2-369:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:411:2-411:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:448:2-448:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:475:2-475:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:500:2-500:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_red.c:513:2-513:43: struct red_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:283:2-283:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:425:2-425:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:443:2-443:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:453:2-453:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:466:2-466:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:491:2-491:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:554:2-554:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:567:2-567:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:596:2-596:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:620:2-620:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:631:2-631:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfb.c:673:2-673:43: struct sfb_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:166:2-166:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:295:2-295:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:348:2-348:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:482:2-482:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:537:2-537:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:607:2-607:29: struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
-
net/sched/sch_sfq.c:625:2-625:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:721:2-721:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:734:2-734:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:790:2-790:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:848:2-848:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:865:2-865:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_sfq.c:884:2-884:43: struct sfq_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:72:2-72:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:141:2-141:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:182:2-182:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:213:2-213:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:229:2-229:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_skbprio.c:256:2-256:47: struct skbprio_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:198:2-198:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:265:2-265:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:347:2-347:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:418:2-418:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:439:2-439:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:495:2-495:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:546:2-546:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:560:2-560:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:655:2-655:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:708:2-708:27: struct taprio_sched *q = container_of(timer, struct taprio_sched,
-
net/sched/sch_taprio.c:1001:2-1001:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1053:2-1053:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1101:2-1101:23: struct taprio_sched *q;
-
net/sched/sch_taprio.c:1332:2-1332:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1459:2-1459:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1628:2-1628:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1644:2-1644:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1679:2-1679:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1744:2-1744:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1787:2-1787:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_taprio.c:1881:2-1881:41: struct taprio_sched *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:143:2-143:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:207:2-207:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:241:2-241:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:270:2-270:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:330:2-330:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:355:2-355:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:480:2-480:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:495:2-495:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:504:2-504:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:547:2-547:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:558:2-558:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_tbf.c:571:2-571:43: struct tbf_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:79:2-79:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:95:2-95:16: struct Qdisc *q;
-
net/sched/sch_teql.c:133:2-133:16: struct Qdisc *q, *prev;
-
net/sched/sch_teql.c:174:2-174:44: struct teql_sched_data *q = qdisc_priv(sch);
-
net/sched/sch_teql.c:281:2-281:24: struct Qdisc *start, *q;
-
net/sched/sch_teql.c:358:2-358:16: struct Qdisc *q;
-
net/sched/sch_teql.c:418:2-418:16: struct Qdisc *q;
-
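Note: nearly every net/sched entry above binds q via qdisc_priv(sch). A qdisc's private state is allocated in one block directly behind struct Qdisc (sized by Qdisc_ops.priv_size), and qdisc_priv() just returns a pointer past the struct, so each callback re-derives q from its sch argument. Timer callbacks (the sch_red, sch_sfq and sch_taprio entries above) receive only the timer pointer and recover q with from_timer() or container_of() instead. Below is a minimal sketch of both patterns, not taken from any of the listed files; the names my_sched_data, my_enqueue and my_adapt_timer are hypothetical.

/* Minimal sketch of the qdisc_priv()/from_timer() patterns above;
 * all names here are hypothetical, not from the files listed. */
#include <net/sch_generic.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_sched_data {
        u32 limit;
        struct timer_list adapt_timer;
};

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        /* Private area sits directly after struct Qdisc. */
        struct my_sched_data *q = qdisc_priv(sch);

        if (unlikely(sch->q.qlen >= q->limit))
                return qdisc_drop(skb, sch, to_free);
        return qdisc_enqueue_tail(skb, sch);
}

static void my_adapt_timer(struct timer_list *t)
{
        /* No sch argument here, so go from the embedded timer back to
         * the containing private struct (from_timer() wraps container_of()). */
        struct my_sched_data *q = from_timer(q, t, adapt_timer);

        mod_timer(&q->adapt_timer, jiffies + HZ);
}

The same shape repeats for every *_sched_data, qfq_sched and taprio_sched entry above; only the type of the private struct changes.
-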
net/sctp/output.c:678:2-678:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/outqueue.c:385:2-385:31: struct sctp_outq *q = &asoc->outqueue;
-
net/sctp/socket.c:169:2-169:31: struct sctp_outq *q = &asoc->outqueue;
-
net/smc/smc_llc.c:1831:2-1831:34: struct smc_llc_qentry *qentry, *q;
-
net/sunrpc/auth_gss/auth_gss.c:163:2-163:14: const void *q;
-
net/sunrpc/auth_gss/auth_gss_internal.h:18:2-18:54: const void *q = (const void *)((const char *)p + len);
-
net/sunrpc/auth_gss/auth_gss_internal.h:28:2-28:14: const void *q;
-
net/sunrpc/auth_gss/gss_krb5_wrap.c:120:2-120:18: u64 *q = (u64 *)p;
-
net/sunrpc/rpc_pipe.c:634:2-634:18: struct qstr q = QSTR_INIT(name, strlen(name));
-
net/sunrpc/rpc_pipe.c:1304:2-1304:18: struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
-
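Note: the two rpc_pipe.c entries above build q with QSTR_INIT(), which fills in only the name pointer and length of a struct qstr; the hash is computed later by the dcache lookup helpers. The following is a hypothetical helper showing the usual shape of that pattern (lookup_child is not a function in rpc_pipe.c):

#include <linux/dcache.h>
#include <linux/string.h>

static struct dentry *lookup_child(struct dentry *parent, const char *name)
{
        /* Bundle name + strlen(name) into a qstr for the dcache API. */
        struct qstr q = QSTR_INIT(name, strlen(name));

        return d_hash_and_lookup(parent, &q);
}
-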
net/sunrpc/sched.c:163:2-163:20: struct list_head *q;
-
net/sunrpc/sched.c:599:2-599:20: struct list_head *q;
-
net/sunrpc/xdr.c:1012:2-1012:10: __be32 *q;
-
net/sunrpc/xdr.c:1344:2-1344:18: __be32 *q = p + nwords;
-
net/x25/x25_in.c:418:2-418:37: int queued = 0, frametype, ns, nr, q, d, m;
-
net/xdp/xsk.c:740:2-740:20: struct xsk_queue *q;
-
net/xdp/xsk.c:1056:3-1056:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1112:3-1112:22: struct xsk_queue **q;
-
net/xdp/xsk.c:1294:2-1294:24: struct xsk_queue *q = NULL;
-
net/xdp/xsk_queue.c:25:2-25:20: struct xsk_queue *q;
-
samples/v4l/v4l2-pci-skeleton.c:762:2-762:20: struct vb2_queue *q;
-
scripts/dtc/libfdt/fdt_ro.c:260:3-260:44: const char *q = memchr(path, '/', end - p);
-
scripts/dtc/libfdt/fdt_ro.c:274:3-274:15: const char *q;
-
security/integrity/evm/evm_main.c:885:2-885:26: struct list_head *pos, *q;
-
security/keys/keyctl_pkey.c:42:2-42:31: char *c = params->info, *p, *q;
-
security/selinux/hooks.c:2587:4-2587:14: char *p, *q;
-
security/selinux/hooks.c:3536:3-3536:15: struct qstr q;
-
sound/core/misc.c:113:2-113:30: const struct snd_pci_quirk *q;
-
sound/core/pcm_lib.c:535:2-535:15: unsigned int q;
-
sound/core/pcm_lib.c:802:3-802:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:841:3-841:23: unsigned int q = i->max;
-
sound/core/pcm_lib.c:918:3-918:23: unsigned int q = i->min;
-
sound/core/pcm_lib.c:950:3-950:23: unsigned int q = i->max;
-
sound/core/seq/oss/seq_oss_readq.c:35:2-35:24: struct seq_oss_readq *q;
-
sound/core/seq/oss/seq_oss_writeq.c:27:2-27:25: struct seq_oss_writeq *q;
-
sound/core/seq/seq_clientmgr.c:577:2-577:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1558:2-1558:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1590:2-1590:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1611:2-1611:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1644:2-1644:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_clientmgr.c:1772:3-1772:25: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:71:2-71:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:98:2-98:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:170:2-170:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:189:2-189:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:205:2-205:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:222:2-222:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:303:2-303:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:388:2-388:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:408:2-408:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:475:2-475:44: struct snd_seq_queue *q = queueptr(queueid);
-
sound/core/seq/seq_queue.c:538:2-538:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:559:2-559:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:592:2-592:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:608:2-608:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:707:2-707:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_queue.c:737:2-737:24: struct snd_seq_queue *q;
-
sound/core/seq/seq_timer.c:125:2-125:36: struct snd_seq_queue *q = timeri->callback_data;
-
sound/core/seq/seq_timer.c:480:2-480:24: struct snd_seq_queue *q;
-
sound/pci/ac97/ac97_codec.c:2927:2-2927:28: const struct quirk_table *q;
-
sound/pci/atiixp.c:551:2-551:30: const struct snd_pci_quirk *q;
-
sound/pci/emu10k1/memory.c:169:2-169:29: struct snd_emu10k1_memblk *q;
-
sound/pci/emu10k1/memory.c:459:2-459:29: struct snd_emu10k1_memblk *q;
-
sound/pci/hda/hda_auto_parser.c:981:2-981:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1531:2-1531:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1628:2-1628:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:1669:2-1669:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/hda_intel.c:2214:3-2214:31: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_hdmi.c:2026:2-2026:30: const struct snd_pci_quirk *q;
-
sound/pci/hda/patch_realtek.c:1075:2-1075:43: const struct alc_codec_rename_pci_table *q;
-
sound/pci/hda/patch_realtek.c:1148:2-1148:30: const struct snd_pci_quirk *q;
-
sound/pci/nm256/nm256.c:1601:2-1601:30: const struct snd_pci_quirk *q;
-
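Note: most of the sound/core and sound/pci entries above (misc.c, atiixp, hda_intel, patch_hdmi, patch_realtek, nm256) declare q as a cursor into a quirk table, typically filled via snd_pci_quirk_lookup(), which matches the device's PCI subsystem vendor/device IDs against an SND_PCI_QUIRK() list. A minimal sketch follows; the table and function names are made up for illustration and the quirk IDs are placeholders:

#include <linux/pci.h>
#include <sound/core.h>

/* Hypothetical quirk table keyed on PCI subsystem IDs. */
static const struct snd_pci_quirk my_quirks[] = {
        SND_PCI_QUIRK(0x1043, 0x1234, "Example board", 1),
        {}      /* terminator */
};

static int my_lookup_quirk(struct pci_dev *pci)
{
        const struct snd_pci_quirk *q = snd_pci_quirk_lookup(pci, my_quirks);

        return q ? q->value : 0;        /* fall back to a default when no entry matches */
}
-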
sound/soc/codecs/tas2552.c:187:3-187:19: unsigned int d, q, t;
-
sound/usb/quirks.c:1762:2-1762:35: const struct registration_quirk *q;