Symbol: rq
function parameter
Defined in the following locations (file:line:col-col: declaration):
-
block/bfq-cgroup.c:343:59-343:75: void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:1043:41-1043:57: static unsigned long bfq_serv_to_charge(struct request *rq,
-
block/bfq-iosched.c:1821:11-1821:27: struct request *rq,
-
block/bfq-iosched.c:2204:29-2204:45: static void bfq_add_request(struct request *rq)
-
block/bfq-iosched.c:2372:46-2372:62: static sector_t get_sdist(sector_t last_pos, struct request *rq)
-
block/bfq-iosched.c:2381:11-2381:27: struct request *rq)
-
block/bfq-iosched.c:2560:58-2560:74: static void bfq_requests_merged(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3227:58-3227:74: static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
-
block/bfq-iosched.c:3433:12-3433:28: struct request *rq)
-
block/bfq-iosched.c:3450:58-3450:74: static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
-
block/bfq-iosched.c:3591:57-3591:73: static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
-
block/bfq-iosched.c:3653:58-3653:74: static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
-
block/bfq-iosched.c:5250:11-5250:27: struct request *rq,
-
block/bfq-iosched.c:5917:10-5917:26: struct request *rq)
-
block/bfq-iosched.c:6079:8-6079:24: struct request *rq)
-
block/bfq-iosched.c:6151:57-6151:73: static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
-
block/bfq-iosched.c:6239:60-6239:76: static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-
block/bfq-iosched.c:6648:40-6648:56: static void bfq_finish_requeue_request(struct request *rq)
-
block/bfq-iosched.c:6703:32-6703:48: static void bfq_finish_request(struct request *rq)
-
block/bfq-iosched.c:6810:33-6810:49: static void bfq_prepare_request(struct request *rq)
-
block/bfq-iosched.c:6845:38-6845:54: static struct bfq_queue *bfq_init_rq(struct request *rq)
-
block/blk-cgroup.h:462:41-462:57: static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
-
block/blk-crypto-internal.h:57:47-57:63: static inline void blk_crypto_rq_set_defaults(struct request *rq)
-
block/blk-crypto-internal.h:63:47-63:63: static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
-
block/blk-crypto-internal.h:68:46-68:62: static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
-
block/blk-crypto-internal.h:148:45-148:61: static inline void bio_crypt_do_front_merge(struct request *rq,
-
block/blk-crypto-internal.h:167:54-167:70: static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
-
block/blk-crypto-internal.h:175:46-175:62: static inline void blk_crypto_rq_put_keyslot(struct request *rq)
-
block/blk-crypto-internal.h:182:44-182:60: static inline void blk_crypto_free_request(struct request *rq)
-
block/blk-crypto-internal.h:200:42-200:58: static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-
block/blk-crypto.c:193:34-193:50: bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
-
block/blk-crypto.c:228:42-228:58: blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
-
block/blk-crypto.c:235:34-235:50: void __blk_crypto_rq_put_keyslot(struct request *rq)
-
block/blk-crypto.c:241:32-241:48: void __blk_crypto_free_request(struct request *rq)
-
block/blk-crypto.c:303:30-303:46: int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-
block/blk-flush.c:103:60-103:76: static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
-
block/blk-flush.c:120:39-120:55: static unsigned int blk_flush_cur_seq(struct request *rq)
-
block/blk-flush.c:125:39-125:55: static void blk_flush_restore_request(struct request *rq)
-
block/blk-flush.c:139:34-139:50: static void blk_account_io_flush(struct request *rq)
-
block/blk-flush.c:163:36-163:52: static void blk_flush_complete_seq(struct request *rq,
-
block/blk-flush.c:271:18-271:34: bool is_flush_rq(struct request *rq)
-
block/blk-flush.c:358:48-358:64: static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
-
block/blk-flush.c:390:31-390:47: static void blk_rq_init_flush(struct request *rq)
-
block/blk-flush.c:403:23-403:39: bool blk_insert_flush(struct request *rq)
-
block/blk-integrity.c:334:39-334:55: static void blk_integrity_nop_prepare(struct request *rq)
-
block/blk-integrity.c:338:40-338:56: static void blk_integrity_nop_complete(struct request *rq,
-
block/blk-iocost.c:2563:42-2563:58: static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
-
block/blk-iocost.c:2580:33-2580:49: static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
-
block/blk-iocost.c:2723:49-2723:65: static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
-
block/blk-iocost.c:2790:48-2790:64: static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
-
block/blk-map.c:131:30-131:46: static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
-
block/blk-map.c:246:41-246:57: static struct bio *blk_rq_map_bio_alloc(struct request *rq,
-
block/blk-map.c:265:29-265:45: static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
-
block/blk-map.c:530:23-530:39: int blk_rq_append_bio(struct request *rq, struct bio *bio)
-
block/blk-map.c:555:33-555:49: static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
-
block/blk-map.c:625:50-625:66: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-
block/blk-map.c:680:46-680:62: int blk_rq_map_user(struct request_queue *q, struct request *rq,
-
block/blk-map.c:775:46-775:62: int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-
block/blk-merge.c:412:37-412:53: unsigned int blk_recalc_rq_segments(struct request *rq)
-
block/blk-merge.c:567:46-567:62: int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:590:51-590:67: static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
-
block/blk-merge.c:733:29-733:45: void blk_rq_set_mixed_merge(struct request *rq)
-
block/blk-merge.c:888:3-888:19: struct request *rq)
-
block/blk-merge.c:899:3-899:19: struct request *rq)
-
block/blk-merge.c:914:53-914:69: bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-
block/blk-merge.c:920:22-920:38: bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
-
block/blk-merge.c:950:30-950:46: enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
-
block/blk-merge.c:1057:10-1057:26: struct request *rq,
-
block/blk-mq-debugfs.c:280:50-280:66: int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
-
block/blk-mq-debugfs.c:354:31-354:47: static bool hctx_show_busy_rq(struct request *rq, void *data)
-
block/blk-mq-sched.c:375:61-375:77: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sched.h:37:51-37:67: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
-
block/blk-mq-sched.h:49:51-49:67: static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
-
block/blk-mq-sched.h:59:49-59:65: static inline void blk_mq_sched_requeue_request(struct request *rq)
-
block/blk-mq-tag.c:452:47-452:63: static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
-
block/blk-mq-tag.c:678:23-678:39: u32 blk_mq_unique_tag(struct request *rq)
-
block/blk-mq.c:92:35-92:51: static bool blk_mq_check_inflight(struct request *rq, void *priv)
-
block/blk-mq.c:315:43-315:59: void blk_rq_init(struct request_queue *q, struct request *rq)
-
block/blk-mq.c:333:40-333:56: static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
-
block/blk-mq.c:685:35-685:51: static void blk_mq_finish_request(struct request *rq)
-
block/blk-mq.c:700:35-700:51: static void __blk_mq_free_request(struct request *rq)
-
block/blk-mq.c:722:26-722:42: void blk_mq_free_request(struct request *rq)
-
block/blk-mq.c:747:24-747:40: void blk_dump_rq_flags(struct request *rq, char *msg)
-
block/blk-mq.c:761:27-761:43: static void req_bio_endio(struct request *rq, struct bio *bio,
-
block/blk-mq.c:1023:46-1023:62: static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
-
block/blk-mq.c:1032:34-1032:50: inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
-
block/blk-mq.c:1049:25-1049:41: void blk_mq_end_request(struct request *rq, blk_status_t error)
-
block/blk-mq.c:1150:45-1150:61: static inline bool blk_mq_complete_need_ipi(struct request *rq)
-
block/blk-mq.c:1176:38-1176:54: static void blk_mq_complete_send_ipi(struct request *rq)
-
block/blk-mq.c:1185:34-1185:50: static void blk_mq_raise_softirq(struct request *rq)
-
block/blk-mq.c:1196:37-1196:53: bool blk_mq_complete_request_remote(struct request *rq)
-
block/blk-mq.c:1230:30-1230:46: void blk_mq_complete_request(struct request *rq)
-
block/blk-mq.c:1245:27-1245:43: void blk_mq_start_request(struct request *rq)
-
block/blk-mq.c:1284:55-1284:71: static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-
block/blk-mq.c:1323:28-1323:44: void blk_execute_rq_nowait(struct request *rq, bool at_head)
-
block/blk-mq.c:1352:43-1352:59: static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
-
block/blk-mq.c:1361:21-1361:37: bool blk_rq_is_poll(struct request *rq)
-
block/blk-mq.c:1371:36-1371:52: static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
-
block/blk-mq.c:1389:29-1389:45: blk_status_t blk_execute_rq(struct request *rq, bool at_head)
-
block/blk-mq.c:1427:38-1427:54: static void __blk_mq_requeue_request(struct request *rq)
-
block/blk-mq.c:1442:29-1442:45: void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
-
block/blk-mq.c:1514:32-1514:48: static bool blk_mq_rq_inflight(struct request *rq, void *priv)
-
block/blk-mq.c:1561:32-1561:48: static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
-
block/blk-mq.c:1581:24-1581:40: void blk_mq_put_rq_ref(struct request *rq)
-
block/blk-mq.c:1591:34-1591:50: static bool blk_mq_check_expired(struct request *rq, void *priv)
-
block/blk-mq.c:1609:35-1609:51: static bool blk_mq_handle_expired(struct request *rq, void *priv)
-
block/blk-mq.c:1751:39-1751:55: static bool __blk_mq_alloc_driver_tag(struct request *rq)
-
block/blk-mq.c:1775:58-1775:74: bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
-
block/blk-mq.c:1817:6-1817:22: struct request *rq)
-
block/blk-mq.c:1913:40-1913:56: static void blk_mq_handle_dev_resource(struct request *rq,
-
block/blk-mq.c:1920:41-1920:57: static void blk_mq_handle_zone_resource(struct request *rq,
-
block/blk-mq.c:1939:51-1939:67: static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
-
block/blk-mq.c:2446:42-2446:58: static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
-
block/blk-mq.c:2495:35-2495:51: static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
-
block/blk-mq.c:2557:35-2557:51: static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
-
block/blk-mq.c:2576:10-2576:26: struct request *rq, bool last)
-
block/blk-mq.c:2608:39-2608:55: static bool blk_mq_get_budget_and_tag(struct request *rq)
-
block/blk-mq.c:2634:3-2634:19: struct request *rq)
-
block/blk-mq.c:2664:51-2664:67: static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
-
block/blk-mq.c:3031:40-3031:56: blk_status_t blk_insert_cloned_request(struct request *rq)
-
block/blk-mq.c:3097:26-3097:42: void blk_rq_unprep_clone(struct request *rq)
-
block/blk-mq.c:3126:23-3126:39: int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-
block/blk-mq.c:3183:44-3183:60: void blk_steal_bios(struct bio_list *list, struct request *rq)
-
block/blk-mq.c:3360:60-3360:76: static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
-
block/blk-mq.c:3457:32-3457:48: static bool blk_mq_has_request(struct request *rq, void *data)
-
block/blk-mq.c:4830:17-4830:33: int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
-
block/blk-mq.c:4848:28-4848:44: unsigned int blk_mq_rq_cpu(struct request *rq)
-
block/blk-mq.h:258:47-258:63: static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
-
block/blk-mq.h:267:46-267:62: static inline int blk_mq_get_rq_budget_token(struct request *rq)
-
block/blk-mq.h:303:9-303:25: struct request *rq)
-
block/blk-mq.h:314:42-314:58: static inline void blk_mq_put_driver_tag(struct request *rq)
-
block/blk-mq.h:324:42-324:58: static inline bool blk_mq_get_driver_tag(struct request *rq)
-
block/blk-pm.h:19:42-19:58: static inline void blk_pm_mark_last_busy(struct request *rq)
-
block/blk-rq-qos.c:35:41-35:57: void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
-
block/blk-rq-qos.c:44:42-44:58: void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
-
block/blk-rq-qos.c:53:44-53:60: void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
-
block/blk-rq-qos.c:71:42-71:58: void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
-
block/blk-rq-qos.c:80:42-80:58: void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
-
block/blk-rq-qos.h:119:57-119:73: static inline void rq_qos_done(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:125:58-125:74: static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:131:60-131:76: static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
-
block/blk-rq-qos.h:155:58-155:74: static inline void rq_qos_track(struct request_queue *q, struct request *rq,
-
block/blk-rq-qos.h:162:58-162:74: static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
-
block/blk-stat.c:50:19-50:35: void blk_stat_add(struct request *rq, u64 now)
-
block/blk-throttle.c:2302:26-2302:42: void blk_throtl_stat_add(struct request *rq, u64 time_ns)
-
block/blk-wbt.c:102:36-102:52: static inline void wbt_clear_state(struct request *rq)
-
block/blk-wbt.c:107:40-107:56: static inline enum wbt_flags wbt_flags(struct request *rq)
-
block/blk-wbt.c:112:35-112:51: static inline bool wbt_is_tracked(struct request *rq)
-
block/blk-wbt.c:117:32-117:48: static inline bool wbt_is_read(struct request *rq)
-
block/blk-wbt.c:245:43-245:59: static void wbt_done(struct rq_qos *rqos, struct request *rq)
-
block/blk-wbt.c:665:44-665:60: static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
-
block/blk-wbt.c:671:44-671:60: static void wbt_issue(struct rq_qos *rqos, struct request *rq)
-
block/blk-wbt.c:691:46-691:62: static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
-
block/blk-wbt.c:751:25-751:47: static int wbt_data_dir(const struct request *rq)
-
block/blk-zoned.c:58:36-58:52: bool blk_req_needs_zone_write_lock(struct request *rq)
-
block/blk-zoned.c:67:33-67:49: bool blk_req_zone_write_trylock(struct request *rq)
-
block/blk-zoned.c:81:32-81:48: void __blk_req_zone_write_lock(struct request *rq)
-
block/blk-zoned.c:92:34-92:50: void __blk_req_zone_write_unlock(struct request *rq)
-
block/blk.h:124:33-124:49: static inline bool rq_mergeable(struct request *rq)
-
block/blk.h:162:52-162:68: static inline unsigned int blk_rq_get_max_segments(struct request *rq)
-
block/blk.h:341:35-341:51: static inline bool blk_do_io_stat(struct request *rq)
-
block/bsg-lib.c:205:26-205:42: static void bsg_complete(struct request *rq)
-
block/bsg-lib.c:335:45-335:61: static enum blk_eh_timer_return bsg_timeout(struct request *rq)
-
block/elevator.c:60:41-60:57: static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
-
block/elevator.c:74:23-74:39: bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
-
block/elevator.c:173:37-173:53: static inline void __elv_rqhash_del(struct request *rq)
-
block/elevator.c:179:46-179:62: void elv_rqhash_del(struct request_queue *q, struct request *rq)
-
block/elevator.c:186:46-186:62: void elv_rqhash_add(struct request_queue *q, struct request *rq)
-
block/elevator.c:196:53-196:69: void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
-
block/elevator.c:227:39-227:55: void elv_rb_add(struct rb_root *root, struct request *rq)
-
block/elevator.c:248:39-248:55: void elv_rb_del(struct rb_root *root, struct request *rq)
-
block/elevator.c:332:56-332:72: bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
-
block/elevator.c:370:50-370:66: void elv_merged_request(struct request_queue *q, struct request *rq,
-
block/elevator.c:384:50-384:66: void elv_merge_requests(struct request_queue *q, struct request *rq,
-
block/elevator.c:396:61-396:77: struct request *elv_latter_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:406:61-406:77: struct request *elv_former_request(struct request_queue *q, struct request *rq)
-
block/elevator.c:794:11-794:27: struct request *rq)
-
block/elevator.c:806:11-806:27: struct request *rq)
-
block/kyber-iosched.c:530:32-530:48: static int rq_get_domain_token(struct request *rq)
-
block/kyber-iosched.c:535:33-535:49: static void rq_set_domain_token(struct request *rq, int token)
-
block/kyber-iosched.c:541:7-541:23: struct request *rq)
-
block/kyber-iosched.c:585:35-585:51: static void kyber_prepare_request(struct request *rq)
-
block/kyber-iosched.c:614:34-614:50: static void kyber_finish_request(struct request *rq)
-
block/kyber-iosched.c:639:37-639:53: static void kyber_completed_request(struct request *rq, u64 now)
-
block/mq-deadline.c:117:48-117:64: deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
-
block/mq-deadline.c:126:25-126:41: static u8 dd_rq_ioclass(struct request *rq)
-
block/mq-deadline.c:135:26-135:42: deadline_earlier_request(struct request *rq)
-
block/mq-deadline.c:149:25-149:41: deadline_latter_request(struct request *rq)
-
block/mq-deadline.c:194:50-194:66: deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-
block/mq-deadline.c:202:50-202:66: deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-
block/mq-deadline.c:212:9-212:25: struct request *rq)
-
block/mq-deadline.c:281:9-281:25: struct request *rq)
-
block/mq-deadline.c:314:61-314:77: static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
-
block/mq-deadline.c:329:7-329:23: struct request *rq)
-
block/mq-deadline.c:429:53-429:69: static bool started_after(struct deadline_data *dd, struct request *rq,
-
block/mq-deadline.c:743:54-743:71: static int dd_request_merge(struct request_queue *q, struct request **rq,
-
block/mq-deadline.c:795:59-795:75: static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-
block/mq-deadline.c:886:32-886:48: static void dd_prepare_request(struct request *rq)
-
block/mq-deadline.c:919:31-919:47: static void dd_finish_request(struct request *rq)
-
block/t10-pi.c:135:34-135:50: static void t10_pi_type1_prepare(struct request *rq)
-
block/t10-pi.c:184:35-184:51: static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
-
block/t10-pi.c:238:34-238:50: static void t10_pi_type3_prepare(struct request *rq)
-
block/t10-pi.c:243:35-243:51: static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
-
block/t10-pi.c:374:34-374:50: static void ext_pi_type1_prepare(struct request *rq)
-
block/t10-pi.c:412:35-412:51: static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
-
drivers/ata/libata-scsi.c:1016:30-1016:46: bool ata_scsi_dma_need_drain(struct request *rq)
-
drivers/block/aoe/aoecmd.c:825:26-825:42: bufinit(struct buf *buf, struct request *rq, struct bio *bio)
-
drivers/block/aoe/aoecmd.c:1029:35-1029:51: aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
-
drivers/block/loop.c:263:52-263:68: static int lo_write_simple(struct loop_device *lo, struct request *rq,
-
drivers/block/loop.c:280:51-280:67: static int lo_read_simple(struct loop_device *lo, struct request *rq,
-
drivers/block/loop.c:309:49-309:65: static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
-
drivers/block/loop.c:330:49-330:65: static int lo_req_flush(struct loop_device *lo, struct request *rq)
-
drivers/block/loop.c:339:28-339:44: static void lo_complete_rq(struct request *rq)
-
drivers/block/loop.c:463:54-463:70: static int do_req_filebacked(struct loop_device *lo, struct request *rq)
-
drivers/block/mtip32xx/mtip32xx.c:2045:55-2045:71: static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
-
drivers/block/mtip32xx/mtip32xx.c:2429:34-2429:50: static void mtip_softirq_done_fn(struct request *rq)
-
drivers/block/mtip32xx/mtip32xx.c:3229:55-3229:71: static inline bool is_stopped(struct driver_data *dd, struct request *rq)
-
drivers/block/mtip32xx/mtip32xx.c:3250:7-3250:23: struct request *rq)
-
drivers/block/mtip32xx/mtip32xx.c:3274:3-3274:19: struct request *rq)
-
drivers/block/mtip32xx/mtip32xx.c:3332:55-3332:71: static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/block/mtip32xx/mtip32xx.c:3345:54-3345:70: static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/block/nbd.c:1729:57-1729:73: static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/block/null_blk/main.c:857:30-857:46: static void null_complete_rq(struct request *rq)
-
drivers/block/null_blk/main.c:1548:36-1548:52: static bool should_timeout_request(struct request *rq)
-
drivers/block/null_blk/main.c:1556:36-1556:52: static bool should_requeue_request(struct request *rq)
-
drivers/block/null_blk/main.c:1672:49-1672:65: static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
-
drivers/block/rnbd/rnbd-clt.c:366:34-366:50: static void rnbd_softirq_done_fn(struct request *rq)
-
drivers/block/rnbd/rnbd-clt.c:989:10-989:26: struct request *rq,
-
drivers/block/rnbd/rnbd-proto.h:254:36-254:52: static inline u32 rq_to_rnbd_flags(struct request *rq)
-
drivers/block/ublk_drv.c:665:37-665:59: static inline bool ublk_rq_has_data(const struct request *rq)
-
drivers/block/ublk_drv.c:1114:3-1114:19: struct request *rq)
-
drivers/block/ublk_drv.c:1232:52-1232:68: static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
-
drivers/block/ublk_drv.c:1264:46-1264:62: static enum blk_eh_timer_return ublk_timeout(struct request *rq)
-
drivers/block/ublk_drv.c:1513:36-1513:52: static bool ublk_check_inflight_rq(struct request *rq, void *data)
-
drivers/block/xen-blkfront.c:122:43-122:59: static inline struct blkif_req *blkif_req(struct request *rq)
-
drivers/block/xen-blkfront.c:934:31-934:47: static void blkif_complete_rq(struct request *rq)
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:2227:40-2227:61: static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:2391:9-2391:30: struct i915_request *rq,
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:3022:5-3022:26: struct i915_request *rq)
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:3089:55-3089:76: static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:3252:43-3252:64: eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
-
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h:35:55-35:76: igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:16:21-16:42: int gen2_emit_flush(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:43:25-43:46: int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:129:25-129:46: int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:144:36-144:57: static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:172:27-172:48: u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:177:27-177:48: u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:186:24-186:45: int i830_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:251:24-251:45: int gen3_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen2_engine_cs.c:271:24-271:45: int gen4_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:55:35-55:56: gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:89:25-89:46: int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:143:31-143:52: u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:178:24-178:45: static int mi_flush_dw(struct i915_request *rq, u32 flags)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:214:26-214:47: static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:219:25-219:46: int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:224:25-224:46: int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:229:24-229:45: int gen6_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:251:19-251:40: hsw_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:272:26-272:47: static int gen7_stall_cs(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:289:25-289:46: int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:353:31-353:52: u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:375:31-375:52: u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen6_engine_cs.c:393:31-393:52: u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:13:25-13:46: int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:79:25-79:46: int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:112:26-112:47: int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:226:35-226:56: static int mtl_dummy_pipe_control(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:247:26-247:47: int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:364:26-364:47: int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:424:24-424:51: static u32 hwsp_offset(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:436:31-436:52: int gen8_emit_init_breadcrumb(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:483:33-483:54: static int __xehp_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:524:30-524:51: int xehp_emit_bb_start_noarb(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:531:24-531:45: int xehp_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:538:30-538:51: int gen8_emit_bb_start_noarb(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:574:24-574:45: int gen8_emit_bb_start(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:602:34-602:55: static void assert_request_valid(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:615:31-615:52: static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:628:35-628:56: static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:644:32-644:53: gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:659:33-659:54: static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:664:36-664:57: u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:669:36-669:57: u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:689:37-689:58: u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:729:41-729:62: static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:746:33-746:54: static u32 ccs_semaphore_offset(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:753:34-753:55: static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:782:33-782:54: gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:801:37-801:58: u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/gen8_engine_cs.c:808:37-808:58: u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:106:46-106:67: check_signal_order(struct intel_context *ce, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:333:32-333:53: static void irq_signal_request(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:344:31-344:52: static void insert_breadcrumb(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:406:37-406:58: bool i915_request_enable_breadcrumb(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:431:37-431:58: void i915_request_cancel_breadcrumb(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_context.c:467:7-467:28: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_context.c:609:50-609:71: bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_context.h:128:7-128:28: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_context_sseu.c:16:34-16:55: static int gen8_emit_rpcs_config(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:1975:44-1975:65: static struct intel_timeline *get_timeline(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:1996:42-1996:63: static int print_ring(char *buf, int sz, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2199:55-2199:76: static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2268:33-2268:54: static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2498:34-2498:56: struct intel_context **ce, struct i915_request **rq)
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:79:56-79:77: static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:87:30-87:51: static void heartbeat_commit(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:96:28-96:55: static void show_heartbeat(const struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:116:46-116:67: reset_engine(struct intel_engine_cs *engine, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_engine_pm.c:109:24-109:45: __queue_and_release_pm(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:213:4-213:25: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:233:56-233:77: active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:256:20-256:47: static int rq_prio(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:261:27-261:54: static int effective_prio(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:298:5-298:32: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:418:33-418:54: execlists_context_status_change(struct i915_request *rq, unsigned long status)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:431:26-431:47: static void reset_active(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:469:25-469:52: static bool bad_request(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:475:25-475:46: __execlists_schedule_in(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:530:35-530:56: static void execlists_schedule_in(struct i915_request *rq, int idx)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:547:26-547:47: resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:560:27-560:48: static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:589:38-589:66: static void __execlists_schedule_out(struct i915_request * const rq,
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:651:43-651:64: static inline void execlists_schedule_out(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:675:37-675:58: static u64 execlists_update_context(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:738:54-738:75: dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:962:41-962:68: static unsigned long i915_request_flags(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:997:8-997:35: const struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1075:27-1075:48: static void defer_request(struct i915_request *rq, struct list_head * const pl)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1137:3-1137:30: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1155:8-1155:35: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1190:51-1190:78: timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1240:10-1240:37: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1256:5-1256:32: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1265:23-1265:50: static bool completed(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2056:30-2056:51: static void __execlists_hold(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2100:7-2100:28: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2128:26-2128:53: static bool hold_request(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2154:32-2154:53: static void __execlists_unhold(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2198:9-2198:30: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2551:6-2551:27: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2561:5-2561:32: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2573:9-2573:36: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2643:11-2643:32: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2714:22-2714:43: static int emit_pdps(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3279:27-3279:48: static void add_to_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3285:32-3285:53: static void remove_from_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3324:28-3324:55: static void kick_execlists(const struct i915_request *rq, int prio)
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3897:36-3897:57: static void virtual_submit_request(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h:25:6-25:27: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_migrate.c:334:32-334:53: static int emit_no_arbitration(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_migrate.c:350:29-350:50: static int max_pte_pkt_size(struct i915_request *rq, int pkt)
-
drivers/gpu/drm/i915/gt/intel_migrate.c:362:21-362:42: static int emit_pte(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_migrate.c:530:26-530:47: static int emit_copy_ccs(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_migrate.c:581:22-581:43: static int emit_copy(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_migrate.c:917:23-917:44: static int emit_clear(struct i915_request *rq, u32 offset, int size,
-
drivers/gpu/drm/i915/gt/intel_renderstate.c:210:7-210:28: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_reset.c:65:25-65:46: static bool mark_guilty(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_reset.c:116:27-116:48: static void mark_innocent(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_reset.c:127:27-127:48: void __i915_request_reset(struct i915_request *rq, bool guilty)
-
drivers/gpu/drm/i915/gt/intel_ring.c:230:23-230:44: u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
-
drivers/gpu/drm/i915/gt/intel_ring.c:312:32-312:53: int intel_ring_cacheline_align(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring.h:41:39-41:60: static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
-
drivers/gpu/drm/i915/gt/intel_ring.h:81:37-81:64: static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:604:5-604:26: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:623:6-623:27: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:654:24-654:45: static int load_pd_dir(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:688:27-688:48: static int mi_set_context(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:805:27-805:48: static int remap_l3_slice(struct i915_request *rq, int slice)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:835:21-835:42: static int remap_l3(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:856:22-856:43: static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:882:28-882:49: static int clear_residuals(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:913:27-913:48: static int switch_context(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:1100:27-1100:48: static void add_to_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:1106:32-1106:53: static void remove_from_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_rps.c:1009:22-1009:43: void intel_rps_boost(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_timeline.c:327:9-327:30: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/intel_timeline.h:96:10-96:37: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_workarounds.c:980:30-980:51: int intel_engine_emit_ctx_wa(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/intel_workarounds.c:3249:13-3249:34: wa_list_srm(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/mock_engine.c:256:32-256:53: static void mock_add_to_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/mock_engine.c:262:37-262:58: static void mock_remove_from_engine(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_context.c:15:25-15:46: static int request_sync(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_engine_cs.c:52:28-52:49: static int write_timestamp(struct i915_request *rq, int slot)
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:28:23-28:44: static bool is_active(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:43:7-43:28: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:69:6-69:27: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:820:22-820:43: emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:97:10-97:37: const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:253:44-253:71: static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:274:48-274:69: static bool wait_until_running(struct hang *h, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:868:31-868:52: static int active_request_put(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:38:23-38:44: static bool is_active(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:53:7-53:28: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1714:6-1714:27: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:38:29-38:50: static int request_add_sync(struct i915_request *rq, int err)
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:49:29-49:50: static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:102:22-102:43: static int read_regs(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:130:28-130:49: static int read_mocs_table(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:148:28-148:49: static int read_l3cc_table(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:454:31-454:52: static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:778:27-778:48: static int emit_read_hwsp(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:956:43-956:64: static struct i915_request *wrap_timeline(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:37:29-37:50: static int request_add_sync(struct i915_request *rq, int err)
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:48:29-48:50: static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:271:6-271:28: struct i915_request **rq)
-
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c:218:29-218:50: static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
-
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c:20:30-20:51: static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:376:31-376:52: request_to_scheduling_context(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:682:53-682:74: static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:765:51-765:72: static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:777:37-777:58: static inline void guc_set_lrc_tail(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:783:27-783:54: static inline int rq_prio(const struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:788:29-788:50: static bool is_multi_lrc_rq(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:793:26-793:47: static bool can_merge_rq(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:836:33-836:54: static int __guc_wq_item_append(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:880:10-880:31: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:897:30-897:51: static bool multi_lrc_submit(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2003:6-2003:27: struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2014:10-2014:31: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2039:49-2039:70: static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2049:32-2049:53: static void guc_submit_request(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3007:12-3007:33: struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3051:46-3051:67: guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3457:28-3457:49: static void add_to_context(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3481:27-3481:48: static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3493:33-3493:54: static void remove_from_context(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3612:30-3612:51: static int guc_request_alloc(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:4068:44-4068:65: static void guc_bump_inflight_request_prio(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:4092:46-4092:67: static void guc_retire_inflight_request_prio(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5205:54-5205:75: static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5252:53-5252:74: static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5297:52-5297:73: __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5340:35-5340:56: static inline bool skip_handshake(struct i915_request *rq)
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5347:50-5347:71: emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5391:51-5391:72: __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5423:49-5423:70: emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
-
drivers/gpu/drm/i915/gt/uc/selftest_guc.c:11:29-11:50: static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
-
drivers/gpu/drm/i915/gvt/scheduler.c:262:35-262:56: static inline bool is_gvt_request(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_active.c:427:54-427:75: int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_active.c:738:31-738:52: int i915_request_await_active(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_active.c:987:39-987:60: void i915_request_add_active_barriers(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_active.c:1119:6-1119:27: struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_active.h:220:50-220:71: static inline int __i915_request_await_exclusive(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_gpu_error.c:1479:7-1479:34: const struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_gpu_error.c:1573:7-1573:28: struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_perf.c:1316:20-1316:41: __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
-
drivers/gpu/drm/i915/i915_perf.c:2483:17-2483:38: gen8_store_flex(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_perf.c:2508:16-2508:37: gen8_load_flex(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:192:21-192:42: __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
-
drivers/gpu/drm/i915/i915_request.c:205:37-205:58: static void __notify_execute_cb_irq(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:216:41-216:62: void i915_request_notify_execute_cb_imm(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:221:33-221:54: static void __i915_request_fill(struct i915_request *rq, u8 val)
-
drivers/gpu/drm/i915/i915_request.c:245:28-245:49: i915_request_active_engine(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:277:32-277:53: static void __rq_init_watchdog(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:298:31-298:52: static void __rq_arm_watchdog(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:317:34-317:55: static void __rq_cancel_watchdog(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:357:26-357:47: bool i915_request_retire(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:419:31-419:52: void i915_request_retire_upto(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:502:19-502:40: __await_execution(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:553:26-553:47: void __i915_request_skip(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:571:34-571:55: bool i915_request_set_error_once(struct i915_request *rq, int error)
-
drivers/gpu/drm/i915/i915_request.c:589:44-589:65: struct i915_request *i915_request_mark_eio(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:762:26-762:47: void i915_request_cancel(struct i915_request *rq, int error)
-
drivers/gpu/drm/i915/i915_request.c:1060:26-1060:47: i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
-
drivers/gpu/drm/i915/i915_request.c:1126:21-1126:42: already_busywaiting(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1335:27-1335:48: static void mark_external(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1349:31-1349:52: __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
-
drivers/gpu/drm/i915/i915_request.c:1359:29-1359:50: i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
-
drivers/gpu/drm/i915/i915_request.c:1384:35-1384:56: static inline bool is_parallel_rq(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1389:55-1389:76: static inline struct intel_context *request_to_parent(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1404:30-1404:51: i915_request_await_execution(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:1501:30-1501:51: i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
-
drivers/gpu/drm/i915/i915_request.c:1569:29-1569:50: int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
-
drivers/gpu/drm/i915/i915_request.c:1621:36-1621:57: static void i915_request_await_huc(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1636:41-1636:62: __i915_request_ensure_parallel_ordering(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:1671:32-1671:53: __i915_request_ensure_ordering(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:1720:32-1720:53: __i915_request_add_to_timeline(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1787:44-1787:65: struct i915_request *__i915_request_commit(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1817:30-1817:51: void __i915_request_queue_bh(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1823:27-1823:48: void __i915_request_queue(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:1845:23-1845:44: void i915_request_add(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:1902:33-1902:61: static bool __i915_spin_request(struct i915_request * const rq, int state)
-
drivers/gpu/drm/i915/i915_request.c:1981:32-1981:53: long i915_request_wait_timeout(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:2120:24-2120:45: long i915_request_wait(struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:2147:26-2147:53: static char queue_status(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:2158:31-2158:58: static const char *run_status(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:2172:33-2172:60: static const char *fence_status(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:2184:10-2184:37: const struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_request.c:2235:63-2235:84: static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:2242:24-2242:45: static bool match_ring(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.c:2262:49-2262:70: enum i915_request_state i915_test_request_state(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:400:18-400:39: i915_request_get(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:406:22-406:43: i915_request_get_rcu(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:412:18-412:39: i915_request_put(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:454:42-454:69: static inline bool i915_request_signaled(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:460:43-460:70: static inline bool i915_request_is_active(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:465:51-465:78: static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:471:37-471:64: i915_request_has_initial_breadcrumb(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:484:32-484:59: static inline u32 __hwsp_seqno(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:504:30-504:57: static inline u32 hwsp_seqno(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:515:47-515:74: static inline bool __i915_request_has_started(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:546:41-546:68: static inline bool i915_request_started(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:571:44-571:71: static inline bool i915_request_is_running(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:601:42-601:69: static inline bool i915_request_is_ready(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:606:47-606:74: static inline bool __i915_request_is_complete(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:611:43-611:70: static inline bool i915_request_completed(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:627:47-627:68: static inline void i915_request_mark_complete(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:633:47-633:74: static inline bool i915_request_has_waitboost(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:638:47-638:74: static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:644:46-644:73: static inline bool i915_request_has_sentinel(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:649:41-649:68: static inline bool i915_request_on_hold(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:654:42-654:63: static inline void i915_request_set_hold(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:659:44-659:65: static inline void i915_request_clear_hold(struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:665:23-665:50: i915_request_timeline(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:674:26-674:53: i915_request_gem_context(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:681:30-681:57: i915_request_active_timeline(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_request.h:693:27-693:54: i915_request_active_seqno(const struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_scheduler.c:289:20-289:41: void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
-
drivers/gpu/drm/i915/i915_scheduler.c:410:10-410:37: const struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_trace.h:266:1-266:1: TRACE_EVENT(i915_request_queue,
-
drivers/gpu/drm/i915/i915_trace.h:320:1-320:1: DEFINE_EVENT(i915_request, i915_request_add,
-
drivers/gpu/drm/i915/i915_trace.h:326:1-326:1: DEFINE_EVENT(i915_request, i915_request_guc_submit,
-
drivers/gpu/drm/i915/i915_trace.h:331:1-331:1: DEFINE_EVENT(i915_request, i915_request_submit,
-
drivers/gpu/drm/i915/i915_trace.h:336:1-336:1: DEFINE_EVENT(i915_request, i915_request_execute,
-
drivers/gpu/drm/i915/i915_trace.h:341:1-341:1: TRACE_EVENT(i915_request_in,
-
drivers/gpu/drm/i915/i915_trace.h:371:1-371:1: TRACE_EVENT(i915_request_out,
-
drivers/gpu/drm/i915/i915_trace.h:601:1-601:1: DEFINE_EVENT(i915_request, i915_request_retire,
-
drivers/gpu/drm/i915/i915_trace.h:606:1-606:1: TRACE_EVENT(i915_request_wait_begin,
-
drivers/gpu/drm/i915/i915_trace.h:640:1-640:1: DEFINE_EVENT(i915_request, i915_request_wait_end,
-
drivers/gpu/drm/i915/i915_vma.c:1884:27-1884:48: __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
-
drivers/gpu/drm/i915/i915_vma.c:1889:60-1889:81: static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
-
drivers/gpu/drm/i915/i915_vma.c:1902:9-1902:30: struct i915_request *rq,
-
drivers/gpu/drm/i915/i915_vma.h:65:47-65:68: i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
-
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c:83:32-83:53: static void pxp_request_commit(struct i915_request *rq)
-
drivers/gpu/drm/i915/selftests/i915_perf.c:161:28-161:49: static int write_timestamp(struct i915_request *rq, int slot)
-
drivers/gpu/drm/i915/selftests/i915_perf.c:188:28-188:49: static ktime_t poll_status(struct i915_request *rq, int slot)
-
drivers/gpu/drm/i915/selftests/i915_request.c:1008:26-1008:47: static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
-
drivers/gpu/drm/i915/selftests/igt_spinner.c:117:10-117:37: const struct i915_request *rq)
-
drivers/gpu/drm/i915/selftests/igt_spinner.c:217:43-217:70: hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
-
drivers/gpu/drm/i915/selftests/igt_spinner.c:250:53-250:74: bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
-
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c:90:32-90:53: int intel_selftest_wait_for_rq(struct i915_request *rq)
-
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c:28:54-28:59: g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
-
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c:28:56-28:61: gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
-
drivers/gpu/drm/scheduler/sched_main.c:128:10-128:31: struct drm_sched_rq *rq)
-
drivers/gpu/drm/scheduler/sched_main.c:145:30-145:51: void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-
drivers/gpu/drm/scheduler/sched_main.c:167:33-167:54: void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
-
drivers/gpu/drm/scheduler/sched_main.c:195:31-195:52: drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
-
drivers/gpu/drm/scheduler/sched_main.c:239:33-239:54: drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2254:23-2254:44: static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
-
drivers/infiniband/hw/mlx5/qp.c:1433:8-1433:27: struct mlx5_ib_rq *rq, void *qpin,
-
drivers/infiniband/hw/mlx5/qp.c:1503:10-1503:29: struct mlx5_ib_rq *rq)
-
drivers/infiniband/hw/mlx5/qp.c:1509:11-1509:30: struct mlx5_ib_rq *rq,
-
drivers/infiniband/hw/mlx5/qp.c:1520:9-1520:28: struct mlx5_ib_rq *rq, u32 tdn,
-
drivers/infiniband/hw/mlx5/qp.c:3781:27-3781:46: struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
-
drivers/infiniband/hw/mlx5/qp.c:4808:6-4808:25: struct mlx5_ib_rq *rq,
-
drivers/infiniband/hw/mlx5/qpc.c:581:5-581:26: struct mlx5_core_qp *rq)
-
drivers/infiniband/hw/mlx5/qpc.c:605:6-605:27: struct mlx5_core_qp *rq)
-
drivers/infiniband/sw/rdmavt/qp.c:783:18-783:33: int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
-
drivers/infiniband/sw/rdmavt/qp.c:2321:32-2321:47: static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
-
drivers/infiniband/sw/rxe/rxe_verbs.c:950:26-950:41: static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
-
drivers/input/misc/xen-kbdfront.c:181:34-181:38: static irqreturn_t input_handler(int rq, void *dev_id)
-
drivers/isdn/hardware/mISDN/avmfritz.c:895:37-895:57: open_bchannel(struct fritzcard *fc, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcmulti.c:4016:8-4016:28: struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcmulti.c:4057:8-4057:28: struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcpci.c:1879:8-1879:28: struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcpci.c:1930:35-1930:55: open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcsusb.c:416:8-416:28: struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/hfcsusb.c:469:35-469:55: open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/mISDNipac.c:743:44-743:64: open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
-
drivers/isdn/hardware/mISDN/mISDNipac.c:761:37-761:57: open_dchannel(struct isac_hw *isac, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/mISDNipac.c:1479:37-1479:57: open_bchannel(struct ipac_hw *ipac, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/mISDNisar.c:1635:33-1635:53: isar_open(struct isar_hw *isar, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/netjet.c:849:38-849:58: open_bchannel(struct tiger_hw *card, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/w6692.c:987:38-987:58: open_bchannel(struct w6692_hw *card, struct channel_req *rq)
-
drivers/isdn/hardware/mISDN/w6692.c:1158:38-1158:58: open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
-
drivers/isdn/mISDN/l1oip_core.c:978:55-978:75: open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
-
drivers/isdn/mISDN/l1oip_core.c:1005:55-1005:75: open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
-
drivers/md/dm-integrity.c:351:34-351:50: static void dm_integrity_prepare(struct request *rq)
-
drivers/md/dm-integrity.c:355:35-355:51: static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
-
drivers/md/dm-mpath.c:507:58-507:74: static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
-
drivers/md/dm-rq.c:122:49-122:65: static struct dm_rq_target_io *tio_from_request(struct request *rq)
-
drivers/md/dm-rq.c:182:41-182:57: static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
-
drivers/md/dm-rq.c:251:29-251:45: static void dm_softirq_done(struct request *rq)
-
drivers/md/dm-rq.c:276:33-276:49: static void dm_complete_request(struct request *rq, blk_status_t error)
-
drivers/md/dm-rq.c:291:38-291:54: static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
-
drivers/md/dm-rq.c:320:47-320:63: static int setup_clone(struct request *clone, struct request *rq,
-
drivers/md/dm-rq.c:338:51-338:67: static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
-
drivers/md/dm-rq.c:457:59-457:75: static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/md/dm-target.c:140:58-140:74: static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
-
drivers/mmc/core/mmc_test.c:766:32-766:53: static void mmc_test_req_reset(struct mmc_test_req *rq)
-
drivers/mmc/core/queue.h:25:58-25:74: static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
-
drivers/net/dsa/hirschmann/hellcreek_ptp.c:223:5-223:31: struct ptp_clock_request *rq, int on)
-
drivers/net/dsa/mv88e6xxx/ptp.c:270:11-270:37: struct ptp_clock_request *rq, int on)
-
drivers/net/dsa/mv88e6xxx/ptp.c:323:5-323:31: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/3com/3c574_cs.c:1034:46-1034:60: static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/3com/3c59x.c:3029:49-3029:63: static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/8390/axnet_cs.c:608:48-608:62: static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/8390/pcnet_cs.c:1108:45-1108:59: static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/adaptec/starfire.c:1906:49-1906:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/adi/adin1110.c:784:54-784:68: static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/amd/pcnet32.c:2777:50-2777:64: static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c:390:12-390:38: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c:427:9-427:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c:461:11-461:37: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c:488:11-488:37: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:13791:8-13791:34: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c:402:11-402:37: struct ptp_clock_request *rq)
-
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c:438:7-438:33: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/broadcom/tg3.c:6255:6-6255:32: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/cadence/macb_main.c:3769:47-3769:61: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/cadence/macb_ptp.c:184:6-184:32: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/cadence/macb_ptp.c:377:43-377:57: int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
-
drivers/net/ethernet/cavium/common/cavium_ptp.c:207:9-207:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/cavium/liquidio/lio_main.c:1621:7-1621:48: struct ptp_clock_request __maybe_unused *rq,
-
drivers/net/ethernet/cavium/thunder/nicvf_main.c:530:5-530:23: struct rcv_queue *rq, struct sk_buff **skb)
-
drivers/net/ethernet/cavium/thunder/nicvf_main.c:773:29-773:47: struct snd_queue *sq, struct rcv_queue *rq)
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:1957:51-1957:68: static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2084:42-2084:59: static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
-
drivers/net/ethernet/chelsio/cxgb3/sge.c:2714:6-2714:23: struct sge_rspq *rq)
-
drivers/net/ethernet/chelsio/cxgb4/sge.c:4864:41-4864:58: void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
-
drivers/net/ethernet/cisco/enic/enic.h:219:58-219:71: static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-
drivers/net/ethernet/cisco/enic/enic.h:230:2-230:15: unsigned int rq)
-
drivers/net/ethernet/cisco/enic/enic_clsf.c:21:64-21:68: int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
-
drivers/net/ethernet/cisco/enic/enic_main.c:1222:30-1222:46: static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-
drivers/net/ethernet/cisco/enic/enic_main.c:1235:30-1235:46: static int enic_rq_alloc_buf(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/enic_main.c:1296:34-1296:50: static void enic_rq_indicate_buf(struct vnic_rq *rq,
-
drivers/net/ethernet/cisco/enic/enic_main.c:1446:56-1446:72: static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/enic_main.c:1458:57-1458:73: static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/enic_res.h:109:39-109:55: static inline void enic_queue_rq_desc(struct vnic_rq *rq,
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:18:31-18:47: static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:53:19-53:35: void vnic_rq_free(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:72:42-72:58: int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:101:32-101:48: static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:125:19-125:35: void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:133:35-133:51: unsigned int vnic_rq_error_status(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:138:21-138:37: void vnic_rq_enable(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:143:21-143:37: int vnic_rq_disable(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.c:171:20-171:36: void vnic_rq_clean(struct vnic_rq *rq,
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:84:47-84:63: static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:90:46-90:62: static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:96:39-96:55: static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:101:47-101:63: static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:106:33-106:49: static inline void vnic_rq_post(struct vnic_rq *rq,
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:141:41-141:57: static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:151:36-151:52: static inline void vnic_rq_service(struct vnic_rq *rq,
-
drivers/net/ethernet/cisco/enic/vnic_rq.h:179:32-179:48: static inline int vnic_rq_fill(struct vnic_rq *rq,
-
drivers/net/ethernet/dec/tulip/tulip_core.c:897:51-897:65: static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/dec/tulip/winbond-840.c:1438:49-1438:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/dlink/dl2k.c:1340:36-1340:50: rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/dlink/sundance.c:1800:49-1800:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/fealnx.c:1873:46-1873:60: static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:2586:55-2586:69: static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:2622:52-2622:66: static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c:16:8-16:34: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/freescale/enetc/enetc.c:2956:42-2956:56: int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/freescale/fec_ptp.c:524:6-524:32: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/freescale/gianfar.c:2129:47-2129:61: static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c:53:61-53:78: static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
-
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c:250:25-250:42: void hinic_rq_debug_rem(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:155:7-155:24: struct hinic_rq *rq, u16 global_qid)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:247:29-247:46: static int alloc_rq_skb_arr(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:264:29-264:46: static void free_rq_skb_arr(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:316:25-316:42: static int alloc_rq_cqe(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:360:25-360:42: static void free_rq_cqe(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:384:19-384:36: int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:435:21-435:38: void hinic_clean_rq(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:468:30-468:47: int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:796:39-796:56: struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:815:25-815:42: void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:837:40-837:57: struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:872:45-872:62: struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:899:23-899:40: void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:922:23-922:40: void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:940:27-940:44: void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
-
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c:968:22-968:39: void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:580:43-580:60: int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
-
drivers/net/ethernet/intel/i40e/i40e_ptp.c:490:10-490:36: struct ptp_clock_request *rq,
-
drivers/net/ethernet/intel/i40e/i40e_ptp.c:595:8-595:34: struct ptp_clock_request *rq,
-
drivers/net/ethernet/intel/ice/ice_ptp.c:1714:5-1714:31: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/ice/ice_ptp.c:1788:9-1788:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/igb/igb_ptp.c:490:6-490:32: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/igb/igb_ptp.c:642:12-642:38: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/igb/igb_ptp.c:788:7-788:33: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/igc/igc_ptp.c:246:12-246:38: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c:627:9-627:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/jme.c:2614:38-2614:52: jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/korina.c:920:49-920:63: static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c:339:7-339:33: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c:671:9-671:26: struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c:686:51-686:68: void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:255:68-255:85: static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:277:51-277:68: mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:342:52-342:69: static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:362:50-362:67: static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:450:59-450:76: static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:703:32-703:49: void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:724:32-724:49: void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c:40:11-40:28: struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c:92:33-92:50: static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:198:53-198:70: static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:345:37-345:54: static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:371:39-371:56: static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:381:41-381:58: static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:391:39-391:56: static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:401:46-401:63: static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h:503:57-503:74: static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c:61:45-61:62: mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c:265:23-265:40: bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c:886:33-886:50: void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:19:30-19:47: int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:160:37-160:54: int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:198:29-198:46: int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:225:48-225:65: static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:247:53-247:70: struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c:304:47-304:64: struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c:63:9-63:26: struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c:484:30-484:47: static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
-
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c:564:31-564:48: void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:222:40-222:57: static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:245:37-245:54: static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:254:37-254:54: static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:259:42-259:59: static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:284:42-284:59: static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:291:38-291:55: static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:474:65-474:82: static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:500:12-500:29: struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:513:40-513:57: static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:552:34-552:51: static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:574:38-574:55: static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:608:39-608:56: static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:621:43-621:60: static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:636:43-636:60: static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:644:28-644:45: u32 xdp_frag_size, struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:675:5-675:22: struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:720:34-720:51: static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:734:16-734:33: int node, struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:936:27-936:44: static void mlx5e_free_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:963:21-963:38: int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1010:34-1010:51: static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1039:31-1039:48: static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1055:20-1055:37: int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1078:32-1078:49: static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1106:23-1106:40: void mlx5e_destroy_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1111:32-1111:49: int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1131:34-1131:51: void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1167:26-1167:43: void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1214:5-1214:22: struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1271:24-1271:41: void mlx5e_activate_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1276:26-1276:43: void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:1282:21-1282:38: void mlx5e_close_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:3193:32-3193:49: static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:3199:11-3199:28: struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:4783:39-4783:56: static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:89:44-89:61: static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:108:42-108:59: static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:153:41-153:58: static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:183:49-183:66: static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:194:42-194:59: static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:226:46-226:63: static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:256:47-256:64: static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:276:40-276:57: static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:295:43-295:60: static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:305:37-305:54: static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:331:38-331:55: static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:338:52-338:69: static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:343:31-343:48: static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:374:38-374:55: static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:389:34-389:51: static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:407:36-407:53: static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:425:32-425:49: static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:439:32-439:49: static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:457:33-457:50: static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:498:32-498:49: mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:525:20-525:37: mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:538:23-538:40: mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:552:21-552:38: mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:585:33-585:50: static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:638:38-638:55: static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:720:36-720:53: static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:759:33-759:50: static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:848:30-848:47: void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:882:36-882:53: static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:894:49-894:66: INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1080:51-1080:68: INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1216:41-1216:58: static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1224:46-1224:63: static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1243:46-1243:63: static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1262:47-1262:64: static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1274:46-1274:63: static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1298:46-1298:63: static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1319:37-1319:54: static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1377:37-1377:54: static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1479:10-1479:27: struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1550:11-1550:28: struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1607:42-1607:59: static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1628:42-1628:59: static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1641:40-1641:57: struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1661:30-1661:47: static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1672:27-1672:44: mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1720:30-1720:47: mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1804:28-1804:45: static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1816:37-1816:54: static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1822:33-1822:50: static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1866:37-1866:54: static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1909:43-1909:60: static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1967:42-1967:59: mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1994:36-1994:53: mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2139:33-2139:50: mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2197:27-2197:44: mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2260:24-2260:41: mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2283:31-2283:48: mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2297:46-2297:63: static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2382:39-2382:56: static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2441:50-2441:67: static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2490:47-2490:64: static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2557:42-2557:59: static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2642:33-2642:50: static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2683:27-2683:44: int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2732:38-2732:55: static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2766:33-2766:50: void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
-
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c:61:33-61:50: static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:438:5-438:31: struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:536:57-536:83: static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:557:13-557:39: struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:589:67-589:93: static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:623:6-623:32: struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:699:10-699:36: struct ptp_clock_request *rq,
-
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c:710:7-710:33: struct ptp_clock_request *rq,
-
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c:808:10-808:36: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c:909:9-909:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c:954:10-954:36: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/mscc/ocelot_ptp.c:198:9-198:35: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/natsemi/natsemi.c:3072:49-3072:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/neterion/s2io.c:6618:47-6618:61: static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/packetengines/hamachi.c:1875:59-1875:73: static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
drivers/net/ethernet/packetengines/hamachi.c:1903:50-1903:64: static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/packetengines/yellowfin.c:1352:49-1352:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/qlogic/qede/qede_ptp.c:123:11-123:37: struct ptp_clock_request *rq,
-
drivers/net/ethernet/realtek/8139cp.c:1606:46-1606:60: static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/realtek/8139too.c:2501:49-2501:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/renesas/rcar_gen4_ptp.c:117:5-117:31: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c:1941:48-1941:62: static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/sis/sis900.c:2228:50-2228:64: static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/smsc/epic100.c:1486:49-1486:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/smsc/smc91c92_cs.c:1997:47-1997:61: static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:6022:49-6022:63: static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c:164:5-164:31: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/ti/cpts.c:300:7-300:33: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/ti/icssg/icss_iep.c:650:11-650:37: struct ptp_clock_request *rq, int on)
-
drivers/net/ethernet/ti/tlan.c:936:47-936:61: static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/via/via-rhine.c:2387:49-2387:63: static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/via/via-velocity.c:2427:51-2427:65: static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/xilinx/xilinx_axienet_main.c:1290:50-1290:64: static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/xilinx/xilinx_emaclite.c:1220:52-1220:66: static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/ethernet/xircom/xirc2ps_cs.c:1420:34-1420:48: do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/fddi/skfp/skfddi.c:960:56-960:70: static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
-
drivers/net/hippi/rrunner.c:1576:54-1576:68: static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
drivers/net/phy/bcm-phy-ptp.c:689:6-689:32: struct ptp_clock_request *rq, int on)
-
drivers/net/phy/dp83640.c:471:10-471:36: struct ptp_clock_request *rq, int on)
-
drivers/net/phy/micrel.c:4250:10-4250:36: struct ptp_clock_request *rq, int on)
-
drivers/net/phy/micrel.c:4435:9-4435:35: struct ptp_clock_request *rq, int on)
-
drivers/net/phy/micrel.c:4463:10-4463:36: struct ptp_clock_request *rq, int on)
-
drivers/net/plip/plip.c:1216:45-1216:59: plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
drivers/net/slip/slip.c:1186:54-1186:68: static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
drivers/net/usb/asix_devices.c:107:48-107:62: static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
-
drivers/net/usb/ax88179_178a.c:832:50-832:64: static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-
drivers/net/usb/dm9601.c:276:49-276:63: static int dm9601_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-
drivers/net/usb/mcs7830.c:328:50-328:64: static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-
drivers/net/usb/pegasus.c:1004:59-1004:73: static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
-
drivers/net/usb/r8152.c:9260:53-9260:67: static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/usb/rtl8150.c:825:62-825:76: static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq,
-
drivers/net/usb/smsc75xx.c:750:54-750:68: static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/usb/smsc95xx.c:796:54-796:68: static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/usb/sr9700.c:239:52-239:66: static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-
drivers/net/usb/sr9800.c:488:45-488:59: static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-
drivers/net/veth.c:295:30-295:46: static void __veth_xdp_flush(struct veth_rq *rq)
-
drivers/net/veth.c:306:24-306:40: static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
-
drivers/net/veth.c:317:8-317:24: struct veth_rq *rq, bool xdp)
-
drivers/net/veth.c:553:31-553:47: static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
-
drivers/net/veth.c:577:28-577:44: static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
-
drivers/net/veth.c:600:24-600:40: static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
-
drivers/net/veth.c:616:43-616:59: static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
-
drivers/net/veth.c:687:35-687:51: static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
-
drivers/net/veth.c:730:41-730:57: static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
-
drivers/net/veth.c:831:41-831:57: static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
-
drivers/net/veth.c:942:25-942:41: static int veth_xdp_rcv(struct veth_rq *rq, int budget,
-
drivers/net/veth.c:1034:34-1034:50: static int veth_create_page_pool(struct veth_rq *rq)
-
drivers/net/virtio_net.c:388:24-388:46: static void give_pages(struct receive_queue *rq, struct page *page)
-
drivers/net/virtio_net.c:398:32-398:54: static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
-
drivers/net/virtio_net.c:498:8-498:30: struct receive_queue *rq,
-
drivers/net/virtio_net.c:597:30-597:52: static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
-
drivers/net/virtio_net.c:626:33-626:55: static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
-
drivers/net/virtio_net.c:637:43-637:65: static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
-
drivers/net/virtio_net.c:648:36-648:58: static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
-
drivers/net/virtio_net.c:673:31-673:53: static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
-
drivers/net/virtio_net.c:1077:40-1077:62: static struct page *xdp_linearize_page(struct receive_queue *rq,
-
drivers/net/virtio_net.c:1158:7-1158:29: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1245:10-1245:32: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1292:8-1292:30: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1313:32-1313:54: static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
-
drivers/net/virtio_net.c:1387:11-1387:33: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1475:8-1475:30: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1550:11-1550:33: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1610:7-1610:29: struct receive_queue *rq,
-
drivers/net/virtio_net.c:1755:50-1755:72: static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-
drivers/net/virtio_net.c:1815:55-1815:77: static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
-
drivers/net/virtio_net.c:1844:53-1844:75: static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
-
drivers/net/virtio_net.c:1893:43-1893:65: static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
-
drivers/net/virtio_net.c:1911:6-1911:28: struct receive_queue *rq, gfp_t gfp)
-
drivers/net/virtio_net.c:1966:52-1966:74: static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
-
drivers/net/virtio_net.c:2062:28-2062:50: static int virtnet_receive(struct receive_queue *rq, int budget,
-
drivers/net/virtio_net.c:2109:34-2109:56: static void virtnet_poll_cleantx(struct receive_queue *rq)
-
drivers/net/virtio_net.c:2382:9-2382:31: struct receive_queue *rq, u32 ring_num)
-
drivers/net/vmxnet3/vmxnet3_drv.c:613:25-613:50: vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
-
drivers/net/vmxnet3/vmxnet3_drv.c:1305:5-1305:30: struct vmxnet3_rx_queue *rq, int size)
-
drivers/net/vmxnet3/vmxnet3_drv.c:1421:18-1421:43: vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
-
drivers/net/vmxnet3/vmxnet3_drv.c:1503:24-1503:49: vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
-
drivers/net/vmxnet3/vmxnet3_drv.c:1910:20-1910:45: vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
-
drivers/net/vmxnet3/vmxnet3_drv.c:1972:32-1972:57: static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
-
drivers/net/vmxnet3/vmxnet3_drv.c:2043:17-2043:42: vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
-
drivers/net/vmxnet3/vmxnet3_drv.c:2127:19-2127:44: vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
-
drivers/net/vmxnet3/vmxnet3_xdp.c:251:17-251:42: vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
-
drivers/net/vmxnet3/vmxnet3_xdp.c:303:19-303:44: vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
-
drivers/net/vmxnet3/vmxnet3_xdp.c:326:6-326:31: struct vmxnet3_rx_queue *rq,
-
drivers/net/vmxnet3/vmxnet3_xdp.c:369:7-369:32: struct vmxnet3_rx_queue *rq,
-
drivers/net/wireless/atmel/atmel.c:2619:48-2619:62: static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/net/wireless/cisco/airo.c:7688:56-7688:70: static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
drivers/nvme/host/core.c:705:3-705:19: struct request *rq)
-
drivers/nvme/host/core.c:717:49-717:65: bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-
drivers/nvme/host/core.c:1025:21-1025:37: int nvme_execute_rq(struct request *rq, bool at_head)
-
drivers/nvme/host/core.c:1199:50-1199:66: static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
-
drivers/nvme/host/fabrics.h:198:52-198:68: static inline void nvmf_complete_timed_out_request(struct request *rq)
-
drivers/nvme/host/fc.c:1840:50-1840:66: nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/nvme/host/fc.c:2097:3-2097:19: struct request *rq, u32 rqno)
-
drivers/nvme/host/fc.c:2148:50-2148:66: nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/nvme/host/fc.c:2580:49-2580:65: static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
-
drivers/nvme/host/fc.c:2608:45-2608:61: nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
-
drivers/nvme/host/fc.c:2643:47-2643:63: nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
-
drivers/nvme/host/fc.c:2861:21-2861:37: nvme_fc_complete_rq(struct request *rq)
-
drivers/nvme/host/multipath.c:125:31-125:47: void nvme_mpath_start_request(struct request *rq)
-
drivers/nvme/host/multipath.c:139:29-139:45: void nvme_mpath_end_request(struct request *rq)
-
drivers/nvme/host/nvme.h:550:28-550:44: static inline u16 nvme_cid(struct request *rq)
-
drivers/nvme/host/nvme.h:795:61-795:77: static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-
drivers/nvme/host/nvme.h:1039:39-1039:55: static inline void nvme_start_request(struct request *rq)
-
drivers/nvme/host/rdma.c:286:3-286:19: struct request *rq, unsigned int hctx_idx)
-
drivers/nvme/host/rdma.c:294:3-294:19: struct request *rq, unsigned int hctx_idx,
-
drivers/nvme/host/rdma.c:1206:62-1206:78: static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
-
drivers/nvme/host/rdma.c:1223:3-1223:19: struct request *rq)
-
drivers/nvme/host/rdma.c:1452:59-1452:75: static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
-
drivers/nvme/host/rdma.c:1513:3-1513:19: struct request *rq, struct nvme_command *c)
-
drivers/nvme/host/rdma.c:1929:42-1929:58: static void nvme_rdma_complete_timed_out(struct request *rq)
-
drivers/nvme/host/rdma.c:1938:51-1938:67: static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
-
drivers/nvme/host/rdma.c:2092:35-2092:51: static void nvme_rdma_complete_rq(struct request *rq)
-
drivers/nvme/host/tcp.c:467:3-467:19: struct request *rq, unsigned int hctx_idx)
-
drivers/nvme/host/tcp.c:475:3-475:19: struct request *rq, unsigned int hctx_idx,
-
drivers/nvme/host/tcp.c:760:41-760:57: static inline void nvme_tcp_end_request(struct request *rq, u16 status)
-
drivers/nvme/host/tcp.c:2255:41-2255:57: static void nvme_tcp_complete_timed_out(struct request *rq)
-
drivers/nvme/host/tcp.c:2264:50-2264:66: static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
-
drivers/nvme/host/tcp.c:2304:4-2304:20: struct request *rq)
-
drivers/nvme/host/tcp.c:2324:3-2324:19: struct request *rq)
-
drivers/nvme/target/passthru.c:249:51-249:67: static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
-
drivers/nvme/target/passthru.c:260:57-260:73: static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
-
drivers/pci/pci.c:6143:42-6143:46: int pcie_set_readrq(struct pci_dev *dev, int rq)
-
drivers/platform/chrome/wilco_ec/mailbox.c:92:9-92:34: struct wilco_ec_request *rq)
-
drivers/platform/chrome/wilco_ec/mailbox.c:117:9-117:34: struct wilco_ec_request *rq)
-
drivers/platform/chrome/wilco_ec/properties.c:35:9-35:37: struct ec_property_request *rq,
-
drivers/platform/chrome/wilco_ec/sysfs.c:158:5-158:32: struct usb_charge_request *rq,
-
drivers/platform/chrome/wilco_ec/telemetry.c:153:32-153:63: static int check_telem_request(struct wilco_ec_telem_request *rq,
-
drivers/ptp/ptp_clockmatrix.c:274:10-274:36: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_clockmatrix.c:1932:4-1932:30: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_dte.c:211:8-211:34: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_idt82p33.c:236:6-236:32: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_idt82p33.c:944:7-944:33: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_kvm_common.c:106:6-106:32: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_ocp.c:1140:49-1140:75: ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
-
drivers/ptp/ptp_pch.c:401:6-401:32: struct ptp_clock_request *rq, int on)
-
drivers/ptp/ptp_qoriq.c:301:8-301:34: struct ptp_clock_request *rq, int on)
-
drivers/scsi/elx/efct/efct_hw_queues.c:455:16-455:30: efct_hw_del_rq(struct hw_rq *rq)
-
drivers/scsi/esas2r/esas2r.h:1169:8-1169:31: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r.h:1200:43-1200:66: static inline void esas2r_rq_init_request(struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r.h:1264:44-1264:67: static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r.h:1277:46-1277:69: static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r.h:1304:6-1304:29: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r.h:1394:9-1394:32: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:387:6-387:29: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:460:11-460:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:489:10-489:33: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:503:10-503:33: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:518:12-518:35: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:549:8-549:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:578:11-578:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:625:7-625:30: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:688:7-688:30: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:740:10-740:33: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:789:8-789:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:826:11-826:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:882:8-882:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:940:11-940:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:1045:8-1045:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_disc.c:1084:5-1084:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:134:7-134:30: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:177:8-177:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:227:50-227:73: static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:303:11-303:34: struct esas2r_request *rq, u8 fi_stat)
-
drivers/scsi/esas2r/esas2r_flash.c:324:9-324:32: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:828:10-828:33: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:847:9-847:32: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_flash.c:1213:7-1213:30: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_flash.c:1258:51-1258:74: bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_flash.c:1391:6-1391:29: struct esas2r_request *rq, struct esas2r_sg_context *sgc)
-
drivers/scsi/esas2r/esas2r_init.c:104:6-104:29: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_init.c:1146:8-1146:31: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_int.c:173:9-173:32: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_int.c:749:51-749:74: void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_int.c:876:54-876:77: void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_int.c:880:12-880:35: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_int.c:920:9-920:32: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_io.c:46:53-46:76: void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_io.c:120:5-120:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_io.c:138:10-138:33: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_io.c:858:53-858:76: bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:83:5-83:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:182:9-182:32: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:199:6-199:29: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:294:10-294:33: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:331:11-331:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:343:10-343:33: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:391:11-391:34: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:607:10-607:33: struct esas2r_request *rq, void *context)
-
drivers/scsi/esas2r/esas2r_ioctl.c:669:9-669:32: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:688:7-688:30: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:745:10-745:33: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:1203:9-1203:32: struct esas2r_request *rq, void *context)
-
drivers/scsi/esas2r/esas2r_ioctl.c:1248:51-1248:74: int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_ioctl.c:1806:9-1806:32: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_ioctl.c:1924:5-1924:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_main.c:893:11-893:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_main.c:1199:5-1199:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_main.c:1236:52-1236:75: void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_main.c:1484:52-1484:75: void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_main.c:1517:5-1517:28: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_vda.c:67:10-67:33: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:270:11-270:34: struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_vda.c:347:8-347:31: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:373:6-373:29: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:420:52-420:75: void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
-
drivers/scsi/esas2r/esas2r_vda.c:449:6-449:29: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:466:8-466:31: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:483:6-483:29: struct esas2r_request *rq,
-
drivers/scsi/esas2r/esas2r_vda.c:504:31-504:54: static void clear_vda_request(struct esas2r_request *rq)
-
drivers/scsi/fnic/fnic_fcs.c:815:37-815:53: static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
-
drivers/scsi/fnic/fnic_fcs.c:958:25-958:41: int fnic_alloc_rq_frame(struct vnic_rq *rq)
-
drivers/scsi/fnic/fnic_fcs.c:992:23-992:39: void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-
drivers/scsi/fnic/fnic_res.h:211:39-211:55: static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
-
drivers/scsi/fnic/vnic_rq.c:15:31-15:47: static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.c:53:19-53:35: void vnic_rq_free(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.c:70:42-70:58: int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
-
drivers/scsi/fnic/vnic_rq.c:99:19-99:35: void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
-
drivers/scsi/fnic/vnic_rq.c:125:35-125:51: unsigned int vnic_rq_error_status(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.c:130:21-130:37: void vnic_rq_enable(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.c:135:21-135:37: int vnic_rq_disable(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.c:153:20-153:36: void vnic_rq_clean(struct vnic_rq *rq,
-
drivers/scsi/fnic/vnic_rq.h:93:47-93:63: static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:99:46-99:62: static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:105:39-105:55: static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:110:47-110:63: static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:115:51-115:67: static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:120:33-120:49: static inline void vnic_rq_post(struct vnic_rq *rq,
-
drivers/scsi/fnic/vnic_rq.h:153:40-153:56: static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-
drivers/scsi/fnic/vnic_rq.h:158:41-158:57: static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
-
drivers/scsi/fnic/vnic_rq.h:168:36-168:52: static inline void vnic_rq_service(struct vnic_rq *rq,
-
drivers/scsi/fnic/vnic_rq.h:196:32-196:48: static inline int vnic_rq_fill(struct vnic_rq *rq,
-
drivers/scsi/hisi_sas/hisi_sas_main.c:180:10-180:26: struct request *rq)
-
drivers/scsi/hosts.c:585:39-585:55: static bool scsi_host_check_in_flight(struct request *rq, void *data)
-
drivers/scsi/hosts.c:680:36-680:52: static bool complete_all_cmds_iter(struct request *rq, void *data)
-
drivers/scsi/lpfc/lpfc_init.c:10936:44-10936:63: lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
-
drivers/scsi/mpi3mr/mpi3mr_os.c:449:31-449:47: static bool mpi3mr_print_scmd(struct request *rq, void *data)
-
drivers/scsi/mpi3mr/mpi3mr_os.c:480:31-480:47: static bool mpi3mr_flush_scmd(struct request *rq, void *data)
-
drivers/scsi/mpi3mr/mpi3mr_os.c:519:38-519:54: static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
-
drivers/scsi/mpi3mr/mpi3mr_os.c:551:38-551:54: static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
-
drivers/scsi/scsi_debug.c:5254:30-5254:46: static bool sdebug_stop_cmnd(struct request *rq, void *data)
-
drivers/scsi/scsi_debug.c:5289:45-5289:61: static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
-
drivers/scsi/scsi_debug.c:5894:38-5894:54: static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
-
drivers/scsi/scsi_debug.c:7426:37-7426:53: static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
-
drivers/scsi/scsi_debugfs.c:35:39-35:55: void scsi_show_rq(struct seq_file *m, struct request *rq)
-
drivers/scsi/scsi_ioctl.c:348:57-348:73: static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
-
drivers/scsi/scsi_ioctl.c:372:35-372:51: static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
-
drivers/scsi/scsi_lib.c:633:39-633:61: static unsigned int scsi_rq_err_bytes(const struct request *rq)
-
drivers/scsi/scsi_lib.c:997:3-997:19: struct request *rq)
-
drivers/scsi/scsi_lib.c:1118:32-1118:48: static void scsi_initialize_rq(struct request *rq)
-
drivers/scsi/scsi_lib.c:1146:29-1146:45: static void scsi_cleanup_rq(struct request *rq)
-
drivers/scsi/scsi_lib.c:1419:27-1419:43: static void scsi_complete(struct request *rq)
-
drivers/scsi/scsi_lib.c:1807:61-1807:77: static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/scsi/scsi_lib.c:1835:62-1835:78: static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
-
drivers/scsi/sd.c:875:34-875:50: static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
-
drivers/scsi/sd_zbc.c:507:40-507:56: static bool sd_zbc_need_zone_wp_update(struct request *rq)
-
drivers/scsi/sg.c:1316:14-1316:30: sg_rq_end_io(struct request *rq, blk_status_t status)
-
drivers/staging/ks7010/ks_wlan_net.c:2461:57-2461:71: static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
-
drivers/staging/octeon/ethernet-mdio.c:56:43-56:57: int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/staging/rtl8192u/r8192U_core.c:3288:50-3288:64: static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/staging/rtl8712/rtl871x_ioctl_linux.c:2178:41-2178:55: int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/staging/rtl8723bs/os_dep/ioctl_linux.c:1282:39-1282:53: int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-
drivers/ufs/core/ufshcd-crypto.h:16:47-16:63: static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
-
drivers/ufs/core/ufshcd.c:6342:30-6342:46: static bool ufshcd_abort_one(struct request *rq, void *priv)
-
drivers/video/fbdev/xen-fbfront.c:321:40-321:44: static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
-
fs/dlm/lock.c:2424:51-2424:67: static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
-
fs/erofs/decompressor.c:194:26-194:57: int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
-
fs/erofs/decompressor.c:276:35-276:66: static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
-
fs/erofs/decompressor.c:320:36-320:67: static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
-
fs/erofs/decompressor_deflate.c:97:32-97:63: int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-
fs/erofs/decompressor_lzma.c:153:29-153:60: int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-
fs/nfsd/nfsd.h:144:33-144:50: static inline int nfsd_v4client(struct svc_rqst *rq)
-
include/linux/blk-integrity.h:102:37-102:53: static inline bool blk_integrity_rq(struct request *rq)
-
include/linux/blk-integrity.h:111:48-111:64: static inline struct bio_vec *rq_integrity_vec(struct request *rq)
-
include/linux/blk-mq.h:198:42-198:58: static inline bool blk_rq_is_passthrough(struct request *rq)
-
include/linux/blk-mq.h:260:5-260:21: struct request *rq, struct request *prev)
-
include/linux/blk-mq.h:786:48-786:64: static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
-
include/linux/blk-mq.h:791:42-791:58: static inline int blk_mq_request_started(struct request *rq)
-
include/linux/blk-mq.h:796:44-796:60: static inline int blk_mq_request_completed(struct request *rq)
-
include/linux/blk-mq.h:808:48-808:64: static inline void blk_mq_set_request_complete(struct request *rq)
-
include/linux/blk-mq.h:817:51-817:67: static inline void blk_mq_complete_request_direct(struct request *rq,
-
include/linux/blk-mq.h:833:43-833:59: static inline bool blk_mq_need_time_stamp(struct request *rq)
-
include/linux/blk-mq.h:838:42-838:58: static inline bool blk_mq_is_reserved_rq(struct request *rq)
-
include/linux/blk-mq.h:937:38-937:54: static inline void *blk_mq_rq_to_pdu(struct request *rq)
-
include/linux/blk-mq.h:949:38-949:54: static inline void blk_mq_cleanup_rq(struct request *rq)
-
include/linux/blk-mq.h:955:36-955:52: static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
-
include/linux/blk-mq.h:967:31-967:47: static inline bool rq_is_sync(struct request *rq)
-
include/linux/blk-mq.h:1031:35-1031:57: static inline sector_t blk_rq_pos(const struct request *rq)
-
include/linux/blk-mq.h:1036:41-1036:63: static inline unsigned int blk_rq_bytes(const struct request *rq)
-
include/linux/blk-mq.h:1041:36-1041:58: static inline int blk_rq_cur_bytes(const struct request *rq)
-
include/linux/blk-mq.h:1050:43-1050:65: static inline unsigned int blk_rq_sectors(const struct request *rq)
-
include/linux/blk-mq.h:1055:47-1055:69: static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
-
include/linux/blk-mq.h:1060:49-1060:71: static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
-
include/linux/blk-mq.h:1071:49-1071:65: static inline unsigned int blk_rq_payload_bytes(struct request *rq)
-
include/linux/blk-mq.h:1082:39-1082:55: static inline struct bio_vec req_bvec(struct request *rq)
-
include/linux/blk-mq.h:1089:46-1089:62: static inline unsigned int blk_rq_count_bios(struct request *rq)
-
include/linux/blk-mq.h:1121:54-1121:70: static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-
include/linux/blk-mq.h:1132:57-1132:73: static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-
include/linux/blk-mq.h:1139:58-1139:74: static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-
include/linux/blk-mq.h:1149:43-1149:59: static inline unsigned int blk_rq_zone_no(struct request *rq)
-
include/linux/blk-mq.h:1154:47-1154:63: static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
-
include/linux/blk-mq.h:1165:46-1165:62: static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
-
include/linux/blk-mq.h:1176:44-1176:60: static inline void blk_req_zone_write_lock(struct request *rq)
-
include/linux/blk-mq.h:1182:46-1182:62: static inline void blk_req_zone_write_unlock(struct request *rq)
-
include/linux/blk-mq.h:1188:49-1188:65: static inline bool blk_req_zone_is_write_locked(struct request *rq)
-
include/linux/blk-mq.h:1194:49-1194:65: static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-
include/linux/blktrace_api.h:115:44-115:60: static inline sector_t blk_rq_trace_sector(struct request *rq)
-
include/linux/blktrace_api.h:126:52-126:68: static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
-
include/linux/mii.h:51:45-51:59: static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
-
include/linux/t10-pi.h:40:34-40:50: static inline u32 t10_pi_ref_tag(struct request *rq)
-
include/linux/t10-pi.h:71:34-71:50: static inline u64 ext_pi_ref_tag(struct request *rq)
-
include/rdma/rdmavt_qp.h:276:36-276:51: static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
-
include/rdma/rdmavt_qp.h:544:49-544:64: static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
-
include/rdma/rdmavt_qp.h:952:32-952:47: static inline void rvt_free_rq(struct rvt_rq *rq)
-
include/trace/events/block.h:75:1-75:1: TRACE_EVENT(block_rq_requeue,
-
include/trace/events/block.h:149:1-149:1: DEFINE_EVENT(block_rq_completion, block_rq_complete,
-
include/trace/events/block.h:165:1-165:1: DEFINE_EVENT(block_rq_completion, block_rq_error,
-
include/trace/events/block.h:215:1-215:1: DEFINE_EVENT(block_rq, block_rq_insert,
-
include/trace/events/block.h:229:1-229:1: DEFINE_EVENT(block_rq, block_rq_issue,
-
include/trace/events/block.h:243:1-243:1: DEFINE_EVENT(block_rq, block_rq_merge,
-
include/trace/events/block.h:256:1-256:1: DEFINE_EVENT(block_rq, block_io_start,
-
include/trace/events/block.h:269:1-269:1: DEFINE_EVENT(block_rq, block_io_done,
-
include/trace/events/block.h:549:1-549:1: TRACE_EVENT(block_rq_remap,
-
include/trace/events/nbd.h:61:1-61:1: DECLARE_EVENT_CLASS(nbd_send_request,
-
include/trace/events/nbd.h:94:1-94:1: NBD_DEFINE_EVENT(nbd_send_request, nbd_send_request,
-
include/trace/events/sched.h:698:1-698:1: DECLARE_TRACE(pelt_rt_tp,
-
include/trace/events/sched.h:702:1-702:1: DECLARE_TRACE(pelt_dl_tp,
-
include/trace/events/sched.h:706:1-706:1: DECLARE_TRACE(pelt_thermal_tp,
-
include/trace/events/sched.h:710:1-710:1: DECLARE_TRACE(pelt_irq_tp,
-
include/trace/events/sched.h:718:1-718:1: DECLARE_TRACE(sched_cpu_capacity_tp,
-
include/trace/events/sched.h:734:1-734:1: DECLARE_TRACE(sched_update_nr_running_tp,
-
kernel/sched/core.c:239:25-239:36: void sched_core_enqueue(struct rq *rq, struct task_struct *p)
-
kernel/sched/core.c:249:25-249:36: void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:299:44-299:55: static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
-
kernel/sched/core.c:551:30-551:41: void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
-
kernel/sched/core.c:576:26-576:37: bool raw_spin_rq_trylock(struct rq *rq)
-
kernel/sched/core.c:600:25-600:36: void raw_spin_rq_unlock(struct rq *rq)
-
kernel/sched/core.c:694:34-694:45: static void update_rq_clock_task(struct rq *rq, s64 delta)
-
kernel/sched/core.c:750:22-750:33: void update_rq_clock(struct rq *rq)
-
kernel/sched/core.c:777:26-777:37: static void hrtick_clear(struct rq *rq)
-
kernel/sched/core.c:804:30-804:41: static void __hrtick_restart(struct rq *rq)
-
kernel/sched/core.c:830:19-830:30: void hrtick_start(struct rq *rq, u64 delay)
-
kernel/sched/core.c:867:28-867:39: static void hrtick_rq_init(struct rq *rq)
-
kernel/sched/core.c:1041:19-1041:30: void resched_curr(struct rq *rq)
-
kernel/sched/core.c:1417:19-1417:30: uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
-
kernel/sched/core.c:1433:38-1433:49: static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
-
kernel/sched/core.c:1444:34-1444:45: unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
-
kernel/sched/core.c:1566:37-1566:48: static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
-
kernel/sched/core.c:1604:37-1604:48: static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
-
kernel/sched/core.c:1670:34-1670:45: static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
-
kernel/sched/core.c:1694:34-1694:45: static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
-
kernel/sched/core.c:1714:39-1714:50: static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
-
kernel/sched/core.c:2013:35-2013:46: static void __init init_uclamp_rq(struct rq *rq)
-
kernel/sched/core.c:2091:33-2091:44: static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:2108:33-2108:44: static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:2125:20-2125:31: void activate_task(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:2137:22-2137:33: void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:2208:40-2208:51: static inline void check_class_changed(struct rq *rq, struct task_struct *p,
-
kernel/sched/core.c:2221:25-2221:36: void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/core.c:2392:36-2392:47: static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
-
kernel/sched/core.c:2462:40-2462:51: static inline bool rq_has_pinned_tasks(struct rq *rq)
-
kernel/sched/core.c:2516:36-2516:47: static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
-
kernel/sched/core.c:2562:34-2562:45: static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
-
kernel/sched/core.c:2961:29-2961:40: static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
-
kernel/sched/core.c:3108:7-3108:18: struct rq *rq,
-
kernel/sched/core.c:3767:18-3767:29: ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-
kernel/sched/core.c:5010:34-5010:45: static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
-
kernel/sched/core.c:5046:28-5046:39: __splice_balance_callbacks(struct rq *rq, bool split)
-
kernel/sched/core.c:5070:65-5070:76: static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-
kernel/sched/core.c:5075:33-5075:44: static void __balance_callbacks(struct rq *rq)
-
kernel/sched/core.c:5080:38-5080:49: static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-
kernel/sched/core.c:5109:21-5109:32: prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
-
kernel/sched/core.c:5125:39-5125:50: static inline void finish_lock_switch(struct rq *rq)
-
kernel/sched/core.c:5179:21-5179:32: prepare_task_switch(struct rq *rq, struct task_struct *prev,
-
kernel/sched/core.c:5324:16-5324:27: context_switch(struct rq *rq, struct task_struct *prev,
-
kernel/sched/core.c:5586:32-5586:43: static u64 cpu_resched_latency(struct rq *rq)
-
kernel/sched/core.c:5964:35-5964:46: static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
-
kernel/sched/core.c:5990:18-5990:29: __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/core.c:6048:45-6048:56: static inline struct task_struct *pick_task(struct rq *rq)
-
kernel/sched/core.c:6067:16-6067:27: pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/core.c:6366:32-6366:43: static void sched_core_balance(struct rq *rq)
-
kernel/sched/core.c:6388:32-6388:43: static void queue_core_balance(struct rq *rq)
-
kernel/sched/core.c:9458:26-9458:37: static void balance_push(struct rq *rq)
-
kernel/sched/core.c:9565:20-9565:31: void set_rq_online(struct rq *rq)
-
kernel/sched/core.c:9580:21-9580:32: void set_rq_offline(struct rq *rq)
-
kernel/sched/core.c:9801:31-9801:42: static void calc_load_migrate(struct rq *rq)
-
kernel/sched/core.c:9809:27-9809:38: static void dump_rq_tasks(struct rq *rq, const char *loglvl)
-
kernel/sched/core.c:11571:41-11571:52: void call_trace_sched_update_nr_running(struct rq *rq, int count)
-
kernel/sched/core.c:12013:23-12013:34: void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
-
kernel/sched/core_sched.c:240:37-240:48: void __sched_core_account_forceidle(struct rq *rq)
-
kernel/sched/core_sched.c:289:24-289:35: void __sched_core_tick(struct rq *rq)
-
kernel/sched/deadline.c:526:33-526:44: static inline int dl_overloaded(struct rq *rq)
-
kernel/sched/deadline.c:531:36-531:47: static inline void dl_set_overload(struct rq *rq)
-
kernel/sched/deadline.c:547:38-547:49: static inline void dl_clear_overload(struct rq *rq)
-
kernel/sched/deadline.c:601:38-601:49: static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:614:38-614:49: static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:630:41-630:52: static inline int has_pushable_dl_tasks(struct rq *rq)
-
kernel/sched/deadline.c:637:38-637:49: static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
-
kernel/sched/deadline.c:648:46-648:57: static inline void deadline_queue_push_tasks(struct rq *rq)
-
kernel/sched/deadline.c:656:45-656:56: static inline void deadline_queue_pull_task(struct rq *rq)
-
kernel/sched/deadline.c:663:45-663:56: static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:769:10-769:21: struct rq *rq)
-
kernel/sched/deadline.c:950:57-950:68: update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
-
kernel/sched/deadline.c:1274:36-1274:47: static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
-
kernel/sched/deadline.c:1298:28-1298:39: static void update_curr_dl(struct rq *rq)
-
kernel/sched/deadline.c:1661:29-1661:40: static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/deadline.c:1745:31-1745:42: static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/deadline.c:1752:29-1752:40: static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/deadline.c:1785:27-1785:38: static void yield_task_dl(struct rq *rq)
-
kernel/sched/deadline.c:1808:8-1808:19: struct rq *rq)
-
kernel/sched/deadline.c:1899:36-1899:47: static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:1920:23-1920:34: static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-
kernel/sched/deadline.c:1942:35-1942:46: static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
-
kernel/sched/deadline.c:1962:29-1962:40: static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:1972:30-1972:41: static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
-
kernel/sched/deadline.c:2006:41-2006:52: static struct task_struct *pick_task_dl(struct rq *rq)
-
kernel/sched/deadline.c:2022:46-2022:57: static struct task_struct *pick_next_task_dl(struct rq *rq)
-
kernel/sched/deadline.c:2033:30-2033:41: static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:2056:26-2056:37: static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
-
kernel/sched/deadline.c:2084:25-2084:36: static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
-
kernel/sched/deadline.c:2096:59-2096:70: static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
-
kernel/sched/deadline.c:2210:64-2210:75: static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
-
kernel/sched/deadline.c:2264:55-2264:66: static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
-
kernel/sched/deadline.c:2288:25-2288:36: static int push_dl_task(struct rq *rq)
-
kernel/sched/deadline.c:2366:27-2366:38: static void push_dl_tasks(struct rq *rq)
-
kernel/sched/deadline.c:2467:27-2467:38: static void task_woken_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:2513:26-2513:37: static void rq_online_dl(struct rq *rq)
-
kernel/sched/deadline.c:2524:27-2524:38: static void rq_offline_dl(struct rq *rq)
-
kernel/sched/deadline.c:2577:30-2577:41: static void switched_from_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:2631:28-2631:39: static void switched_to_dl(struct rq *rq, struct task_struct *p)
-
kernel/sched/deadline.c:2667:29-2667:40: static void prio_changed_dl(struct rq *rq, struct task_struct *p,
-
kernel/sched/debug.c:575:32-575:43: print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
-
kernel/sched/debug.c:608:42-608:53: static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
kernel/sched/fair.c:419:44-419:55: static inline void assert_list_leaf_cfs_rq(struct rq *rq)
-
kernel/sched/fair.c:1191:30-1191:41: static void update_curr_fair(struct rq *rq)
-
kernel/sched/fair.c:1506:34-1506:45: static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:1512:34-1512:45: static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:3442:28-3442:39: static void task_tick_numa(struct rq *rq, struct task_struct *curr)
-
kernel/sched/fair.c:4909:64-4909:75: static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
-
kernel/sched/fair.c:6268:51-6268:62: static void __maybe_unused update_runtime_enabled(struct rq *rq)
-
kernel/sched/fair.c:6287:55-6287:66: static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
-
kernel/sched/fair.c:6416:48-6416:59: static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
-
kernel/sched/fair.c:6424:31-6424:42: static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:6449:27-6449:38: static void hrtick_update(struct rq *rq)
-
kernel/sched/fair.c:6479:47-6479:58: static inline void update_overutilized_status(struct rq *rq)
-
kernel/sched/fair.c:6491:26-6491:37: static int sched_idle_rq(struct rq *rq)
-
kernel/sched/fair.c:6510:19-6510:30: enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/fair.c:6603:31-6603:42: static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/fair.c:6693:31-6693:42: static unsigned long cpu_load(struct rq *rq)
-
kernel/sched/fair.c:6711:39-6711:50: static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:6729:35-6729:46: static unsigned long cpu_runnable(struct rq *rq)
-
kernel/sched/fair.c:6734:43-6734:54: static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:7062:25-7062:36: void __update_idle_core(struct rq *rq)
-
kernel/sched/fair.c:8032:14-8032:25: balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/fair.c:8055:34-8055:45: static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-
kernel/sched/fair.c:8136:43-8136:54: static struct task_struct *pick_task_fair(struct rq *rq)
-
kernel/sched/fair.c:8169:21-8169:32: pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/fair.c:8313:50-8313:61: static struct task_struct *__pick_next_task_fair(struct rq *rq)
-
kernel/sched/fair.c:8321:32-8321:43: static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
-
kernel/sched/fair.c:8335:29-8335:40: static void yield_task_fair(struct rq *rq)
-
kernel/sched/fair.c:8364:32-8364:43: static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:8969:25-8969:36: static void attach_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:8982:29-8982:40: static void attach_one_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:9027:40-9027:51: static inline bool others_have_blocked(struct rq *rq)
-
kernel/sched/fair.c:9046:45-9046:56: static inline void update_blocked_load_tick(struct rq *rq)
-
kernel/sched/fair.c:9051:47-9051:58: static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
-
kernel/sched/fair.c:9063:37-9063:48: static bool __update_blocked_others(struct rq *rq, bool *done)
-
kernel/sched/fair.c:9091:35-9091:46: static bool __update_blocked_fair(struct rq *rq, bool *done)
-
kernel/sched/fair.c:9389:20-9389:31: check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
-
kernel/sched/fair.c:9400:39-9400:50: static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
-
kernel/sched/fair.c:9647:24-9647:35: sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
-
kernel/sched/fair.c:9917:45-9917:56: static inline enum fbq_type fbq_classify_rq(struct rq *rq)
-
kernel/sched/fair.c:11479:31-11479:42: static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
-
kernel/sched/fair.c:11561:34-11561:45: static inline int on_null_domain(struct rq *rq)
-
kernel/sched/fair.c:11635:32-11635:43: static void nohz_balancer_kick(struct rq *rq)
-
kernel/sched/fair.c:11769:29-11769:40: void nohz_balance_exit_idle(struct rq *rq)
-
kernel/sched/fair.c:11860:31-11860:42: static bool update_nohz_stats(struct rq *rq)
-
kernel/sched/fair.c:12219:27-12219:38: void trigger_load_balance(struct rq *rq)
-
kernel/sched/fair.c:12234:28-12234:39: static void rq_online_fair(struct rq *rq)
-
kernel/sched/fair.c:12241:29-12241:40: static void rq_offline_fair(struct rq *rq)
-
kernel/sched/fair.c:12262:35-12262:46: static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
-
kernel/sched/fair.c:12305:27-12305:38: void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
-
kernel/sched/fair.c:12386:28-12386:39: static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
-
kernel/sched/fair.c:12433:19-12433:30: prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
-
kernel/sched/fair.c:12534:32-12534:43: static void switched_from_fair(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:12539:30-12539:41: static void switched_to_fair(struct rq *rq, struct task_struct *p)
-
kernel/sched/fair.c:12561:32-12561:43: static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
-
kernel/sched/fair.c:12886:42-12886:53: static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
-
kernel/sched/idle.c:395:14-395:25: balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/idle.c:404:37-404:48: static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/idle.c:409:32-409:43: static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-
kernel/sched/idle.c:413:32-413:43: static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
-
kernel/sched/idle.c:420:43-420:54: static struct task_struct *pick_task_idle(struct rq *rq)
-
kernel/sched/idle.c:426:41-426:52: struct task_struct *pick_next_task_idle(struct rq *rq)
-
kernel/sched/idle.c:440:19-440:30: dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/idle.c:456:28-456:39: static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
-
kernel/sched/idle.c:460:30-460:41: static void switched_to_idle(struct rq *rq, struct task_struct *p)
-
kernel/sched/idle.c:466:19-466:30: prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
-
kernel/sched/idle.c:471:30-471:41: static void update_curr_idle(struct rq *rq)
-
kernel/sched/loadavg.c:233:33-233:44: static void calc_load_nohz_fold(struct rq *rq)
-
kernel/sched/loadavg.c:258:28-258:39: void calc_load_nohz_remote(struct rq *rq)
-
kernel/sched/pelt.c:346:36-346:47: int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
-
kernel/sched/pelt.c:372:36-372:47: int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
-
kernel/sched/pelt.c:430:25-430:36: int update_irq_load_avg(struct rq *rq, u64 running)
-
kernel/sched/pelt.h:19:34-19:45: update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
-
kernel/sched/pelt.h:24:36-24:47: static inline u64 thermal_load_avg(struct rq *rq)
-
kernel/sched/pelt.h:64:33-64:44: static inline u64 rq_clock_pelt(struct rq *rq)
-
kernel/sched/pelt.h:73:47-73:58: static inline void _update_idle_rq_clock_pelt(struct rq *rq)
-
kernel/sched/pelt.h:95:41-95:52: static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
-
kernel/sched/pelt.h:133:46-133:57: static inline void update_idle_rq_clock_pelt(struct rq *rq)
-
kernel/sched/rt.c:321:38-321:49: static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
-
kernel/sched/rt.c:327:33-327:44: static inline int rt_overloaded(struct rq *rq)
-
kernel/sched/rt.c:332:36-332:47: static inline void rt_set_overload(struct rq *rq)
-
kernel/sched/rt.c:351:38-351:49: static inline void rt_clear_overload(struct rq *rq)
-
kernel/sched/rt.c:408:38-408:49: static inline int has_pushable_tasks(struct rq *rq)
-
kernel/sched/rt.c:419:40-419:51: static inline void rt_queue_push_tasks(struct rq *rq)
-
kernel/sched/rt.c:427:39-427:50: static inline void rt_queue_pull_task(struct rq *rq)
-
kernel/sched/rt.c:432:35-432:46: static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:443:35-443:46: static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:780:31-780:42: static void __disable_runtime(struct rq *rq)
-
kernel/sched/rt.c:862:30-862:41: static void __enable_runtime(struct rq *rq)
-
kernel/sched/rt.c:1045:28-1045:39: static void update_curr_rt(struct rq *rq)
-
kernel/sched/rt.c:1535:17-1535:28: enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/rt.c:1551:29-1551:40: static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/rt.c:1579:29-1579:40: static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
-
kernel/sched/rt.c:1590:27-1590:38: static void yield_task_rt(struct rq *rq)
-
kernel/sched/rt.c:1670:38-1670:49: static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:1697:23-1697:34: static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-
kernel/sched/rt.c:1718:35-1718:46: static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/rt.c:1743:37-1743:48: static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
-
kernel/sched/rt.c:1787:47-1787:58: static struct task_struct *_pick_next_task_rt(struct rq *rq)
-
kernel/sched/rt.c:1802:41-1802:52: static struct task_struct *pick_task_rt(struct rq *rq)
-
kernel/sched/rt.c:1814:46-1814:57: static struct task_struct *pick_next_task_rt(struct rq *rq)
-
kernel/sched/rt.c:1824:30-1824:41: static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:1849:25-1849:36: static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
-
kernel/sched/rt.c:1862:55-1862:66: static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
-
kernel/sched/rt.c:1972:65-1972:76: static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
-
kernel/sched/rt.c:2032:52-2032:63: static struct task_struct *pick_next_pushable_task(struct rq *rq)
-
kernel/sched/rt.c:2057:25-2057:36: static int push_rt_task(struct rq *rq, bool pull)
-
kernel/sched/rt.c:2175:27-2175:38: static void push_rt_tasks(struct rq *rq)
-
kernel/sched/rt.c:2282:30-2282:41: static void tell_cpu_to_push(struct rq *rq)
-
kernel/sched/rt.c:2466:27-2466:38: static void task_woken_rt(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:2480:26-2480:37: static void rq_online_rt(struct rq *rq)
-
kernel/sched/rt.c:2491:27-2491:38: static void rq_offline_rt(struct rq *rq)
-
kernel/sched/rt.c:2505:30-2505:41: static void switched_from_rt(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:2536:28-2536:39: static void switched_to_rt(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:2567:17-2567:28: prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
-
kernel/sched/rt.c:2604:22-2604:33: static void watchdog(struct rq *rq, struct task_struct *p)
-
kernel/sched/rt.c:2639:26-2639:37: static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
-
kernel/sched/rt.c:2673:40-2673:51: static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
-
kernel/sched/sched.h:1187:26-1187:37: static inline int cpu_of(struct rq *rq)
-
kernel/sched/sched.h:1221:39-1221:50: static inline bool sched_core_enabled(struct rq *rq)
-
kernel/sched/sched.h:1235:40-1235:51: static inline raw_spinlock_t *rq_lockp(struct rq *rq)
-
kernel/sched/sched.h:1243:42-1243:53: static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
-
kernel/sched/sched.h:1261:43-1261:54: static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
-
kernel/sched/sched.h:1270:44-1270:55: static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
-
kernel/sched/sched.h:1293:45-1293:56: static inline bool sched_group_cookie_match(struct rq *rq,
-
kernel/sched/sched.h:1361:43-1361:54: static inline void lockdep_assert_rq_held(struct rq *rq)
-
kernel/sched/sched.h:1370:37-1370:48: static inline void raw_spin_rq_lock(struct rq *rq)
-
kernel/sched/sched.h:1375:41-1375:52: static inline void raw_spin_rq_lock_irq(struct rq *rq)
-
kernel/sched/sched.h:1381:43-1381:54: static inline void raw_spin_rq_unlock_irq(struct rq *rq)
-
kernel/sched/sched.h:1387:55-1387:66: static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
-
kernel/sched/sched.h:1395:50-1395:61: static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
-
kernel/sched/sched.h:1409:37-1409:48: static inline void update_idle_core(struct rq *rq)
-
kernel/sched/sched.h:1496:41-1496:52: static inline void assert_clock_updated(struct rq *rq)
-
kernel/sched/sched.h:1505:28-1505:39: static inline u64 rq_clock(struct rq *rq)
-
kernel/sched/sched.h:1513:33-1513:44: static inline u64 rq_clock_task(struct rq *rq)
-
kernel/sched/sched.h:1534:36-1534:47: static inline u64 rq_clock_thermal(struct rq *rq)
-
kernel/sched/sched.h:1539:41-1539:52: static inline void rq_clock_skip_update(struct rq *rq)
-
kernel/sched/sched.h:1549:47-1549:58: static inline void rq_clock_cancel_skipupdate(struct rq *rq)
-
kernel/sched/sched.h:1564:47-1564:58: static inline void rq_clock_start_loop_update(struct rq *rq)
-
kernel/sched/sched.h:1571:46-1571:57: static inline void rq_clock_stop_loop_update(struct rq *rq)
-
kernel/sched/sched.h:1602:32-1602:43: static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1615:34-1615:45: static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1625:34-1625:45: static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1644:37-1644:48: static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1652:16-1652:27: task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-
kernel/sched/sched.h:1662:17-1662:28: rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1670:13-1670:24: rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1678:9-1678:20: rq_lock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1686:22-1686:33: rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1694:15-1694:26: rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1702:11-1702:22: rq_unlock(struct rq *rq, struct rq_flags *rf)
-
kernel/sched/sched.h:1784:24-1784:35: queue_balance_callback(struct rq *rq,
-
kernel/sched/sched.h:1964:49-1964:60: static inline void sched_core_account_forceidle(struct rq *rq)
-
kernel/sched/sched.h:1972:36-1972:47: static inline void sched_core_tick(struct rq *rq)
-
kernel/sched/sched.h:2131:32-2131:43: static inline int task_current(struct rq *rq, struct task_struct *p)
-
kernel/sched/sched.h:2136:31-2136:42: static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
-
kernel/sched/sched.h:2295:34-2295:45: static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
-
kernel/sched/sched.h:2301:34-2301:45: static inline void set_next_task(struct rq *rq, struct task_struct *next)
-
kernel/sched/sched.h:2340:40-2340:51: static inline bool sched_stop_runnable(struct rq *rq)
-
kernel/sched/sched.h:2345:38-2345:49: static inline bool sched_dl_runnable(struct rq *rq)
-
kernel/sched/sched.h:2350:38-2350:49: static inline bool sched_rt_runnable(struct rq *rq)
-
kernel/sched/sched.h:2355:40-2355:51: static inline bool sched_fair_runnable(struct rq *rq)
-
kernel/sched/sched.h:2376:49-2376:60: static inline struct task_struct *get_push_task(struct rq *rq)
-
kernel/sched/sched.h:2400:35-2400:46: static inline void idle_set_state(struct rq *rq,
-
kernel/sched/sched.h:2406:52-2406:63: static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-
kernel/sched/sched.h:2480:49-2480:60: static inline void sched_update_tick_dependency(struct rq *rq) { }
-
kernel/sched/sched.h:2483:35-2483:46: static inline void add_nr_running(struct rq *rq, unsigned count)
-
kernel/sched/sched.h:2502:35-2502:46: static inline void sub_nr_running(struct rq *rq, unsigned count)
-
kernel/sched/sched.h:2549:34-2549:45: static inline int hrtick_enabled(struct rq *rq)
-
kernel/sched/sched.h:2556:39-2556:50: static inline int hrtick_enabled_fair(struct rq *rq)
-
kernel/sched/sched.h:2563:37-2563:48: static inline int hrtick_enabled_dl(struct rq *rq)
-
kernel/sched/sched.h:2958:40-2958:51: static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-
kernel/sched/sched.h:3019:39-3019:50: static inline unsigned long cpu_bw_dl(struct rq *rq)
-
kernel/sched/sched.h:3024:41-3024:52: static inline unsigned long cpu_util_dl(struct rq *rq)
-
kernel/sched/sched.h:3033:41-3033:52: static inline unsigned long cpu_util_rt(struct rq *rq)
-
kernel/sched/sched.h:3042:43-3042:54: static inline unsigned long uclamp_rq_get(struct rq *rq,
-
kernel/sched/sched.h:3048:34-3048:45: static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
-
kernel/sched/sched.h:3054:38-3054:49: static inline bool uclamp_rq_is_idle(struct rq *rq)
-
kernel/sched/sched.h:3077:35-3077:46: unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-
kernel/sched/sched.h:3113:40-3113:51: static inline bool uclamp_rq_is_capped(struct rq *rq)
-
kernel/sched/sched.h:3184:42-3184:53: static inline unsigned long cpu_util_irq(struct rq *rq)
-
kernel/sched/sched.h:3236:41-3236:52: static inline void membarrier_switch_mm(struct rq *rq,
-
kernel/sched/sched.h:3393:41-3393:52: static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
-
kernel/sched/sched.h:3401:32-3401:43: static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
-
kernel/sched/sched.h:3453:30-3453:41: static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
-
kernel/sched/sched.h:3475:34-3475:45: static inline void switch_mm_cid(struct rq *rq,
-
kernel/sched/stats.c:6:32-6:43: void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
-
kernel/sched/stats.c:20:30-20:41: void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
-
kernel/sched/stats.c:47:37-47:48: void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
-
kernel/sched/stats.h:13:22-13:33: rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
-
kernel/sched/stats.h:25:22-25:33: rq_sched_info_depart(struct rq *rq, unsigned long long delta)
-
kernel/sched/stats.h:32:23-32:34: rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
-
kernel/sched/stats.h:205:39-205:50: static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
-
kernel/sched/stats.h:224:31-224:42: static void sched_info_arrive(struct rq *rq, struct task_struct *t)
-
kernel/sched/stats.h:246:39-246:50: static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
-
kernel/sched/stats.h:260:38-260:49: static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
-
kernel/sched/stats.h:276:19-276:30: sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
-
kernel/sched/stop_task.c:19:14-19:25: balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-
kernel/sched/stop_task.c:26:25-26:36: check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/stop_task.c:31:32-31:43: static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
-
kernel/sched/stop_task.c:36:43-36:54: static struct task_struct *pick_task_stop(struct rq *rq)
-
kernel/sched/stop_task.c:44:48-44:59: static struct task_struct *pick_next_task_stop(struct rq *rq)
-
kernel/sched/stop_task.c:55:19-55:30: enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/stop_task.c:61:19-61:30: dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
-
kernel/sched/stop_task.c:66:29-66:40: static void yield_task_stop(struct rq *rq)
-
kernel/sched/stop_task.c:71:32-71:43: static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
-
kernel/sched/stop_task.c:95:28-95:39: static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
-
kernel/sched/stop_task.c:99:30-99:41: static void switched_to_stop(struct rq *rq, struct task_struct *p)
-
kernel/sched/stop_task.c:105:19-105:30: prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
-
kernel/sched/stop_task.c:110:30-110:41: static void update_curr_stop(struct rq *rq)
-
kernel/sched/topology.c:487:21-487:32: void rq_attach_root(struct rq *rq, struct root_domain *rd)
-
kernel/trace/blktrace.c:803:28-803:44: blk_trace_request_get_cgid(struct request *rq)
-
kernel/trace/blktrace.c:827:30-827:46: static void blk_add_trace_rq(struct request *rq, blk_status_t error,
-
kernel/trace/blktrace.c:849:51-849:67: static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
-
kernel/trace/blktrace.c:855:50-855:66: static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
-
kernel/trace/blktrace.c:861:50-861:66: static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
-
kernel/trace/blktrace.c:867:52-867:68: static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
-
kernel/trace/blktrace.c:873:53-873:69: static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
-
kernel/trace/blktrace.c:1040:50-1040:66: static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
-
kernel/trace/blktrace.c:1073:26-1073:42: void blk_add_driver_data(struct request *rq, void *data, size_t len)
-
net/bridge/br_ioctl.c:144:51-144:65: int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq,
-
net/key/af_key.c:1943:6-1943:34: struct sadb_x_ipsecrequest *rq)
record
Declared as a prototype...
Defined...
variable
Defined...
-
block/bfq-iosched.c:999:2-999:18: struct request *rq;
-
block/bfq-iosched.c:5106:2-5106:29: struct request *rq = bfqq->next_rq;
-
block/bfq-iosched.c:5162:2-5162:23: struct request *rq = NULL;
-
block/bfq-iosched.c:5303:2-5303:18: struct request *rq;
-
block/bfq-iosched.c:6300:3-6300:19: struct request *rq;
-
block/blk-flush.c:221:2-221:18: struct request *rq, *n;
-
block/blk-merge.c:1107:2-1107:18: struct request *rq;
-
block/blk-merge.c:1138:2-1138:18: struct request *rq;
-
block/blk-merge.c:1163:2-1163:18: struct request *rq;
-
block/blk-mq-sched.c:60:2-60:18: struct request *rq;
-
block/blk-mq-sched.c:103:3-103:19: struct request *rq;
-
block/blk-mq-sched.c:221:2-221:18: struct request *rq;
-
block/blk-mq-tag.c:255:2-255:18: struct request *rq;
-
block/blk-mq-tag.c:273:2-273:18: struct request *rq;
-
block/blk-mq-tag.c:342:2-342:18: struct request *rq;
-
block/blk-mq.c:354:2-354:43: struct request *rq = tags->static_rqs[tag];
-
block/blk-mq.c:410:2-410:18: struct request *rq;
-
block/blk-mq.c:440:2-440:18: struct request *rq;
-
block/blk-mq.c:530:2-530:18: struct request *rq;
-
block/blk-mq.c:548:2-548:18: struct request *rq;
-
block/blk-mq.c:581:2-581:18: struct request *rq;
-
block/blk-mq.c:621:2-621:18: struct request *rq;
-
block/blk-mq.c:741:2-741:18: struct request *rq;
-
block/blk-mq.c:1079:2-1079:18: struct request *rq;
-
block/blk-mq.c:1128:2-1128:18: struct request *rq, *next;
-
block/blk-mq.c:1467:2-1467:18: struct request *rq;
-
block/blk-mq.c:1980:2-1980:18: struct request *rq;
-
block/blk-mq.c:2015:2-2015:18: struct request *rq;
-
block/blk-mq.c:2462:2-2462:18: struct request *rq;
-
block/blk-mq.c:2681:2-2681:18: struct request *rq;
-
block/blk-mq.c:2736:3-2736:24: struct request *rq = rq_list_pop(&plug->mq_list);
-
block/blk-mq.c:2773:2-2773:18: struct request *rq;
-
block/blk-mq.c:2828:3-2828:24: struct request *rq = list_first_entry(list, struct request,
-
block/blk-mq.c:2876:2-2876:18: struct request *rq;
-
block/blk-mq.c:2906:2-2906:18: struct request *rq;
-
block/blk-mq.c:2969:2-2969:18: struct request *rq;
-
block/blk-mq.c:3225:4-3225:40: struct request *rq = drv_tags->rqs[i];
-
block/blk-mq.c:3263:4-3263:43: struct request *rq = tags->static_rqs[i];
-
block/blk-mq.c:3433:4-3433:25: struct request *rq = p;
-
block/blk-mq.h:380:3-380:24: struct request *rq = list_entry_rq(list->next);
-
block/bsg-lib.c:32:2-32:18: struct request *rq;
-
block/bsg-lib.c:159:2-159:45: struct request *rq = blk_mq_rq_from_pdu(job);
-
block/bsg-lib.c:192:2-192:45: struct request *rq = blk_mq_rq_from_pdu(job);
-
block/elevator.c:206:2-206:18: struct request *rq;
-
block/elevator.c:259:2-259:18: struct request *rq;
-
block/kyber-iosched.c:595:2-595:18: struct request *rq, *next;
-
block/kyber-iosched.c:759:2-759:18: struct request *rq;
-
block/kyber-iosched.c:807:2-807:18: struct request *rq;
-
block/mq-deadline.c:167:2-167:18: struct request *rq, *res = NULL;
-
block/mq-deadline.c:306:2-306:23: struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-
block/mq-deadline.c:349:2-349:18: struct request *rq, *rb_rq, *next;
-
block/mq-deadline.c:393:2-393:18: struct request *rq;
-
block/mq-deadline.c:447:2-447:18: struct request *rq, *next_rq;
-
block/mq-deadline.c:567:2-567:18: struct request *rq;
-
block/mq-deadline.c:600:2-600:18: struct request *rq;
-
block/mq-deadline.c:874:3-874:19: struct request *rq;
-
block/mq-deadline.c:1089:1-1089:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-
block/mq-deadline.c:1090:1-1090:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-
block/mq-deadline.c:1091:1-1091:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-
block/mq-deadline.c:1092:1-1092:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-
block/mq-deadline.c:1093:1-1093:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-
block/mq-deadline.c:1094:1-1094:1: DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-
drivers/ata/libata-scsi.c:1514:2-1514:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/ata/libata-scsi.c:1549:2-1549:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/block/aoe/aoecmd.c:836:2-836:18: struct request *rq;
-
drivers/block/aoe/aoecmd.c:1056:2-1056:28: struct request *rq = buf->rq;
-
drivers/block/aoe/aoedev.c:162:2-162:18: struct request *rq;
-
drivers/block/loop.c:376:2-376:45: struct request *rq = blk_mq_rq_from_pdu(cmd);
-
drivers/block/loop.c:400:2-400:45: struct request *rq = blk_mq_rq_from_pdu(cmd);
-
drivers/block/loop.c:1847:2-1847:27: struct request *rq = bd->rq;
-
drivers/block/loop.c:1891:2-1891:45: struct request *rq = blk_mq_rq_from_pdu(cmd);
-
drivers/block/mtip32xx/mtip32xx.c:956:2-956:18: struct request *rq;
-
drivers/block/mtip32xx/mtip32xx.c:3314:2-3314:27: struct request *rq = bd->rq;
-
drivers/block/nbd.c:835:2-835:18: struct request *rq;
-
drivers/block/null_blk/main.c:1276:2-1276:28: struct request *rq = cmd->rq;
-
drivers/block/null_blk/main.c:1348:2-1348:28: struct request *rq = cmd->rq;
-
drivers/block/null_blk/main.c:1646:2-1646:18: struct request *rq;
-
drivers/block/null_blk/main.c:1708:2-1708:27: struct request *rq = bd->rq;
-
drivers/block/pktcdvd.c:719:2-719:18: struct request *rq;
-
drivers/block/rbd.c:3610:3-3610:50: struct request *rq = blk_mq_rq_from_pdu(img_req);
-
drivers/block/rbd.c:4719:2-4719:53: struct request *rq = blk_mq_rq_from_pdu(img_request);
-
drivers/block/rnbd/rnbd-clt.c:382:2-382:27: struct request *rq = iu->rq;
-
drivers/block/rnbd/rnbd-clt.c:1114:2-1114:27: struct request *rq = bd->rq;
-
drivers/block/ublk_drv.c:1284:2-1284:27: struct request *rq = bd->rq;
-
drivers/block/ublk_drv.c:1429:4-1429:20: struct request *rq;
-
drivers/block/xen-blkfront.c:946:2-946:35: struct request_queue *rq = info->rq;
-
drivers/block/xen-blkfront.c:1603:5-1603:38: struct request_queue *rq = info->rq;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c:329:3-329:47: struct drm_sched_rq *rq = &sched->sched_rq[i];
-
drivers/gpu/drm/i915/display/intel_display_rps.c:25:2-25:34: struct i915_request *rq = wait->request;
-
drivers/gpu/drm/i915/display/intel_overlay.c:233:2-233:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/display/intel_overlay.c:255:2-255:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/display/intel_overlay.c:320:2-320:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/display/intel_overlay.c:401:2-401:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/display/intel_overlay.c:454:2-454:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/i915_gem_busy.c:43:2-43:29: const struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/i915_gem_context.c:1343:2-1343:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:2041:3-2041:43: struct i915_request *rq = eb->requests[j];
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:2490:2-2490:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:2524:2-2524:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:3082:2-3082:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:3144:3-3144:43: struct i915_request *rq = eb->requests[i];
-
drivers/gpu/drm/i915/gem/i915_gem_throttle.c:64:4-64:25: struct i915_request *rq, *target = NULL;
-
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c:197:2-197:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/i915_gem_wait.c:95:2-95:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c:483:2-483:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c:198:2-198:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:74:3-74:29: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:200:3-200:29: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:243:2-243:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:945:2-945:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:1049:2-1049:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:1095:2-1095:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:1515:2-1515:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:1604:2-1604:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c:193:2-193:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c:394:3-394:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:546:3-546:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:1176:2-1176:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:1586:3-1586:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c:24:2-24:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c:115:2-115:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:207:3-207:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:245:3-246:4: struct i915_request *rq =
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:458:2-458:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c:491:2-491:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_context.c:496:2-496:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_context.c:534:2-534:23: struct i915_request *rq, *active = NULL;
-
drivers/gpu/drm/i915/gt/intel_context_sseu.c:43:2-43:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2124:3-2124:39: struct i915_request * const *port, *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2303:2-2303:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_cs.c:2360:2-2360:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:25:2-25:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:70:2-70:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:141:2-141:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:278:2-278:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:380:2-380:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_pm.c:101:2-101:44: struct i915_request *rq = to_request(fence);
-
drivers/gpu/drm/i915/gt/intel_engine_pm.c:153:2-153:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_engine_pm.h:90:2-90:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:369:2-369:23: struct i915_request *rq, *rn, *active = NULL;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:784:2-784:38: struct i915_request * const *port, *rq, *prev = NULL;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:932:3-932:49: struct i915_request *rq = execlists->pending[n];
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1033:3-1033:29: struct i915_request *rq = READ_ONCE(ve->request);
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1125:2-1125:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1414:3-1414:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1493:3-1493:24: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:1968:5-1968:43: struct i915_request *rq = *execlists->active;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2293:2-2293:38: struct i915_request * const *port, *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:2443:3-2443:54: const struct i915_request *rq = *engine->execlists.active;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3032:2-3032:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3153:2-3153:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:3790:2-3790:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:4089:2-4089:23: struct i915_request *rq, *last;
-
drivers/gpu/drm/i915/gt/intel_execlists_submission.c:4143:3-4143:29: struct i915_request *rq = READ_ONCE(ve->request);
-
drivers/gpu/drm/i915/gt/intel_gt.c:529:3-529:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_gt.c:577:3-577:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_gt.c:613:3-613:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_gt_requests.c:19:2-19:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_gt_requests.c:243:2-243:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_migrate.c:694:2-694:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_migrate.c:994:2-994:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/intel_ring_submission.c:354:2-354:29: struct i915_request *pos, *rq;
-
drivers/gpu/drm/i915/gt/intel_timeline.c:424:3-424:24: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/intel_workarounds.c:3292:2-3292:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/mock_engine.c:297:2-297:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_context.c:51:3-51:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_context.c:78:2-78:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_context.c:235:3-235:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_context.c:328:2-328:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_engine_cs.c:168:4-168:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_engine_cs.c:311:4-311:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c:207:2-207:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_engine_pm.c:79:2-79:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_engine_pm.c:261:3-261:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:125:3-125:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:182:3-182:28: struct i915_request *rq[2];
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:344:3-344:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:493:3-493:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:601:3-601:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:720:5-720:26: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:860:2-860:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:895:2-895:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:946:4-946:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1056:2-1056:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1130:3-1130:33: struct i915_request *rq[3] = {};
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1259:2-1259:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1330:3-1330:24: struct i915_request *rq, *nop;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1430:3-1430:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1721:2-1721:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1759:3-1759:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:1852:3-1852:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2058:2-2058:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2102:2-2102:32: struct i915_request *rq[2] = {};
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2173:2-2173:32: struct i915_request *rq[3] = {};
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2268:2-2268:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2325:2-2325:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2576:3-2576:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2706:2-2706:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2801:2-2801:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:2975:3-2975:29: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3153:2-3153:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3214:2-3214:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3299:4-3299:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3398:3-3398:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3490:2-3490:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3754:6-3754:27: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:3771:6-3771:27: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:4027:2-4027:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:4094:2-4094:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:4226:3-4226:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_execlists.c:4337:2-4337:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:109:2-109:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:287:2-287:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:375:5-375:26: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:474:5-474:26: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:595:5-595:26: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:730:4-730:30: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:896:2-896:32: struct i915_request *rq[8] = {};
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1048:4-1048:30: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1300:2-1300:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1434:2-1434:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1683:4-1683:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1813:2-1813:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_hangcheck.c:1912:2-1912:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:83:2-83:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:110:2-110:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:406:2-406:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:534:2-534:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:569:2-569:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:620:2-620:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:742:2-742:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:800:2-800:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1057:2-1057:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1210:2-1210:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1406:2-1406:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1560:2-1560:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1735:2-1735:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_lrc.c:1851:2-1851:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:46:2-46:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:147:2-147:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:265:2-265:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:553:2-553:23: struct i915_request *rq, *prev;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:856:3-856:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_migrate.c:935:3-935:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:221:2-221:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_mocs.c:324:2-324:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rc6.c:127:2-127:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_reset.c:60:3-60:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_ring_submission.c:72:2-72:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:245:3-245:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:403:3-403:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:631:3-631:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:770:3-770:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:897:2-897:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:1158:3-1158:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_rps.c:1260:3-1260:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_slpc.c:316:3-316:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:487:2-487:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:554:4-554:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:624:4-624:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:696:3-696:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:899:2-899:28: struct i915_request *rq = fetch_and_zero(&w->rq);
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:945:2-945:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:1031:4-1031:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:1194:3-1194:33: struct i915_request *rq[3] = {};
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:1270:3-1270:33: struct i915_request *rq[3] = {};
-
drivers/gpu/drm/i915/gt/selftest_timeline.c:1370:4-1370:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_tlb.c:44:2-44:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:104:2-104:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:303:2-303:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:523:3-523:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:846:2-846:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:889:2-889:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/selftest_workarounds.c:1243:2-1243:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c:240:2-240:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c:47:2-47:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c:134:2-134:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:941:3-941:24: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:1712:2-1712:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:1748:2-1748:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:1836:2-1836:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:1851:2-1851:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3546:2-3546:28: struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3554:2-3554:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:4969:2-4969:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:5090:3-5090:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc.c:26:2-26:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc.c:54:2-54:54: struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc.c:153:2-153:40: struct i915_request *spin_rq = NULL, *rq, *last = NULL;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c:17:2-17:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c:36:2-36:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c:80:2-80:23: struct i915_request *rq, *child_rq;
-
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c:114:2-114:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gvt/scheduler.c:289:2-289:28: struct i915_request *rq = data;
-
drivers/gpu/drm/i915/gvt/scheduler.c:463:2-463:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gvt/scheduler.c:594:2-594:38: struct i915_request *rq = workload->req;
-
drivers/gpu/drm/i915/gvt/scheduler.c:802:2-802:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/gvt/scheduler.c:938:2-938:38: struct i915_request *rq = workload->req;
-
drivers/gpu/drm/i915/gvt/scheduler.c:1073:2-1073:38: struct i915_request *rq = workload->req;
-
drivers/gpu/drm/i915/i915_gpu_error.c:1634:2-1634:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/i915_perf.c:1341:2-1341:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_perf.c:2311:2-2311:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_perf.c:2535:2-2535:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_perf.c:2556:2-2556:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_request.c:115:2-115:44: struct i915_request *rq = to_request(fence);
-
drivers/gpu/drm/i915/i915_request.c:284:2-285:3: struct i915_request *rq =
-
drivers/gpu/drm/i915/i915_request.c:441:2-441:38: struct i915_request * const *port, *rq;
-
drivers/gpu/drm/i915/i915_request.c:811:2-811:28: struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
-
drivers/gpu/drm/i915/i915_request.c:827:2-827:23: struct i915_request *rq, *rn;
-
drivers/gpu/drm/i915/i915_request.c:839:2-839:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_request.c:875:2-875:28: struct i915_request *rq = arg;
-
drivers/gpu/drm/i915/i915_request.c:898:2-898:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_request.c:1031:2-1031:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/i915_scheduler.c:133:2-133:54: const struct i915_request *rq = node_to_request(node);
-
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c:99:2-99:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_active.c:101:3-101:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_gem.c:28:3-28:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_gem_evict.c:457:4-457:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_perf.c:201:2-201:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_perf.c:294:2-294:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:367:4-367:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:406:4-406:48: struct i915_request *rq = requests[count - 1];
-
drivers/gpu/drm/i915/selftests/i915_request.c:421:4-421:40: struct i915_request *rq = requests[n];
-
drivers/gpu/drm/i915/selftests/i915_request.c:647:2-647:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:700:2-700:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:761:2-761:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:822:2-822:23: struct i915_request *rq, *nop;
-
drivers/gpu/drm/i915/selftests/i915_request.c:1275:3-1275:40: struct i915_request *rq = request[idx];
-
drivers/gpu/drm/i915/selftests/i915_request.c:1300:3-1300:40: struct i915_request *rq = request[idx];
-
drivers/gpu/drm/i915/selftests/i915_request.c:1476:3-1476:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:1514:3-1514:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:1560:2-1560:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:1690:2-1690:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:1891:2-1891:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2020:2-2020:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2111:3-2111:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2183:3-2183:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2243:2-2243:23: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2298:3-2298:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2388:4-2388:25: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2479:3-2479:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2591:3-2591:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2739:3-2739:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2773:3-2773:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2807:3-2807:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:2998:3-2998:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:3073:3-3073:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/i915_request.c:3149:3-3149:24: struct i915_request *rq;
-
drivers/gpu/drm/i915/selftests/igt_spinner.c:128:2-128:28: struct i915_request *rq = NULL;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:1035:2-1035:23: struct i915_request *rq;
-
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c:39:2-39:23: int rq = ram->freq < 1000000; /* XXX */
-
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c:117:2-117:14: u32 hi, lo, rq, tx;
-
drivers/gpu/drm/scheduler/sched_entity.c:510:2-510:23: struct drm_sched_rq *rq;
-
drivers/gpu/drm/scheduler/sched_main.c:90:2-90:36: struct drm_sched_rq *rq = entity->rq;
-
drivers/gpu/drm/scheduler/sched_main.c:1142:3-1142:47: struct drm_sched_rq *rq = &sched->sched_rq[i];
-
drivers/gpu/drm/scheduler/sched_main.c:1191:4-1191:48: struct drm_sched_rq *rq = &sched->sched_rq[i];
-
drivers/infiniband/hw/bnxt_re/ib_verbs.c:1192:2-1192:23: struct bnxt_qplib_q *rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:177:2-177:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:199:2-199:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:844:2-844:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:985:2-985:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1564:2-1564:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1577:2-1577:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1985:2-1985:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1994:2-1994:33: struct bnxt_qplib_q *rq = &qp->rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2529:2-2529:23: struct bnxt_qplib_q *rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2608:2-2608:23: struct bnxt_qplib_q *rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2706:2-2706:23: struct bnxt_qplib_q *rq;
-
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2798:2-2798:28: struct bnxt_qplib_q *sq, *rq;
-
drivers/infiniband/hw/mlx5/devx.c:638:4-638:44: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/hw/mlx5/qp.c:1577:2-1577:42: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/hw/mlx5/qp.c:1669:2-1669:42: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/hw/mlx5/qp.c:1686:2-1686:42: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/hw/mlx5/qp.c:3903:2-3903:42: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/hw/mlx5/qp.c:4885:2-4885:42: struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-
drivers/infiniband/sw/rdmavt/qp.c:2346:2-2346:17: struct rvt_rq *rq;
-
drivers/infiniband/sw/rxe/rxe_verbs.c:1008:2-1008:27: struct rxe_rq *rq = &qp->rq;
-
drivers/infiniband/ulp/srp/ib_srp.c:2153:2-2153:43: struct request *rq = scsi_cmd_to_rq(scmnd);
-
drivers/isdn/hardware/mISDN/avmfritz.c:920:2-920:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/hfcmulti.c:4146:2-4146:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/hfcpci.c:1957:2-1957:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/hfcsusb.c:526:2-526:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/mISDNipac.c:1531:2-1531:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/netjet.c:875:2-875:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/speedfax.c:241:2-241:22: struct channel_req *rq;
-
drivers/isdn/hardware/mISDN/w6692.c:1181:2-1181:22: struct channel_req *rq;
-
drivers/isdn/mISDN/l1oip_core.c:1036:2-1036:22: struct channel_req *rq;
-
drivers/isdn/mISDN/layer2.c:2112:2-2112:21: struct channel_req rq;
-
drivers/isdn/mISDN/stack.c:421:2-421:21: struct channel_req rq;
-
drivers/isdn/mISDN/stack.c:458:2-458:21: struct channel_req rq, rq2;
-
drivers/isdn/mISDN/stack.c:515:2-515:21: struct channel_req rq;
-
drivers/isdn/mISDN/tei.c:788:2-788:21: struct channel_req rq;
-
drivers/md/dm-rq.c:161:2-161:28: struct request *rq = tio->orig;
-
drivers/md/dm-rq.c:191:2-191:28: struct request *rq = tio->orig;
-
drivers/md/dm-rq.c:367:2-367:28: struct request *rq = tio->orig;
-
drivers/md/dm-rq.c:480:2-480:27: struct request *rq = bd->rq;
-
drivers/mmc/core/mmc_test.c:777:2-777:59: struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
-
drivers/mmc/core/mmc_test.c:2358:2-2358:47: struct mmc_test_req *rq = mmc_test_req_alloc();
-
drivers/mtd/mtd_blkdevs.c:108:2-108:18: struct request *rq;
-
drivers/net/ethernet/cavium/thunder/nicvf_main.c:857:2-857:39: struct rcv_queue *rq = &qs->rq[cq_idx];
-
drivers/net/ethernet/cavium/thunder/nicvf_queues.c:749:2-749:20: struct rcv_queue *rq;
-
drivers/net/ethernet/cavium/thunder/nicvf_queues.c:1814:2-1814:20: struct rcv_queue *rq;
-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c:1178:2-1178:64: const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
-
drivers/net/ethernet/cisco/enic/enic_main.c:1636:2-1636:42: unsigned int rq = (napi - &enic->napi[0]);
-
drivers/net/ethernet/freescale/gianfar.c:1779:2-1779:14: int i, rq = 0;
-
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c:236:2-236:19: struct hinic_rq *rq;
-
drivers/net/ethernet/huawei/hinic/hinic_main.c:241:3-241:61: struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:245:2-245:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:359:2-359:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:461:2-461:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:496:2-496:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:519:2-519:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/huawei/hinic/hinic_rx.c:565:2-565:29: struct hinic_rq *rq = rxq->rq;
-
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c:78:2-78:49: struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
-
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c:1496:2-1496:28: int stack_pages, pool_id, rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c:254:2-254:28: struct mlx5e_rq *rq = &c->rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:82:2-82:19: struct mlx5e_rq *rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:152:2-152:24: struct mlx5e_rq *rq = ctx;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:173:2-173:19: struct mlx5e_rq *rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:497:3-497:20: struct mlx5e_rq *rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:583:2-583:24: struct mlx5e_rq *rq = ctx;
-
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:669:3-669:47: struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c:12:2-12:35: struct mlx5e_rq *rq = &trap_ctx->rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c:65:2-65:28: struct mlx5e_rq *rq = &t->rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c:47:2-47:24: struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c:616:2-616:24: struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:980:2-980:28: struct mlx5e_rq *rq = &c->rq;
-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:2520:2-2520:24: struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c:132:2-132:28: struct mlx5e_rq *rq = &c->rq;
-
drivers/net/ethernet/microsoft/mana/hw_channel.c:209:2-209:21: struct gdma_queue *rq;
-
drivers/net/ethernet/microsoft/mana/hw_channel.c:620:2-620:36: struct gdma_queue *rq = hwc->rxq->gdma_wq;
-
drivers/net/ethernet/ti/icssg/icss_iep.c:576:2-576:27: struct ptp_clock_request rq;
-
drivers/net/veth.c:346:2-346:23: struct veth_rq *rq = NULL;
-
drivers/net/veth.c:489:2-489:18: struct veth_rq *rq;
-
drivers/net/veth.c:1002:2-1003:3: struct veth_rq *rq =
-
drivers/net/veth.c:1066:3-1066:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1074:3-1074:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1106:3-1106:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1115:3-1115:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1144:3-1144:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1167:3-1167:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1184:3-1184:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1259:3-1259:35: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/veth.c:1267:4-1267:36: struct veth_rq *rq = &priv->rq[i];
-
drivers/net/virtio_net.c:918:2-918:33: struct receive_queue *rq = vi->rq;
-
drivers/net/virtio_net.c:1998:2-1998:48: struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
-
drivers/net/virtio_net.c:2048:3-2048:39: struct receive_queue *rq = &vi->rq[i];
-
drivers/net/virtio_net.c:2139:2-2140:3: struct receive_queue *rq =
-
drivers/net/virtio_net.c:2551:3-2551:39: struct receive_queue *rq = &vi->rq[i];
-
drivers/net/virtio_net.c:2865:2-2865:24: struct receive_queue *rq;
-
drivers/net/virtio_net.c:3170:3-3170:39: struct receive_queue *rq = &vi->rq[i];
-
drivers/net/virtio_net.c:4022:3-4022:39: struct receive_queue *rq = &vi->rq[i];
-
drivers/net/vmxnet3/vmxnet3_drv.c:2028:3-2028:53: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/net/vmxnet3/vmxnet3_drv.c:2258:2-2258:32: struct vmxnet3_rx_queue *rq = container_of(napi,
-
drivers/net/vmxnet3/vmxnet3_drv.c:2322:2-2322:32: struct vmxnet3_rx_queue *rq = data;
-
drivers/net/vmxnet3/vmxnet3_drv.c:2513:4-2513:54: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/net/vmxnet3/vmxnet3_drv.c:2823:3-2823:53: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/net/vmxnet3/vmxnet3_drv.c:3276:3-3276:53: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/net/vmxnet3/vmxnet3_drv.c:3317:3-3317:53: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/net/vmxnet3/vmxnet3_ethtool.c:578:3-578:53: struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
-
drivers/nvme/host/core.c:1246:2-1246:18: struct request *rq;
-
drivers/nvme/host/fc.c:1925:2-1925:27: struct request *rq = op->rq;
-
drivers/nvme/host/fc.c:1937:2-1937:27: struct request *rq = op->rq;
-
drivers/nvme/host/fc.c:2805:2-2805:27: struct request *rq = bd->rq;
-
drivers/nvme/host/nvme.h:560:2-560:18: struct request *rq;
-
drivers/nvme/host/nvme.h:696:2-696:40: struct nvme_request *rq = nvme_req(req);
-
drivers/nvme/host/rdma.c:1150:2-1150:45: struct request *rq = blk_mq_rq_from_pdu(req);
-
drivers/nvme/host/rdma.c:1403:2-1403:45: struct request *rq = blk_mq_rq_from_pdu(req);
-
drivers/nvme/host/rdma.c:1680:2-1680:18: struct request *rq;
-
drivers/nvme/host/rdma.c:1978:2-1978:27: struct request *rq = bd->rq;
-
drivers/nvme/host/rdma.c:2063:2-2063:45: struct request *rq = blk_mq_rq_from_pdu(req);
-
drivers/nvme/host/tcp.c:237:2-237:18: struct request *rq;
-
drivers/nvme/host/tcp.c:279:2-279:45: struct request *rq = blk_mq_rq_from_pdu(req);
-
drivers/nvme/host/tcp.c:549:2-549:18: struct request *rq;
-
drivers/nvme/host/tcp.c:574:2-574:18: struct request *rq;
-
drivers/nvme/host/tcp.c:631:2-631:45: struct request *rq = blk_mq_rq_from_pdu(req);
-
drivers/nvme/host/tcp.c:665:2-665:18: struct request *rq;
-
drivers/nvme/host/tcp.c:772:2-773:57: struct request *rq =
-
drivers/nvme/host/tcp.c:859:3-860:21: struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
-
drivers/nvme/host/tcp.c:872:3-873:21: struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
-
drivers/nvme/host/tcp.c:2390:2-2390:27: struct request *rq = bd->rq;
-
drivers/nvme/target/loop.c:108:3-108:19: struct request *rq;
-
drivers/nvme/target/passthru.c:217:2-217:30: struct request *rq = req->p.rq;
-
drivers/nvme/target/passthru.c:297:2-297:23: struct request *rq = NULL;
-
drivers/platform/chrome/wilco_ec/debugfs.c:178:2-178:20: struct ec_request rq;
-
drivers/platform/chrome/wilco_ec/mailbox.c:198:2-198:27: struct wilco_ec_request *rq;
-
drivers/platform/chrome/wilco_ec/properties.c:62:2-62:29: struct ec_property_request rq;
-
drivers/platform/chrome/wilco_ec/properties.c:84:2-84:29: struct ec_property_request rq;
-
drivers/platform/chrome/wilco_ec/sysfs.c:74:2-74:28: struct boot_on_ac_request rq;
-
drivers/platform/chrome/wilco_ec/sysfs.c:183:2-183:28: struct usb_charge_request rq;
-
drivers/platform/chrome/wilco_ec/sysfs.c:203:2-203:28: struct usb_charge_request rq;
-
drivers/ptp/ptp_chardev.c:21:2-21:27: struct ptp_clock_request rq;
-
drivers/scsi/elx/efct/efct_hw.c:748:3-748:33: struct hw_rq *rq = hw->hw_rq[i];
-
drivers/scsi/elx/efct/efct_hw.c:781:2-781:16: struct hw_rq *rq;
-
drivers/scsi/elx/efct/efct_hw.c:1099:3-1099:33: struct hw_rq *rq = hw->hw_rq[i];
-
drivers/scsi/elx/efct/efct_hw.c:1239:3-1239:33: struct hw_rq *rq = hw->hw_rq[i];
-
drivers/scsi/elx/efct/efct_hw.c:1300:3-1300:38: struct hw_rq *rq = hw->hw_rq[rq_idx];
-
drivers/scsi/elx/efct/efct_hw.c:1330:3-1330:33: struct hw_rq *rq = hw->hw_rq[i];
-
drivers/scsi/elx/efct/efct_hw_queues.c:314:2-314:21: struct hw_rq *rq = NULL;
-
drivers/scsi/elx/efct/efct_hw_queues.c:494:2-494:56: struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
-
drivers/scsi/elx/efct/efct_hw_queues.c:531:2-531:16: struct hw_rq *rq;
-
drivers/scsi/elx/efct/efct_hw_queues.c:607:2-607:42: struct hw_rq *rq = hw->hw_rq[hw_rq_index];
-
drivers/scsi/elx/libefc_sli/sli4.c:371:2-371:33: struct sli4_rqst_rq_create_v1 *rq;
-
drivers/scsi/esas2r/esas2r.h:1408:2-1408:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_disc.c:160:2-160:34: struct esas2r_request *rq = &a->general_req;
-
drivers/scsi/esas2r/esas2r_disc.c:313:2-313:34: struct esas2r_request *rq = &a->general_req;
-
drivers/scsi/esas2r/esas2r_disc.c:1163:2-1163:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_init.c:770:2-770:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_init.c:1246:2-1246:34: struct esas2r_request *rq = &a->general_req;
-
drivers/scsi/esas2r/esas2r_init.c:1287:2-1287:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_int.c:210:2-210:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_int.c:309:2-309:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_int.c:390:2-390:34: struct esas2r_request *rq = &a->general_req;
-
drivers/scsi/esas2r/esas2r_int.c:448:2-448:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_io.c:190:2-190:35: struct esas2r_request *rq = sgc->first_req;
-
drivers/scsi/esas2r/esas2r_io.c:373:2-373:35: struct esas2r_request *rq = sgc->first_req;
-
drivers/scsi/esas2r/esas2r_io.c:527:2-527:35: struct esas2r_request *rq = sgc->first_req;
-
drivers/scsi/esas2r/esas2r_io.c:770:2-770:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_ioctl.c:111:2-111:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_ioctl.c:208:2-208:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_ioctl.c:1276:2-1276:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_ioctl.c:1829:3-1829:26: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_ioctl.c:1951:3-1951:26: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_main.c:145:2-145:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_main.c:825:2-825:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_main.c:912:2-912:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_main.c:1111:2-1111:25: struct esas2r_request *rq;
-
drivers/scsi/esas2r/esas2r_main.c:1496:2-1496:25: struct esas2r_request *rq;
-
drivers/scsi/fnic/fnic_scsi.c:1719:2-1719:46: struct request *const rq = scsi_cmd_to_rq(sc);
-
drivers/scsi/fnic/fnic_scsi.c:2197:2-2197:40: struct request *rq = scsi_cmd_to_rq(sc);
-
drivers/scsi/hisi_sas/hisi_sas_main.c:472:2-472:23: struct request *rq = NULL;
-
drivers/scsi/hpsa.c:996:2-996:51: struct reply_queue_buffer *rq = &h->reply_queue[q];
-
drivers/scsi/hpsa.c:5987:2-5987:6: int rq;
-
drivers/scsi/hpsa.h:491:2-491:51: struct reply_queue_buffer *rq = &h->reply_queue[q];
-
drivers/scsi/hpsa.h:593:2-593:51: struct reply_queue_buffer *rq = &h->reply_queue[q];
-
drivers/scsi/lpfc/lpfc_ct.c:3188:2-3188:22: struct lpfc_dmabuf *rq, *rsp;
-
drivers/scsi/mpi3mr/mpi3mr_os.c:4714:2-4714:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/scsi/mpt3sas/mpt3sas_scsih.c:5130:2-5130:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/scsi/mvsas/mv_sas.c:697:2-697:18: struct request *rq;
-
drivers/scsi/myrb.c:1267:2-1267:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/scsi/myrs.c:1587:2-1587:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/scsi/pm8001/pm8001_sas.h:736:2-736:23: struct request *rq = NULL;
-
drivers/scsi/pm8001/pm80xx_hwi.c:4283:2-4283:44: struct request *rq = sas_task_find_rq(task);
-
drivers/scsi/scsi_bsg.c:16:2-16:18: struct request *rq;
-
drivers/scsi/scsi_debug.c:5542:2-5542:42: struct request *rq = scsi_cmd_to_rq(cmnd);
-
drivers/scsi/scsi_error.c:2436:2-2436:18: struct request *rq;
-
drivers/scsi/scsi_ioctl.c:417:2-417:18: struct request *rq;
-
drivers/scsi/scsi_ioctl.c:507:2-507:18: struct request *rq;
-
drivers/scsi/scsi_lib.c:116:2-116:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/scsi_lib.c:1019:2-1019:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/scsi_lib.c:1133:2-1133:18: struct request *rq;
-
drivers/scsi/scsi_lib.c:1157:2-1157:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/scsi_logging.c:31:2-31:62: struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
-
drivers/scsi/sd.c:787:2-787:42: struct request *rq = scsi_cmd_to_rq(scmd);
-
drivers/scsi/sd.c:891:2-891:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:922:2-922:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:949:2-949:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:974:2-974:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:1062:2-1062:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:1185:2-1185:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:1301:2-1301:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd.c:1345:2-1345:43: struct request *rq = scsi_cmd_to_rq(SCpnt);
-
drivers/scsi/sd_zbc.c:332:2-332:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd_zbc.c:414:2-414:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd_zbc.c:480:2-480:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd_zbc.c:535:2-535:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sd_zbc.c:610:2-610:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/scsi/sg.c:1730:2-1730:18: struct request *rq;
-
drivers/scsi/sr.c:301:2-301:43: struct request *rq = scsi_cmd_to_rq(SCpnt);
-
drivers/scsi/sr.c:363:2-363:43: struct request *rq = scsi_cmd_to_rq(SCpnt);
-
drivers/scsi/sr.c:930:2-930:18: struct request *rq;
-
drivers/scsi/virtio_scsi.c:531:2-531:40: struct request *rq = scsi_cmd_to_rq(sc);
-
drivers/tty/ipwireless/hardware.c:1734:2-1734:29: struct ipw_rx_packet *rp, *rq;
-
drivers/ufs/core/ufshcd.c:335:2-335:42: struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
-
drivers/ufs/core/ufshcd.c:411:2-411:41: struct request *rq = scsi_cmd_to_rq(cmd);
-
drivers/ufs/core/ufshcd.c:2931:2-2931:18: struct request *rq;
-
drivers/usb/misc/uss720.c:80:2-80:36: struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count);
-
drivers/usb/misc/uss720.c:98:2-98:31: struct uss720_async_request *rq;
-
drivers/usb/misc/uss720.c:128:2-128:31: struct uss720_async_request *rq;
-
drivers/usb/misc/uss720.c:178:2-178:31: struct uss720_async_request *rq;
-
drivers/usb/misc/uss720.c:196:2-196:31: struct uss720_async_request *rq;
-
drivers/usb/misc/uss720.c:232:2-232:31: struct uss720_async_request *rq;
-
fs/erofs/decompressor.c:68:2-68:43: struct z_erofs_decompress_req *rq = ctx->rq;
-
fs/erofs/decompressor.c:128:2-128:43: struct z_erofs_decompress_req *rq = ctx->rq;
-
fs/erofs/decompressor.c:210:2-210:43: struct z_erofs_decompress_req *rq = ctx->rq;
-
include/scsi/scsi_cmnd.h:306:2-306:46: struct request *rq = blk_mq_rq_from_pdu(scmd);
-
kernel/sched/core.c:630:2-630:13: struct rq *rq;
-
kernel/sched/core.c:655:2-655:13: struct rq *rq;
-
kernel/sched/core.c:789:2-789:18: struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-
kernel/sched/core.c:817:2-817:18: struct rq *rq = arg;
-
kernel/sched/core.c:1067:2-1067:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:1130:2-1130:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:1174:2-1174:18: struct rq *rq = info;
-
kernel/sched/core.c:1484:2-1484:13: struct rq *rq;
-
kernel/sched/core.c:1736:2-1736:13: struct rq *rq;
-
kernel/sched/core.c:2290:2-2290:13: struct rq *rq;
-
kernel/sched/core.c:2584:2-2584:18: struct rq *rq = this_rq();
-
kernel/sched/core.c:2685:2-2685:37: struct rq *lowest_rq = NULL, *rq = this_rq();
-
kernel/sched/core.c:2750:2-2750:18: struct rq *rq = task_rq(p);
-
kernel/sched/core.c:3198:2-3198:13: struct rq *rq;
-
kernel/sched/core.c:3242:2-3242:13: struct rq *rq;
-
kernel/sched/core.c:3721:2-3721:13: struct rq *rq;
-
kernel/sched/core.c:3848:2-3848:13: struct rq *rq;
-
kernel/sched/core.c:3873:2-3873:18: struct rq *rq = this_rq();
-
kernel/sched/core.c:3931:2-3931:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:3941:2-3941:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:4021:2-4021:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:4405:2-4405:18: struct rq *rq = NULL;
-
kernel/sched/core.c:4851:2-4851:13: struct rq *rq;
-
kernel/sched/core.c:5214:2-5214:18: struct rq *rq = this_rq();
-
kernel/sched/core.c:5549:2-5549:13: struct rq *rq;
-
kernel/sched/core.c:5641:2-5641:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:6410:2-6410:18: struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-
kernel/sched/core.c:6449:2-6449:18: struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-
kernel/sched/core.c:6499:2-6499:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:6582:2-6582:13: struct rq *rq;
-
kernel/sched/core.c:7069:2-7069:13: struct rq *rq;
-
kernel/sched/core.c:7192:2-7192:13: struct rq *rq;
-
kernel/sched/core.c:7327:2-7327:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:7374:2-7374:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:7410:2-7410:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:7631:2-7631:13: struct rq *rq;
-
kernel/sched/core.c:8531:2-8531:13: struct rq *rq;
-
kernel/sched/core.c:8934:2-8934:13: struct rq *rq, *p_rq;
-
kernel/sched/core.c:9089:2-9089:13: struct rq *rq;
-
kernel/sched/core.c:9258:2-9258:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9384:2-9384:13: struct rq *rq;
-
kernel/sched/core.c:9428:2-9428:18: struct rq *rq = this_rq();
-
kernel/sched/core.c:9521:2-9521:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9542:2-9542:18: struct rq *rq = this_rq();
-
kernel/sched/core.c:9648:2-9648:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9693:2-9693:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9759:2-9759:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9830:2-9830:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:9974:3-9974:14: struct rq *rq;
-
kernel/sched/core.c:10502:2-10502:13: struct rq *rq;
-
kernel/sched/core.c:10897:3-10897:27: struct rq *rq = cfs_rq->rq;
-
kernel/sched/core.c:11853:2-11853:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:11911:2-11911:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/core.c:12030:2-12030:13: struct rq *rq;
-
kernel/sched/core.c:12054:2-12054:13: struct rq *rq;
-
kernel/sched/core.c:12078:2-12078:13: struct rq *rq;
-
kernel/sched/core_sched.c:58:2-58:13: struct rq *rq;
-
kernel/sched/cpufreq_schedutil.c:159:2-159:18: struct rq *rq = cpu_rq(sg_cpu->cpu);
-
kernel/sched/cputime.c:225:2-225:18: struct rq *rq = this_rq();
-
kernel/sched/deadline.c:70:2-70:18: struct rq *rq = task_rq(p);
-
kernel/sched/deadline.c:177:3-177:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/deadline.c:313:2-313:13: struct rq *rq;
-
kernel/sched/deadline.c:397:2-397:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:791:2-791:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:833:2-833:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:1014:2-1014:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:1049:2-1049:18: struct rq *rq = task_rq(p);
-
kernel/sched/deadline.c:1110:2-1110:13: struct rq *rq;
-
kernel/sched/deadline.c:1239:2-1239:48: struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
-
kernel/sched/deadline.c:1406:2-1406:13: struct rq *rq;
-
kernel/sched/deadline.c:1456:2-1456:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:1469:2-1469:35: struct rq *rq = rq_of_dl_rq(dl_rq);
-
kernel/sched/deadline.c:1822:2-1822:13: struct rq *rq;
-
kernel/sched/deadline.c:1869:2-1869:13: struct rq *rq;
-
kernel/sched/deadline.c:2483:2-2483:13: struct rq *rq;
-
kernel/sched/deadline.c:2545:2-2545:13: struct rq *rq;
-
kernel/sched/debug.c:633:2-633:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/debug.c:764:2-764:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:333:2-333:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:402:3-402:31: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:2053:3-2053:19: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:2085:2-2085:18: struct rq *rq = cpu_rq(env->dst_cpu);
-
kernel/sched/fair.c:3531:3-3531:31: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:3838:2-3838:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:3905:3-3905:31: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:4269:2-4269:13: struct rq *rq;
-
kernel/sched/fair.c:5136:4-5136:32: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:5514:2-5514:18: struct rq *rq = data;
-
kernel/sched/fair.c:5543:2-5543:18: struct rq *rq = data;
-
kernel/sched/fair.c:5562:2-5562:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:5651:2-5651:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:5739:2-5739:18: struct rq *rq = arg;
-
kernel/sched/fair.c:5777:2-5777:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:5820:2-5820:13: struct rq *rq;
-
kernel/sched/fair.c:6247:3-6247:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/fair.c:6928:3-6928:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/fair.c:7788:4-7788:20: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:9141:2-9141:30: struct rq *rq = rq_of(cfs_rq);
-
kernel/sched/fair.c:9202:2-9202:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:9282:2-9282:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:9680:3-9680:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/fair.c:9965:2-9965:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:10005:3-10005:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/fair.c:10773:2-10773:30: struct rq *busiest = NULL, *rq;
-
kernel/sched/fair.c:11805:2-11805:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:11892:2-11892:13: struct rq *rq;
-
kernel/sched/fair.c:12318:2-12318:18: struct rq *rq = task_rq(a);
-
kernel/sched/fair.c:12414:2-12414:18: struct rq *rq = this_rq();
-
kernel/sched/fair.c:12673:2-12673:13: struct rq *rq;
-
kernel/sched/fair.c:12690:2-12690:13: struct rq *rq;
-
kernel/sched/fair.c:12718:2-12718:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/fair.c:12766:3-12766:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/fair.c:12817:3-12817:19: struct rq *rq = cpu_rq(i);
-
kernel/sched/membarrier.c:235:2-235:18: struct rq *rq = this_rq();
-
kernel/sched/membarrier.c:466:3-466:19: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/psi.c:1047:2-1047:13: struct rq *rq;
-
kernel/sched/psi.c:1078:2-1078:13: struct rq *rq;
-
kernel/sched/psi.c:1147:2-1147:13: struct rq *rq;
-
kernel/sched/psi.c:1221:3-1221:19: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/rt.c:218:2-218:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/rt.c:578:2-578:35: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:923:3-923:36: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:1090:2-1090:35: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:1107:2-1107:35: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:1131:2-1131:35: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:1147:2-1147:35: struct rq *rq = rq_of_rt_rq(rt_rq);
-
kernel/sched/rt.c:1504:2-1504:35: struct rq *rq = rq_of_rt_se(rt_se);
-
kernel/sched/rt.c:1516:2-1516:35: struct rq *rq = rq_of_rt_se(rt_se);
-
kernel/sched/rt.c:1602:2-1602:13: struct rq *rq;
-
kernel/sched/rt.c:2320:2-2320:13: struct rq *rq;
-
kernel/sched/sched.h:1728:2-1728:13: struct rq *rq;
-
kernel/sched/stats.c:126:3-126:14: struct rq *rq;
-
kernel/sched/stats.h:170:3-170:14: struct rq *rq;
-
kernel/sched/topology.c:711:2-711:18: struct rq *rq = cpu_rq(cpu);
-
kernel/sched/topology.c:2362:2-2362:18: struct rq *rq = NULL;
-
net/atm/common.c:227:2-227:30: struct sk_buff_head queue, *rq;
-
net/key/af_key.c:1998:2-1998:48: struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
-
net/key/af_key.c:2157:3-2157:31: struct sadb_x_ipsecrequest *rq;
-
net/key/af_key.c:2554:2-2554:30: struct sadb_x_ipsecrequest *rq;
-
net/key/af_key.c:3523:2-3523:30: struct sadb_x_ipsecrequest *rq;
-
net/sunrpc/cache.c:830:2-830:24: struct cache_request *rq;