Symbol: kvm
Kind: function parameter (struct kvm *)
Declared at the following locations:
-
arch/x86/include/asm/kvm_host.h:1816:46-1816:58: static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
-
arch/x86/include/asm/kvm_host.h:2052:42-2052:54: static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
-
arch/x86/include/asm/kvm_host.h:2058:44-2058:56: static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
-
arch/x86/kvm/debugfs.c:191:32-191:44: int kvm_arch_create_vm_debugfs(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:190:43-190:55: static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
-
arch/x86/kvm/hyperv.c:207:44-207:56: static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
-
arch/x86/kvm/hyperv.c:500:26-500:38: int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
-
arch/x86/kvm/hyperv.c:523:32-523:44: static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
-
arch/x86/kvm/hyperv.c:538:32-538:44: void kvm_hv_irq_routing_update(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:568:33-568:45: static u64 get_time_ref_counter(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:1031:38-1031:50: static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
-
arch/x86/kvm/hyperv.c:1043:37-1043:49: static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
-
arch/x86/kvm/hyperv.c:1051:37-1051:49: static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
-
arch/x86/kvm/hyperv.c:1060:38-1060:50: static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
-
arch/x86/kvm/hyperv.c:1156:28-1156:40: void kvm_hv_setup_tsc_page(struct kvm *kvm,
-
arch/x86/kvm/hyperv.c:1234:37-1234:49: void kvm_hv_request_tsc_page_update(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:1740:37-1740:49: static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
-
arch/x86/kvm/hyperv.c:1830:31-1830:43: static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
-
arch/x86/kvm/hyperv.c:1863:34-1863:46: static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
-
arch/x86/kvm/hyperv.c:1874:41-1874:53: static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
-
arch/x86/kvm/hyperv.c:2144:37-2144:49: static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
-
arch/x86/kvm/hyperv.c:2637:21-2637:33: void kvm_hv_init_vm(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:2645:24-2645:36: void kvm_hv_destroy_vm(struct kvm *kvm)
-
arch/x86/kvm/hyperv.c:2656:34-2656:46: static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
-
arch/x86/kvm/hyperv.c:2680:36-2680:48: static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
-
arch/x86/kvm/hyperv.c:2697:29-2697:41: int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
-
arch/x86/kvm/hyperv.h:57:40-57:52: static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
-
arch/x86/kvm/i8254.c:662:32-662:44: struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
-
arch/x86/kvm/i8254.c:736:19-736:31: void kvm_free_pit(struct kvm *kvm)
-
arch/x86/kvm/i8259.c:236:22-236:34: int kvm_pic_read_irq(struct kvm *kvm)
-
arch/x86/kvm/i8259.c:566:29-566:41: static void pic_irq_request(struct kvm *kvm, int level)
-
arch/x86/kvm/i8259.c:590:18-590:30: int kvm_pic_init(struct kvm *kvm)
-
arch/x86/kvm/i8259.c:645:22-645:34: void kvm_pic_destroy(struct kvm *kvm)
-
arch/x86/kvm/ioapic.c:309:49-309:61: void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
-
arch/x86/kvm/ioapic.c:714:21-714:33: int kvm_ioapic_init(struct kvm *kvm)
-
arch/x86/kvm/ioapic.c:740:25-740:37: void kvm_ioapic_destroy(struct kvm *kvm)
-
arch/x86/kvm/ioapic.c:755:21-755:33: void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-
arch/x86/kvm/ioapic.c:765:21-765:33: void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-
arch/x86/kvm/ioapic.h:104:36-104:48: static inline int ioapic_in_kernel(struct kvm *kvm)
-
arch/x86/kvm/irq.c:161:29-161:41: bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
-
arch/x86/kvm/irq.c:168:33-168:45: bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
-
arch/x86/kvm/irq.h:67:33-67:45: static inline int irqchip_split(struct kvm *kvm)
-
arch/x86/kvm/irq.h:76:34-76:46: static inline int irqchip_kernel(struct kvm *kvm)
-
arch/x86/kvm/irq.h:85:33-85:45: static inline int pic_in_kernel(struct kvm *kvm)
-
arch/x86/kvm/irq.h:90:37-90:49: static inline int irqchip_in_kernel(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:31:7-31:19: struct kvm *kvm, int irq_source_id, int level,
-
arch/x86/kvm/irq_comm.c:39:10-39:22: struct kvm *kvm, int irq_source_id, int level,
-
arch/x86/kvm/irq_comm.c:47:30-47:42: int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-
arch/x86/kvm/irq_comm.c:104:22-104:34: void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
-
arch/x86/kvm/irq_comm.c:125:42-125:54: static inline bool kvm_msi_route_invalid(struct kvm *kvm,
-
arch/x86/kvm/irq_comm.c:132:3-132:15: struct kvm *kvm, int irq_source_id, int level, bool line_status)
-
arch/x86/kvm/irq_comm.c:149:7-149:19: struct kvm *kvm, int irq_source_id, int level,
-
arch/x86/kvm/irq_comm.c:159:10-159:22: struct kvm *kvm, int irq_source_id, int level,
-
arch/x86/kvm/irq_comm.c:194:31-194:43: int kvm_request_irq_source_id(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:217:29-217:41: void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
-
arch/x86/kvm/irq_comm.c:238:37-238:49: void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
-
arch/x86/kvm/irq_comm.c:247:39-247:51: void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
-
arch/x86/kvm/irq_comm.c:256:30-256:42: void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
-
arch/x86/kvm/irq_comm.c:271:35-271:47: bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:276:27-276:39: int kvm_set_routing_entry(struct kvm *kvm,
-
arch/x86/kvm/irq_comm.c:333:30-333:42: bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
-
arch/x86/kvm/irq_comm.c:387:35-387:47: int kvm_setup_default_irq_routing(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:395:33-395:45: int kvm_setup_empty_irq_routing(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:400:39-400:51: void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-
arch/x86/kvm/irq_comm.c:439:34-439:46: void kvm_arch_irq_routing_update(struct kvm *kvm)
-
arch/x86/kvm/kvm_onhyperv.c:37:41-37:53: static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
-
arch/x86/kvm/kvm_onhyperv.c:95:32-95:44: int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
-
arch/x86/kvm/kvm_onhyperv.c:106:26-106:38: int hv_flush_remote_tlbs(struct kvm *kvm)
-
arch/x86/kvm/lapic.c:373:31-373:43: void kvm_recalculate_apic_map(struct kvm *kvm)
-
arch/x86/kvm/lapic.c:849:21-849:33: int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-
arch/x86/kvm/lapic.c:1105:43-1105:55: static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
-
arch/x86/kvm/lapic.c:1113:40-1113:52: static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
-
arch/x86/kvm/lapic.c:1139:48-1139:60: static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
-
arch/x86/kvm/lapic.c:1205:36-1205:48: bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-
arch/x86/kvm/lapic.c:1256:35-1256:47: bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
-
arch/x86/kvm/lapic.c:1391:31-1391:43: void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
-
arch/x86/kvm/lapic.c:2596:32-2596:44: int kvm_alloc_apic_access_page(struct kvm *kvm)
-
arch/x86/kvm/mmu.h:247:46-247:58: static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
-
arch/x86/kvm/mmu.h:264:44-264:56: static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
-
arch/x86/kvm/mmu.h:290:42-290:54: static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
-
arch/x86/kvm/mmu/mmu.c:277:38-277:50: int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
-
arch/x86/kvm/mmu/mmu.c:288:41-288:53: static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
-
arch/x86/kvm/mmu/mmu.c:565:38-565:50: static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
-
arch/x86/kvm/mmu/mmu.c:821:30-821:42: static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:842:34-842:46: void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:860:34-860:46: static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/mmu.c:869:32-869:44: static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:885:36-885:48: void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:894:36-894:48: static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:963:40-963:52: static void pte_list_desc_remove_entry(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1000:29-1000:41: static void pte_list_remove(struct kvm *kvm, u64 *spte,
-
arch/x86/kvm/mmu/mmu.c:1031:35-1031:47: static void kvm_zap_one_rmap_spte(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1039:36-1039:48: static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1089:25-1089:37: static void rmap_remove(struct kvm *kvm, u64 *spte)
-
arch/x86/kvm/mmu/mmu.c:1189:23-1189:35: static void drop_spte(struct kvm *kvm, u64 *sptep)
-
arch/x86/kvm/mmu/mmu.c:1197:29-1197:41: static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
-
arch/x86/kvm/mmu/mmu.c:1276:32-1276:44: static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1301:45-1301:57: static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1334:43-1334:55: static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1367:46-1367:58: void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1408:37-1408:49: bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1438:28-1438:40: static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1444:26-1444:38: static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1451:30-1451:42: static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1561:50-1561:62: static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1576:26-1576:38: bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/mmu.c:1593:23-1593:35: bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/mmu.c:1606:26-1606:38: static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1620:31-1620:43: static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-
arch/x86/kvm/mmu/mmu.c:1635:24-1635:36: static void __rmap_add(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:1667:18-1667:30: bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/mmu.c:1680:23-1680:35: bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/mmu.c:1713:43-1713:55: static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
-
arch/x86/kvm/mmu/mmu.c:1719:34-1719:46: static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:1725:36-1725:48: static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:1757:40-1757:52: static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/mmu.c:1763:29-1763:41: static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/mmu.c:1878:36-1878:48: static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:1991:41-1991:53: static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2005:28-2005:40: static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:2145:54-2145:66: static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2230:55-2230:67: static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2266:55-2266:67: static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2423:32-2423:44: static void __link_shadow_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2487:29-2487:41: static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/mmu.c:2517:41-2517:53: static int kvm_mmu_page_unlink_children(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2530:36-2530:48: static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:2539:36-2539:48: static int mmu_zap_unsync_children(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2563:40-2563:52: static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2628:38-2628:50: static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/mmu.c:2637:37-2637:49: static void kvm_mmu_commit_zap_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2662:51-2662:63: static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:2699:53-2699:65: static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:2735:31-2735:43: void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
-
arch/x86/kvm/mmu/mmu.c:2751:28-2751:40: int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
-
arch/x86/kvm/mmu/mmu.c:2784:29-2784:41: static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/mmu.c:2799:29-2799:41: int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
-
arch/x86/kvm/mmu/mmu.c:3076:35-3076:47: static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
-
arch/x86/kvm/mmu/mmu.c:3140:31-3140:43: int kvm_mmu_max_mapping_level(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:3538:32-3538:44: static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
-
arch/x86/kvm/mmu/mmu.c:3559:25-3559:37: void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:3615:36-3615:48: void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:3707:40-3707:52: static int mmu_first_shadow_root_alloc(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:4548:47-4548:59: static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4582:46-4582:58: static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4603:29-4603:41: static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:5498:30-5498:42: static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
-
arch/x86/kvm/mmu/mmu.c:5525:43-5525:55: static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:5909:47-5909:59: static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:5938:45-5938:57: static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:5949:48-5949:60: static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6049:36-6049:48: static void kvm_zap_obsolete_pages(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6114:34-6114:46: static void kvm_mmu_zap_all_fast(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6165:43-6165:55: static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6170:22-6170:34: void kvm_mmu_init_vm(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6189:39-6189:51: static void mmu_free_vm_memory_caches(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6196:24-6196:36: void kvm_mmu_uninit_vm(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6204:36-6204:48: static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
-
arch/x86/kvm/mmu/mmu.c:6239:24-6239:36: void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
-
arch/x86/kvm/mmu/mmu.c:6263:37-6263:49: static bool slot_rmap_write_protect(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6270:39-6270:51: void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6293:48-6293:60: static bool need_topup_split_caches_or_resched(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6308:31-6308:43: static int topup_split_caches(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6341:57-6341:69: static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
-
arch/x86/kvm/mmu/mmu.c:6368:40-6368:52: static void shadow_mmu_split_huge_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6414:43-6414:55: static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6453:45-6453:57: static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6499:49-6499:61: static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6518:35-6518:47: void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6537:40-6537:52: void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6568:42-6568:54: static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6605:44-6605:56: static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6617:36-6617:48: void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6633:36-6633:48: void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6662:29-6662:41: static void kvm_mmu_zap_all(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6687:32-6687:44: void kvm_arch_flush_shadow_all(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:6692:36-6692:48: void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-
arch/x86/kvm/mmu/mmu.c:6698:36-6698:48: void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
-
arch/x86/kvm/mmu/mmu.c:7010:39-7010:51: static void kvm_recover_nx_huge_pages(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:7116:45-7116:57: static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
-
arch/x86/kvm/mmu/mmu.c:7141:26-7141:38: int kvm_mmu_post_init_vm(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu.c:7157:29-7157:41: void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmu_internal.h:175:46-175:58: static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
-
arch/x86/kvm/mmu/mmu_internal.h:184:44-184:56: static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
-
arch/x86/kvm/mmu/mmutrace.h:289:1-289:1: TRACE_EVENT(
-
arch/x86/kvm/mmu/page_track.c:23:44-23:56: bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
-
arch/x86/kvm/mmu/page_track.c:47:35-47:47: int kvm_page_track_create_memslot(struct kvm *kvm,
-
arch/x86/kvm/mmu/page_track.c:77:32-77:44: void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-
arch/x86/kvm/mmu/page_track.c:100:35-100:47: void __kvm_write_track_remove_gfn(struct kvm *kvm,
-
arch/x86/kvm/mmu/page_track.c:123:31-123:43: bool kvm_gfn_is_write_tracked(struct kvm *kvm,
-
arch/x86/kvm/mmu/page_track.c:139:29-139:41: void kvm_page_track_cleanup(struct kvm *kvm)
-
arch/x86/kvm/mmu/page_track.c:147:25-147:37: int kvm_page_track_init(struct kvm *kvm)
-
arch/x86/kvm/mmu/page_track.c:160:38-160:50: int kvm_page_track_register_notifier(struct kvm *kvm,
-
arch/x86/kvm/mmu/page_track.c:183:41-183:53: void kvm_page_track_unregister_notifier(struct kvm *kvm,
-
arch/x86/kvm/mmu/page_track.c:206:29-206:41: void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes)
-
arch/x86/kvm/mmu/page_track.c:229:33-229:45: void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
-
arch/x86/kvm/mmu/page_track.c:255:29-255:41: int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
-
arch/x86/kvm/mmu/page_track.c:285:32-285:44: int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn)
-
arch/x86/kvm/mmu/page_track.h:33:53-33:65: static inline bool kvm_page_track_has_external_user(struct kvm *kvm)
-
arch/x86/kvm/mmu/spte.c:274:31-274:43: u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role,
-
arch/x86/kvm/mmu/tdp_mmu.c:15:27-15:39: void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
-
arch/x86/kvm/mmu/tdp_mmu.c:22:62-22:74: static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:33:29-33:41: void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
-
arch/x86/kvm/mmu/tdp_mmu.c:76:27-76:39: void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:107:47-107:59: static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:262:34-262:46: static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/tdp_mmu.c:268:36-268:48: static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/tdp_mmu.c:283:31-283:43: static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
-
arch/x86/kvm/mmu/tdp_mmu.c:320:31-320:43: static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
-
arch/x86/kvm/mmu/tdp_mmu.c:419:33-419:45: static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-
arch/x86/kvm/mmu/tdp_mmu.c:528:43-528:55: static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:560:43-560:55: static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:603:29-603:41: static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
-
arch/x86/kvm/mmu/tdp_mmu.c:623:42-623:54: static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:659:59-659:71: static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:701:32-701:44: static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:727:30-727:42: static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:763:25-763:37: bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-
arch/x86/kvm/mmu/tdp_mmu.c:791:31-791:43: static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:831:28-831:40: bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
-
arch/x86/kvm/mmu/tdp_mmu.c:841:26-841:38: void kvm_tdp_mmu_zap_all(struct kvm *kvm)
-
arch/x86/kvm/mmu/tdp_mmu.c:865:40-865:52: void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
-
arch/x86/kvm/mmu/tdp_mmu.c:910:39-910:51: void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
-
arch/x86/kvm/mmu/tdp_mmu.c:1015:28-1015:40: static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:1123:34-1123:46: bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
-
arch/x86/kvm/mmu/tdp_mmu.c:1138:52-1138:64: static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1170:27-1170:39: static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:1204:32-1204:44: bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/tdp_mmu.c:1209:26-1209:38: static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:1215:31-1215:43: bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/tdp_mmu.c:1220:26-1220:38: static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:1256:31-1256:43: bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
arch/x86/kvm/mmu/tdp_mmu.c:1271:30-1271:42: static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:1309:30-1309:42: bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1343:56-1343:68: static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1383:36-1383:48: static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
-
arch/x86/kvm/mmu/tdp_mmu.c:1421:42-1421:54: static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1490:39-1490:51: void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1516:35-1516:47: static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:1556:35-1556:47: bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1578:35-1578:47: static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:1626:40-1626:52: void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1637:40-1637:52: static void zap_collapsible_spte_range(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1692:40-1692:52: void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-
arch/x86/kvm/mmu/tdp_mmu.c:1708:31-1708:43: static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-
arch/x86/kvm/mmu/tdp_mmu.c:1744:36-1744:48: bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-
arch/x86/kvm/pmu.c:834:39-834:51: int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
-
arch/x86/kvm/svm/avic.c:174:22-174:34: void avic_vm_destroy(struct kvm *kvm)
-
arch/x86/kvm/svm/avic.c:192:18-192:30: int avic_vm_init(struct kvm *kvm)
-
arch/x86/kvm/svm/avic.c:349:43-349:55: static void avic_kick_vcpu_by_physical_id(struct kvm *kvm, u32 physical_id,
-
arch/x86/kvm/svm/avic.c:365:42-365:54: static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
-
arch/x86/kvm/svm/avic.c:397:40-397:52: static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
-
arch/x86/kvm/svm/avic.c:465:36-465:48: static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
-
arch/x86/kvm/svm/avic.c:861:18-861:30: get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
-
arch/x86/kvm/svm/avic.c:894:25-894:37: int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
-
arch/x86/kvm/svm/sev.c:113:45-113:57: static inline bool is_mirroring_enc_context(struct kvm *kvm)
-
arch/x86/kvm/svm/sev.c:190:25-190:37: static int sev_get_asid(struct kvm *kvm)
-
arch/x86/kvm/svm/sev.c:229:29-229:41: static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
-
arch/x86/kvm/svm/sev.c:246:27-246:39: static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:285:26-285:38: static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
-
arch/x86/kvm/svm/sev.c:314:26-314:38: static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
-
arch/x86/kvm/svm/sev.c:321:29-321:41: static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:393:37-393:49: static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
-
arch/x86/kvm/svm/sev.c:456:30-456:42: static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
-
arch/x86/kvm/svm/sev.c:504:35-504:47: static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:624:37-624:49: static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
-
arch/x86/kvm/svm/sev.c:660:35-660:47: static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:684:31-684:43: static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:746:30-746:42: static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:758:29-758:41: static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:785:32-785:44: static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
-
arch/x86/kvm/svm/sev.c:803:30-803:42: static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
-
arch/x86/kvm/svm/sev.c:819:35-819:47: static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
-
arch/x86/kvm/svm/sev.c:855:35-855:47: static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
-
arch/x86/kvm/svm/sev.c:931:26-931:38: static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
-
arch/x86/kvm/svm/sev.c:1015:30-1015:42: static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1090:39-1090:51: static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1153:39-1153:51: __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
-
arch/x86/kvm/svm/sev.c:1172:27-1172:39: static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1264:38-1264:50: __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
-
arch/x86/kvm/svm/sev.c:1285:33-1285:45: static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1370:28-1370:40: static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1382:28-1382:40: static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1394:30-1394:42: static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1468:36-1468:48: static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1545:31-1545:43: static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
-
arch/x86/kvm/svm/sev.c:1624:41-1624:53: static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-
arch/x86/kvm/svm/sev.c:1664:44-1664:56: static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-
arch/x86/kvm/svm/sev.c:1781:34-1781:46: int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
-
arch/x86/kvm/svm/sev.c:1852:23-1852:35: int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
-
arch/x86/kvm/svm/sev.c:1949:33-1949:45: int sev_mem_enc_register_region(struct kvm *kvm,
-
arch/x86/kvm/svm/sev.c:2000:17-2000:29: find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
-
arch/x86/kvm/svm/sev.c:2015:44-2015:56: static void __unregister_enc_region_locked(struct kvm *kvm,
-
arch/x86/kvm/svm/sev.c:2023:35-2023:47: int sev_mem_enc_unregister_region(struct kvm *kvm,
-
arch/x86/kvm/svm/sev.c:2063:34-2063:46: int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
-
arch/x86/kvm/svm/sev.c:2128:21-2128:33: void sev_vm_destroy(struct kvm *kvm)
-
arch/x86/kvm/svm/sev.c:2346:33-2346:45: void sev_guest_memory_reclaimed(struct kvm *kvm)
-
arch/x86/kvm/svm/svm.c:4295:34-4295:46: static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
-
arch/x86/kvm/svm/svm.c:4889:28-4889:40: static void svm_vm_destroy(struct kvm *kvm)
-
arch/x86/kvm/svm/svm.c:4895:24-4895:36: static int svm_vm_init(struct kvm *kvm)
-
arch/x86/kvm/svm/svm.h:314:51-314:63: static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
-
arch/x86/kvm/svm/svm.h:319:39-319:51: static __always_inline bool sev_guest(struct kvm *kvm)
-
arch/x86/kvm/svm/svm.h:330:42-330:54: static __always_inline bool sev_es_guest(struct kvm *kvm)
-
arch/x86/kvm/vmx/posted_intr.c:135:32-135:44: static bool vmx_can_use_vtd_pi(struct kvm *kvm)
-
arch/x86/kvm/vmx/posted_intr.c:255:30-255:42: void vmx_pi_start_assignment(struct kvm *kvm)
-
arch/x86/kvm/vmx/posted_intr.c:272:24-272:36: int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
-
arch/x86/kvm/vmx/vmx.c:3836:27-3836:39: static int init_rmode_tss(struct kvm *kvm, void __user *ua)
-
arch/x86/kvm/vmx/vmx.c:3858:36-3858:48: static int init_rmode_identity_map(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.c:4672:43-4672:55: static inline int vmx_get_pid_table_order(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.c:4677:37-4677:49: static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.c:4697:31-4697:43: static int vmx_vcpu_precreate(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.c:5067:29-5067:41: static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
-
arch/x86/kvm/vmx/vmx.c:5087:38-5087:50: static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
-
arch/x86/kvm/vmx/vmx.c:6995:34-6995:46: static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
-
arch/x86/kvm/vmx/vmx.c:7549:24-7549:36: static int vmx_vm_init(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.c:8205:28-8205:40: static void vmx_vm_destroy(struct kvm *kvm)
-
arch/x86/kvm/vmx/vmx.h:648:51-648:63: static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
-
arch/x86/kvm/x86.c:2311:34-2311:46: static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
-
arch/x86/kvm/x86.c:2951:41-2951:53: static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
-
arch/x86/kvm/x86.c:2983:48-2983:60: static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
-
arch/x86/kvm/x86.c:2988:40-2988:52: static void __kvm_start_pvclock_update(struct kvm *kvm)
-
arch/x86/kvm/x86.c:2994:38-2994:50: static void kvm_start_pvclock_update(struct kvm *kvm)
-
arch/x86/kvm/x86.c:3002:36-3002:48: static void kvm_end_pvclock_update(struct kvm *kvm)
-
arch/x86/kvm/x86.c:3018:36-3018:48: static void kvm_update_masterclock(struct kvm *kvm)
-
arch/x86/kvm/x86.c:3043:28-3043:40: static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
-
arch/x86/kvm/x86.c:3078:26-3078:38: static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
-
arch/x86/kvm/x86.c:3089:21-3089:33: u64 get_kvmclock_ns(struct kvm *kvm)
-
arch/x86/kvm/x86.c:4444:34-4444:46: int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-
arch/x86/kvm/x86.c:6090:38-6090:50: static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
-
arch/x86/kvm/x86.c:6100:47-6100:59: static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
-
arch/x86/kvm/x86.c:6106:42-6106:54: static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-
arch/x86/kvm/x86.c:6121:37-6121:49: static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-
arch/x86/kvm/x86.c:6146:37-6146:49: static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-
arch/x86/kvm/x86.c:6176:33-6176:45: static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
-
arch/x86/kvm/x86.c:6188:33-6188:45: static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
-
arch/x86/kvm/x86.c:6201:34-6201:46: static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
-
arch/x86/kvm/x86.c:6212:34-6212:46: static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
-
arch/x86/kvm/x86.c:6234:34-6234:46: static int kvm_vm_ioctl_reinject(struct kvm *kvm,
-
arch/x86/kvm/x86.c:6250:30-6250:42: void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
-
arch/x86/kvm/x86.c:6266:27-6266:39: int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
-
arch/x86/kvm/x86.c:6278:29-6278:41: int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
-
arch/x86/kvm/x86.c:6574:40-6574:52: static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
-
arch/x86/kvm/x86.c:6675:38-6675:50: static int kvm_arch_suspend_notifier(struct kvm *kvm)
-
arch/x86/kvm/x86.c:6698:26-6698:38: int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
-
arch/x86/kvm/x86.c:6710:35-6710:47: static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
-
arch/x86/kvm/x86.c:6721:35-6721:47: static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
-
arch/x86/kvm/x86.c:9717:32-9717:44: static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
-
arch/x86/kvm/x86.c:9733:26-9733:38: bool kvm_apicv_activated(struct kvm *kvm)
-
arch/x86/kvm/x86.c:9759:28-9759:40: static void kvm_apicv_init(struct kvm *kvm)
-
arch/x86/kvm/x86.c:10312:40-10312:52: void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
-
arch/x86/kvm/x86.c:10318:35-10318:47: void kvm_make_scan_ioapic_request(struct kvm *kvm)
-
arch/x86/kvm/x86.c:10383:39-10383:51: void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
-
arch/x86/kvm/x86.c:10424:37-10424:49: void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
-
arch/x86/kvm/x86.c:10476:38-10476:50: void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
-
arch/x86/kvm/x86.c:11637:57-11637:69: static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
-
arch/x86/kvm/x86.c:11833:29-11833:41: int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
-
arch/x86/kvm/x86.c:12303:23-12303:35: void kvm_arch_free_vm(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12310:22-12310:34: int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-
arch/x86/kvm/x86.c:12372:27-12372:39: int kvm_arch_post_init_vm(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12384:34-12384:46: static void kvm_unload_vcpu_mmus(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12395:27-12395:39: void kvm_arch_sync_events(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12424:39-12424:51: void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
-
arch/x86/kvm/x86.c:12477:30-12477:42: void kvm_arch_pre_destroy_vm(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12482:26-12482:38: void kvm_arch_destroy_vm(struct kvm *kvm)
-
arch/x86/kvm/x86.c:12522:28-12522:40: void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
-
arch/x86/kvm/x86.c:12558:39-12558:51: static int kvm_alloc_memslot_metadata(struct kvm *kvm,
-
arch/x86/kvm/x86.c:12623:32-12623:44: void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
-
arch/x86/kvm/x86.c:12639:36-12639:48: int kvm_arch_prepare_memory_region(struct kvm *kvm,
-
arch/x86/kvm/x86.c:12667:46-12667:58: static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
-
arch/x86/kvm/x86.c:12679:38-12679:50: static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
-
arch/x86/kvm/x86.c:12800:36-12800:48: void kvm_arch_commit_memory_region(struct kvm *kvm,
-
arch/x86/kvm/x86.c:13186:32-13186:44: void kvm_arch_start_assignment(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13193:30-13193:42: void kvm_arch_end_assignment(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13199:43-13199:55: bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13205:40-13205:52: void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13211:42-13211:54: void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13217:35-13217:47: bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
-
arch/x86/kvm/x86.c:13270:35-13270:47: int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
-
arch/x86/kvm/x86.h:288:40-288:52: static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
-
arch/x86/kvm/x86.h:408:39-408:51: static inline bool kvm_mwait_in_guest(struct kvm *kvm)
-
arch/x86/kvm/x86.h:413:37-413:49: static inline bool kvm_hlt_in_guest(struct kvm *kvm)
-
arch/x86/kvm/x86.h:418:39-418:51: static inline bool kvm_pause_in_guest(struct kvm *kvm)
-
arch/x86/kvm/x86.h:423:40-423:52: static inline bool kvm_cstate_in_guest(struct kvm *kvm)
-
arch/x86/kvm/x86.h:428:46-428:58: static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
-
arch/x86/kvm/xen.c:37:37-37:49: static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
-
arch/x86/kvm/xen.c:604:26-604:38: int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
-
arch/x86/kvm/xen.c:667:26-667:38: int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
-
arch/x86/kvm/xen.c:1112:24-1112:36: int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
-
arch/x86/kvm/xen.c:1159:35-1159:47: static inline int max_evtchn_port(struct kvm *kvm)
-
arch/x86/kvm/xen.c:1552:56-1552:68: int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
-
arch/x86/kvm/xen.c:1661:58-1661:70: static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
-
arch/x86/kvm/xen.c:1720:66-1720:78: static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
-
arch/x86/kvm/xen.c:1733:26-1733:38: int kvm_xen_setup_evtchn(struct kvm *kvm,
-
arch/x86/kvm/xen.c:1772:29-1772:41: int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
-
arch/x86/kvm/xen.c:1819:35-1819:47: static int kvm_xen_eventfd_update(struct kvm *kvm,
-
arch/x86/kvm/xen.c:1866:35-1866:47: static int kvm_xen_eventfd_assign(struct kvm *kvm,
-
arch/x86/kvm/xen.c:1938:37-1938:49: static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
-
arch/x86/kvm/xen.c:1956:34-1956:46: static int kvm_xen_eventfd_reset(struct kvm *kvm)
-
arch/x86/kvm/xen.c:1998:35-1998:47: static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
-
arch/x86/kvm/xen.c:2106:22-2106:34: void kvm_xen_init_vm(struct kvm *kvm)
-
arch/x86/kvm/xen.c:2113:25-2113:37: void kvm_xen_destroy_vm(struct kvm *kvm)
-
arch/x86/kvm/xen.h:39:40-39:52: static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
-
arch/x86/kvm/xen.h:45:46-45:58: static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
-
drivers/vfio/group.c:887:51-887:63: void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
-
drivers/vfio/vfio_main.c:387:59-387:71: void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
-
drivers/vfio/vfio_main.c:1359:57-1359:69: static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
-
drivers/vfio/vfio_main.c:1381:43-1381:55: void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
-
include/linux/kvm_host.h:839:32-839:44: static inline void kvm_vm_dead(struct kvm *kvm)
-
include/linux/kvm_host.h:845:34-845:46: static inline void kvm_vm_bugged(struct kvm *kvm)
-
include/linux/kvm_host.h:908:62-908:74: static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
-
include/linux/kvm_host.h:913:46-913:58: static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
-
include/linux/kvm_host.h:920:45-920:57: static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
-
include/linux/kvm_host.h:934:51-934:63: static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
-
include/linux/kvm_host.h:990:51-990:63: static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
-
include/linux/kvm_host.h:998:49-998:61: static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
-
include/linux/kvm_host.h:1491:39-1491:51: static inline void __kvm_arch_free_vm(struct kvm *kvm)
-
include/linux/kvm_host.h:1590:46-1590:58: static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
-
include/linux/kvm_host.h:1731:30-1731:42: static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
-
include/linux/kvm_host.h:1759:37-1759:49: static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
-
include/linux/kvm_host.h:1951:40-1951:52: static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
-
include/linux/kvm_host.h:1973:44-1973:56: static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
-
virt/kvm/coalesced_mmio.c:110:29-110:41: int kvm_coalesced_mmio_init(struct kvm *kvm)
-
virt/kvm/coalesced_mmio.c:131:30-131:42: void kvm_coalesced_mmio_free(struct kvm *kvm)
-
virt/kvm/coalesced_mmio.c:137:42-137:54: int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
-
virt/kvm/coalesced_mmio.c:173:44-173:56: int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
-
virt/kvm/dirty_ring.c:24:27-24:39: bool kvm_use_dirty_bitmap(struct kvm *kvm)
-
virt/kvm/dirty_ring.c:32:48-32:60: bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
-
virt/kvm/dirty_ring.c:53:33-53:45: static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
-
virt/kvm/dirty_ring.c:104:26-104:38: int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
-
virt/kvm/eventfd.c:36:24-36:36: kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
-
virt/kvm/eventfd.c:183:5-183:17: struct kvm *kvm, int irq_source_id,
-
virt/kvm/eventfd.c:257:26-257:38: static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
-
virt/kvm/eventfd.c:288:5-288:17: struct kvm *kvm, unsigned int host_irq,
-
virt/kvm/eventfd.c:303:18-303:30: kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
-
virt/kvm/eventfd.c:467:27-467:39: bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
-
virt/kvm/eventfd.c:488:27-488:39: void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
-
virt/kvm/eventfd.c:498:27-498:39: void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
-
virt/kvm/eventfd.c:511:36-511:48: void kvm_register_irq_ack_notifier(struct kvm *kvm,
-
virt/kvm/eventfd.c:520:38-520:50: void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
-
virt/kvm/eventfd.c:532:18-532:30: kvm_eventfd_init(struct kvm *kvm)
-
virt/kvm/eventfd.c:548:20-548:32: kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
-
virt/kvm/eventfd.c:588:11-588:23: kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
-
virt/kvm/eventfd.c:604:19-604:31: kvm_irqfd_release(struct kvm *kvm)
-
virt/kvm/eventfd.c:627:29-627:41: void kvm_irq_routing_update(struct kvm *kvm)
-
virt/kvm/eventfd.c:655:33-655:45: bool kvm_notify_irqfd_resampler(struct kvm *kvm,
-
virt/kvm/eventfd.c:812:27-812:39: ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
-
virt/kvm/eventfd.c:837:37-837:49: static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
-
virt/kvm/eventfd.c:901:28-901:40: kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
-
virt/kvm/eventfd.c:944:35-944:47: static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-
virt/kvm/eventfd.c:956:22-956:34: kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-
virt/kvm/eventfd.c:1008:15-1008:27: kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-
virt/kvm/irqchip.c:21:21-21:33: int kvm_irq_map_gsi(struct kvm *kvm,
-
virt/kvm/irqchip.c:40:26-40:38: int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-
virt/kvm/irqchip.c:48:28-48:40: int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
-
virt/kvm/irqchip.c:70:17-70:29: int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
-
virt/kvm/irqchip.c:119:27-119:39: void kvm_free_irq_routing(struct kvm *kvm)
-
virt/kvm/irqchip.c:127:32-127:44: static int setup_routing_entry(struct kvm *kvm,
-
virt/kvm/irqchip.c:159:56-159:68: void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
-
virt/kvm/irqchip.c:163:42-163:54: bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
-
virt/kvm/irqchip.c:168:25-168:37: int kvm_set_irq_routing(struct kvm *kvm,
-
virt/kvm/kvm_main.c:157:45-157:57: __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
-
virt/kvm/kvm_main.c:290:34-290:46: bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
-
virt/kvm/kvm_main.c:316:39-316:51: bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
-
virt/kvm/kvm_main.c:342:32-342:44: bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
-
virt/kvm/kvm_main.c:348:28-348:40: void kvm_flush_remote_tlbs(struct kvm *kvm)
-
virt/kvm/kvm_main.c:369:34-369:46: void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
-
virt/kvm/kvm_main.c:382:36-382:48: void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:396:34-396:46: static void kvm_flush_shadow_all(struct kvm *kvm)
-
virt/kvm/kvm_main.c:484:50-484:62: static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
-
virt/kvm/kvm_main.c:524:24-524:36: void kvm_destroy_vcpus(struct kvm *kvm)
-
virt/kvm/kvm_main.c:583:51-583:63: static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
-
virt/kvm/kvm_main.c:699:33-699:45: static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-
virt/kvm/kvm_main.c:741:31-741:43: void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-
virt/kvm/kvm_main.c:816:29-816:41: void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-
virt/kvm/kvm_main.c:931:34-931:46: static int kvm_init_mmu_notifier(struct kvm *kvm)
-
virt/kvm/kvm_main.c:956:34-956:46: static void kvm_init_pm_notifier(struct kvm *kvm)
-
virt/kvm/kvm_main.c:964:37-964:49: static void kvm_destroy_pm_notifier(struct kvm *kvm)
-
virt/kvm/kvm_main.c:988:30-988:42: static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
-
virt/kvm/kvm_main.c:997:31-997:43: static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
-
virt/kvm/kvm_main.c:1029:36-1029:48: static void kvm_destroy_vm_debugfs(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1047:34-1047:46: static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
-
virt/kvm/kvm_main.c:1126:34-1126:46: int __weak kvm_arch_post_init_vm(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1135:37-1135:49: void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1145:39-1145:51: int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1286:33-1286:45: static void kvm_destroy_devices(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1301:28-1301:40: static void kvm_destroy_vm(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1354:18-1354:30: void kvm_get_kvm(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1364:23-1364:35: bool kvm_get_kvm_safe(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1370:18-1370:30: void kvm_put_kvm(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1384:29-1384:41: void kvm_put_kvm_no_destroy(struct kvm *kvm)
-
virt/kvm/kvm_main.c:1415:55-1415:67: static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
-
virt/kvm/kvm_main.c:1495:33-1495:45: static void kvm_replace_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1562:38-1562:50: static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
-
virt/kvm/kvm_main.c:1620:38-1620:50: static int kvm_prepare_memory_region(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1658:38-1658:50: static void kvm_commit_memory_region(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1720:34-1720:46: static void kvm_activate_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1745:36-1745:48: static void kvm_invalidate_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1787:32-1787:44: static void kvm_create_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1795:32-1795:44: static void kvm_delete_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1807:30-1807:42: static void kvm_move_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1820:38-1820:50: static void kvm_update_flags_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1833:28-1833:40: static int kvm_set_memslot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:1949:29-1949:41: int __kvm_set_memory_region(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2053:27-2053:39: int kvm_set_memory_region(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2065:43-2065:55: static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2145:38-2145:50: static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
-
virt/kvm/kvm_main.c:2236:39-2236:51: static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2255:40-2255:52: static int kvm_clear_dirty_log_protect(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2331:41-2331:53: static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
-
virt/kvm/kvm_main.c:2345:40-2345:52: struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:2384:25-2384:37: bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:2457:26-2457:38: unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:2488:31-2488:43: unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
-
virt/kvm/kvm_main.c:2778:27-2778:39: kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
-
virt/kvm/kvm_main.c:2806:22-2806:34: kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:2841:26-2841:38: struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:3043:25-3043:37: int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
-
virt/kvm/kvm_main.c:3061:20-3061:32: int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
-
virt/kvm/kvm_main.c:3129:35-3129:47: static int __kvm_write_guest_page(struct kvm *kvm,
-
virt/kvm/kvm_main.c:3146:26-3146:38: int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
-
virt/kvm/kvm_main.c:3164:21-3164:33: int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
-
virt/kvm/kvm_main.c:3247:31-3247:43: int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-
virt/kvm/kvm_main.c:3255:35-3255:47: int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-
virt/kvm/kvm_main.c:3286:28-3286:40: int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-
virt/kvm/kvm_main.c:3293:34-3293:46: int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-
virt/kvm/kvm_main.c:3323:27-3323:39: int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-
virt/kvm/kvm_main.c:3330:21-3330:33: int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
-
virt/kvm/kvm_main.c:3350:30-3350:42: void mark_page_dirty_in_slot(struct kvm *kvm,
-
virt/kvm/kvm_main.c:3375:22-3375:34: void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
-
virt/kvm/kvm_main.c:3827:36-3827:48: static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
-
virt/kvm/kvm_main.c:3943:37-3943:49: static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
-
virt/kvm/kvm_main.c:4474:36-4474:48: static int kvm_ioctl_create_device(struct kvm *kvm,
-
virt/kvm/kvm_main.c:4532:49-4532:61: static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
-
virt/kvm/kvm_main.c:4594:47-4594:59: static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
-
virt/kvm/kvm_main.c:4632:43-4632:55: static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
-
virt/kvm/kvm_main.c:4654:51-4654:63: int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
-
virt/kvm/kvm_main.c:4660:33-4660:45: bool kvm_are_all_memslots_empty(struct kvm *kvm)
-
virt/kvm/kvm_main.c:4675:44-4675:56: static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
-
virt/kvm/kvm_main.c:4767:38-4767:50: static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
-
virt/kvm/kvm_main.c:5549:29-5549:41: int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
-
virt/kvm/kvm_main.c:5591:31-5591:43: int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-
virt/kvm/kvm_main.c:5639:42-5639:54: struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-
virt/kvm/kvm_main.c:5699:32-5699:44: static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
-
virt/kvm/kvm_main.c:5706:34-5706:46: static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
-
virt/kvm/kvm_main.c:5713:34-5713:46: static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
-
virt/kvm/kvm_main.c:5726:36-5726:48: static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
-
virt/kvm/kvm_main.c:5867:57-5867:69: static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
-
virt/kvm/kvm_main.c:6238:33-6238:45: int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
-
virt/kvm/pfncache.c:25:40-25:52: void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
-
virt/kvm/pfncache.c:112:45-112:57: static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
-
virt/kvm/pfncache.c:340:49-340:61: void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-
virt/kvm/vfio.c:38:54-38:66: static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
record
Declared as a prototype...
Defined...
variable
Defined...