Symbol: mm
function parameter
Defined...
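Note: in most of the locations listed below, mm names the per-process memory descriptor, struct mm_struct *; a smaller set of entries binds the same parameter name to subsystem-local types such as struct mqd_manager, struct drm_buddy, struct drm_mm, struct intel_vgpu_mm and struct nvkm_mm, and a few to plain integers or byte pointers. As a minimal sketch (hypothetical, not taken from any entry below), a typical struct mm_struct consumer looks like the helper here, built only from APIs that themselves appear in this index; a second sketch covering reference counting on the mm_struct follows the end of the listing.

    /* Hypothetical helper for illustration only; the name is not from the index. */
    #include <linux/mm.h>

    static bool mm_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            mmap_read_lock(mm);             /* take mm->mmap_lock for reading */
            vma = vma_lookup(mm, addr);     /* VMA containing addr, or NULL */
            mapped = vma != NULL;
            mmap_read_unlock(mm);

            return mapped;
    }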
-
arch/x86/entry/vsyscall/vsyscall_64.c:317:37-317:55: struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-
arch/x86/entry/vsyscall/vsyscall_64.c:328:18-328:36: int in_gate_area(struct mm_struct *mm, unsigned long addr)
-
arch/x86/events/core.c:2495:60-2495:78: static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
-
arch/x86/events/core.c:2516:62-2516:80: static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:55:41-55:59: static inline void init_new_context_ldt(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:89:45-89:63: static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:94:53-94:71: static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:101:43-101:61: static inline unsigned long mm_untag_mask(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:106:40-106:58: static inline void mm_reset_untag_mask(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:112:44-112:62: static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:142:8-142:26: struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:163:36-163:54: static inline void destroy_context(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:196:7-196:25: struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:208:58-208:76: static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:216:35-216:53: static inline void arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:223:32-223:50: static inline bool is_64bit_mm(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:235:31-235:49: static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
-
arch/x86/include/asm/paravirt.h:95:44-95:62: static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/include/asm/paravirt.h:342:38-342:56: static inline int paravirt_pgd_alloc(struct mm_struct *mm)
-
arch/x86/include/asm/paravirt.h:347:38-347:56: static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/include/asm/paravirt.h:352:39-352:57: static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:361:39-361:57: static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:371:39-371:57: static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:380:39-380:57: static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:536:30-536:48: static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgalloc.h:13:41-13:59: static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
-
arch/x86/include/asm/pgalloc.h:64:40-64:58: static inline void pmd_populate_kernel(struct mm_struct *mm,
-
arch/x86/include/asm/pgalloc.h:71:45-71:63: static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
-
arch/x86/include/asm/pgalloc.h:78:33-78:51: static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
-
arch/x86/include/asm/pgalloc.h:99:33-99:51: static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-
arch/x86/include/asm/pgalloc.h:105:38-105:56: static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-
arch/x86/include/asm/pgalloc.h:113:33-113:51: static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
-
arch/x86/include/asm/pgalloc.h:119:38-119:56: static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
-
arch/x86/include/asm/pgalloc.h:134:33-134:51: static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
-
arch/x86/include/asm/pgalloc.h:142:38-142:56: static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
-
arch/x86/include/asm/pgalloc.h:150:36-150:54: static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-
arch/x86/include/asm/pgalloc.h:159:29-159:47: static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-
arch/x86/include/asm/pgtable.h:979:35-979:53: static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
-
arch/x86/include/asm/pgtable.h:1244:31-1244:49: static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1251:31-1251:49: static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1281:40-1281:58: static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1290:45-1290:63: static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1309:39-1309:57: static inline void ptep_set_wrprotect(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1349:45-1349:63: static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1360:45-1360:63: static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1371:39-1371:57: static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable_64.h:57:34-57:52: static inline bool mm_p4d_folded(struct mm_struct *mm)
-
arch/x86/include/asm/pgtable_64.h:70:37-70:55: static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pkeys.h:25:37-25:55: static inline int execute_only_pkey(struct mm_struct *mm)
-
arch/x86/include/asm/pkeys.h:55:27-55:45: bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
-
arch/x86/include/asm/pkeys.h:81:19-81:37: int mm_pkey_alloc(struct mm_struct *mm)
-
arch/x86/include/asm/pkeys.h:108:18-108:36: int mm_pkey_free(struct mm_struct *mm, int pkey)
-
arch/x86/include/asm/uaccess_64.h:38:52-38:70: static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
-
arch/x86/kernel/alternative.c:1715:48-1715:66: static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:774:10-774:28: struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:816:10-816:28: struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:836:44-836:62: int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:1135:35-1135:53: int sgx_encl_test_and_clear_young(struct mm_struct *mm,
-
arch/x86/kernel/cpu/sgx/encl.h:89:33-89:51: static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
-
arch/x86/kernel/ldt.c:42:18-42:36: void load_mm_ldt(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:189:29-189:47: static void do_sanity_check(struct mm_struct *mm,
-
arch/x86/kernel/ldt.c:264:36-264:54: static void map_ldt_struct_to_user(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:272:38-272:56: static void sanity_check_ldt_mapping(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:288:16-288:34: map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
-
arch/x86/kernel/ldt.c:349:30-349:48: static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
-
arch/x86/kernel/ldt.c:393:31-393:49: static void free_ldt_pgtables(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:421:25-421:43: static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
-
arch/x86/kernel/ldt.c:451:47-451:65: int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:491:26-491:44: void destroy_context_ldt(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:497:25-497:43: void ldt_arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/kernel/process.c:1031:34-1031:52: unsigned long arch_randomize_brk(struct mm_struct *mm)
-
arch/x86/kernel/process_64.c:753:37-753:55: static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
-
arch/x86/kernel/uprobes.c:854:59-854:77: int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
-
arch/x86/mm/dump_pagetables.c:366:12-366:30: struct mm_struct *mm, pgd_t *pgd,
-
arch/x86/mm/dump_pagetables.c:402:48-402:66: void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
-
arch/x86/mm/dump_pagetables.c:407:56-407:74: void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
-
arch/x86/mm/init_64.c:73:1-73:1: DEFINE_POPULATE(p4d_populate, p4d, pud, init)
-
arch/x86/mm/init_64.c:74:1-74:1: DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
-
arch/x86/mm/init_64.c:75:1-75:1: DEFINE_POPULATE(pud_populate, pud, pmd, init)
-
arch/x86/mm/init_64.c:76:1-76:1: DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
-
arch/x86/mm/mmap.c:129:28-129:46: void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-
arch/x86/mm/pgtable.c:31:25-31:43: pgtable_t pte_alloc_one(struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:113:36-113:54: static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:123:22-123:40: static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/mm/pgtable.c:213:23-213:41: static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
-
arch/x86/mm/pgtable.c:228:29-228:47: static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
-
arch/x86/mm/pgtable.c:271:28-271:46: static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
-
arch/x86/mm/pgtable.c:286:29-286:47: static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-
arch/x86/mm/pgtable.c:305:33-305:51: static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
-
arch/x86/mm/pgtable.c:326:38-326:56: static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
-
arch/x86/mm/pgtable.c:430:18-430:36: pgd_t *pgd_alloc(struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:484:15-484:33: void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/mm/pkeys.c:14:25-14:43: int __execute_only_pkey(struct mm_struct *mm)
-
arch/x86/mm/tlb.c:471:38-471:56: static inline void cr4_update_pce_mm(struct mm_struct *mm)
-
arch/x86/mm/tlb.c:678:21-678:39: void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-
arch/x86/mm/tlb.c:965:50-965:68: static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
-
arch/x86/mm/tlb.c:1001:25-1001:43: void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-
arch/x86/platform/efi/efi_64.c:394:39-394:57: static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
-
arch/x86/xen/mmu_hvm.c:36:31-36:49: static void xen_hvm_exit_mmap(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:553:26-553:44: static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-
arch/x86/xen/mmu_pv.c:567:26-567:44: static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-
arch/x86/xen/mmu_pv.c:588:26-588:44: static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-
arch/x86/xen/mmu_pv.c:617:28-617:46: static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
-
arch/x86/xen/mmu_pv.c:655:26-655:44: static void xen_pgd_walk(struct mm_struct *mm,
-
arch/x86/xen/mmu_pv.c:665:52-665:70: static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:693:26-693:44: static void xen_pin_page(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:745:27-745:45: static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:766:25-766:43: static void xen_pgd_pin(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:797:36-797:54: static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:818:28-818:46: static void xen_unpin_page(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:857:29-857:47: static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:878:27-878:45: static void xen_pgd_unpin(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:904:28-904:46: static void xen_enter_mmap(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:931:29-931:47: static void xen_drop_mm_ref(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:985:27-985:45: static void xen_exit_mmap(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:1402:26-1402:44: static int xen_pgd_alloc(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:1428:26-1428:44: static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:1481:39-1481:57: static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1491:39-1491:57: static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1537:37-1537:55: static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
-
arch/x86/xen/mmu_pv.c:1564:27-1564:45: static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1569:27-1569:45: static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1606:27-1606:45: static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-
drivers/firmware/efi/memattr.c:128:42-128:60: int __init efi_memattr_apply_permissions(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:125:32-125:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c:64:5-64:23: struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c:171:49-171:67: bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:60:29-60:47: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:287:24-287:42: uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c:211:24-211:42: uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c:374:32-374:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:182:24-182:42: uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:360:32-360:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:167:4-167:22: struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:345:32-345:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:162:24-162:42: uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:240:32-240:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:157:24-157:42: uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:264:32-264:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:225:24-225:42: uint32_t wptr_mask, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:387:32-387:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:2392:10-2392:28: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:1049:24-1049:42: int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:1069:23-1069:41: int kgd2kfd_resume_mm(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:1096:48-1096:66: int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:492:4-492:22: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:773:55-773:73: int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:846:5-846:23: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:871:7-871:25: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:91:24-91:44: void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:98:36-98:56: void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:206:26-206:46: int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:214:24-214:44: int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:222:22-222:42: void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:233:25-233:45: bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:241:23-241:43: int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:254:26-254:46: int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:262:27-262:47: bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:288:25-288:45: uint64_t kfd_mqd_stride(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:45:28-45:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:88:22-88:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:142:27-142:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:159:21-159:41: static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:172:26-172:46: static void __update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:216:24-216:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:223:29-223:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:251:28-251:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:260:25-260:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:286:33-286:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:298:30-298:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:328:26-328:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:335:28-335:48: static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:45:28-45:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:88:22-88:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:148:21-148:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:162:24-162:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:233:27-233:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:271:28-271:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:280:25-280:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:307:26-307:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:321:28-321:48: static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:341:27-341:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:360:29-360:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:387:33-387:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:399:30-399:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:44:28-44:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:123:22-123:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:202:21-202:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:216:24-216:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:287:27-287:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:324:28-324:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:333:25-333:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:361:26-361:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:375:28-375:48: static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:395:27-395:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:419:29-419:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:41:31-41:51: static uint64_t mqd_stride_v9(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:62:28-62:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:158:22-158:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:226:21-226:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:238:24-238:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:317:27-317:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:353:33-353:53: static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:360:28-360:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:372:25-372:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:404:26-404:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:418:28-418:48: static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:437:27-437:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:456:29-456:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:483:33-483:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:495:30-495:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:519:33-519:53: static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:550:36-550:56: static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:574:35-574:55: static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:612:29-612:49: static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:678:31-678:51: static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:711:31-711:51: static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:739:28-739:48: static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:766:34-766:54: static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:48:28-48:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:91:22-91:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:157:21-157:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:170:26-170:46: static void __update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:247:24-247:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:254:27-254:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:277:33-277:53: static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:283:28-283:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:292:25-292:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:319:26-319:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:333:28-333:48: static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:340:27-340:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:357:29-357:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:385:33-385:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:397:30-397:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:893:47-893:71: static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1149:56-1149:74: static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1205:6-1205:24: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1790:46-1790:70: struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c:307:46-307:64: void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:109:31-109:49: svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1105:47-1105:65: svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1134:55-1134:73: svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1588:39-1588:57: static int svm_range_validate_and_map(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1752:8-1752:26: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1877:43-1877:61: svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2173:45-2173:63: svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2202:5-2202:23: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2355:4-2355:22: struct mm_struct *mm, enum svm_work_list_ops op)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2388:23-2388:41: svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2422:26-2422:44: svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2756:7-2756:25: struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3392:29-3392:47: svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3497:43-3497:61: svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3615:43-3615:61: svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/drm_buddy.c:14:48-14:66: static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:35:28-35:46: static void drm_block_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:41:32-41:50: static void list_insert_sorted(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:68:23-68:41: static void mark_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:97:20-97:38: int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
-
drivers/gpu/drm/drm_buddy.c:187:21-187:39: void drm_buddy_fini(struct drm_buddy *mm)
-
drivers/gpu/drm/drm_buddy.c:203:24-203:42: static int split_block(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:263:30-263:48: static void __drm_buddy_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:293:27-293:45: void drm_buddy_free_block(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:308:26-308:44: void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
-
drivers/gpu/drm/drm_buddy.c:331:18-331:36: alloc_range_bias(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:408:14-408:32: get_maxblock(struct drm_buddy *mm, unsigned int order)
-
drivers/gpu/drm/drm_buddy.c:434:21-434:39: alloc_from_freelist(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:480:26-480:44: static int __alloc_range(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:558:36-558:54: static int __drm_buddy_alloc_range(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:590:26-590:44: int drm_buddy_block_trim(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:661:28-661:46: int drm_buddy_alloc_blocks(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:751:28-751:46: void drm_buddy_block_print(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:768:22-768:40: void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
-
drivers/gpu/drm/drm_mm.c:146:24-146:39: static void show_leaks(struct drm_mm *mm) { }
-
drivers/gpu/drm/drm_mm.c:157:25-157:46: __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
-
drivers/gpu/drm/drm_mm.c:305:38-305:53: static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/drm_mm.c:330:43-330:58: static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
-
drivers/gpu/drm/drm_mm.c:356:12-356:27: first_hole(struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:414:11-414:26: next_hole(struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:450:25-450:40: int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
-
drivers/gpu/drm/drm_mm.c:514:33-514:55: int drm_mm_insert_node_in_range(struct drm_mm * const mm,
-
drivers/gpu/drm/drm_mm.c:737:6-737:21: struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:963:18-963:33: void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
-
drivers/gpu/drm/drm_mm.c:997:22-997:37: void drm_mm_takedown(struct drm_mm *mm)
-
drivers/gpu/drm/drm_mm.c:1023:19-1023:40: void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
-
drivers/gpu/drm/i915/gem/i915_gem_userptr.c:428:13-428:31: probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1761:53-1761:75: static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:549:35-549:57: static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:564:47-564:69: static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:570:48-570:70: static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:576:35-576:57: static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:587:48-587:70: static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:593:34-593:56: static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:605:34-605:56: static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:616:33-616:55: static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:626:33-626:55: static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:1753:33-1753:55: static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1783:28-1783:50: static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1844:26-1844:48: static void vgpu_free_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1978:26-1978:48: void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1994:23-1994:45: int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:2039:46-2039:68: static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:2068:37-2068:59: unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
-
drivers/gpu/drm/i915/gvt/gtt.h:187:38-187:60: static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.h:194:38-194:60: static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.h:199:42-199:64: static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h:29:21-29:37: nvkm_mm_initialised(struct nvkm_mm *mm)
-
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h:44:19-44:35: nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
-
drivers/gpu/drm/nouveau/nouveau_svm.c:925:45-925:63: nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:30:14-30:30: nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:48:14-48:30: nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:86:13-86:29: region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:111:14-111:30: nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:161:13-161:29: region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:186:14-186:30: nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:240:14-240:30: nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:283:14-283:30: nvkm_mm_fini(struct nvkm_mm *mm)
-
drivers/gpu/drm/tests/drm_buddy_test.c:46:46-46:64: static void __dump_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:55:44-55:62: static void dump_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:67:44-67:62: static int check_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:141:45-141:63: static int check_blocks(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:203:41-203:59: static int check_mm(struct kunit *test, struct drm_buddy *mm)
-
drivers/gpu/drm/tests/drm_mm_test.c:46:49-46:70: static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
-
drivers/gpu/drm/tests/drm_mm_test.c:71:49-71:70: static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
-
drivers/gpu/drm/tests/drm_mm_test.c:100:51-100:72: static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/tests/drm_mm_test.c:162:71-162:86: static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:281:53-281:68: static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
-
drivers/gpu/drm/tests/drm_mm_test.c:301:77-301:92: static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:482:47-482:62: static bool expect_insert(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:506:52-506:67: static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/tests/drm_mm_test.c:701:56-701:71: static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
-
drivers/gpu/drm/tests/drm_mm_test.c:727:61-727:76: static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:752:60-752:75: static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:950:45-950:60: static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
-
drivers/gpu/drm/tests/drm_mm_test.c:972:48-972:63: static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1153:44-1153:65: static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
-
drivers/gpu/drm/tests/drm_mm_test.c:1232:47-1232:62: static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1278:50-1278:65: static bool evict_everything(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1326:48-1326:63: static int evict_something(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1983:44-1983:59: static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
-
drivers/infiniband/hw/cxgb4/iw_cxgb4.h:565:11-565:33: struct c4iw_mm_entry *mm)
-
drivers/infiniband/hw/hfi1/pin_system.c:65:32-65:50: static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-
drivers/infiniband/hw/hfi1/user_pages.c:29:50-29:68: bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
-
drivers/infiniband/hw/hfi1/user_pages.c:80:29-80:47: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
-
drivers/infiniband/hw/hfi1/user_pages.c:95:30-95:48: void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
-
drivers/iommu/amd/iommu_v2.c:362:6-362:24: struct mm_struct *mm,
-
drivers/iommu/amd/iommu_v2.c:378:49-378:67: static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-
drivers/iommu/intel/svm.c:221:6-221:24: struct mm_struct *mm,
-
drivers/iommu/intel/svm.c:230:55-230:73: static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
-
drivers/iommu/intel/svm.c:293:9-293:27: struct mm_struct *mm)
-
drivers/iommu/iommu-sva.c:15:34-15:52: static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
-
drivers/iommu/iommu-sva.c:59:61-59:79: struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
-
drivers/iommu/iommu-sva.c:206:20-206:38: void mm_pasid_drop(struct mm_struct *mm)
-
drivers/iommu/iommu.c:3498:10-3498:28: struct mm_struct *mm)
-
drivers/misc/sgi-gru/grutlbpurge.c:238:48-238:66: static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
-
drivers/mtd/nand/raw/atmel/pmecc.c:192:40-192:44: static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
-
drivers/vdpa/vdpa_sim/vdpa_sim.c:640:54-640:72: static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
-
drivers/vfio/vfio_iommu_type1.c:412:51-412:69: static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
-
drivers/vfio/vfio_iommu_type1.c:512:57-512:75: static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
-
drivers/vfio/vfio_iommu_type1.c:555:27-555:45: static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
-
drivers/video/fbdev/pm2fb.c:252:30-252:45: static void pm2_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
-
drivers/video/fbdev/pm2fb.c:281:31-281:46: static void pm2v_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
-
fs/aio.c:663:48-663:66: static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
-
fs/aio.c:844:23-844:41: static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
-
fs/aio.c:888:15-888:33: void exit_aio(struct mm_struct *mm)
-
fs/binfmt_elf.c:1571:10-1571:28: struct mm_struct *mm)
-
fs/binfmt_elf.c:1615:53-1615:71: static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
-
fs/exec.c:979:22-979:40: static int exec_mmap(struct mm_struct *mm)
-
fs/exec.c:2101:19-2101:37: void set_dumpable(struct mm_struct *mm, int value)
-
fs/proc/array.c:420:56-420:74: static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)
-
fs/proc/array.c:429:56-429:74: static inline void task_untag_mask(struct seq_file *m, struct mm_struct *mm)
-
fs/proc/base.c:219:33-219:51: static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
-
fs/proc/base.c:256:31-256:49: static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
-
fs/proc/task_mmu.c:31:35-31:53: void task_mem(struct seq_file *m, struct mm_struct *mm)
-
fs/proc/task_mmu.c:83:26-83:44: unsigned long task_vsize(struct mm_struct *mm)
-
fs/proc/task_mmu.c:88:26-88:44: unsigned long task_statm(struct mm_struct *mm,
-
fs/userfaultfd.c:872:33-872:51: void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
-
fs/userfaultfd.c:1287:2-1287:20: struct mm_struct *mm, __u64 start, __u64 len)
-
fs/userfaultfd.c:1306:43-1306:61: static __always_inline int validate_range(struct mm_struct *mm,
-
include/asm-generic/cacheflush.h:23:35-23:53: static inline void flush_cache_mm(struct mm_struct *mm)
-
include/asm-generic/cacheflush.h:29:39-29:57: static inline void flush_cache_dup_mm(struct mm_struct *mm)
-
include/asm-generic/hugetlb.h:61:35-61:53: static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-
include/asm-generic/hugetlb.h:78:36-78:54: static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-
include/asm-generic/hugetlb.h:86:45-86:63: static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-
include/asm-generic/hugetlb.h:130:44-130:62: static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-
include/asm-generic/pgalloc.h:19:45-19:63: static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
-
include/asm-generic/pgalloc.h:36:43-36:61: static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-
include/asm-generic/pgalloc.h:47:36-47:54: static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-
include/asm-generic/pgalloc.h:64:41-64:59: static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
-
include/asm-generic/pgalloc.h:104:29-104:47: static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
-
include/asm-generic/pgalloc.h:127:36-127:54: static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:146:29-146:47: static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-
include/asm-generic/pgalloc.h:160:38-160:56: static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:185:36-185:54: static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:191:31-191:49: static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
-
include/asm-generic/pgalloc.h:198:29-198:47: static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-
include/asm-generic/pgtable-nopmd.h:63:29-63:47: static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-
include/drm/drm_buddy.h:127:22-127:40: drm_buddy_block_size(struct drm_buddy *mm,
-
include/drm/drm_mm.h:276:39-276:60: static inline bool drm_mm_initialized(const struct drm_mm *mm)
-
include/drm/drm_mm.h:434:28-434:43: drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-
include/drm/drm_mm.h:458:38-458:53: static inline int drm_mm_insert_node(struct drm_mm *mm,
-
include/drm/drm_mm.h:478:33-478:54: static inline bool drm_mm_clean(const struct drm_mm *mm)
-
include/drm/drm_mm.h:534:9-534:24: struct drm_mm *mm,
-
include/linux/hugetlb.h:195:37-195:55: static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
-
include/linux/hugetlb.h:850:42-850:60: static inline int is_hugepage_only_range(struct mm_struct *mm,
-
include/linux/hugetlb.h:970:9-970:27: struct mm_struct *mm, pte_t *pte)
-
include/linux/hugetlb.h:989:39-989:57: static inline void hugetlb_count_init(struct mm_struct *mm)
-
include/linux/hugetlb.h:994:46-994:64: static inline void hugetlb_count_add(long l, struct mm_struct *mm)
-
include/linux/hugetlb.h:999:46-999:64: static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
-
include/linux/hugetlb.h:1224:6-1224:24: struct mm_struct *mm, pte_t *pte)
-
include/linux/iommu.h:1184:34-1184:52: static inline void mm_pasid_init(struct mm_struct *mm)
-
include/linux/iommu.h:1188:35-1188:53: static inline bool mm_valid_pasid(struct mm_struct *mm)
-
include/linux/khugepaged.h:30:36-30:54: static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-
include/linux/khugepaged.h:36:36-36:54: static inline void khugepaged_exit(struct mm_struct *mm)
-
include/linux/ksm.h:38:46-38:64: static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
-
include/linux/ksm.h:46:28-46:46: static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-
include/linux/ksm.h:62:29-62:47: static inline void ksm_exit(struct mm_struct *mm)
-
include/linux/memcontrol.h:681:58-681:76: static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
-
include/linux/memcontrol.h:886:36-886:54: static inline bool mm_match_cgroup(struct mm_struct *mm,
-
include/linux/memcontrol.h:1101:41-1101:59: static inline void count_memcg_event_mm(struct mm_struct *mm,
-
include/linux/memcontrol.h:1141:42-1141:60: static inline void memcg_memory_event_mm(struct mm_struct *mm,
-
include/linux/mm.h:804:57-804:75: static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
-
include/linux/mm.h:2428:53-2428:71: static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
-
include/linux/mm.h:2543:44-2543:62: static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2550:35-2550:53: static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
-
include/linux/mm.h:2557:35-2557:53: static inline void inc_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2564:35-2564:53: static inline void dec_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2586:40-2586:58: static inline unsigned long get_mm_rss(struct mm_struct *mm)
-
include/linux/mm.h:2593:48-2593:66: static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2598:47-2598:65: static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-
include/linux/mm.h:2603:39-2603:57: static inline void update_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2611:38-2611:56: static inline void update_hiwater_vm(struct mm_struct *mm)
-
include/linux/mm.h:2617:41-2617:59: static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2623:7-2623:25: struct mm_struct *mm)
-
include/linux/mm.h:2634:32-2634:50: static inline void sync_mm_rss(struct mm_struct *mm)
-
include/linux/mm.h:2660:37-2660:55: static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
-
include/linux/mm.h:2690:35-2690:53: static inline void mm_inc_nr_puds(struct mm_struct *mm)
-
include/linux/mm.h:2697:35-2697:53: static inline void mm_dec_nr_puds(struct mm_struct *mm)
-
include/linux/mm.h:2718:35-2718:53: static inline void mm_inc_nr_pmds(struct mm_struct *mm)
-
include/linux/mm.h:2725:35-2725:53: static inline void mm_dec_nr_pmds(struct mm_struct *mm)
-
include/linux/mm.h:2734:43-2734:61: static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
-
include/linux/mm.h:2739:47-2739:71: static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
-
include/linux/mm.h:2744:35-2744:53: static inline void mm_inc_nr_ptes(struct mm_struct *mm)
-
include/linux/mm.h:2749:35-2749:53: static inline void mm_dec_nr_ptes(struct mm_struct *mm)
-
include/linux/mm.h:2770:32-2770:50: static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
-
include/linux/mm.h:2777:32-2777:50: static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
-
include/linux/mm.h:2784:32-2784:50: static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-
include/linux/mm.h:2872:39-2872:57: static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:2934:42-2934:60: static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
-
include/linux/mm.h:2977:39-2977:57: static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:3014:36-3014:54: static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:3047:39-3047:57: static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
-
include/linux/mm.h:3052:36-3052:54: static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
-
include/linux/mm.h:3371:35-3371:53: struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
-
include/linux/mm.h:3417:53-3417:71: static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
-
include/linux/mm_inline.h:442:43-442:61: static inline void init_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:447:42-447:60: static inline void inc_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:488:42-488:60: static inline void dec_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:501:41-501:59: static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:514:40-514:58: static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
-
include/linux/mm_types.h:934:36-934:54: static inline void mm_init_cpumask(struct mm_struct *mm)
-
include/linux/mm_types.h:943:37-943:55: static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
-
include/linux/mm_types.h:963:36-963:54: static inline void lru_gen_init_mm(struct mm_struct *mm)
-
include/linux/mm_types.h:972:35-972:53: static inline void lru_gen_use_mm(struct mm_struct *mm)
-
include/linux/mm_types.h:1022:3-1022:21: struct mm_struct *mm, unsigned long addr)
-
include/linux/mm_types.h:1060:37-1060:55: static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
-
include/linux/mm_types.h:1070:32-1070:50: static inline void mm_init_cid(struct mm_struct *mm)
-
include/linux/mm_types.h:1083:32-1083:50: static inline int mm_alloc_cid(struct mm_struct *mm)
-
include/linux/mm_types.h:1092:35-1092:53: static inline void mm_destroy_cid(struct mm_struct *mm)
-
include/linux/mmap_lock.h:25:52-25:70: static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
-
include/linux/mmap_lock.h:32:55-32:73: static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
-
include/linux/mmap_lock.h:39:47-39:65: static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
-
include/linux/mmap_lock.h:63:39-63:57: static inline void mmap_assert_locked(struct mm_struct *mm)
-
include/linux/mmap_lock.h:69:45-69:63: static inline void mmap_assert_write_locked(struct mm_struct *mm)
-
include/linux/mmap_lock.h:84:38-84:56: static inline void vma_end_write_all(struct mm_struct *mm)
-
include/linux/mmap_lock.h:100:35-100:53: static inline void mmap_init_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:105:36-105:54: static inline void mmap_write_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:112:43-112:61: static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
-
include/linux/mmap_lock.h:119:44-119:62: static inline int mmap_write_lock_killable(struct mm_struct *mm)
-
include/linux/mmap_lock.h:129:38-129:56: static inline void mmap_write_unlock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:136:41-136:59: static inline void mmap_write_downgrade(struct mm_struct *mm)
-
include/linux/mmap_lock.h:143:35-143:53: static inline void mmap_read_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:150:43-150:61: static inline int mmap_read_lock_killable(struct mm_struct *mm)
-
include/linux/mmap_lock.h:160:38-160:56: static inline bool mmap_read_trylock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:170:37-170:55: static inline void mmap_read_unlock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:176:47-176:65: static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
-
include/linux/mmap_lock.h:182:42-182:60: static inline int mmap_lock_is_contended(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:280:36-280:54: static inline int mm_has_notifiers(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:288:54-288:72: mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
-
include/linux/mmu_notifier.h:410:41-410:59: static inline void mmu_notifier_release(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:416:50-416:68: static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:425:44-425:62: static inline int mmu_notifier_clear_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:434:43-434:61: static inline int mmu_notifier_test_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:442:44-442:62: static inline void mmu_notifier_change_pte(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:486:64-486:82: static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:493:52-493:70: static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:498:55-498:73: static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:508:9-508:27: struct mm_struct *mm,
-
include/linux/mmu_notifier.h:522:4-522:22: struct mm_struct *mm, unsigned long start,
-
include/linux/oom.h:93:53-93:71: static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
-
include/linux/page_table_check.h:44:47-44:65: static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
-
include/linux/page_table_check.h:52:47-52:65: static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
-
include/linux/page_table_check.h:60:47-60:65: static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
-
include/linux/page_table_check.h:68:46-68:64: static inline void page_table_check_ptes_set(struct mm_struct *mm,
-
include/linux/page_table_check.h:77:45-77:63: static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
-
include/linux/page_table_check.h:86:45-86:63: static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
-
include/linux/page_table_check.h:95:53-95:71: static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
-
include/linux/pgtable.h:163:30-163:48: static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
-
include/linux/pgtable.h:231:29-231:47: static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
-
include/linux/pgtable.h:408:31-408:49: static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
-
include/linux/pgtable.h:578:47-578:65: static inline void pte_clear_not_present_full(struct mm_struct *mm,
-
include/linux/pgtable.h:660:39-660:57: static inline void pudp_set_wrprotect(struct mm_struct *mm,
-
include/linux/pgtable.h:867:38-867:56: static inline void arch_do_swap_page(struct mm_struct *mm,
-
include/linux/pgtable.h:885:34-885:52: static inline int arch_unmap_one(struct mm_struct *mm,
-
include/linux/sched/coredump.h:29:32-29:50: static inline int get_dumpable(struct mm_struct *mm)
-
include/linux/sched/mm.h:34:27-34:45: static inline void mmgrab(struct mm_struct *mm)
-
include/linux/sched/mm.h:46:27-46:45: static inline void mmdrop(struct mm_struct *mm)
-
include/linux/sched/mm.h:80:33-80:51: static inline void mmdrop_sched(struct mm_struct *mm)
-
include/linux/sched/mm.h:87:36-87:54: static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
-
include/linux/sched/mm.h:93:36-93:54: static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
-
include/linux/sched/mm.h:106:42-106:60: static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
-
include/linux/sched/mm.h:130:26-130:44: static inline void mmget(struct mm_struct *mm)
-
include/linux/sched/mm.h:135:35-135:53: static inline bool mmget_not_zero(struct mm_struct *mm)
-
include/linux/sched/mm.h:453:60-453:78: static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
-
include/trace/events/huge_memory.h:56:1-56:1: TRACE_EVENT(mm_khugepaged_scan_pmd,
-
include/trace/events/huge_memory.h:93:1-93:1: TRACE_EVENT(mm_collapse_huge_page,
-
include/trace/events/huge_memory.h:148:1-148:1: TRACE_EVENT(mm_collapse_huge_page_swapin,
-
include/trace/events/huge_memory.h:175:1-175:1: TRACE_EVENT(mm_khugepaged_scan_file,
-
include/trace/events/huge_memory.h:209:1-209:1: TRACE_EVENT(mm_khugepaged_collapse_file,
-
include/trace/events/kmem.h:346:1-346:1: TRACE_EVENT(rss_stat,
-
include/trace/events/ksm.h:75:1-75:1: DECLARE_EVENT_CLASS(ksm_enter_exit_template,
-
include/trace/events/ksm.h:99:1-99:1: DEFINE_EVENT(ksm_enter_exit_template, ksm_enter,
-
include/trace/events/ksm.h:113:1-113:1: DEFINE_EVENT(ksm_enter_exit_template, ksm_exit,
-
include/trace/events/ksm.h:130:1-130:1: TRACE_EVENT(ksm_merge_one_page,
-
include/trace/events/ksm.h:165:1-165:1: TRACE_EVENT(ksm_merge_with_ksm_page,
-
include/trace/events/ksm.h:226:1-226:1: TRACE_EVENT(ksm_remove_rmap_item,
-
include/trace/events/mmap.h:98:1-98:1: TRACE_EVENT(exit_mmap,
-
include/trace/events/mmap_lock.h:16:1-16:1: DECLARE_EVENT_CLASS(mmap_lock,
-
include/trace/events/mmap_lock.h:49:1-49:1: DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
-
include/trace/events/mmap_lock.h:50:1-50:1: DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
-
include/trace/events/mmap_lock.h:52:1-52:1: TRACE_EVENT_FN(mmap_lock_acquire_returned,
-
include/trace/events/xen.h:283:1-283:1: DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
-
include/trace/events/xen.h:284:1-284:1: DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
-
include/trace/events/xen.h:286:1-286:1: TRACE_EVENT(xen_mmu_alloc_ptpage,
-
include/trace/events/xen.h:336:1-336:1: DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
-
include/trace/events/xen.h:337:1-337:1: DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
-
include/trace/events/xen.h:349:1-349:1: TRACE_EVENT(xen_mmu_flush_tlb_multi,
-
kernel/audit.c:2193:6-2193:24: struct mm_struct *mm)
-
kernel/bpf/mmap_unlock_work.h:49:74-49:92: static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
-
kernel/cgroup/cpuset.c:1970:31-1970:49: static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
-
kernel/events/core.c:7483:34-7483:52: static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
-
kernel/events/core.c:10629:8-10629:26: struct mm_struct *mm,
-
kernel/events/uprobes.c:282:45-282:63: delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:292:54-292:72: static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:317:58-317:76: static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:351:41-351:59: find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:364:18-364:36: __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
-
kernel/events/uprobes.c:403:5-403:23: struct mm_struct *mm, short d)
-
kernel/events/uprobes.c:412:50-412:68: static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:459:54-459:72: int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:569:50-569:68: int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:584:44-584:62: set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:833:5-833:23: struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:867:36-867:54: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
kernel/events/uprobes.c:873:33-873:51: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
kernel/events/uprobes.c:890:43-890:61: install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:918:42-918:60: remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:1232:50-1232:68: static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:1437:24-1437:42: static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
-
kernel/events/uprobes.c:1541:25-1541:43: void uprobe_clear_state(struct mm_struct *mm)
-
kernel/events/uprobes.c:1985:32-1985:50: static void mmf_recalc_uprobes(struct mm_struct *mm)
-
kernel/events/uprobes.c:2006:28-2006:46: static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
-
kernel/exit.c:446:27-446:45: void mm_update_next_owner(struct mm_struct *mm)
-
kernel/fork.c:481:38-481:56: struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
-
kernel/fork.c:631:29-631:47: static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
-
kernel/fork.c:646:38-646:56: static __latent_entropy int dup_mmap(struct mm_struct *mm,
-
kernel/fork.c:793:32-793:50: static inline int mm_alloc_pgd(struct mm_struct *mm)
-
kernel/fork.c:801:32-801:50: static inline void mm_free_pgd(struct mm_struct *mm)
-
kernel/fork.c:817:22-817:40: static void check_mm(struct mm_struct *mm)
-
kernel/fork.c:862:31-862:49: static void cleanup_lazy_tlbs(struct mm_struct *mm)
-
kernel/fork.c:910:15-910:33: void __mmdrop(struct mm_struct *mm)
-
kernel/fork.c:940:26-940:44: static void mmdrop_async(struct mm_struct *mm)
-
kernel/fork.c:1226:25-1226:43: static void mm_init_aio(struct mm_struct *mm)
-
kernel/fork.c:1234:44-1234:62: static __always_inline void mm_clear_owner(struct mm_struct *mm,
-
kernel/fork.c:1243:27-1243:45: static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-
kernel/fork.c:1250:35-1250:53: static void mm_init_uprobes_state(struct mm_struct *mm)
-
kernel/fork.c:1257:34-1257:52: static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
-
kernel/fork.c:1341:28-1341:46: static inline void __mmput(struct mm_struct *mm)
-
kernel/fork.c:1366:12-1366:30: void mmput(struct mm_struct *mm)
-
kernel/fork.c:1384:18-1384:36: void mmput_async(struct mm_struct *mm)
-
kernel/fork.c:1405:21-1405:39: int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
-
kernel/fork.c:1440:25-1440:43: int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
-
kernel/fork.c:1490:30-1490:48: struct file *get_mm_exe_file(struct mm_struct *mm)
-
kernel/fork.c:1616:49-1616:67: static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/fork.c:1649:47-1649:65: void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/fork.c:1655:47-1655:65: void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/kthread.c:1410:21-1410:39: void kthread_use_mm(struct mm_struct *mm)
-
kernel/kthread.c:1456:23-1456:41: void kthread_unuse_mm(struct mm_struct *mm)
-
kernel/sched/core.c:11850:39-11850:57: static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
-
kernel/sched/core.c:11909:43-11909:61: static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
-
kernel/sched/core.c:11942:46-11942:64: static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
-
kernel/sched/membarrier.c:217:27-217:45: void membarrier_exec_mmap(struct mm_struct *mm)
-
kernel/sched/membarrier.c:426:44-426:62: static int sync_runqueues_membarrier_state(struct mm_struct *mm)
-
kernel/sched/sched.h:3306:33-3306:51: static inline void __mm_cid_put(struct mm_struct *mm, int cid)
-
kernel/sched/sched.h:3335:37-3335:55: static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
-
kernel/sched/sched.h:3356:31-3356:49: static inline void mm_cid_put(struct mm_struct *mm)
-
kernel/sched/sched.h:3367:36-3367:54: static inline int __mm_cid_try_get(struct mm_struct *mm)
-
kernel/sched/sched.h:3393:56-3393:74: static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
-
kernel/sched/sched.h:3401:47-3401:65: static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
-
kernel/sched/sched.h:3453:45-3453:63: static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
-
kernel/sys.c:1890:34-1890:52: static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
-
kernel/sys.c:2097:27-2097:45: static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
-
kernel/trace/bpf_trace.c:3121:5-3121:23: struct mm_struct *mm)
-
kernel/trace/trace_events_user.c:391:35-391:57: static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
-
kernel/trace/trace_events_user.c:474:44-474:66: static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
-
kernel/trace/trace_events_user.c:506:37-506:59: static int user_event_enabler_write(struct user_event_mm *mm,
-
kernel/trace/trace_events_user.c:560:39-560:61: static bool user_event_enabler_exists(struct user_event_mm *mm,
-
kernel/trace/trace_events_user.c:613:8-613:30: struct user_event_mm *mm)
-
kernel/trace/trace_events_user.c:638:48-638:70: static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
-
kernel/trace/trace_events_user.c:743:35-743:57: static void user_event_mm_destroy(struct user_event_mm *mm)
-
kernel/trace/trace_events_user.c:754:31-754:53: static void user_event_mm_put(struct user_event_mm *mm)
-
kernel/trace/trace_output.c:387:51-387:69: static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-
kernel/trace/trace_uprobe.c:1198:58-1198:76: __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
-
kernel/trace/trace_uprobe.c:1320:33-1320:51: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
mm/damon/vaddr.c:116:37-116:55: static int __damon_va_three_regions(struct mm_struct *mm,
-
mm/damon/vaddr.c:338:45-338:63: static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
-
mm/damon/vaddr.c:396:28-396:46: static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
-
mm/damon/vaddr.c:407:45-407:63: static void __damon_va_prepare_access_check(struct mm_struct *mm,
-
mm/damon/vaddr.c:540:28-540:46: static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
-
mm/damon/vaddr.c:560:37-560:55: static void __damon_va_check_access(struct mm_struct *mm,
-
mm/debug.c:162:14-162:38: void dump_mm(const struct mm_struct *mm)
-
mm/gup.c:857:26-857:44: static int get_gate_page(struct mm_struct *mm, unsigned long address,
-
mm/gup.c:1095:46-1095:64: static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
-
mm/gup.c:1184:30-1184:48: static long __get_user_pages(struct mm_struct *mm,
-
mm/gup.c:1379:22-1379:40: int fixup_user_fault(struct mm_struct *mm,
-
mm/gup.c:1464:53-1464:71: static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
-
mm/gup.c:2187:35-2187:53: static long __gup_longterm_locked(struct mm_struct *mm,
-
mm/gup.c:2326:28-2326:46: long get_user_pages_remote(struct mm_struct *mm,
-
mm/gup.c:3336:28-3336:46: long pin_user_pages_remote(struct mm_struct *mm,
-
mm/huge_memory.c:191:36-191:54: struct page *mm_get_huge_zero_page(struct mm_struct *mm)
-
mm/huge_memory.c:205:28-205:46: void mm_put_huge_zero_page(struct mm_struct *mm)
-
mm/huge_memory.c:760:51-760:69: static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
-
mm/huge_memory.c:1650:40-1650:58: static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
-
mm/hugetlb.c:4764:47-4764:65: void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
-
mm/hugetlb.c:5504:31-5504:49: static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:5560:30-5560:48: static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:5832:50-5832:68: static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
-
mm/hugetlb.c:5845:35-5845:53: static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
-
mm/hugetlb.c:6079:26-6079:44: vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:6990:23-6990:41: pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7047:22-7047:40: int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7092:23-7092:41: pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7135:24-7135:42: pte_t *huge_pte_offset(struct mm_struct *mm,
-
mm/khugepaged.c:407:44-407:62: static inline int hpage_collapse_test_exit(struct mm_struct *mm)
-
mm/khugepaged.c:412:25-412:43: void __khugepaged_enter(struct mm_struct *mm)
-
mm/khugepaged.c:454:24-454:42: void __khugepaged_exit(struct mm_struct *mm)
-
mm/khugepaged.c:910:36-910:54: static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
-
mm/khugepaged.c:941:36-941:54: static int find_pmd_or_thp_or_none(struct mm_struct *mm,
-
mm/khugepaged.c:965:34-965:52: static int check_pmd_still_valid(struct mm_struct *mm,
-
mm/khugepaged.c:986:40-986:58: static int __collapse_huge_page_swapin(struct mm_struct *mm,
-
mm/khugepaged.c:1058:52-1058:70: static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
-
mm/khugepaged.c:1080:31-1080:49: static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
-
mm/khugepaged.c:1240:36-1240:54: static int hpage_collapse_scan_pmd(struct mm_struct *mm,
-
mm/khugepaged.c:1469:29-1469:47: int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-
mm/khugepaged.c:1788:26-1788:44: static int collapse_file(struct mm_struct *mm, unsigned long addr,
-
mm/khugepaged.c:2215:37-2215:55: static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-
mm/ksm.c:427:34-427:52: static inline bool ksm_test_exit(struct mm_struct *mm)
-
mm/ksm.c:555:50-555:68: static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
-
mm/ksm.c:2577:26-2577:44: static void ksm_add_vmas(struct mm_struct *mm)
-
mm/ksm.c:2586:25-2586:43: static int ksm_del_vmas(struct mm_struct *mm)
-
mm/ksm.c:2608:26-2608:44: int ksm_enable_merge_any(struct mm_struct *mm)
-
mm/ksm.c:2639:27-2639:45: int ksm_disable_merge_any(struct mm_struct *mm)
-
mm/ksm.c:2656:17-2656:35: int ksm_disable(struct mm_struct *mm)
-
mm/ksm.c:2707:17-2707:35: int __ksm_enter(struct mm_struct *mm)
-
mm/ksm.c:2750:17-2750:35: void __ksm_exit(struct mm_struct *mm)
-
mm/ksm.c:3108:25-3108:43: long ksm_process_profit(struct mm_struct *mm)
-
mm/madvise.c:1229:23-1229:41: int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
-
mm/madvise.c:1311:27-1311:45: int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-
mm/madvise.c:1407:16-1407:34: int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
-
mm/memcontrol.c:1032:43-1032:61: struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
-
mm/memcontrol.c:6048:49-6048:67: static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
-
mm/memcontrol.c:6062:36-6062:54: static int mem_cgroup_precharge_mc(struct mm_struct *mm)
-
mm/memcontrol.c:7011:46-7011:64: int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
-
mm/memcontrol.c:7035:57-7035:75: int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
-
mm/memory.c:179:24-179:42: void mm_trace_rss_stat(struct mm_struct *mm, int member)
-
mm/memory.c:409:18-409:36: void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
-
mm/memory.c:435:17-435:35: int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
-
mm/memory.c:470:35-470:53: static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
-
mm/memory.c:1796:27-1796:45: static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
-
mm/memory.c:1818:25-1818:43: pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2334:28-2334:46: static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-
mm/memory.c:2360:35-2360:53: static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
-
mm/memory.c:2383:35-2383:53: static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
-
mm/memory.c:2405:35-2405:53: static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
-
mm/memory.c:2561:31-2561:49: static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
-
mm/memory.c:2604:31-2604:49: static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
-
mm/memory.c:2642:31-2642:49: static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
-
mm/memory.c:2678:31-2678:49: static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
-
mm/memory.c:2714:34-2714:52: static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2755:25-2755:43: int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2769:34-2769:52: int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:5140:37-5140:55: static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
-
mm/memory.c:5312:44-5312:62: static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
-
mm/memory.c:5326:41-5326:59: static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
-
mm/memory.c:5339:48-5339:66: static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
-
mm/memory.c:5369:45-5369:63: struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
-
mm/memory.c:5431:43-5431:61: struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
-
mm/memory.c:5484:17-5484:35: int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-
mm/memory.c:5507:17-5507:35: int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
-
mm/memory.c:5530:17-5530:35: int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-
mm/memory.c:5571:16-5571:34: int follow_pte(struct mm_struct *mm, unsigned long address,
-
mm/memory.c:5739:24-5739:42: int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
-
mm/memory.c:5829:22-5829:40: int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-
mm/mempolicy.c:381:21-381:39: void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-
mm/mempolicy.c:743:19-743:37: queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
-
mm/mempolicy.c:920:24-920:42: static int lookup_node(struct mm_struct *mm, unsigned long addr)
-
mm/mempolicy.c:1067:28-1067:46: static int migrate_to_node(struct mm_struct *mm, int source, int dest,
-
mm/mempolicy.c:1108:22-1108:40: int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
-
mm/migrate.c:303:27-303:45: void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-
mm/migrate.c:364:31-364:49: void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
-
mm/migrate.c:2032:34-2032:52: static int do_move_pages_to_node(struct mm_struct *mm,
-
mm/migrate.c:2057:35-2057:53: static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
-
mm/migrate.c:2129:40-2129:58: static int move_pages_and_store_status(struct mm_struct *mm, int node,
-
mm/migrate.c:2159:26-2159:44: static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
-
mm/migrate.c:2265:33-2265:51: static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
-
mm/migrate.c:2328:26-2328:44: static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-
mm/mlock.c:534:47-534:65: static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
-
mm/mmap.c:281:25-281:43: static void validate_mm(struct mm_struct *mm)
-
mm/mmap.c:367:44-367:62: static unsigned long count_vma_pages_range(struct mm_struct *mm,
-
mm/mmap.c:395:21-395:39: static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
-
mm/mmap.c:511:31-511:49: struct vma_iterator *vmi, struct mm_struct *mm)
-
mm/mmap.c:863:60-863:78: struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
-
mm/mmap.c:1151:22-1151:40: bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
-
mm/mmap.c:1853:46-1853:64: struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
-
mm/mmap.c:1872:33-1872:51: struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-
mm/mmap.c:1895:15-1895:33: find_vma_prev(struct mm_struct *mm, unsigned long addr,
-
mm/mmap.c:2187:47-2187:65: struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
-
mm/mmap.c:2265:37-2265:55: struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
-
mm/mmap.c:2299:30-2299:48: static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-
mm/mmap.c:2322:26-2322:44: static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
-
mm/mmap.c:2453:7-2453:25: struct mm_struct *mm, unsigned long start,
-
mm/mmap.c:2618:45-2618:63: int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
-
mm/mmap.c:2654:15-2654:33: int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
-
mm/mmap.c:3204:16-3204:34: void exit_mmap(struct mm_struct *mm)
-
mm/mmap.c:3271:22-3271:40: int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
-
mm/mmap.c:3402:20-3402:38: bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
-
mm/mmap.c:3427:22-3427:40: void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
-
mm/mmap.c:3523:2-3523:20: struct mm_struct *mm,
-
mm/mmap.c:3578:2-3578:20: struct mm_struct *mm,
-
mm/mmap.c:3586:29-3586:47: int install_special_mapping(struct mm_struct *mm,
-
mm/mmap.c:3599:30-3599:48: static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
-
mm/mmap.c:3622:29-3622:47: static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
-
mm/mmap.c:3678:23-3678:41: int mm_take_all_locks(struct mm_struct *mm)
-
mm/mmap.c:3774:24-3774:42: void mm_drop_all_locks(struct mm_struct *mm)
-
mm/mmap_lock.c:199:38-199:56: static const char *get_mm_memcg_path(struct mm_struct *mm)
-
mm/mmap_lock.c:228:41-228:59: void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
-
mm/mmap_lock.c:234:44-234:62: void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
-
mm/mmap_lock.c:241:36-241:54: void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
-
mm/mmu_gather.c:303:54-303:72: static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-
mm/mmu_gather.c:336:45-336:63: void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
-
mm/mmu_gather.c:352:52-352:70: void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
-
mm/mmu_notifier.c:262:9-262:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:300:9-300:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:348:29-348:47: void __mmu_notifier_release(struct mm_struct *mm)
-
mm/mmu_notifier.c:365:38-365:56: int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:385:32-385:50: int __mmu_notifier_clear_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:405:31-405:49: int __mmu_notifier_test_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:427:32-427:50: void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
-
mm/mmu_notifier.c:588:52-588:70: void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
-
mm/mmu_notifier.c:612:8-612:26: struct mm_struct *mm)
-
mm/mmu_notifier.c:715:6-715:24: struct mm_struct *mm)
-
mm/mmu_notifier.c:727:23-727:41: find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
-
mm/mmu_notifier.c:767:11-767:29: struct mm_struct *mm)
-
mm/mmu_notifier.c:795:43-795:61: void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
-
mm/mmu_notifier.c:813:9-813:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:906:46-906:64: struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
-
mm/mmu_notifier.c:988:6-988:24: struct mm_struct *mm, unsigned long start,
-
mm/mmu_notifier.c:1010:46-1010:64: struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
-
mm/mremap.c:35:27-35:45: static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
-
mm/mremap.c:56:27-56:45: static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
-
mm/mremap.c:72:29-72:47: static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/mremap.c:86:29-86:47: static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/oom_kill.c:488:47-488:65: bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
-
mm/oom_kill.c:510:32-510:50: static bool __oom_reap_task_mm(struct mm_struct *mm)
-
mm/oom_kill.c:566:55-566:73: static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
-
mm/page_table_check.c:152:35-152:53: void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
-
mm/page_table_check.c:163:35-163:53: void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
-
mm/page_table_check.c:174:35-174:53: void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
-
mm/page_table_check.c:185:34-185:52: void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
-
mm/page_table_check.c:200:33-200:51: void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
-
mm/page_table_check.c:213:33-213:51: void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
-
mm/page_table_check.c:226:41-226:59: void __page_table_check_pte_clear_range(struct mm_struct *mm,
-
mm/pagewalk.c:403:41-403:59: static inline void process_mm_walk_lock(struct mm_struct *mm,
-
mm/pagewalk.c:470:21-470:39: int walk_page_range(struct mm_struct *mm, unsigned long start,
-
mm/pagewalk.c:543:27-543:45: int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
-
mm/pgalloc-track.h:6:38-6:56: static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
-
mm/pgalloc-track.h:19:38-19:56: static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
-
mm/pgalloc-track.h:32:38-32:56: static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
-
mm/pgtable-generic.c:165:33-165:51: void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-
mm/pgtable-generic.c:181:39-181:57: pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-
mm/pgtable-generic.c:245:21-245:39: void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
-
mm/pgtable-generic.c:306:30-306:48: pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
-
mm/pgtable-generic.c:362:30-362:48: pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
-
mm/process_vm_access.c:74:9-74:27: struct mm_struct *mm,
-
mm/ptdump.c:151:47-151:65: void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
-
mm/rmap.c:645:39-645:57: static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
-
mm/rmap.c:690:32-690:50: static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
-
mm/rmap.c:713:32-713:50: void flush_tlb_batched_pending(struct mm_struct *mm)
-
mm/rmap.c:771:20-771:38: pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
-
mm/rmap.c:2280:3-2280:21: struct mm_struct *mm, unsigned long address, void *owner)
-
mm/rmap.c:2328:33-2328:51: int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
-
mm/shmem.c:539:6-539:24: struct mm_struct *mm, unsigned long vm_flags)
-
mm/swapfile.c:1987:21-1987:39: static int unuse_mm(struct mm_struct *mm, unsigned int type)
-
mm/userfaultfd.c:329:28-329:46: static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
-
mm/util.c:474:25-474:43: int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
-
mm/util.c:517:23-517:41: int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
-
mm/util.c:931:24-931:42: int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
-
mm/vmscan.c:3440:21-3440:39: void lru_gen_add_mm(struct mm_struct *mm)
-
mm/vmscan.c:3466:21-3466:39: void lru_gen_del_mm(struct mm_struct *mm)
-
mm/vmscan.c:3505:25-3505:43: void lru_gen_migrate_mm(struct mm_struct *mm)
-
mm/vmscan.c:3559:28-3559:46: static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-
mm/vmscan.c:4282:44-4282:62: static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-
mm/z3fold.c:1395:4-1395:23: enum zpool_mapmode mm)
-
mm/zbud.c:404:4-404:23: enum zpool_mapmode mm)
-
mm/zsmalloc.c:384:4-384:23: enum zpool_mapmode mm)
-
mm/zsmalloc.c:1193:4-1193:20: enum zs_mapmode mm)
-
net/ipv4/tcp.c:2042:44-2042:62: static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
-
security/commoncap.c:1400:26-1400:44: int cap_vm_enough_memory(struct mm_struct *mm, long pages)
-
security/security.c:1021:34-1021:52: int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
-
security/selinux/hooks.c:2212:37-2212:55: static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
-
virt/kvm/kvm_main.c:717:6-717:24: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:867:12-867:30: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:878:6-878:24: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:901:12-901:30: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:911:10-911:28: struct mm_struct *mm)
variable
Defined...
-
arch/x86/entry/vdso/vma.c:113:2-113:31: struct mm_struct *mm = task->mm;
-
arch/x86/entry/vdso/vma.c:225:2-225:34: struct mm_struct *mm = current->mm;
-
arch/x86/entry/vdso/vma.c:332:2-332:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/ldt.c:138:2-138:25: struct mm_struct *mm = __mm;
-
arch/x86/kernel/ldt.c:504:2-504:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/ldt.c:580:2-580:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/shstk.c:104:2-104:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/sys_x86_64.c:126:2-126:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/sys_x86_64.c:166:2-166:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/fault.c:873:2-873:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/fault.c:1245:2-1245:20: struct mm_struct *mm;
-
arch/x86/mm/hugetlbpage.c:117:2-117:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/mmap.c:155:2-155:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/tlb.c:702:2-702:25: struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-
arch/x86/xen/mmu_pv.c:913:2-913:25: struct mm_struct *mm = info;
-
drivers/acpi/acpica/evrgnini.c:41:2-41:27: struct acpi_mem_mapping *mm;
-
drivers/acpi/acpica/exregion.c:44:2-44:42: struct acpi_mem_mapping *mm = mem_info->cur_mm;
-
drivers/android/binder_alloc.c:188:2-188:25: struct mm_struct *mm = NULL;
-
drivers/android/binder_alloc.c:984:2-984:25: struct mm_struct *mm = NULL;
-
drivers/dma-buf/dma-resv.c:768:2-768:34: struct mm_struct *mm = mm_alloc();
-
drivers/dma/idxd/cdev.c:646:2-646:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:2639:2-2639:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c:169:2-169:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c:666:2-666:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:273:2-273:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:432:2-432:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:621:2-621:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:841:2-841:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c:2893:2-2893:25: struct mm_struct *mm = NULL;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1146:2-1146:25: struct mm_struct *mm = NULL;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:2773:2-2773:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:898:2-898:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:104:2-104:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:2153:2-2153:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:531:2-531:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1772:2-1772:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2287:2-2287:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2887:2-2887:25: struct mm_struct *mm = NULL;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3437:2-3437:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3801:2-3801:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:4017:2-4017:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:4105:2-4105:34: struct mm_struct *mm = current->mm;
-
drivers/gpu/drm/drm_mm.c:167:2-167:33: struct drm_mm *mm = hole_node->mm;
-
drivers/gpu/drm/drm_mm.c:268:2-268:28: struct drm_mm *mm = node->mm;
-
drivers/gpu/drm/drm_mm.c:629:2-629:28: struct drm_mm *mm = node->mm;
-
drivers/gpu/drm/drm_mm.c:662:2-662:27: struct drm_mm *mm = old->mm;
-
drivers/gpu/drm/drm_mm.c:783:2-783:28: struct drm_mm *mm = scan->mm;
-
drivers/gpu/drm/drm_mm.c:914:2-914:28: struct drm_mm *mm = scan->mm;
-
drivers/gpu/drm/i915/gem/i915_gem_mman.c:106:3-106:35: struct mm_struct *mm = current->mm;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:659:2-659:53: struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:862:2-862:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1818:2-1819:39: struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1901:2-1902:39: struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
-
drivers/gpu/drm/i915/gvt/gtt.c:1831:2-1831:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1864:2-1864:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1905:2-1905:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1950:2-1950:29: struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
-
drivers/gpu/drm/i915/gvt/gtt.c:2016:2-2016:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2489:2-2489:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2604:2-2604:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2641:2-2641:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2666:2-2666:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2762:2-2762:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2831:2-2831:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/handlers.c:1472:2-1472:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:437:2-437:39: struct intel_vgpu_mm *mm = workload->shadow_mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:1526:3-1526:29: struct intel_vgpu_mm *m, *mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:1583:2-1583:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/i915_scatterlist.c:170:2-170:35: struct drm_buddy *mm = bman_res->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:41:2-41:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:176:2-176:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:214:2-214:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:353:2-353:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:391:2-391:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:374:2-374:20: struct drm_buddy *mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:459:2-459:20: struct drm_buddy *mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:538:2-538:35: struct drm_buddy *mm = bman_res->mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:117:2-117:20: struct mm_struct *mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:595:2-595:40: struct mm_struct *mm = svmm->notifier.mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:667:2-667:40: struct mm_struct *mm = svmm->notifier.mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:782:3-782:21: struct mm_struct *mm;
-
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c:116:2-116:18: struct nvkm_mm *mm;
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:318:2-318:41: struct nvkm_mm *mm = &device->fb->ram->vram;
-
drivers/gpu/drm/tests/drm_buddy_test.c:332:2-332:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:410:2-410:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:502:2-502:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:597:2-597:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:642:2-642:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:701:2-701:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:197:2-197:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:250:2-250:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:347:2-347:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:531:2-531:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:815:2-815:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:900:2-900:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:993:2-993:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1057:2-1057:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1102:2-1102:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1395:2-1395:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1489:2-1489:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1576:2-1576:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1683:2-1683:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1769:2-1769:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1850:2-1850:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:2051:2-2051:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:2135:2-2135:16: struct drm_mm mm;
-
drivers/gpu/drm/ttm/ttm_range_manager.c:65:2-65:29: struct drm_mm *mm = &rman->mm;
-
drivers/gpu/drm/ttm/ttm_range_manager.c:219:2-219:29: struct drm_mm *mm = &rman->mm;
-
drivers/infiniband/core/umem.c:157:2-157:20: struct mm_struct *mm;
-
drivers/infiniband/core/uverbs_main.c:827:3-827:26: struct mm_struct *mm = NULL;
-
drivers/infiniband/hw/cxgb4/cq.c:1009:2-1009:24: struct c4iw_mm_entry *mm, *mm2;
-
drivers/infiniband/hw/cxgb4/iw_cxgb4.h:546:2-546:24: struct c4iw_mm_entry *mm;
-
drivers/infiniband/hw/cxgb4/provider.c:66:2-66:24: struct c4iw_mm_entry *mm, *tmp;
-
drivers/infiniband/hw/cxgb4/provider.c:84:2-84:29: struct c4iw_mm_entry *mm = NULL;
-
drivers/infiniband/hw/cxgb4/provider.c:131:2-131:24: struct c4iw_mm_entry *mm;
-
drivers/infiniband/hw/hfi1/user_exp_rcv.c:141:2-141:20: struct mm_struct *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:196:2-196:20: struct ocrdma_mm *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:214:2-214:20: struct ocrdma_mm *mm, *tmp;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:232:2-232:20: struct ocrdma_mm *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:522:2-522:20: struct ocrdma_mm *mm, *tmp;
-
drivers/infiniband/hw/usnic/usnic_uiom.c:100:2-100:20: struct mm_struct *mm;
-
drivers/iommu/amd/iommu_v2.c:478:2-478:20: struct mm_struct *mm;
-
drivers/iommu/amd/iommu_v2.c:606:2-606:20: struct mm_struct *mm;
-
drivers/iommu/intel/svm.c:371:2-371:20: struct mm_struct *mm;
-
drivers/iommu/intel/svm.c:799:2-799:33: struct mm_struct *mm = domain->mm;
-
drivers/iommu/iommu-sva.c:156:2-156:25: struct mm_struct *mm = data;
-
drivers/media/dvb-frontends/drxd_hard.c:258:2-261:2: u8 mm[6] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxd_hard.c:271:2-275:2: u8 mm[8] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxd_hard.c:286:2-288:2: u8 mm[CHUNK_SIZE + 4] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxk_hard.c:340:2-340:37: u8 adr = state->demod_address, mm[6], len;
-
drivers/media/dvb-frontends/drxk_hard.c:369:2-369:37: u8 adr = state->demod_address, mm[8], len;
-
drivers/misc/sgi-gru/grufault.c:68:2-68:34: struct mm_struct *mm = current->mm;
-
drivers/misc/sgi-gru/grufault.c:85:2-85:34: struct mm_struct *mm = current->mm;
-
drivers/misc/sgi-gru/grufault.c:255:2-255:30: struct mm_struct *mm = gts->ts_mm;
-
drivers/net/arcnet/com20020-pci.c:123:2-123:35: struct com20020_pci_channel_map *mm;
-
drivers/net/dsa/ocelot/felix_vsc9959.c:1216:2-1216:47: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/dsa/ocelot/felix_vsc9959.c:2538:3-2538:48: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c:199:2-199:34: struct mm_struct *mm = current->mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:55:2-55:47: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/ethernet/mscc/ocelot_mm.c:93:2-93:47: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/ethernet/mscc/ocelot_mm.c:108:2-108:47: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/ethernet/mscc/ocelot_mm.c:174:2-174:26: struct ocelot_mm_state *mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:238:2-238:26: struct ocelot_mm_state *mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:275:2-275:26: struct ocelot_mm_state *mm;
-
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c:1507:2-1507:18: u8 af = 0, mm = 0;
-
drivers/pcmcia/rsrc_nonstatic.c:538:2-538:26: struct resource_map *m, mm;
-
drivers/usb/gadget/legacy/inode.c:465:2-465:31: struct mm_struct *mm = priv->mm;
-
drivers/vdpa/vdpa_sim/vdpa_sim.c:175:2-175:34: struct mm_struct *mm = vdpasim->mm_bound;
-
drivers/vfio/pci/vfio_pci_core.c:1619:3-1619:26: struct mm_struct *mm = NULL;
-
drivers/vfio/vfio_iommu_type1.c:427:2-427:20: struct mm_struct *mm;
-
drivers/vfio/vfio_iommu_type1.c:618:2-618:34: struct mm_struct *mm = current->mm;
-
drivers/vfio/vfio_iommu_type1.c:749:2-749:20: struct mm_struct *mm;
-
drivers/vfio/vfio_iommu_type1.c:1528:2-1528:34: struct mm_struct *mm = current->mm;
-
drivers/vfio/vfio_iommu_type1.c:3059:2-3059:20: struct mm_struct *mm;
-
drivers/xen/privcmd.c:259:2-259:34: struct mm_struct *mm = current->mm;
-
drivers/xen/privcmd.c:453:2-453:34: struct mm_struct *mm = current->mm;
-
drivers/xen/privcmd.c:730:2-730:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:357:2-357:30: struct mm_struct *mm = vma->vm_mm;
-
fs/aio.c:494:2-494:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:731:2-731:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:1074:2-1074:34: struct mm_struct *mm = current->mm;
-
fs/binfmt_elf.c:179:2-179:34: struct mm_struct *mm = current->mm;
-
fs/binfmt_elf.c:843:2-843:20: struct mm_struct *mm;
-
fs/coredump.c:523:2-523:34: struct mm_struct *mm = current->mm;
-
fs/coredump.c:1148:2-1148:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:187:2-187:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:202:2-202:31: struct mm_struct *mm = bprm->mm;
-
fs/exec.c:257:2-257:31: struct mm_struct *mm = bprm->mm;
-
fs/exec.c:371:2-371:25: struct mm_struct *mm = NULL;
-
fs/exec.c:685:2-685:30: struct mm_struct *mm = vma->vm_mm;
-
fs/exec.c:755:2-755:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:2087:2-2087:34: struct mm_struct *mm = current->mm;
-
fs/hugetlbfs/inode.c:239:2-239:34: struct mm_struct *mm = current->mm;
-
fs/hugetlbfs/inode.c:820:2-820:34: struct mm_struct *mm = current->mm;
-
fs/proc/array.c:442:2-442:41: struct mm_struct *mm = get_task_mm(task);
-
fs/proc/array.c:478:2-478:20: struct mm_struct *mm;
-
fs/proc/array.c:677:2-677:41: struct mm_struct *mm = get_task_mm(task);
-
fs/proc/base.c:346:2-346:20: struct mm_struct *mm;
-
fs/proc/base.c:799:2-799:39: struct mm_struct *mm = ERR_PTR(-ESRCH);
-
fs/proc/base.c:818:2-818:50: struct mm_struct *mm = proc_mem_open(inode, mode);
-
fs/proc/base.c:840:2-840:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:922:2-922:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:947:2-947:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:1019:2-1019:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:1062:2-1062:25: struct mm_struct *mm = NULL;
-
fs/proc/base.c:1849:3-1849:21: struct mm_struct *mm;
-
fs/proc/base.c:2145:2-2145:25: struct mm_struct *mm = NULL;
-
fs/proc/base.c:2197:2-2197:20: struct mm_struct *mm;
-
fs/proc/base.c:2295:2-2295:20: struct mm_struct *mm;
-
fs/proc/base.c:2348:2-2348:20: struct mm_struct *mm;
-
fs/proc/base.c:2904:2-2904:20: struct mm_struct *mm;
-
fs/proc/base.c:2933:2-2933:20: struct mm_struct *mm;
-
fs/proc/base.c:3192:2-3192:20: struct mm_struct *mm;
-
fs/proc/base.c:3205:2-3205:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:146:2-146:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:190:2-190:31: struct mm_struct *mm = priv->mm;
-
fs/proc/task_mmu.c:264:2-264:30: struct mm_struct *mm = vma->vm_mm;
-
fs/proc/task_mmu.c:881:2-881:31: struct mm_struct *mm = priv->mm;
-
fs/proc/task_mmu.c:1250:2-1250:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:1653:2-1653:31: struct mm_struct *mm = file->private_data;
-
fs/proc/task_mmu.c:1746:2-1746:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:1757:2-1757:31: struct mm_struct *mm = file->private_data;
-
fs/proc/task_mmu.c:1947:2-1947:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:328:2-328:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:415:2-415:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:653:3-653:43: struct mm_struct *mm = release_new_ctx->mm;
-
fs/userfaultfd.c:812:2-812:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:894:2-894:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:1318:2-1318:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:1555:2-1555:30: struct mm_struct *mm = ctx->mm;
-
ipc/shm.c:1733:2-1733:34: struct mm_struct *mm = current->mm;
-
kernel/acct.c:560:3-560:35: struct mm_struct *mm = current->mm;
-
kernel/bpf/task_iter.c:786:2-786:20: struct mm_struct *mm;
-
kernel/cgroup/cpuset.c:2057:3-2057:21: struct mm_struct *mm;
-
kernel/cgroup/cpuset.c:2699:3-2699:44: struct mm_struct *mm = get_task_mm(leader);
-
kernel/cpu.c:888:2-888:31: struct mm_struct *mm = idle->active_mm;
-
kernel/events/core.c:7542:2-7542:20: struct mm_struct *mm;
-
kernel/events/core.c:10653:2-10653:25: struct mm_struct *mm = NULL;
-
kernel/events/uprobes.c:158:2-158:30: struct mm_struct *mm = vma->vm_mm;
-
kernel/events/uprobes.c:1044:3-1044:32: struct mm_struct *mm = info->mm;
-
kernel/events/uprobes.c:1479:2-1479:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:1527:2-1527:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:1797:2-1797:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:2041:2-2041:34: struct mm_struct *mm = current->mm;
-
kernel/exit.c:537:2-537:34: struct mm_struct *mm = current->mm;
-
kernel/fork.c:846:2-846:25: struct mm_struct *mm = arg;
-
kernel/fork.c:853:2-853:25: struct mm_struct *mm = arg;
-
kernel/fork.c:934:2-934:20: struct mm_struct *mm;
-
kernel/fork.c:1331:2-1331:20: struct mm_struct *mm;
-
kernel/fork.c:1378:2-1378:25: struct mm_struct *mm = container_of(work, struct mm_struct,
-
kernel/fork.c:1512:2-1512:20: struct mm_struct *mm;
-
kernel/fork.c:1535:2-1535:20: struct mm_struct *mm;
-
kernel/fork.c:1552:2-1552:20: struct mm_struct *mm;
-
kernel/fork.c:1674:2-1674:20: struct mm_struct *mm;
-
kernel/fork.c:1710:2-1710:20: struct mm_struct *mm, *oldmm;
-
kernel/futex/core.c:224:2-224:34: struct mm_struct *mm = current->mm;
-
kernel/futex/core.c:411:2-411:34: struct mm_struct *mm = current->mm;
-
kernel/ptrace.c:47:2-47:20: struct mm_struct *mm;
-
kernel/ptrace.c:290:2-290:20: struct mm_struct *mm;
-
kernel/sched/core.c:5215:2-5215:29: struct mm_struct *mm = rq->prev_mm;
-
kernel/sched/core.c:9412:2-9412:34: struct mm_struct *mm = current->active_mm;
-
kernel/sched/core.c:11696:2-11696:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:11739:2-11739:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:11800:2-11800:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:11960:2-11960:20: struct mm_struct *mm;
-
kernel/sched/core.c:12001:2-12001:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:12028:2-12028:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:12052:2-12052:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:12076:2-12076:28: struct mm_struct *mm = t->mm;
-
kernel/sched/fair.c:3211:2-3211:28: struct mm_struct *mm = p->mm;
-
kernel/sched/fair.c:3395:2-3395:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:202:2-202:46: struct mm_struct *mm = (struct mm_struct *) info;
-
kernel/sched/membarrier.c:313:2-313:34: struct mm_struct *mm = current->mm;
-
kernel/sched/membarrier.c:486:2-486:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:505:2-505:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:547:2-547:28: struct mm_struct *mm = p->mm;
-
kernel/sched/sched.h:3323:2-3323:28: struct mm_struct *mm = t->mm;
-
kernel/sys.c:1848:3-1848:39: struct mm_struct *mm = get_task_mm(p);
-
kernel/sys.c:1993:2-1993:34: struct mm_struct *mm = current->mm;
-
kernel/sys.c:2130:2-2130:34: struct mm_struct *mm = current->mm;
-
kernel/sys.c:2322:2-2322:34: struct mm_struct *mm = current->mm;
-
kernel/sys.c:2400:2-2400:34: struct mm_struct *mm = current->mm;
-
kernel/trace/trace_events_user.c:429:2-429:36: struct user_event_mm *mm = fault->mm;
-
kernel/trace/trace_events_user.c:577:2-577:24: struct user_event_mm *mm;
-
kernel/trace/trace_events_user.c:649:2-649:24: struct user_event_mm *mm;
-
kernel/trace/trace_events_user.c:762:2-762:24: struct user_event_mm *mm;
-
kernel/trace/trace_events_user.c:770:2-770:24: struct user_event_mm *mm;
-
kernel/trace/trace_events_user.c:824:2-824:50: struct user_event_mm *mm = user_event_mm_alloc(t);
-
kernel/trace/trace_events_user.c:2454:2-2454:38: struct user_event_mm *mm = current->user_event_mm;
-
kernel/trace/trace_output.c:1268:2-1268:25: struct mm_struct *mm = NULL;
-
kernel/tsacct.c:93:2-93:20: struct mm_struct *mm;
-
lib/is_single_threaded.c:18:2-18:31: struct mm_struct *mm = task->mm;
-
lib/test_hmm.c:290:2-290:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:330:2-330:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:782:2-782:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:905:2-905:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:965:2-965:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:1111:2-1111:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:1167:2-1167:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/zlib_inflate/inffast.c:22:2-22:12: union uu mm;
-
lib/zlib_inflate/inffast.c:269:5-269:14: union uu mm;
-
mm/damon/vaddr.c:44:2-44:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:176:2-176:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:418:2-418:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:585:2-585:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:633:2-633:20: struct mm_struct *mm;
-
mm/filemap.c:3406:2-3406:35: struct mm_struct *mm = vmf->vma->vm_mm;
-
mm/gup.c:583:2-583:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:699:2-699:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:750:2-750:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:814:2-814:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1624:2-1624:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1693:2-1693:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1739:2-1739:34: struct mm_struct *mm = current->mm;
-
mm/gup.c:1936:2-1936:34: struct mm_struct *mm = current->mm;
-
mm/hmm.c:593:2-593:42: struct mm_struct *mm = range->notifier->mm;
-
mm/huge_memory.c:839:2-839:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:937:2-937:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1026:2-1026:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1181:2-1181:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1448:2-1448:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1586:2-1586:30: struct mm_struct *mm = tlb->mm;
-
mm/huge_memory.c:1753:2-1753:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1808:2-1808:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2028:2-2028:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2069:2-2069:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2975:2-2975:20: struct mm_struct *mm;
-
mm/huge_memory.c:3206:2-3206:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:3254:2-3254:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5223:2-5223:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5253:2-5253:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5313:2-5313:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:6493:2-6493:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:6563:2-6563:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:7303:2-7303:30: struct mm_struct *mm = vma->vm_mm;
-
mm/khugepaged.c:1414:2-1414:31: struct mm_struct *mm = slot->mm;
-
mm/khugepaged.c:1686:3-1686:21: struct mm_struct *mm;
-
mm/khugepaged.c:2330:2-2330:20: struct mm_struct *mm;
-
mm/khugepaged.c:2699:2-2699:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:569:2-569:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:588:2-588:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:1018:2-1018:20: struct mm_struct *mm;
-
mm/ksm.c:1098:2-1098:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:1189:2-1189:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:1357:2-1357:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:2117:2-2117:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:2310:2-2310:20: struct mm_struct *mm;
-
mm/ksm.c:2571:2-2571:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:2670:2-2670:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:142:2-142:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:283:2-283:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:349:2-349:30: struct mm_struct *mm = tlb->mm;
-
mm/madvise.c:567:2-567:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:600:2-600:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:630:2-630:30: struct mm_struct *mm = tlb->mm;
-
mm/madvise.c:771:2-771:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:864:2-864:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:919:2-919:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:981:2-981:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:1474:2-1474:20: struct mm_struct *mm;
-
mm/memcontrol.c:6114:2-6114:28: struct mm_struct *mm = mc.mm;
-
mm/memcontrol.c:6137:2-6137:20: struct mm_struct *mm;
-
mm/memory.c:1399:2-1399:30: struct mm_struct *mm = tlb->mm;
-
mm/memory.c:1898:2-1898:36: struct mm_struct *const mm = vma->vm_mm;
-
mm/memory.c:2111:2-2111:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:2437:2-2437:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:2812:2-2812:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:3057:2-3057:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:5042:2-5042:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:5259:2-5259:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:5843:2-5843:20: struct mm_struct *mm;
-
mm/memory.c:5863:2-5863:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:938:2-938:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1265:2-1265:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1507:2-1507:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1608:2-1608:25: struct mm_struct *mm = NULL;
-
mm/migrate.c:2364:2-2364:20: struct mm_struct *mm;
-
mm/migrate.c:2417:2-2417:20: struct mm_struct *mm;
-
mm/migrate_device.c:64:2-64:30: struct mm_struct *mm = vma->vm_mm;
-
mm/migrate_device.c:568:2-568:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mlock.c:416:2-416:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:181:2-181:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1209:2-1209:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1695:2-1695:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1744:2-1744:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1916:2-1916:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:2054:2-2054:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:2666:2-2666:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2921:2-2921:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2956:2-2956:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:3051:2-3051:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:3072:2-3072:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:3152:2-3152:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:3318:2-3318:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmu_notifier.c:858:2-858:39: struct mm_struct *mm = subscription->mm;
-
mm/mmu_notifier.c:889:2-889:39: struct mm_struct *mm = subscription->mm;
-
mm/mmu_notifier.c:1055:2-1055:39: struct mm_struct *mm = interval_sub->mm;
-
mm/mprotect.c:487:2-487:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mprotect.c:579:2-579:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:141:2-141:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:240:2-240:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:308:2-308:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:357:2-357:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:590:2-590:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:749:2-749:34: struct mm_struct *mm = current->mm;
-
mm/mremap.c:806:2-806:34: struct mm_struct *mm = current->mm;
-
mm/mremap.c:912:2-912:34: struct mm_struct *mm = current->mm;
-
mm/msync.c:35:2-35:34: struct mm_struct *mm = current->mm;
-
mm/oom_kill.c:610:2-610:38: struct mm_struct *mm = tsk->signal->oom_mm;
-
mm/oom_kill.c:664:2-664:38: struct mm_struct *mm = tsk->signal->oom_mm;
-
mm/oom_kill.c:758:2-758:30: struct mm_struct *mm = tsk->mm;
-
mm/oom_kill.c:869:2-869:31: struct mm_struct *mm = task->mm;
-
mm/oom_kill.c:917:2-917:20: struct mm_struct *mm;
-
mm/oom_kill.c:1198:2-1198:25: struct mm_struct *mm = NULL;
-
mm/page_vma_mapped.c:176:2-176:30: struct mm_struct *mm = vma->vm_mm;
-
mm/pgtable-generic.c:97:2-97:32: struct mm_struct *mm = (vma)->vm_mm;
-
mm/process_vm_access.c:158:2-158:20: struct mm_struct *mm;
-
mm/rmap.c:190:2-190:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:1475:2-1475:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:1820:2-1820:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:2185:2-2185:30: struct mm_struct *mm = vma->vm_mm;
-
mm/swapfile.c:2041:2-2041:20: struct mm_struct *mm;
-
mm/util.c:538:2-538:34: struct mm_struct *mm = current->mm;
-
mm/util.c:989:2-989:41: struct mm_struct *mm = get_task_mm(task);
-
mm/vmscan.c:3588:2-3588:25: struct mm_struct *mm = NULL;
-
mm/vmscan.c:4500:2-4500:25: struct mm_struct *mm = NULL;
-
security/tomoyo/util.c:970:2-970:34: struct mm_struct *mm = current->mm;
-
virt/kvm/async_pf.c:49:2-49:30: struct mm_struct *mm = apf->mm;
-
virt/kvm/kvm_main.c:1304:2-1304:30: struct mm_struct *mm = kvm->mm;
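
Most of the variable definitions above obtain the mm from current->mm, vma->vm_mm, or get_task_mm(), and the two counters listed under include/linux/sched/mm.h govern its lifetime: mmget()/mmput() pin mm_users (the address space stays usable), while mmgrab()/mmdrop() pin only mm_count (the struct itself). A minimal sketch of pinning another task's mm, mirroring the mmget_not_zero() pattern used at several of the sites above; the function name and "tsk" are hypothetical placeholders:

#include <linux/sched/mm.h>
#include <linux/sched/task.h>

/*
 * Illustrative only: the lifetime rule behind "struct mm_struct *mm = ...".
 * sketch_with_task_mm() and "tsk" are hypothetical names.
 */
static void sketch_with_task_mm(struct task_struct *tsk)
{
	struct mm_struct *mm;

	task_lock(tsk);
	mm = tsk->mm;				/* NULL for kernel threads */
	if (mm && !mmget_not_zero(mm))		/* include/linux/sched/mm.h:135 */
		mm = NULL;
	task_unlock(tsk);

	if (!mm)
		return;

	/* ... mm_users is pinned here; safe to take mmap_read_lock(mm), etc. ... */

	mmput(mm);				/* kernel/fork.c:1366 */
}
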