Symbol: mm
function parameter
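In most of the locations listed below, `mm` names a `struct mm_struct *` parameter, i.e. the address space the function operates on (a few subsystems reuse the name for unrelated types such as `struct mqd_manager *`, `struct drm_mm *`, `struct drm_buddy *`, `struct intel_vgpu_mm *` or `struct nvkm_mm *`). As a purely illustrative, hypothetical sketch of the common pattern, the helper below takes an mm as a parameter and inspects it under the mmap lock; it only uses calls that also appear in this index (mmap_read_lock(), vma_lookup(), mmap_read_unlock()), and the helper name itself is made up for illustration.

```c
#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Hypothetical example (not one of the definitions listed below):
 * check whether an address is covered by a VMA in the given mm.
 */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* take the mmap lock for read */
	vma = vma_lookup(mm, addr);	/* VMA containing addr, or NULL */
	mapped = vma != NULL;
	mmap_read_unlock(mm);

	return mapped;
}
```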
Defined in the following locations:
-
arch/x86/entry/vsyscall/vsyscall_64.c:317:37-317:55: struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-
arch/x86/entry/vsyscall/vsyscall_64.c:328:18-328:36: int in_gate_area(struct mm_struct *mm, unsigned long addr)
-
arch/x86/events/core.c:2500:60-2500:78: static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
-
arch/x86/events/core.c:2521:62-2521:80: static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:62:41-62:59: static inline void init_new_context_ldt(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:104:8-104:26: struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:124:36-124:54: static inline void destroy_context(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:156:7-156:25: struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:168:58-168:76: static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:175:35-175:53: static inline void arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:182:32-182:50: static inline bool is_64bit_mm(struct mm_struct *mm)
-
arch/x86/include/asm/mmu_context.h:194:31-194:49: static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
-
arch/x86/include/asm/paravirt.h:95:44-95:62: static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/include/asm/paravirt.h:344:8-344:26: struct mm_struct *mm)
-
arch/x86/include/asm/paravirt.h:349:38-349:56: static inline int paravirt_pgd_alloc(struct mm_struct *mm)
-
arch/x86/include/asm/paravirt.h:354:38-354:56: static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/include/asm/paravirt.h:359:39-359:57: static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:368:39-368:57: static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:378:39-378:57: static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:387:39-387:57: static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/include/asm/paravirt.h:543:30-543:48: static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgalloc.h:13:41-13:59: static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
-
arch/x86/include/asm/pgalloc.h:64:40-64:58: static inline void pmd_populate_kernel(struct mm_struct *mm,
-
arch/x86/include/asm/pgalloc.h:71:45-71:63: static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
-
arch/x86/include/asm/pgalloc.h:78:33-78:51: static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
-
arch/x86/include/asm/pgalloc.h:99:33-99:51: static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-
arch/x86/include/asm/pgalloc.h:105:38-105:56: static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-
arch/x86/include/asm/pgalloc.h:113:33-113:51: static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
-
arch/x86/include/asm/pgalloc.h:119:38-119:56: static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
-
arch/x86/include/asm/pgalloc.h:134:33-134:51: static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
-
arch/x86/include/asm/pgalloc.h:142:38-142:56: static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
-
arch/x86/include/asm/pgalloc.h:150:36-150:54: static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-
arch/x86/include/asm/pgalloc.h:159:29-159:47: static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-
arch/x86/include/asm/pgtable.h:764:35-764:53: static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
-
arch/x86/include/asm/pgtable.h:1022:31-1022:49: static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1029:31-1029:49: static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1036:31-1036:49: static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1066:40-1066:58: static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1075:45-1075:63: static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1094:39-1094:57: static inline void ptep_set_wrprotect(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1130:45-1130:63: static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pgtable.h:1141:45-1141:63: static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable.h:1152:39-1152:57: static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-
arch/x86/include/asm/pgtable_64.h:57:34-57:52: static inline bool mm_p4d_folded(struct mm_struct *mm)
-
arch/x86/include/asm/pgtable_64.h:70:37-70:55: static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-
arch/x86/include/asm/pkeys.h:25:37-25:55: static inline int execute_only_pkey(struct mm_struct *mm)
-
arch/x86/include/asm/pkeys.h:55:27-55:45: bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
-
arch/x86/include/asm/pkeys.h:81:19-81:37: int mm_pkey_alloc(struct mm_struct *mm)
-
arch/x86/include/asm/pkeys.h:108:18-108:36: int mm_pkey_free(struct mm_struct *mm, int pkey)
-
arch/x86/include/asm/tlbflush.h:243:34-243:52: static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
-
arch/x86/include/asm/tlbflush.h:255:6-255:24: struct mm_struct *mm)
-
arch/x86/kernel/alternative.c:1479:48-1479:66: static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:754:10-754:28: struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:794:10-794:28: struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:814:44-814:62: int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
-
arch/x86/kernel/cpu/sgx/encl.c:1113:35-1113:53: int sgx_encl_test_and_clear_young(struct mm_struct *mm,
-
arch/x86/kernel/cpu/sgx/encl.h:89:33-89:51: static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
-
arch/x86/kernel/ldt.c:42:18-42:36: void load_mm_ldt(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:189:29-189:47: static void do_sanity_check(struct mm_struct *mm,
-
arch/x86/kernel/ldt.c:264:36-264:54: static void map_ldt_struct_to_user(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:272:38-272:56: static void sanity_check_ldt_mapping(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:288:16-288:34: map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
-
arch/x86/kernel/ldt.c:349:30-349:48: static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
-
arch/x86/kernel/ldt.c:391:31-391:49: static void free_ldt_pgtables(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:419:25-419:43: static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
-
arch/x86/kernel/ldt.c:449:47-449:65: int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:489:26-489:44: void destroy_context_ldt(struct mm_struct *mm)
-
arch/x86/kernel/ldt.c:495:25-495:43: void ldt_arch_exit_mmap(struct mm_struct *mm)
-
arch/x86/kernel/process.c:972:34-972:52: unsigned long arch_randomize_brk(struct mm_struct *mm)
-
arch/x86/kernel/uprobes.c:854:59-854:77: int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
-
arch/x86/mm/dump_pagetables.c:366:12-366:30: struct mm_struct *mm, pgd_t *pgd,
-
arch/x86/mm/dump_pagetables.c:402:48-402:66: void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
-
arch/x86/mm/dump_pagetables.c:407:56-407:74: void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
-
arch/x86/mm/init_64.c:73:1-73:1: DEFINE_POPULATE(p4d_populate, p4d, pud, init)
-
arch/x86/mm/init_64.c:74:1-74:1: DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
-
arch/x86/mm/init_64.c:75:1-75:1: DEFINE_POPULATE(pud_populate, pud, pmd, init)
-
arch/x86/mm/init_64.c:76:1-76:1: DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
-
arch/x86/mm/mmap.c:129:28-129:46: void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-
arch/x86/mm/pgtable.c:31:25-31:43: pgtable_t pte_alloc_one(struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:113:36-113:54: static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:123:22-123:40: static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/mm/pgtable.c:213:23-213:41: static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
-
arch/x86/mm/pgtable.c:225:29-225:47: static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
-
arch/x86/mm/pgtable.c:262:28-262:46: static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
-
arch/x86/mm/pgtable.c:277:29-277:47: static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-
arch/x86/mm/pgtable.c:296:33-296:51: static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
-
arch/x86/mm/pgtable.c:317:38-317:56: static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
-
arch/x86/mm/pgtable.c:421:18-421:36: pgd_t *pgd_alloc(struct mm_struct *mm)
-
arch/x86/mm/pgtable.c:475:15-475:33: void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/mm/pkeys.c:14:25-14:43: int __execute_only_pkey(struct mm_struct *mm)
-
arch/x86/mm/tlb.c:465:38-465:56: static inline void cr4_update_pce_mm(struct mm_struct *mm)
-
arch/x86/mm/tlb.c:663:21-663:39: void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-
arch/x86/mm/tlb.c:945:50-945:68: static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
-
arch/x86/mm/tlb.c:981:25-981:43: void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-
arch/x86/platform/efi/efi_64.c:394:39-394:57: static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
-
arch/x86/xen/mmu_hvm.c:36:31-36:49: static void xen_hvm_exit_mmap(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:537:26-537:44: static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-
arch/x86/xen/mmu_pv.c:551:26-551:44: static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-
arch/x86/xen/mmu_pv.c:572:26-572:44: static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-
arch/x86/xen/mmu_pv.c:601:28-601:46: static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
-
arch/x86/xen/mmu_pv.c:639:26-639:44: static void xen_pgd_walk(struct mm_struct *mm,
-
arch/x86/xen/mmu_pv.c:649:52-649:70: static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:677:26-677:44: static void xen_pin_page(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:729:27-729:45: static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:750:25-750:43: static void xen_pgd_pin(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:781:36-781:54: static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:802:28-802:46: static void xen_unpin_page(struct mm_struct *mm, struct page *page,
-
arch/x86/xen/mmu_pv.c:841:29-841:47: static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:862:27-862:45: static void xen_pgd_unpin(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:895:51-895:69: static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:922:29-922:47: static void xen_drop_mm_ref(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:976:27-976:45: static void xen_exit_mmap(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:1393:26-1393:44: static int xen_pgd_alloc(struct mm_struct *mm)
-
arch/x86/xen/mmu_pv.c:1419:26-1419:44: static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-
arch/x86/xen/mmu_pv.c:1472:39-1472:57: static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1482:39-1482:57: static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1528:37-1528:55: static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
-
arch/x86/xen/mmu_pv.c:1555:27-1555:45: static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1560:27-1560:45: static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-
arch/x86/xen/mmu_pv.c:1597:27-1597:45: static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-
drivers/firmware/efi/memattr.c:128:42-128:60: int __init efi_memattr_apply_permissions(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:122:32-122:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c:64:5-64:23: struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c:171:49-171:67: bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c:208:24-208:42: uint32_t wptr_mask, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c:371:32-371:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:180:24-180:42: uint32_t wptr_mask, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c:358:32-358:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:165:4-165:22: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:343:32-343:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:161:24-161:42: uint32_t wptr_mask, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c:239:32-239:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:156:24-156:42: uint32_t wptr_mask, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c:263:32-263:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:223:24-223:42: uint32_t wptr_mask, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:383:32-383:50: uint32_t __user *wptr, struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:2314:10-2314:28: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:850:24-850:42: int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:870:23-870:41: int kgd2kfd_resume_mm(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_device.c:897:48-897:66: int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:491:4-491:22: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:766:55-766:73: int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:841:5-841:23: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:866:7-866:25: struct mm_struct *mm, uint32_t trigger)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:90:24-90:44: void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:97:36-97:56: void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:187:26-187:46: int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:195:24-195:44: int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:203:22-203:42: void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:214:25-214:45: bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:222:23-222:43: int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:235:26-235:46: int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c:243:27-243:47: bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:45:28-45:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:89:22-89:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:143:27-143:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:160:21-160:41: static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:173:26-173:46: static void __update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:210:24-210:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:224:31-224:51: static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:231:29-231:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:259:28-259:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:268:25-268:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:294:33-294:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:306:30-306:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:336:26-336:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c:343:28-343:48: static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:45:28-45:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:89:22-89:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:144:21-144:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:158:24-158:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:229:27-229:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:256:28-256:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:265:25-265:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:292:26-292:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:306:27-306:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:325:29-325:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:352:33-352:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c:364:30-364:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:44:28-44:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:105:22-105:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:170:21-170:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:184:24-184:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:255:27-255:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:292:26-292:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:306:27-306:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c:330:29-330:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:46:28-46:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:132:22-132:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:196:21-196:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:208:24-208:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:281:27-281:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:304:33-304:53: static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:311:28-311:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:323:25-323:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:355:26-355:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:369:27-369:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:388:29-388:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:415:33-415:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c:427:30-427:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:48:28-48:48: static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:92:22-92:42: static void init_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:158:21-158:41: static int load_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:171:26-171:46: static void __update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:242:24-242:44: static void update_mqd(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:256:30-256:50: static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:263:27-263:47: static int get_wave_state(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:285:33-285:53: static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:291:28-291:48: static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:300:25-300:45: static void restore_mqd(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:327:26-327:46: static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:341:28-341:48: static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:348:27-348:47: static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:365:29-365:49: static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:393:33-393:53: static void checkpoint_mqd_sdma(struct mqd_manager *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c:405:30-405:50: static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:897:47-897:71: static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1155:56-1155:74: static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1182:6-1182:24: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:1785:46-1785:70: struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c:307:46-307:64: void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:99:31-99:49: svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1078:47-1078:65: svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1107:55-1107:73: svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1521:39-1521:57: static int svm_range_validate_and_map(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1672:8-1672:26: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1796:43-1796:61: svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2080:45-2080:63: svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2109:5-2109:23: struct mm_struct *mm)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2254:4-2254:22: struct mm_struct *mm, enum svm_work_list_ops op)
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2287:23-2287:41: svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2321:26-2321:44: svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2656:7-2656:25: struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3278:29-3278:47: svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3383:43-3383:61: svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3501:43-3501:61: svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
-
drivers/gpu/drm/drm_buddy.c:14:48-14:66: static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:35:28-35:46: static void drm_block_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:41:32-41:50: static void list_insert_sorted(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:68:23-68:41: static void mark_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:97:20-97:38: int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
-
drivers/gpu/drm/drm_buddy.c:187:21-187:39: void drm_buddy_fini(struct drm_buddy *mm)
-
drivers/gpu/drm/drm_buddy.c:203:24-203:42: static int split_block(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:263:30-263:48: static void __drm_buddy_free(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:293:27-293:45: void drm_buddy_free_block(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:308:26-308:44: void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
-
drivers/gpu/drm/drm_buddy.c:331:18-331:36: alloc_range_bias(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:408:14-408:32: get_maxblock(struct drm_buddy *mm, unsigned int order)
-
drivers/gpu/drm/drm_buddy.c:434:21-434:39: alloc_from_freelist(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:480:26-480:44: static int __alloc_range(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:558:36-558:54: static int __drm_buddy_alloc_range(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:590:26-590:44: int drm_buddy_block_trim(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:661:28-661:46: int drm_buddy_alloc_blocks(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:751:28-751:46: void drm_buddy_block_print(struct drm_buddy *mm,
-
drivers/gpu/drm/drm_buddy.c:768:22-768:40: void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
-
drivers/gpu/drm/drm_mm.c:146:24-146:39: static void show_leaks(struct drm_mm *mm) { }
-
drivers/gpu/drm/drm_mm.c:157:25-157:46: __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
-
drivers/gpu/drm/drm_mm.c:305:38-305:53: static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/drm_mm.c:330:43-330:58: static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
-
drivers/gpu/drm/drm_mm.c:356:12-356:27: first_hole(struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:414:11-414:26: next_hole(struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:450:25-450:40: int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
-
drivers/gpu/drm/drm_mm.c:514:33-514:55: int drm_mm_insert_node_in_range(struct drm_mm * const mm,
-
drivers/gpu/drm/drm_mm.c:737:6-737:21: struct drm_mm *mm,
-
drivers/gpu/drm/drm_mm.c:963:18-963:33: void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
-
drivers/gpu/drm/drm_mm.c:997:22-997:37: void drm_mm_takedown(struct drm_mm *mm)
-
drivers/gpu/drm/drm_mm.c:1023:19-1023:40: void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
-
drivers/gpu/drm/i915/gem/i915_gem_userptr.c:428:13-428:31: probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1761:53-1761:75: static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:565:35-565:57: static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:580:47-580:69: static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:586:48-586:70: static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:592:35-592:57: static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:603:48-603:70: static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:609:34-609:56: static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:621:34-621:56: static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:632:33-632:55: static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:642:33-642:55: static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:1805:33-1805:55: static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1835:28-1835:50: static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:1893:26-1893:48: static void vgpu_free_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:2027:26-2027:48: void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:2043:23-2043:45: int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.c:2088:46-2088:68: static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
-
drivers/gpu/drm/i915/gvt/gtt.c:2117:37-2117:59: unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
-
drivers/gpu/drm/i915/gvt/gtt.h:187:38-187:60: static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.h:194:38-194:60: static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/i915/gvt/gtt.h:199:42-199:64: static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
-
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h:29:21-29:37: nvkm_mm_initialised(struct nvkm_mm *mm)
-
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h:44:19-44:35: nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
-
drivers/gpu/drm/nouveau/nouveau_svm.c:925:45-925:63: nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:30:14-30:30: nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:48:14-48:30: nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:86:13-86:29: region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:111:14-111:30: nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:161:13-161:29: region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:186:14-186:30: nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:240:14-240:30: nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
-
drivers/gpu/drm/nouveau/nvkm/core/mm.c:283:14-283:30: nvkm_mm_fini(struct nvkm_mm *mm)
-
drivers/gpu/drm/tests/drm_buddy_test.c:46:46-46:64: static void __dump_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:55:44-55:62: static void dump_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:67:44-67:62: static int check_block(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:140:45-140:63: static int check_blocks(struct kunit *test, struct drm_buddy *mm,
-
drivers/gpu/drm/tests/drm_buddy_test.c:202:41-202:59: static int check_mm(struct kunit *test, struct drm_buddy *mm)
-
drivers/gpu/drm/tests/drm_mm_test.c:46:49-46:70: static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
-
drivers/gpu/drm/tests/drm_mm_test.c:71:49-71:70: static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
-
drivers/gpu/drm/tests/drm_mm_test.c:100:51-100:72: static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/tests/drm_mm_test.c:162:71-162:86: static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:281:53-281:68: static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
-
drivers/gpu/drm/tests/drm_mm_test.c:301:77-301:92: static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:482:47-482:62: static bool expect_insert(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:506:52-506:67: static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
-
drivers/gpu/drm/tests/drm_mm_test.c:701:56-701:71: static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
-
drivers/gpu/drm/tests/drm_mm_test.c:727:61-727:76: static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:752:60-752:75: static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:950:45-950:60: static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
-
drivers/gpu/drm/tests/drm_mm_test.c:972:48-972:63: static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1153:44-1153:65: static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
-
drivers/gpu/drm/tests/drm_mm_test.c:1232:47-1232:62: static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1278:50-1278:65: static bool evict_everything(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1326:48-1326:63: static int evict_something(struct kunit *test, struct drm_mm *mm,
-
drivers/gpu/drm/tests/drm_mm_test.c:1983:44-1983:59: static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
-
drivers/infiniband/hw/cxgb4/iw_cxgb4.h:565:11-565:33: struct c4iw_mm_entry *mm)
-
drivers/infiniband/hw/hfi1/user_pages.c:29:50-29:68: bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
-
drivers/infiniband/hw/hfi1/user_pages.c:80:29-80:47: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
-
drivers/infiniband/hw/hfi1/user_pages.c:95:30-95:48: void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
-
drivers/infiniband/hw/hfi1/user_sdma.c:1043:32-1043:50: static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-
drivers/iommu/amd/iommu_v2.c:359:5-359:23: struct mm_struct *mm,
-
drivers/iommu/amd/iommu_v2.c:375:49-375:67: static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-
drivers/iommu/intel/svm.c:224:8-224:26: struct mm_struct *mm,
-
drivers/iommu/intel/svm.c:233:55-233:73: static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
-
drivers/iommu/intel/svm.c:302:9-302:27: struct mm_struct *mm)
-
drivers/iommu/iommu-sva.c:26:27-26:45: int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
-
drivers/iommu/iommu-sva.c:55:30-55:36: static bool __mmget_not_zero(void *mm)
-
drivers/iommu/iommu-sva.c:90:61-90:79: struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
-
drivers/iommu/iommu.c:3436:10-3436:28: struct mm_struct *mm)
-
drivers/misc/sgi-gru/grutlbpurge.c:238:48-238:66: static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
-
drivers/mtd/nand/raw/atmel/pmecc.c:192:40-192:44: static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
-
drivers/vfio/vfio_iommu_type1.c:412:51-412:69: static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
-
drivers/vfio/vfio_iommu_type1.c:512:57-512:75: static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
-
drivers/vfio/vfio_iommu_type1.c:552:27-552:45: static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
-
drivers/video/fbdev/pm2fb.c:252:30-252:45: static void pm2_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
-
drivers/video/fbdev/pm2fb.c:281:31-281:46: static void pm2v_mnp(u32 clk, unsigned char *mm, unsigned char *nn,
-
fs/aio.c:664:48-664:66: static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
-
fs/aio.c:846:23-846:41: static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
-
fs/aio.c:890:15-890:33: void exit_aio(struct mm_struct *mm)
-
fs/binfmt_elf.c:1571:10-1571:28: struct mm_struct *mm)
-
fs/binfmt_elf.c:1615:53-1615:71: static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
-
fs/exec.c:976:22-976:40: static int exec_mmap(struct mm_struct *mm)
-
fs/exec.c:2097:19-2097:37: void set_dumpable(struct mm_struct *mm, int value)
-
fs/proc/array.c:417:56-417:74: static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)
-
fs/proc/base.c:218:33-218:51: static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
-
fs/proc/base.c:255:31-255:49: static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
-
fs/proc/task_mmu.c:30:35-30:53: void task_mem(struct seq_file *m, struct mm_struct *mm)
-
fs/proc/task_mmu.c:82:26-82:44: unsigned long task_vsize(struct mm_struct *mm)
-
fs/proc/task_mmu.c:87:26-87:44: unsigned long task_statm(struct mm_struct *mm,
-
fs/userfaultfd.c:830:28-830:46: int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
-
fs/userfaultfd.c:859:33-859:51: void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
-
fs/userfaultfd.c:1272:43-1272:61: static __always_inline int validate_range(struct mm_struct *mm,
-
include/asm-generic/cacheflush.h:23:35-23:53: static inline void flush_cache_mm(struct mm_struct *mm)
-
include/asm-generic/cacheflush.h:29:39-29:57: static inline void flush_cache_dup_mm(struct mm_struct *mm)
-
include/asm-generic/hugetlb.h:61:35-61:53: static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-
include/asm-generic/hugetlb.h:78:36-78:54: static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-
include/asm-generic/hugetlb.h:86:45-86:63: static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-
include/asm-generic/hugetlb.h:130:44-130:62: static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-
include/asm-generic/pgalloc.h:19:45-19:63: static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
-
include/asm-generic/pgalloc.h:31:43-31:61: static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-
include/asm-generic/pgalloc.h:42:36-42:54: static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-
include/asm-generic/pgalloc.h:59:41-59:59: static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
-
include/asm-generic/pgalloc.h:99:29-99:47: static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
-
include/asm-generic/pgalloc.h:119:36-119:54: static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:138:29-138:47: static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-
include/asm-generic/pgalloc.h:150:38-150:56: static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:169:36-169:54: static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-
include/asm-generic/pgalloc.h:175:31-175:49: static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
-
include/asm-generic/pgalloc.h:182:29-182:47: static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-
include/asm-generic/pgtable-nopmd.h:63:29-63:47: static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-
include/drm/drm_buddy.h:127:22-127:40: drm_buddy_block_size(struct drm_buddy *mm,
-
include/drm/drm_mm.h:276:39-276:60: static inline bool drm_mm_initialized(const struct drm_mm *mm)
-
include/drm/drm_mm.h:434:28-434:43: drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-
include/drm/drm_mm.h:458:38-458:53: static inline int drm_mm_insert_node(struct drm_mm *mm,
-
include/drm/drm_mm.h:478:33-478:54: static inline bool drm_mm_clean(const struct drm_mm *mm)
-
include/drm/drm_mm.h:534:9-534:24: struct drm_mm *mm,
-
include/linux/hugetlb.h:837:42-837:60: static inline int is_hugepage_only_range(struct mm_struct *mm,
-
include/linux/hugetlb.h:962:9-962:27: struct mm_struct *mm, pte_t *pte)
-
include/linux/hugetlb.h:981:39-981:57: static inline void hugetlb_count_init(struct mm_struct *mm)
-
include/linux/hugetlb.h:986:46-986:64: static inline void hugetlb_count_add(long l, struct mm_struct *mm)
-
include/linux/hugetlb.h:991:46-991:64: static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
-
include/linux/hugetlb.h:1215:6-1215:24: struct mm_struct *mm, pte_t *pte)
-
include/linux/khugepaged.h:30:36-30:54: static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-
include/linux/khugepaged.h:36:36-36:54: static inline void khugepaged_exit(struct mm_struct *mm)
-
include/linux/ksm.h:24:28-24:46: static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-
include/linux/ksm.h:31:29-31:47: static inline void ksm_exit(struct mm_struct *mm)
-
include/linux/memcontrol.h:673:58-673:76: static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
-
include/linux/memcontrol.h:879:36-879:54: static inline bool mm_match_cgroup(struct mm_struct *mm,
-
include/linux/memcontrol.h:1098:41-1098:59: static inline void count_memcg_event_mm(struct mm_struct *mm,
-
include/linux/memcontrol.h:1138:42-1138:60: static inline void memcg_memory_event_mm(struct mm_struct *mm,
-
include/linux/mm.h:626:57-626:75: static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
-
include/linux/mm.h:2327:44-2327:62: static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2334:35-2334:53: static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
-
include/linux/mm.h:2341:35-2341:53: static inline void inc_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2348:35-2348:53: static inline void dec_mm_counter(struct mm_struct *mm, int member)
-
include/linux/mm.h:2370:40-2370:58: static inline unsigned long get_mm_rss(struct mm_struct *mm)
-
include/linux/mm.h:2377:48-2377:66: static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2382:47-2382:65: static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-
include/linux/mm.h:2387:39-2387:57: static inline void update_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2395:38-2395:56: static inline void update_hiwater_vm(struct mm_struct *mm)
-
include/linux/mm.h:2401:41-2401:59: static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
-
include/linux/mm.h:2407:7-2407:25: struct mm_struct *mm)
-
include/linux/mm.h:2418:32-2418:50: static inline void sync_mm_rss(struct mm_struct *mm)
-
include/linux/mm.h:2444:37-2444:55: static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
-
include/linux/mm.h:2474:35-2474:53: static inline void mm_inc_nr_puds(struct mm_struct *mm)
-
include/linux/mm.h:2481:35-2481:53: static inline void mm_dec_nr_puds(struct mm_struct *mm)
-
include/linux/mm.h:2502:35-2502:53: static inline void mm_inc_nr_pmds(struct mm_struct *mm)
-
include/linux/mm.h:2509:35-2509:53: static inline void mm_dec_nr_pmds(struct mm_struct *mm)
-
include/linux/mm.h:2518:43-2518:61: static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
-
include/linux/mm.h:2523:47-2523:71: static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
-
include/linux/mm.h:2528:35-2528:53: static inline void mm_inc_nr_ptes(struct mm_struct *mm)
-
include/linux/mm.h:2533:35-2533:53: static inline void mm_dec_nr_ptes(struct mm_struct *mm)
-
include/linux/mm.h:2554:32-2554:50: static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
-
include/linux/mm.h:2561:32-2561:50: static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
-
include/linux/mm.h:2568:32-2568:50: static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-
include/linux/mm.h:2605:39-2605:57: static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:2696:39-2696:57: static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:2733:36-2733:54: static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
-
include/linux/mm.h:2762:39-2762:57: static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
-
include/linux/mm.h:2767:36-2767:54: static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
-
include/linux/mm.h:3097:35-3097:53: struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
-
include/linux/mm.h:3132:53-3132:71: static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
-
include/linux/mm_inline.h:454:43-454:61: static inline void init_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:459:42-459:60: static inline void inc_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:500:42-500:60: static inline void dec_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:513:41-513:59: static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-
include/linux/mm_inline.h:526:40-526:58: static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
-
include/linux/mm_types.h:781:36-781:54: static inline void mm_init_cpumask(struct mm_struct *mm)
-
include/linux/mm_types.h:790:37-790:55: static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
-
include/linux/mm_types.h:810:36-810:54: static inline void lru_gen_init_mm(struct mm_struct *mm)
-
include/linux/mm_types.h:819:35-819:53: static inline void lru_gen_use_mm(struct mm_struct *mm)
-
include/linux/mm_types.h:869:3-869:21: struct mm_struct *mm, unsigned long addr)
-
include/linux/mm_types.h:876:37-876:55: static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
-
include/linux/mm_types.h:886:32-886:50: static inline void mm_init_cid(struct mm_struct *mm)
-
include/linux/mmap_lock.h:25:52-25:70: static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
-
include/linux/mmap_lock.h:32:55-32:73: static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
-
include/linux/mmap_lock.h:39:47-39:65: static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
-
include/linux/mmap_lock.h:63:35-63:53: static inline void mmap_init_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:68:36-68:54: static inline void mmap_write_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:75:43-75:61: static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
-
include/linux/mmap_lock.h:82:44-82:62: static inline int mmap_write_lock_killable(struct mm_struct *mm)
-
include/linux/mmap_lock.h:92:39-92:57: static inline bool mmap_write_trylock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:102:38-102:56: static inline void mmap_write_unlock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:108:41-108:59: static inline void mmap_write_downgrade(struct mm_struct *mm)
-
include/linux/mmap_lock.h:114:35-114:53: static inline void mmap_read_lock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:121:43-121:61: static inline int mmap_read_lock_killable(struct mm_struct *mm)
-
include/linux/mmap_lock.h:131:38-131:56: static inline bool mmap_read_trylock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:141:37-141:55: static inline void mmap_read_unlock(struct mm_struct *mm)
-
include/linux/mmap_lock.h:147:47-147:65: static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
-
include/linux/mmap_lock.h:153:39-153:57: static inline void mmap_assert_locked(struct mm_struct *mm)
-
include/linux/mmap_lock.h:159:45-159:63: static inline void mmap_assert_write_locked(struct mm_struct *mm)
-
include/linux/mmap_lock.h:165:42-165:60: static inline int mmap_lock_is_contended(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:280:36-280:54: static inline int mm_has_notifiers(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:288:54-288:72: mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
-
include/linux/mmu_notifier.h:411:41-411:59: static inline void mmu_notifier_release(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:417:50-417:68: static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:426:44-426:62: static inline int mmu_notifier_clear_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:435:43-435:61: static inline int mmu_notifier_test_young(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:443:44-443:62: static inline void mmu_notifier_change_pte(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:494:50-494:68: static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
-
include/linux/mmu_notifier.h:501:52-501:70: static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:506:55-506:73: static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
-
include/linux/mmu_notifier.h:516:9-516:27: struct mm_struct *mm,
-
include/linux/mmu_notifier.h:530:4-530:22: struct mm_struct *mm, unsigned long start,
-
include/linux/oom.h:93:53-93:71: static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
-
include/linux/page_table_check.h:49:47-49:65: static inline void page_table_check_pte_clear(struct mm_struct *mm,
-
include/linux/page_table_check.h:58:47-58:65: static inline void page_table_check_pmd_clear(struct mm_struct *mm,
-
include/linux/page_table_check.h:67:47-67:65: static inline void page_table_check_pud_clear(struct mm_struct *mm,
-
include/linux/page_table_check.h:76:45-76:63: static inline void page_table_check_pte_set(struct mm_struct *mm,
-
include/linux/page_table_check.h:86:45-86:63: static inline void page_table_check_pmd_set(struct mm_struct *mm,
-
include/linux/page_table_check.h:96:45-96:63: static inline void page_table_check_pud_set(struct mm_struct *mm,
-
include/linux/page_table_check.h:106:53-106:71: static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
-
include/linux/pagemap.h:1002:3-1002:21: struct mm_struct *mm, unsigned int flags)
-
include/linux/pgtable.h:151:30-151:48: static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
-
include/linux/pgtable.h:306:31-306:49: static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
-
include/linux/pgtable.h:445:50-445:68: static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
-
include/linux/pgtable.h:486:47-486:65: static inline void pte_clear_not_present_full(struct mm_struct *mm,
-
include/linux/pgtable.h:553:39-553:57: static inline void pudp_set_wrprotect(struct mm_struct *mm,
-
include/linux/pgtable.h:752:38-752:56: static inline void arch_do_swap_page(struct mm_struct *mm,
-
include/linux/pgtable.h:770:34-770:52: static inline int arch_unmap_one(struct mm_struct *mm,
-
include/linux/sched/coredump.h:29:32-29:50: static inline int get_dumpable(struct mm_struct *mm)
-
include/linux/sched/mm.h:35:27-35:45: static inline void mmgrab(struct mm_struct *mm)
-
include/linux/sched/mm.h:42:27-42:45: static inline void mmdrop(struct mm_struct *mm)
-
include/linux/sched/mm.h:76:33-76:51: static inline void mmdrop_sched(struct mm_struct *mm)
-
include/linux/sched/mm.h:98:26-98:44: static inline void mmget(struct mm_struct *mm)
-
include/linux/sched/mm.h:103:35-103:53: static inline bool mmget_not_zero(struct mm_struct *mm)
-
include/linux/sched/mm.h:421:60-421:78: static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
-
include/linux/sched/mm.h:455:34-455:52: static inline void mm_pasid_init(struct mm_struct *mm)
-
include/linux/sched/mm.h:461:33-461:51: static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
-
include/linux/sched/mm.h:466:34-466:52: static inline void mm_pasid_drop(struct mm_struct *mm)
-
include/trace/events/huge_memory.h:53:1-53:1: TRACE_EVENT(mm_khugepaged_scan_pmd,
-
include/trace/events/huge_memory.h:90:1-90:1: TRACE_EVENT(mm_collapse_huge_page,
-
include/trace/events/huge_memory.h:145:1-145:1: TRACE_EVENT(mm_collapse_huge_page_swapin,
-
include/trace/events/huge_memory.h:172:1-172:1: TRACE_EVENT(mm_khugepaged_scan_file,
-
include/trace/events/huge_memory.h:206:1-206:1: TRACE_EVENT(mm_khugepaged_collapse_file,
-
include/trace/events/kmem.h:346:1-346:1: TRACE_EVENT(rss_stat,
-
include/trace/events/mmap.h:98:1-98:1: TRACE_EVENT(exit_mmap,
-
include/trace/events/mmap_lock.h:16:1-16:1: DECLARE_EVENT_CLASS(mmap_lock,
-
include/trace/events/mmap_lock.h:49:1-49:1: DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
-
include/trace/events/mmap_lock.h:50:1-50:1: DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
-
include/trace/events/mmap_lock.h:52:1-52:1: TRACE_EVENT_FN(mmap_lock_acquire_returned,
-
include/trace/events/xen.h:283:1-283:1: DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
-
include/trace/events/xen.h:284:1-284:1: DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
-
include/trace/events/xen.h:286:1-286:1: TRACE_EVENT(xen_mmu_alloc_ptpage,
-
include/trace/events/xen.h:336:1-336:1: DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
-
include/trace/events/xen.h:337:1-337:1: DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
-
include/trace/events/xen.h:349:1-349:1: TRACE_EVENT(xen_mmu_flush_tlb_multi,
-
kernel/audit.c:2194:6-2194:24: struct mm_struct *mm)
-
kernel/bpf/mmap_unlock_work.h:49:74-49:92: static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
-
kernel/cgroup/cpuset.c:1883:31-1883:49: static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
-
kernel/events/core.c:7457:34-7457:52: static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
-
kernel/events/core.c:10592:8-10592:26: struct mm_struct *mm,
-
kernel/events/uprobes.c:282:45-282:63: delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:292:54-292:72: static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:317:58-317:76: static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:351:41-351:59: find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:364:18-364:36: __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
-
kernel/events/uprobes.c:404:5-404:23: struct mm_struct *mm, short d)
-
kernel/events/uprobes.c:413:50-413:68: static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:460:54-460:72: int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:571:50-571:68: int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:586:44-586:62: set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:835:5-835:23: struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:869:36-869:54: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
kernel/events/uprobes.c:875:33-875:51: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
kernel/events/uprobes.c:892:43-892:61: install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
-
kernel/events/uprobes.c:920:42-920:60: remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
-
kernel/events/uprobes.c:1234:50-1234:68: static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
-
kernel/events/uprobes.c:1439:24-1439:42: static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
-
kernel/events/uprobes.c:1543:25-1543:43: void uprobe_clear_state(struct mm_struct *mm)
-
kernel/events/uprobes.c:1987:32-1987:50: static void mmf_recalc_uprobes(struct mm_struct *mm)
-
kernel/events/uprobes.c:2008:28-2008:46: static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
-
kernel/exit.c:442:27-442:45: void mm_update_next_owner(struct mm_struct *mm)
-
kernel/fork.c:454:38-454:56: struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
-
kernel/fork.c:566:29-566:47: static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
-
kernel/fork.c:581:38-581:56: static __latent_entropy int dup_mmap(struct mm_struct *mm,
-
kernel/fork.c:724:32-724:50: static inline int mm_alloc_pgd(struct mm_struct *mm)
-
kernel/fork.c:732:32-732:50: static inline void mm_free_pgd(struct mm_struct *mm)
-
kernel/fork.c:748:22-748:40: static void check_mm(struct mm_struct *mm)
-
kernel/fork.c:780:15-780:33: void __mmdrop(struct mm_struct *mm)
-
kernel/fork.c:808:26-808:44: static void mmdrop_async(struct mm_struct *mm)
-
kernel/fork.c:1085:25-1085:43: static void mm_init_aio(struct mm_struct *mm)
-
kernel/fork.c:1093:44-1093:62: static __always_inline void mm_clear_owner(struct mm_struct *mm,
-
kernel/fork.c:1102:27-1102:45: static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-
kernel/fork.c:1109:35-1109:53: static void mm_init_uprobes_state(struct mm_struct *mm)
-
kernel/fork.c:1116:34-1116:52: static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
-
kernel/fork.c:1196:28-1196:46: static inline void __mmput(struct mm_struct *mm)
-
kernel/fork.c:1221:12-1221:30: void mmput(struct mm_struct *mm)
-
kernel/fork.c:1239:18-1239:36: void mmput_async(struct mm_struct *mm)
-
kernel/fork.c:1260:21-1260:39: int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
-
kernel/fork.c:1297:25-1297:43: int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
-
kernel/fork.c:1349:30-1349:48: struct file *get_mm_exe_file(struct mm_struct *mm)
-
kernel/fork.c:1475:49-1475:67: static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/fork.c:1508:47-1508:65: void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/fork.c:1514:47-1514:65: void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
-
kernel/kthread.c:1410:21-1410:39: void kthread_use_mm(struct mm_struct *mm)
-
kernel/kthread.c:1455:23-1455:41: void kthread_unuse_mm(struct mm_struct *mm)
-
kernel/sched/membarrier.c:217:27-217:45: void membarrier_exec_mmap(struct mm_struct *mm)
-
kernel/sched/membarrier.c:426:44-426:62: static int sync_runqueues_membarrier_state(struct mm_struct *mm)
-
kernel/sched/sched.h:3252:32-3252:50: static inline int __mm_cid_get(struct mm_struct *mm)
-
kernel/sched/sched.h:3265:31-3265:49: static inline void mm_cid_put(struct mm_struct *mm, int cid)
-
kernel/sched/sched.h:3275:30-3275:48: static inline int mm_cid_get(struct mm_struct *mm)
-
kernel/sys.c:1872:34-1872:52: static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
-
kernel/sys.c:2079:27-2079:45: static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
-
kernel/trace/trace_output.c:384:51-384:69: static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-
kernel/trace/trace_uprobe.c:1199:58-1199:76: __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
-
kernel/trace/trace_uprobe.c:1321:33-1321:51: enum uprobe_filter_ctx ctx, struct mm_struct *mm)
-
mm/damon/ops-common.c:40:35-40:53: void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
-
mm/damon/ops-common.c:65:35-65:53: void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
-
mm/damon/vaddr.c:116:37-116:55: static int __damon_va_three_regions(struct mm_struct *mm,
-
mm/damon/vaddr.c:333:45-333:63: static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
-
mm/damon/vaddr.c:389:28-389:46: static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
-
mm/damon/vaddr.c:400:45-400:63: static void __damon_va_prepare_access_check(struct mm_struct *mm,
-
mm/damon/vaddr.c:524:28-524:46: static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
-
mm/damon/vaddr.c:544:37-544:55: static void __damon_va_check_access(struct mm_struct *mm,
-
mm/debug.c:155:14-155:38: void dump_mm(const struct mm_struct *mm)
-
mm/filemap.c:1713:49-1713:67: bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
-
mm/gup.c:829:26-829:44: static int get_gate_page(struct mm_struct *mm, unsigned long address,
-
mm/gup.c:1076:30-1076:48: static long __get_user_pages(struct mm_struct *mm,
-
mm/gup.c:1255:22-1255:40: int fixup_user_fault(struct mm_struct *mm,
-
mm/gup.c:1340:53-1340:71: static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
-
mm/gup.c:2067:35-2067:53: static long __gup_longterm_locked(struct mm_struct *mm,
-
mm/gup.c:2219:28-2219:46: long get_user_pages_remote(struct mm_struct *mm,
-
mm/gup.c:3112:28-3112:46: long pin_user_pages_remote(struct mm_struct *mm,
-
mm/huge_memory.c:191:36-191:54: struct page *mm_get_huge_zero_page(struct mm_struct *mm)
-
mm/huge_memory.c:205:28-205:46: void mm_put_huge_zero_page(struct mm_struct *mm)
-
mm/huge_memory.c:768:51-768:69: static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
-
mm/huge_memory.c:1664:40-1664:58: static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
-
mm/hugetlb.c:4701:47-4701:65: void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
-
mm/hugetlb.c:5420:31-5420:49: static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:5476:30-5476:48: static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:5735:50-5735:68: static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
-
mm/hugetlb.c:5748:35-5748:53: static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
-
mm/hugetlb.c:5982:26-5982:44: vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:6431:26-6431:44: long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7051:23-7051:41: pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7109:22-7109:40: int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7154:23-7154:41: pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/hugetlb.c:7192:24-7192:42: pte_t *huge_pte_offset(struct mm_struct *mm,
-
mm/khugepaged.c:411:44-411:62: static inline int hpage_collapse_test_exit(struct mm_struct *mm)
-
mm/khugepaged.c:416:25-416:43: void __khugepaged_enter(struct mm_struct *mm)
-
mm/khugepaged.c:460:24-460:42: void __khugepaged_exit(struct mm_struct *mm)
-
mm/khugepaged.c:830:36-830:54: static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
-
mm/khugepaged.c:865:36-865:54: static int find_pmd_or_thp_or_none(struct mm_struct *mm,
-
mm/khugepaged.c:894:34-894:52: static int check_pmd_still_valid(struct mm_struct *mm,
-
mm/khugepaged.c:916:40-916:58: static int __collapse_huge_page_swapin(struct mm_struct *mm,
-
mm/khugepaged.c:969:52-969:70: static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
-
mm/khugepaged.c:984:31-984:49: static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
-
mm/khugepaged.c:1139:36-1139:54: static int hpage_collapse_scan_pmd(struct mm_struct *mm,
-
mm/khugepaged.c:1354:43-1354:61: static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
-
mm/khugepaged.c:1410:35-1410:53: static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/khugepaged.c:1449:29-1449:47: int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
-
mm/khugepaged.c:1770:26-1770:44: static int collapse_file(struct mm_struct *mm, unsigned long addr,
-
mm/khugepaged.c:2142:37-2142:55: static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-
mm/ksm.c:418:34-418:52: static inline bool ksm_test_exit(struct mm_struct *mm)
-
mm/ksm.c:515:50-515:68: static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
-
mm/ksm.c:2529:17-2529:35: int __ksm_enter(struct mm_struct *mm)
-
mm/ksm.c:2571:17-2571:35: void __ksm_exit(struct mm_struct *mm)
-
mm/madvise.c:1219:23-1219:41: int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
-
mm/madvise.c:1301:27-1301:45: int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-
mm/madvise.c:1397:16-1397:34: int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
-
mm/memcontrol.c:1020:43-1020:61: struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
-
mm/memcontrol.c:6015:49-6015:67: static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
-
mm/memcontrol.c:6029:36-6029:54: static int mem_cgroup_precharge_mc(struct mm_struct *mm)
-
mm/memcontrol.c:6975:46-6975:64: int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
-
mm/memcontrol.c:6999:57-6999:75: int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
-
mm/memory.c:165:24-165:42: void mm_trace_rss_stat(struct mm_struct *mm, int member)
-
mm/memory.c:393:18-393:36: void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
-
mm/memory.c:419:17-419:35: int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
-
mm/memory.c:454:35-454:53: static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
-
mm/memory.c:1754:27-1754:45: static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
-
mm/memory.c:1776:25-1776:43: pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2323:28-2323:46: static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-
mm/memory.c:2349:35-2349:53: static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
-
mm/memory.c:2372:35-2372:53: static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
-
mm/memory.c:2394:35-2394:53: static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
-
mm/memory.c:2550:31-2550:49: static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
-
mm/memory.c:2593:31-2593:49: static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
-
mm/memory.c:2631:31-2631:49: static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
-
mm/memory.c:2667:31-2667:49: static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
-
mm/memory.c:2703:34-2703:52: static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2744:25-2744:43: int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:2758:34-2758:52: int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
-
mm/memory.c:5224:17-5224:35: int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-
mm/memory.c:5247:17-5247:35: int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
-
mm/memory.c:5270:17-5270:35: int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-
mm/memory.c:5311:16-5311:34: int follow_pte(struct mm_struct *mm, unsigned long address,
-
mm/memory.c:5480:24-5480:42: int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
-
mm/memory.c:5555:22-5555:40: int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-
mm/mempolicy.c:381:21-381:39: void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-
mm/mempolicy.c:734:19-734:37: queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
-
mm/mempolicy.c:794:24-794:42: static int mbind_range(struct mm_struct *mm, unsigned long start,
-
mm/mempolicy.c:913:24-913:42: static int lookup_node(struct mm_struct *mm, unsigned long addr)
-
mm/mempolicy.c:1060:28-1060:46: static int migrate_to_node(struct mm_struct *mm, int source, int dest,
-
mm/mempolicy.c:1101:22-1101:40: int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
-
mm/migrate.c:305:29-305:47: void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-
mm/migrate.c:326:27-326:45: void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-
mm/migrate.c:374:31-374:49: void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
-
mm/migrate.c:2080:34-2080:52: static int do_move_pages_to_node(struct mm_struct *mm,
-
mm/migrate.c:2105:35-2105:53: static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
-
mm/migrate.c:2174:40-2174:58: static int move_pages_and_store_status(struct mm_struct *mm, int node,
-
mm/migrate.c:2204:26-2204:44: static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
-
mm/migrate.c:2302:33-2302:51: static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
-
mm/migrate.c:2365:26-2365:44: static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-
mm/mlock.c:525:47-525:65: static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
-
mm/mmap.c:303:28-303:46: static void validate_mm_mt(struct mm_struct *mm)
-
mm/mmap.c:340:25-340:43: static void validate_mm(struct mm_struct *mm)
-
mm/mmap.c:407:44-407:62: static unsigned long count_vma_pages_range(struct mm_struct *mm,
-
mm/mmap.c:435:21-435:39: static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
-
mm/mmap.c:551:31-551:49: struct vma_iterator *vmi, struct mm_struct *mm)
-
mm/mmap.c:886:60-886:78: struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
-
mm/mmap.c:1140:24-1140:42: int mlock_future_check(struct mm_struct *mm, unsigned long flags,
-
mm/mmap.c:1764:46-1764:64: struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
-
mm/mmap.c:1783:33-1783:51: struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-
mm/mmap.c:1806:15-1806:33: find_vma_prev(struct mm_struct *mm, unsigned long addr,
-
mm/mmap.c:2079:17-2079:35: find_extend_vma(struct mm_struct *mm, unsigned long addr)
-
mm/mmap.c:2109:30-2109:48: static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-
mm/mmap.c:2133:26-2133:44: static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
-
mm/mmap.c:2272:7-2272:25: struct mm_struct *mm, unsigned long start,
-
mm/mmap.c:2430:45-2430:63: int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
-
mm/mmap.c:2461:15-2461:33: int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
-
mm/mmap.c:3005:16-3005:34: void exit_mmap(struct mm_struct *mm)
-
mm/mmap.c:3069:22-3069:40: int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
-
mm/mmap.c:3203:20-3203:38: bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
-
mm/mmap.c:3228:22-3228:40: void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
-
mm/mmap.c:3324:2-3324:20: struct mm_struct *mm,
-
mm/mmap.c:3382:2-3382:20: struct mm_struct *mm,
-
mm/mmap.c:3390:29-3390:47: int install_special_mapping(struct mm_struct *mm,
-
mm/mmap.c:3403:30-3403:48: static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
-
mm/mmap.c:3426:29-3426:47: static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
-
mm/mmap.c:3481:23-3481:41: int mm_take_all_locks(struct mm_struct *mm)
-
mm/mmap.c:3564:24-3564:42: void mm_drop_all_locks(struct mm_struct *mm)
-
mm/mmap_lock.c:199:38-199:56: static const char *get_mm_memcg_path(struct mm_struct *mm)
-
mm/mmap_lock.c:228:41-228:59: void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
-
mm/mmap_lock.c:234:44-234:62: void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
-
mm/mmap_lock.c:241:36-241:54: void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
-
mm/mmu_gather.c:302:54-302:72: static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-
mm/mmu_gather.c:335:45-335:63: void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
-
mm/mmu_gather.c:351:52-351:70: void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
-
mm/mmu_notifier.c:262:9-262:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:300:9-300:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:348:29-348:47: void __mmu_notifier_release(struct mm_struct *mm)
-
mm/mmu_notifier.c:365:38-365:56: int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:385:32-385:50: int __mmu_notifier_clear_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:405:31-405:49: int __mmu_notifier_test_young(struct mm_struct *mm,
-
mm/mmu_notifier.c:427:32-427:50: void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
-
mm/mmu_notifier.c:607:38-607:56: void __mmu_notifier_invalidate_range(struct mm_struct *mm,
-
mm/mmu_notifier.c:630:8-630:26: struct mm_struct *mm)
-
mm/mmu_notifier.c:723:6-723:24: struct mm_struct *mm)
-
mm/mmu_notifier.c:735:23-735:41: find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
-
mm/mmu_notifier.c:775:11-775:29: struct mm_struct *mm)
-
mm/mmu_notifier.c:803:43-803:61: void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
-
mm/mmu_notifier.c:821:9-821:27: struct mm_struct *mm)
-
mm/mmu_notifier.c:914:46-914:64: struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
-
mm/mmu_notifier.c:996:6-996:24: struct mm_struct *mm, unsigned long start,
-
mm/mmu_notifier.c:1018:46-1018:64: struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
-
mm/mremap.c:35:27-35:45: static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
-
mm/mremap.c:56:27-56:45: static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
-
mm/mremap.c:72:29-72:47: static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/mremap.c:86:29-86:47: static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-
mm/oom_kill.c:490:47-490:65: bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
-
mm/oom_kill.c:512:32-512:50: static bool __oom_reap_task_mm(struct mm_struct *mm)
-
mm/oom_kill.c:568:55-568:73: static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
-
mm/page_table_check.c:61:36-61:54: static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:96:34-96:52: static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:148:35-148:53: void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:161:35-161:53: void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:174:35-174:53: void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:187:33-187:51: void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:202:33-202:51: void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:217:33-217:51: void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
-
mm/page_table_check.c:232:41-232:59: void __page_table_check_pte_clear_range(struct mm_struct *mm,
-
mm/pagewalk.c:427:21-427:39: int walk_page_range(struct mm_struct *mm, unsigned long start,
-
mm/pagewalk.c:499:27-499:45: int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
-
mm/pgalloc-track.h:6:38-6:56: static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
-
mm/pgalloc-track.h:19:38-19:56: static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
-
mm/pgalloc-track.h:32:38-32:56: static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
-
mm/pgtable-generic.c:162:33-162:51: void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-
mm/pgtable-generic.c:178:39-178:57: pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-
mm/process_vm_access.c:74:9-74:27: struct mm_struct *mm,
-
mm/ptdump.c:151:47-151:65: void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
-
mm/rmap.c:644:39-644:57: static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
-
mm/rmap.c:687:32-687:50: static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
-
mm/rmap.c:717:32-717:50: void flush_tlb_batched_pending(struct mm_struct *mm)
-
mm/rmap.c:774:20-774:38: pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
-
mm/rmap.c:2272:3-2272:21: struct mm_struct *mm, unsigned long address, void *owner)
-
mm/rmap.c:2320:33-2320:51: int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
-
mm/shmem.c:472:6-472:24: struct mm_struct *mm, unsigned long vm_flags)
-
mm/swapfile.c:1989:21-1989:39: static int unuse_mm(struct mm_struct *mm, unsigned int type)
-
mm/userfaultfd.c:287:28-287:46: static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
-
mm/util.c:470:25-470:43: int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
-
mm/util.c:513:23-513:41: int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
-
mm/util.c:933:24-933:42: int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
-
mm/vmscan.c:3352:21-3352:39: void lru_gen_add_mm(struct mm_struct *mm)
-
mm/vmscan.c:3378:21-3378:39: void lru_gen_del_mm(struct mm_struct *mm)
-
mm/vmscan.c:3422:25-3422:43: void lru_gen_migrate_mm(struct mm_struct *mm)
-
mm/vmscan.c:3476:28-3476:46: static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-
mm/vmscan.c:4218:44-4218:62: static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-
mm/z3fold.c:1630:4-1630:23: enum zpool_mapmode mm)
-
mm/zbud.c:566:4-566:23: enum zpool_mapmode mm)
-
mm/zsmalloc.c:453:4-453:23: enum zpool_mapmode mm)
-
mm/zsmalloc.c:1328:4-1328:20: enum zs_mapmode mm)
-
security/commoncap.c:1400:26-1400:44: int cap_vm_enough_memory(struct mm_struct *mm, long pages)
-
security/security.c:860:34-860:52: int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
-
security/selinux/hooks.c:2197:37-2197:55: static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
-
virt/kvm/kvm_main.c:522:12-522:30: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:687:6-687:24: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:838:12-838:30: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:848:6-848:24: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:871:12-871:30: struct mm_struct *mm,
-
virt/kvm/kvm_main.c:881:10-881:28: struct mm_struct *mm)
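
All of the parameters indexed above hand a struct mm_struct * to a callee that assumes the caller keeps it alive. As a minimal, hypothetical sketch (not drawn from any single entry in this list), a caller typically pins the mm with get_task_mm() and holds mmap_lock before invoking such a helper; inspect_task_mm() and its task argument are illustrative names only:

    /*
     * Minimal sketch, not taken from any entry above: pin a task's mm
     * before handing it to one of the mm-taking helpers listed here.
     * inspect_task_mm() and "task" are hypothetical.
     */
    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/sched/mm.h>    /* get_task_mm(), mmput() */
    #include <linux/mmap_lock.h>   /* mmap_read_lock()/mmap_read_unlock() */

    static int inspect_task_mm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task); /* takes a reference, or returns NULL */

            if (!mm)
                    return -ESRCH;
            mmap_read_lock(mm);    /* many of the helpers above expect mmap_lock to be held */
            /* ... e.g. walk_page_range(mm, start, end, &ops, priv) would go here ... */
            mmap_read_unlock(mm);
            mmput(mm);             /* drop the reference taken by get_task_mm() */
            return 0;
    }
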
variable
Defined...
-
arch/x86/entry/vdso/vma.c:113:2-113:31: struct mm_struct *mm = task->mm;
-
arch/x86/entry/vdso/vma.c:225:2-225:34: struct mm_struct *mm = current->mm;
-
arch/x86/entry/vdso/vma.c:332:2-332:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/ldt.c:138:2-138:25: struct mm_struct *mm = __mm;
-
arch/x86/kernel/ldt.c:502:2-502:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/ldt.c:578:2-578:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/sys_x86_64.c:126:2-126:34: struct mm_struct *mm = current->mm;
-
arch/x86/kernel/sys_x86_64.c:166:2-166:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/fault.c:872:2-872:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/fault.c:1236:2-1236:20: struct mm_struct *mm;
-
arch/x86/mm/hugetlbpage.c:117:2-117:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/mmap.c:155:2-155:34: struct mm_struct *mm = current->mm;
-
arch/x86/mm/tlb.c:687:2-687:25: struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-
arch/x86/xen/mmu_pv.c:904:2-904:25: struct mm_struct *mm = info;
-
drivers/acpi/acpica/evrgnini.c:41:2-41:27: struct acpi_mem_mapping *mm;
-
drivers/acpi/acpica/exregion.c:44:2-44:42: struct acpi_mem_mapping *mm = mem_info->cur_mm;
-
drivers/android/binder_alloc.c:188:2-188:25: struct mm_struct *mm = NULL;
-
drivers/android/binder_alloc.c:990:2-990:25: struct mm_struct *mm = NULL;
-
drivers/dma-buf/dma-resv.c:741:2-741:34: struct mm_struct *mm = mm_alloc();
-
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:2559:2-2559:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c:167:2-167:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c:657:2-657:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:273:2-273:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:393:2-393:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:582:2-582:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:802:2-802:31: struct drm_buddy *mm = &mgr->mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c:1020:2-1020:25: struct mm_struct *mm = NULL;
-
drivers/gpu/drm/amd/amdkfd/kfd_events.c:1142:2-1142:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c:893:2-893:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_process.c:106:2-106:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:524:2-524:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:1692:2-1692:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2186:2-2186:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:2786:2-2786:25: struct mm_struct *mm = NULL;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3323:2-3323:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3687:2-3687:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3903:2-3903:20: struct mm_struct *mm;
-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c:3991:2-3991:34: struct mm_struct *mm = current->mm;
-
drivers/gpu/drm/drm_mm.c:167:2-167:33: struct drm_mm *mm = hole_node->mm;
-
drivers/gpu/drm/drm_mm.c:268:2-268:28: struct drm_mm *mm = node->mm;
-
drivers/gpu/drm/drm_mm.c:629:2-629:28: struct drm_mm *mm = node->mm;
-
drivers/gpu/drm/drm_mm.c:662:2-662:27: struct drm_mm *mm = old->mm;
-
drivers/gpu/drm/drm_mm.c:783:2-783:28: struct drm_mm *mm = scan->mm;
-
drivers/gpu/drm/drm_mm.c:914:2-914:28: struct drm_mm *mm = scan->mm;
-
drivers/gpu/drm/i915/gem/i915_gem_mman.c:106:3-106:35: struct mm_struct *mm = current->mm;
-
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:659:2-659:53: struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:862:2-862:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1818:2-1819:39: struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
-
drivers/gpu/drm/i915/gvt/cmd_parser.c:1901:2-1902:39: struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
-
drivers/gpu/drm/i915/gvt/gtt.c:1880:2-1880:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1913:2-1913:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1954:2-1954:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:1999:2-1999:29: struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
-
drivers/gpu/drm/i915/gvt/gtt.c:2065:2-2065:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2547:2-2547:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2662:2-2662:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2699:2-2699:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2724:2-2724:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2820:2-2820:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/gtt.c:2907:2-2907:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/handlers.c:1467:2-1467:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:437:2-437:39: struct intel_vgpu_mm *mm = workload->shadow_mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:1526:3-1526:29: struct intel_vgpu_mm *m, *mm;
-
drivers/gpu/drm/i915/gvt/scheduler.c:1583:2-1583:24: struct intel_vgpu_mm *mm;
-
drivers/gpu/drm/i915/i915_scatterlist.c:170:2-170:35: struct drm_buddy *mm = bman_res->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:41:2-41:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:183:2-183:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:221:2-221:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:360:2-360:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c:398:2-398:32: struct drm_buddy *mm = &bman->mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:374:2-374:20: struct drm_buddy *mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:459:2-459:20: struct drm_buddy *mm;
-
drivers/gpu/drm/i915/selftests/intel_memory_region.c:538:2-538:35: struct drm_buddy *mm = bman_res->mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:117:2-117:20: struct mm_struct *mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:595:2-595:40: struct mm_struct *mm = svmm->notifier.mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:667:2-667:40: struct mm_struct *mm = svmm->notifier.mm;
-
drivers/gpu/drm/nouveau/nouveau_svm.c:782:3-782:21: struct mm_struct *mm;
-
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c:116:2-116:18: struct nvkm_mm *mm;
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:318:2-318:41: struct nvkm_mm *mm = &device->fb->ram->vram;
-
drivers/gpu/drm/tests/drm_buddy_test.c:331:2-331:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:409:2-409:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:501:2-501:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:596:2-596:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:641:2-641:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_buddy_test.c:700:2-700:19: struct drm_buddy mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:197:2-197:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:250:2-250:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:347:2-347:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:531:2-531:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:815:2-815:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:900:2-900:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:993:2-993:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1057:2-1057:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1102:2-1102:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1395:2-1395:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1489:2-1489:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1576:2-1576:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1683:2-1683:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1769:2-1769:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:1850:2-1850:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:2051:2-2051:16: struct drm_mm mm;
-
drivers/gpu/drm/tests/drm_mm_test.c:2135:2-2135:16: struct drm_mm mm;
-
drivers/gpu/drm/ttm/ttm_range_manager.c:65:2-65:29: struct drm_mm *mm = &rman->mm;
-
drivers/gpu/drm/ttm/ttm_range_manager.c:219:2-219:29: struct drm_mm *mm = &rman->mm;
-
drivers/infiniband/core/umem.c:156:2-156:20: struct mm_struct *mm;
-
drivers/infiniband/core/uverbs_main.c:818:3-818:26: struct mm_struct *mm = NULL;
-
drivers/infiniband/hw/cxgb4/cq.c:1009:2-1009:24: struct c4iw_mm_entry *mm, *mm2;
-
drivers/infiniband/hw/cxgb4/iw_cxgb4.h:546:2-546:24: struct c4iw_mm_entry *mm;
-
drivers/infiniband/hw/cxgb4/provider.c:66:2-66:24: struct c4iw_mm_entry *mm, *tmp;
-
drivers/infiniband/hw/cxgb4/provider.c:84:2-84:29: struct c4iw_mm_entry *mm = NULL;
-
drivers/infiniband/hw/cxgb4/provider.c:131:2-131:24: struct c4iw_mm_entry *mm;
-
drivers/infiniband/hw/hfi1/user_exp_rcv.c:141:2-141:20: struct mm_struct *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:196:2-196:20: struct ocrdma_mm *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:214:2-214:20: struct ocrdma_mm *mm, *tmp;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:232:2-232:20: struct ocrdma_mm *mm;
-
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c:522:2-522:20: struct ocrdma_mm *mm, *tmp;
-
drivers/infiniband/hw/usnic/usnic_uiom.c:100:2-100:20: struct mm_struct *mm;
-
drivers/iommu/amd/iommu_v2.c:475:2-475:20: struct mm_struct *mm;
-
drivers/iommu/amd/iommu_v2.c:603:2-603:20: struct mm_struct *mm;
-
drivers/iommu/intel/svm.c:381:2-381:20: struct mm_struct *mm;
-
drivers/iommu/intel/svm.c:843:2-843:33: struct mm_struct *mm = domain->mm;
-
drivers/iommu/iommu-sva.c:192:2-192:25: struct mm_struct *mm = data;
-
drivers/media/dvb-frontends/drxd_hard.c:258:2-261:2: u8 mm[6] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxd_hard.c:271:2-275:2: u8 mm[8] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxd_hard.c:286:2-288:2: u8 mm[CHUNK_SIZE + 4] = { reg & 0xff, (reg >> 16) & 0xff,
-
drivers/media/dvb-frontends/drxk_hard.c:354:2-354:37: u8 adr = state->demod_address, mm[6], len;
-
drivers/media/dvb-frontends/drxk_hard.c:383:2-383:37: u8 adr = state->demod_address, mm[8], len;
-
drivers/misc/sgi-gru/grufault.c:68:2-68:34: struct mm_struct *mm = current->mm;
-
drivers/misc/sgi-gru/grufault.c:85:2-85:34: struct mm_struct *mm = current->mm;
-
drivers/misc/sgi-gru/grufault.c:255:2-255:30: struct mm_struct *mm = gts->ts_mm;
-
drivers/net/arcnet/com20020-pci.c:123:2-123:35: struct com20020_pci_channel_map *mm;
-
drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c:199:2-199:34: struct mm_struct *mm = current->mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:55:2-55:47: struct ocelot_mm_state *mm = &ocelot->mm[port];
-
drivers/net/ethernet/mscc/ocelot_mm.c:102:2-102:26: struct ocelot_mm_state *mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:153:2-153:26: struct ocelot_mm_state *mm;
-
drivers/net/ethernet/mscc/ocelot_mm.c:189:2-189:26: struct ocelot_mm_state *mm;
-
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c:1403:2-1403:18: u8 af = 0, mm = 0;
-
drivers/pcmcia/rsrc_nonstatic.c:538:2-538:26: struct resource_map *m, mm;
-
drivers/usb/gadget/legacy/inode.c:465:2-465:31: struct mm_struct *mm = priv->mm;
-
drivers/vfio/pci/vfio_pci_core.c:1556:3-1556:26: struct mm_struct *mm = NULL;
-
drivers/vfio/vfio_iommu_type1.c:427:2-427:20: struct mm_struct *mm;
-
drivers/vfio/vfio_iommu_type1.c:615:2-615:34: struct mm_struct *mm = current->mm;
-
drivers/vfio/vfio_iommu_type1.c:746:2-746:20: struct mm_struct *mm;
-
drivers/vfio/vfio_iommu_type1.c:1520:2-1520:34: struct mm_struct *mm = current->mm;
-
drivers/vfio/vfio_iommu_type1.c:3058:2-3058:20: struct mm_struct *mm;
-
drivers/xen/privcmd.c:254:2-254:34: struct mm_struct *mm = current->mm;
-
drivers/xen/privcmd.c:448:2-448:34: struct mm_struct *mm = current->mm;
-
drivers/xen/privcmd.c:725:2-725:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:357:2-357:30: struct mm_struct *mm = vma->vm_mm;
-
fs/aio.c:494:2-494:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:733:2-733:34: struct mm_struct *mm = current->mm;
-
fs/aio.c:1077:2-1077:34: struct mm_struct *mm = current->mm;
-
fs/binfmt_elf.c:179:2-179:34: struct mm_struct *mm = current->mm;
-
fs/binfmt_elf.c:843:2-843:20: struct mm_struct *mm;
-
fs/coredump.c:521:2-521:34: struct mm_struct *mm = current->mm;
-
fs/coredump.c:1145:2-1145:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:188:2-188:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:252:2-252:31: struct mm_struct *mm = bprm->mm;
-
fs/exec.c:366:2-366:25: struct mm_struct *mm = NULL;
-
fs/exec.c:680:2-680:30: struct mm_struct *mm = vma->vm_mm;
-
fs/exec.c:749:2-749:34: struct mm_struct *mm = current->mm;
-
fs/exec.c:2083:2-2083:34: struct mm_struct *mm = current->mm;
-
fs/hugetlbfs/inode.c:239:2-239:34: struct mm_struct *mm = current->mm;
-
fs/hugetlbfs/inode.c:775:2-775:34: struct mm_struct *mm = current->mm;
-
fs/proc/array.c:429:2-429:41: struct mm_struct *mm = get_task_mm(task);
-
fs/proc/array.c:463:2-463:20: struct mm_struct *mm;
-
fs/proc/array.c:662:2-662:41: struct mm_struct *mm = get_task_mm(task);
-
fs/proc/base.c:345:2-345:20: struct mm_struct *mm;
-
fs/proc/base.c:799:2-799:39: struct mm_struct *mm = ERR_PTR(-ESRCH);
-
fs/proc/base.c:818:2-818:50: struct mm_struct *mm = proc_mem_open(inode, mode);
-
fs/proc/base.c:840:2-840:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:922:2-922:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:947:2-947:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:1019:2-1019:31: struct mm_struct *mm = file->private_data;
-
fs/proc/base.c:1062:2-1062:25: struct mm_struct *mm = NULL;
-
fs/proc/base.c:1849:3-1849:21: struct mm_struct *mm;
-
fs/proc/base.c:2145:2-2145:25: struct mm_struct *mm = NULL;
-
fs/proc/base.c:2197:2-2197:20: struct mm_struct *mm;
-
fs/proc/base.c:2295:2-2295:20: struct mm_struct *mm;
-
fs/proc/base.c:2348:2-2348:20: struct mm_struct *mm;
-
fs/proc/base.c:2904:2-2904:20: struct mm_struct *mm;
-
fs/proc/base.c:2933:2-2933:20: struct mm_struct *mm;
-
fs/proc/base.c:3192:2-3192:20: struct mm_struct *mm;
-
fs/proc/base.c:3205:2-3205:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:145:2-145:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:189:2-189:31: struct mm_struct *mm = priv->mm;
-
fs/proc/task_mmu.c:278:2-278:30: struct mm_struct *mm = vma->vm_mm;
-
fs/proc/task_mmu.c:889:2-889:31: struct mm_struct *mm = priv->mm;
-
fs/proc/task_mmu.c:1256:2-1256:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:1657:2-1657:31: struct mm_struct *mm = file->private_data;
-
fs/proc/task_mmu.c:1745:2-1745:20: struct mm_struct *mm;
-
fs/proc/task_mmu.c:1756:2-1756:31: struct mm_struct *mm = file->private_data;
-
fs/proc/task_mmu.c:1943:2-1943:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:302:2-302:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:393:2-393:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:638:3-638:43: struct mm_struct *mm = release_new_ctx->mm;
-
fs/userfaultfd.c:794:2-794:30: struct mm_struct *mm = vma->vm_mm;
-
fs/userfaultfd.c:881:2-881:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:1295:2-1295:30: struct mm_struct *mm = ctx->mm;
-
fs/userfaultfd.c:1527:2-1527:30: struct mm_struct *mm = ctx->mm;
-
ipc/shm.c:1733:2-1733:34: struct mm_struct *mm = current->mm;
-
kernel/acct.c:560:3-560:35: struct mm_struct *mm = current->mm;
-
kernel/bpf/task_iter.c:786:2-786:20: struct mm_struct *mm;
-
kernel/cgroup/cpuset.c:1970:3-1970:21: struct mm_struct *mm;
-
kernel/cgroup/cpuset.c:2572:3-2572:44: struct mm_struct *mm = get_task_mm(leader);
-
kernel/cpu.c:618:2-618:31: struct mm_struct *mm = idle->active_mm;
-
kernel/events/core.c:7512:2-7512:20: struct mm_struct *mm;
-
kernel/events/core.c:10616:2-10616:25: struct mm_struct *mm = NULL;
-
kernel/events/uprobes.c:158:2-158:30: struct mm_struct *mm = vma->vm_mm;
-
kernel/events/uprobes.c:1046:3-1046:32: struct mm_struct *mm = info->mm;
-
kernel/events/uprobes.c:1481:2-1481:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:1529:2-1529:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:1799:2-1799:34: struct mm_struct *mm = current->mm;
-
kernel/events/uprobes.c:2044:2-2044:34: struct mm_struct *mm = current->mm;
-
kernel/exit.c:533:2-533:34: struct mm_struct *mm = current->mm;
-
kernel/fork.c:802:2-802:20: struct mm_struct *mm;
-
kernel/fork.c:1186:2-1186:20: struct mm_struct *mm;
-
kernel/fork.c:1233:2-1233:25: struct mm_struct *mm = container_of(work, struct mm_struct,
-
kernel/fork.c:1371:2-1371:20: struct mm_struct *mm;
-
kernel/fork.c:1394:2-1394:20: struct mm_struct *mm;
-
kernel/fork.c:1411:2-1411:20: struct mm_struct *mm;
-
kernel/fork.c:1533:2-1533:20: struct mm_struct *mm;
-
kernel/fork.c:1569:2-1569:20: struct mm_struct *mm, *oldmm;
-
kernel/futex/core.c:224:2-224:34: struct mm_struct *mm = current->mm;
-
kernel/futex/core.c:411:2-411:34: struct mm_struct *mm = current->mm;
-
kernel/ptrace.c:46:2-46:20: struct mm_struct *mm;
-
kernel/ptrace.c:289:2-289:20: struct mm_struct *mm;
-
kernel/sched/core.c:5147:2-5147:29: struct mm_struct *mm = rq->prev_mm;
-
kernel/sched/core.c:9286:2-9286:34: struct mm_struct *mm = current->active_mm;
-
kernel/sched/core.c:11388:2-11388:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:11402:2-11402:28: struct mm_struct *mm = t->mm;
-
kernel/sched/core.c:11416:2-11416:28: struct mm_struct *mm = t->mm;
-
kernel/sched/fair.c:2939:2-2939:28: struct mm_struct *mm = p->mm;
-
kernel/sched/fair.c:3084:2-3084:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:202:2-202:46: struct mm_struct *mm = (struct mm_struct *) info;
-
kernel/sched/membarrier.c:313:2-313:34: struct mm_struct *mm = current->mm;
-
kernel/sched/membarrier.c:486:2-486:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:505:2-505:28: struct mm_struct *mm = p->mm;
-
kernel/sched/membarrier.c:547:2-547:28: struct mm_struct *mm = p->mm;
-
kernel/sys.c:1830:3-1830:39: struct mm_struct *mm = get_task_mm(p);
-
kernel/sys.c:1975:2-1975:34: struct mm_struct *mm = current->mm;
-
kernel/sys.c:2112:2-2112:34: struct mm_struct *mm = current->mm;
-
kernel/sys.c:2304:2-2304:34: struct mm_struct *mm = current->mm;
-
kernel/trace/trace_output.c:1095:2-1095:25: struct mm_struct *mm = NULL;
-
kernel/tsacct.c:93:2-93:20: struct mm_struct *mm;
-
lib/is_single_threaded.c:18:2-18:31: struct mm_struct *mm = task->mm;
-
lib/test_hmm.c:290:2-290:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:330:2-330:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:788:2-788:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:911:2-911:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:971:2-971:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:1117:2-1117:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/test_hmm.c:1173:2-1173:43: struct mm_struct *mm = dmirror->notifier.mm;
-
lib/zlib_inflate/inffast.c:22:2-22:12: union uu mm;
-
lib/zlib_inflate/inffast.c:269:5-269:14: union uu mm;
-
mm/damon/vaddr.c:44:2-44:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:176:2-176:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:411:2-411:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:569:2-569:20: struct mm_struct *mm;
-
mm/damon/vaddr.c:617:2-617:20: struct mm_struct *mm;
-
mm/filemap.c:3397:2-3397:35: struct mm_struct *mm = vmf->vma->vm_mm;
-
mm/gup.c:537:2-537:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:653:2-653:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:719:2-719:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:784:2-784:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1501:2-1501:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1570:2-1570:30: struct mm_struct *mm = vma->vm_mm;
-
mm/gup.c:1616:2-1616:34: struct mm_struct *mm = current->mm;
-
mm/gup.c:1815:2-1815:34: struct mm_struct *mm = current->mm;
-
mm/hmm.c:590:2-590:42: struct mm_struct *mm = range->notifier->mm;
-
mm/huge_memory.c:847:2-847:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:948:2-948:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1039:2-1039:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1194:2-1194:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1461:2-1461:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1600:2-1600:30: struct mm_struct *mm = tlb->mm;
-
mm/huge_memory.c:1766:2-1766:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:1820:2-1820:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2038:2-2038:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2074:2-2074:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:2989:2-2989:20: struct mm_struct *mm;
-
mm/huge_memory.c:3219:2-3219:30: struct mm_struct *mm = vma->vm_mm;
-
mm/huge_memory.c:3265:2-3265:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5151:2-5151:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5181:2-5181:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:5241:2-5241:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:6387:2-6387:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:6628:2-6628:30: struct mm_struct *mm = vma->vm_mm;
-
mm/hugetlb.c:7360:2-7360:30: struct mm_struct *mm = vma->vm_mm;
-
mm/khugepaged.c:1308:2-1308:31: struct mm_struct *mm = slot->mm;
-
mm/khugepaged.c:1611:2-1611:31: struct mm_struct *mm = slot->mm;
-
mm/khugepaged.c:1642:3-1642:26: struct mm_struct *mm = NULL;
-
mm/khugepaged.c:2267:2-2267:20: struct mm_struct *mm;
-
mm/khugepaged.c:2644:2-2644:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:529:2-529:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:548:2-548:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:975:2-975:20: struct mm_struct *mm;
-
mm/ksm.c:1054:2-1054:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:1145:2-1145:30: struct mm_struct *mm = vma->vm_mm;
-
mm/ksm.c:1306:2-1306:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:2064:2-2064:36: struct mm_struct *mm = rmap_item->mm;
-
mm/ksm.c:2254:2-2254:20: struct mm_struct *mm;
-
mm/ksm.c:2477:2-2477:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:142:2-142:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:275:2-275:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:341:2-341:30: struct mm_struct *mm = tlb->mm;
-
mm/madvise.c:549:2-549:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:582:2-582:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:612:2-612:30: struct mm_struct *mm = tlb->mm;
-
mm/madvise.c:749:2-749:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:842:2-842:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:909:2-909:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:971:2-971:30: struct mm_struct *mm = vma->vm_mm;
-
mm/madvise.c:1463:2-1463:20: struct mm_struct *mm;
-
mm/memcontrol.c:6081:2-6081:28: struct mm_struct *mm = mc.mm;
-
mm/memcontrol.c:6104:2-6104:20: struct mm_struct *mm;
-
mm/memory.c:1359:2-1359:30: struct mm_struct *mm = tlb->mm;
-
mm/memory.c:1857:2-1857:36: struct mm_struct *const mm = vma->vm_mm;
-
mm/memory.c:2080:2-2080:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:2426:2-2426:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:2803:2-2803:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:3048:2-3048:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:4970:2-4970:30: struct mm_struct *mm = vma->vm_mm;
-
mm/memory.c:5569:2-5569:20: struct mm_struct *mm;
-
mm/memory.c:5589:2-5589:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:931:2-931:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1261:2-1261:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1491:2-1491:34: struct mm_struct *mm = current->mm;
-
mm/mempolicy.c:1592:2-1592:25: struct mm_struct *mm = NULL;
-
mm/migrate.c:2401:2-2401:20: struct mm_struct *mm;
-
mm/migrate.c:2454:2-2454:20: struct mm_struct *mm;
-
mm/migrate_device.c:64:2-64:30: struct mm_struct *mm = vma->vm_mm;
-
mm/migrate_device.c:574:2-574:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mlock.c:408:2-408:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:188:2-188:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1197:2-1197:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1606:2-1606:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1655:2-1655:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:1827:2-1827:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:1958:2-1958:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:2473:2-2473:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2719:2-2719:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2762:2-2762:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2855:2-2855:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmap.c:2879:2-2879:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:2953:2-2953:34: struct mm_struct *mm = current->mm;
-
mm/mmap.c:3116:2-3116:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mmu_notifier.c:866:2-866:39: struct mm_struct *mm = subscription->mm;
-
mm/mmu_notifier.c:897:2-897:39: struct mm_struct *mm = subscription->mm;
-
mm/mmu_notifier.c:1063:2-1063:39: struct mm_struct *mm = interval_sub->mm;
-
mm/mprotect.c:503:2-503:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mprotect.c:592:2-592:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:141:2-141:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:229:2-229:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:297:2-297:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:346:2-346:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:578:2-578:30: struct mm_struct *mm = vma->vm_mm;
-
mm/mremap.c:736:2-736:34: struct mm_struct *mm = current->mm;
-
mm/mremap.c:793:2-793:34: struct mm_struct *mm = current->mm;
-
mm/mremap.c:899:2-899:34: struct mm_struct *mm = current->mm;
-
mm/msync.c:35:2-35:34: struct mm_struct *mm = current->mm;
-
mm/oom_kill.c:612:2-612:38: struct mm_struct *mm = tsk->signal->oom_mm;
-
mm/oom_kill.c:666:2-666:38: struct mm_struct *mm = tsk->signal->oom_mm;
-
mm/oom_kill.c:760:2-760:30: struct mm_struct *mm = tsk->mm;
-
mm/oom_kill.c:871:2-871:31: struct mm_struct *mm = task->mm;
-
mm/oom_kill.c:919:2-919:20: struct mm_struct *mm;
-
mm/oom_kill.c:1203:2-1203:25: struct mm_struct *mm = NULL;
-
mm/page_vma_mapped.c:154:2-154:30: struct mm_struct *mm = vma->vm_mm;
-
mm/pgtable-generic.c:94:2-94:32: struct mm_struct *mm = (vma)->vm_mm;
-
mm/process_vm_access.c:158:2-158:20: struct mm_struct *mm;
-
mm/rmap.c:189:2-189:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:1452:2-1452:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:1812:2-1812:30: struct mm_struct *mm = vma->vm_mm;
-
mm/rmap.c:2179:2-2179:30: struct mm_struct *mm = vma->vm_mm;
-
mm/swapfile.c:2043:2-2043:20: struct mm_struct *mm;
-
mm/util.c:534:2-534:34: struct mm_struct *mm = current->mm;
-
mm/util.c:991:2-991:41: struct mm_struct *mm = get_task_mm(task);
-
mm/vmscan.c:3505:2-3505:25: struct mm_struct *mm = NULL;
-
mm/vmscan.c:4428:2-4428:25: struct mm_struct *mm = NULL;
-
security/tomoyo/util.c:970:2-970:34: struct mm_struct *mm = current->mm;
-
virt/kvm/async_pf.c:49:2-49:30: struct mm_struct *mm = apf->mm;
-
virt/kvm/kvm_main.c:1275:2-1275:30: struct mm_struct *mm = kvm->mm;
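
Two initialization patterns account for most of the variable definitions above: struct mm_struct *mm = current->mm; for the calling task's own address space, and struct mm_struct *mm = vma->vm_mm; for the address space a VMA belongs to. The sched/mm.h helpers indexed in the parameter list distinguish two reference counts: mmgrab()/mmdrop() pin the mm_struct itself (mm_count), while mmget()/mmput() pin the user address space (mm_users). A hypothetical sketch, purely to show the idioms and pairings:

    /*
     * Hypothetical sketch of the two declaration idioms above and the
     * two reference counts behind the sched/mm.h helpers; the function
     * name is illustrative only.
     */
    #include <linux/mm_types.h>
    #include <linux/sched.h>
    #include <linux/sched/mm.h>

    static void mm_reference_idioms(struct vm_area_struct *vma)
    {
            struct mm_struct *own = current->mm;  /* the calling task's address space */
            struct mm_struct *mm  = vma->vm_mm;   /* the address space the VMA lives in */

            mmgrab(mm);   /* keep the mm_struct itself alive (mm_count); paired with mmdrop() */
            mmget(own);   /* keep the address space alive (mm_users); paired with mmput() */

            /* ... use mm and own ... */

            mmput(own);
            mmdrop(mm);
    }
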