Symbol: mmu
function parameter
Defined...
-
arch/x86/kvm/mmu.h:161:11-161:27: struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu.h:186:58-186:74: static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu.h:299:11-299:27: struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:228:1-228:1: BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
-
arch/x86/kvm/mmu/mmu.c:229:1-229:1: BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
-
arch/x86/kvm/mmu/mmu.c:230:1-230:1: BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
-
arch/x86/kvm/mmu/mmu.c:231:1-231:1: BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
-
arch/x86/kvm/mmu/mmu.c:232:1-232:1: BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
-
arch/x86/kvm/mmu/mmu.c:233:1-233:1: BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
-
arch/x86/kvm/mmu/mmu.c:234:1-234:1: BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
-
arch/x86/kvm/mmu/mmu.c:235:1-235:1: BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
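The eight BUILD_MMU_ROLE_ACCESSOR() lines above each expand, via token pasting, into one is_<reg>_<bit>() predicate over a cached role bit. A minimal self-contained sketch of that technique (the struct layout and field names below are simplified stand-ins, not KVM's real kvm_mmu):

```c
#include <stdbool.h>

/* Simplified stand-in for KVM's cached role bitfields. */
struct mmu_role {
	struct { unsigned cr0_wp : 1; unsigned efer_nx : 1; } base;
	struct { unsigned cr4_smep : 1; unsigned cr4_smap : 1; } ext;
};

struct mmu_sketch {
	struct mmu_role cpu_role;
};

/* Token-paste an is_<reg>_<name>() accessor for one role bit. */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
static inline bool is_##reg##_##name(struct mmu_sketch *mmu)	\
{								\
	return !!(mmu->cpu_role.base_or_ext.reg##_##name);	\
}

BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);   /* emits is_cr0_wp()   */
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep); /* emits is_cr4_smep() */
```

The payoff is that every caller tests a snapshot taken when the role was computed, instead of re-reading guest CR0/CR4/EFER on each use.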
-
arch/x86/kvm/mmu/mmu.c:237:30-237:46: static inline bool is_cr0_pg(struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:242:31-242:47: static inline bool is_cr4_pae(struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:264:9-264:25: struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:3559:42-3559:58: void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:3615:53-3615:69: void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:4047:58-4047:74: static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4548:64-4548:80: static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4582:63-4582:79: static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4603:46-4603:62: static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:4945:39-4945:55: static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
-
arch/x86/kvm/mmu/mmu.c:5041:33-5041:49: static void update_pkru_bitmask(struct kvm_mmu *mmu)
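update_permission_bitmask() and permission_fault() (mmu.h:186 above) pair the same way: the mask is built once per paging-mode change so that the per-access check is a table lookup. A hedged sketch of that precomputation idea; the encoding and names below are invented for illustration, not KVM's actual bit layout:

```c
#include <stdbool.h>
#include <stdint.h>

#define ACC_WRITE 1u
#define ACC_USER  2u

struct perm_sketch {
	/* permitted[pte_access] is a bitmap over the 8 request types */
	uint8_t permitted[8];
};

/* Done once, when the paging mode changes. */
static void update_permissions(struct perm_sketch *mmu, bool write_protect)
{
	for (unsigned pte = 0; pte < 8; pte++) {
		uint8_t ok = 0;
		for (unsigned req = 0; req < 8; req++) {
			bool write_ok = !(req & ACC_WRITE) ||
					(pte & ACC_WRITE) || !write_protect;
			bool user_ok  = !(req & ACC_USER) || (pte & ACC_USER);
			if (write_ok && user_ok)
				ok |= 1u << req;
		}
		mmu->permitted[pte] = ok;
	}
}

/* Done on every access: a shift and a mask, no rule re-derivation. */
static bool permission_fault_sketch(struct perm_sketch *mmu,
				    unsigned pte_access, unsigned req)
{
	return !((mmu->permitted[pte_access] >> req) & 1);
}
```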
-
arch/x86/kvm/mmu/mmu.c:5086:6-5086:22: struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:5151:6-5151:22: struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:5525:60-5525:76: static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:5768:62-5768:78: static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:5805:53-5805:69: void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/mmu.c:5957:28-5957:44: static void free_mmu_pages(struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/mmu.c:5966:52-5966:68: static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
-
arch/x86/kvm/mmu/paging_tmpl.h:109:46-109:62: static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
-
arch/x86/kvm/mmu/paging_tmpl.h:145:37-145:53: static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
-
arch/x86/kvm/mmu/paging_tmpl.h:198:11-198:27: struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/paging_tmpl.h:270:40-270:56: static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/paging_tmpl.h:303:32-303:48: struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-
arch/x86/kvm/mmu/paging_tmpl.h:868:55-868:71: static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
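Across all of these KVM signatures, the MMU context is an explicit parameter rather than an implicit per-vCPU singleton; that is what lets one helper serve the root, guest, and nested walk contexts. A hedged sketch of the dispatch shape (types and field names are illustrative, loosely modeled on kvm_vcpu_arch's root_mmu/guest_mmu/walk_mmu, not copied from it):

```c
#include <stdint.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

struct vcpu_sketch;

/* Each MMU context carries its own translator, so callers pick the
 * context (root, guest, nested) simply by passing a different *mmu. */
struct mmu_ctx_sketch {
	gpa_t (*gva_to_gpa)(struct vcpu_sketch *vcpu,
			    struct mmu_ctx_sketch *mmu, gva_t gva);
};

struct vcpu_sketch {
	struct mmu_ctx_sketch root_mmu;
	struct mmu_ctx_sketch guest_mmu;
	struct mmu_ctx_sketch *walk_mmu; /* whichever is currently active */
};

static gpa_t translate(struct vcpu_sketch *vcpu, gva_t gva)
{
	struct mmu_ctx_sketch *mmu = vcpu->walk_mmu;

	/* Same call site works whether L1 or L2 is running. */
	return mmu->gva_to_gpa(vcpu, mmu, gva);
}
```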
-
drivers/accel/habanalabs/common/mmu/mmu_v1.c:802:50-802:71: void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
-
drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c:383:53-383:74: void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
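hl_mmu_v1_set_funcs() and hl_mmu_v2_hr_set_funcs() fill the same struct hl_mmu_funcs table, selected per hardware generation. A minimal sketch of that version-dispatch pattern; all names here are hypothetical, not the habanalabs API:

```c
#include <stdbool.h>
#include <stdint.h>

struct mmu_funcs_sketch {
	int (*map)(void *ctx, uint64_t va, uint64_t pa);
	int (*unmap)(void *ctx, uint64_t va);
};

static int v1_map(void *c, uint64_t va, uint64_t pa)
{ (void)c; (void)va; (void)pa; return 0; }
static int v1_unmap(void *c, uint64_t va)
{ (void)c; (void)va; return 0; }
static int v2_map(void *c, uint64_t va, uint64_t pa)
{ (void)c; (void)va; (void)pa; return 0; }
static int v2_unmap(void *c, uint64_t va)
{ (void)c; (void)va; return 0; }

/* One set_funcs() per generation: the core only ever calls through
 * the table, so new hardware adds a new table, not new call sites. */
static void set_funcs(struct mmu_funcs_sketch *mmu, bool v2_hw)
{
	mmu->map   = v2_hw ? v2_map   : v1_map;
	mmu->unmap = v2_hw ? v2_unmap : v1_unmap;
}
```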
-
drivers/gpu/drm/etnaviv/etnaviv_dump.c:101:2-101:32: struct etnaviv_iommu_context *mmu, size_t mmu_size)
-
drivers/gpu/drm/msm/msm_gem_vma.c:154:30-154:46: msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
-
drivers/gpu/drm/msm/msm_gpummu.c:24:31-24:47: static void msm_gpummu_detach(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_gpummu.c:28:27-28:43: static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
-
drivers/gpu/drm/msm/msm_gpummu.c:56:29-56:45: static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
-
drivers/gpu/drm/msm/msm_gpummu.c:71:43-71:59: static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_gpummu.c:75:32-75:48: static void msm_gpummu_destroy(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_gpummu.c:114:24-114:40: void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
-
drivers/gpu/drm/msm/msm_iommu.c:28:49-28:65: static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_iommu.c:89:38-89:54: static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
-
drivers/gpu/drm/msm/msm_iommu.c:113:36-113:52: static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
-
drivers/gpu/drm/msm/msm_iommu.c:152:41-152:57: static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_iommu.c:170:32-170:48: int msm_iommu_pagetable_params(struct msm_mmu *mmu,
-
drivers/gpu/drm/msm/msm_iommu.c:189:54-189:70: struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_iommu.c:320:42-320:58: static void msm_iommu_resume_translation(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_iommu.c:328:30-328:46: static void msm_iommu_detach(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_iommu.c:335:26-335:42: static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-
drivers/gpu/drm/msm/msm_iommu.c:351:28-351:44: static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
-
drivers/gpu/drm/msm/msm_iommu.c:363:31-363:47: static void msm_iommu_destroy(struct msm_mmu *mmu)
-
drivers/gpu/drm/msm/msm_mmu.h:35:33-35:49: static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
-
drivers/gpu/drm/msm/msm_mmu.h:47:46-47:62: static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
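The msm entries above all hang off one base-object pattern: msm_mmu_init() stores a per-backend function table in struct msm_mmu, and the msm_gpummu_* and msm_iommu_* functions are two fillings of that table. A simplified sketch (layout abbreviated from msm_mmu.h, names shortened):

```c
#include <stddef.h>
#include <stdint.h>

struct mmu_base_sketch;

struct mmu_funcs_sketch {
	void (*detach)(struct mmu_base_sketch *mmu);
	int  (*map)(struct mmu_base_sketch *mmu, uint64_t iova, size_t len);
	int  (*unmap)(struct mmu_base_sketch *mmu, uint64_t iova, size_t len);
	void (*destroy)(struct mmu_base_sketch *mmu);
};

struct mmu_base_sketch {
	const struct mmu_funcs_sketch *funcs;
};

/* The real msm_mmu_init() also records the struct device and type. */
static inline void mmu_init(struct mmu_base_sketch *mmu,
			    const struct mmu_funcs_sketch *funcs)
{
	mmu->funcs = funcs;
}

/* Callers stay backend-agnostic: */
static inline int mmu_map(struct mmu_base_sketch *mmu,
			  uint64_t iova, size_t len)
{
	return mmu->funcs->map(mmu, iova, len);
}
```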
-
drivers/gpu/drm/nouveau/dispnv50/crc.c:500:43-500:60: nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
-
drivers/gpu/drm/nouveau/dispnv50/lut.c:67:39-67:56: nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
-
drivers/gpu/drm/nouveau/include/nvif/mmu.h:39:21-39:38: nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
-
drivers/gpu/drm/nouveau/include/nvif/mmu.h:49:15-49:32: nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
-
drivers/gpu/drm/nouveau/nvif/mem.c:28:19-28:36: nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
-
drivers/gpu/drm/nouveau/nvif/mem.c:48:20-48:37: nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
-
drivers/gpu/drm/nouveau/nvif/mem.c:88:15-88:32: nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
-
drivers/gpu/drm/nouveau/nvif/mmu.c:28:15-28:32: nvif_mmu_dtor(struct nvif_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvif/mmu.c:41:8-41:25: struct nvif_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvif/vmm.c:199:15-199:32: nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:42:18-42:35: nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:65:18-65:35: nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:120:19-120:36: nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:141:18-141:35: nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:167:18-167:35: nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:220:19-220:36: nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:234:19-234:36: nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:246:19-246:36: nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:255:15-255:32: nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:265:15-265:32: nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:278:15-278:32: nvkm_mmu_host(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:315:15-315:32: nvkm_mmu_vram(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c:418:46-418:63: enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c:33:16-33:33: gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c:30:16-30:33: gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c:144:19-144:36: nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c:224:19-224:36: nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c:34:15-34:32: gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c:69:15-69:32: gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c:31:14-31:31: nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c:51:14-51:31: nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c:34:14-34:31: nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c:66:14-66:31: nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c:32:15-32:32: nv41_mmu_init(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c:32:15-32:32: nv44_mmu_init(struct nvkm_mmu *mmu)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c:31:16-31:33: tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c:1083:49-1083:66: nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c:1197:49-1197:66: nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c:404:9-404:26: struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c:420:15-420:32: gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c:98:15-98:32: gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c:67:15-67:32: gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c:144:9-144:26: struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c:172:15-172:32: gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c:181:21-181:38: gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c:57:15-57:32: gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c:66:21-66:38: gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c:601:9-601:26: struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c:629:15-629:32: gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c:45:15-45:32: gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c:83:15-83:32: gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c:39:15-39:32: mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c:102:49-102:66: nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c:121:14-121:31: nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c:106:14-106:31: nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c:208:14-208:31: nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c:381:14-381:31: nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
-
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c:71:15-71:32: tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
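Every *_vmm_new() above has the same shape: a thin per-chipset constructor that hands a generation-specific function table to the shared core (nvkm_vmm_ctor()/nvkm_vmm_new_() at vmm.c:1083 and 1197). A reduced sketch of that chaining; the tables and fields are invented for illustration:

```c
#include <stdint.h>
#include <stdlib.h>

struct vmm_func_sketch {
	const char *desc; /* stands in for per-generation page-table ops */
};

struct vmm_sketch {
	const struct vmm_func_sketch *func;
	uint64_t addr, size;
};

/* Shared core constructor, analogous in role to nvkm_vmm_new_(). */
static int vmm_new_(const struct vmm_func_sketch *func, uint64_t addr,
		    uint64_t size, struct vmm_sketch **pvmm)
{
	struct vmm_sketch *vmm = calloc(1, sizeof(*vmm));

	if (!vmm)
		return -1;
	vmm->func = func;
	vmm->addr = addr;
	vmm->size = size;
	*pvmm = vmm;
	return 0;
}

static const struct vmm_func_sketch gf100_func_sketch = { "gf100-style" };
static const struct vmm_func_sketch gp100_func_sketch = { "gp100-style" };

/* Per-generation entry points only differ in the table they pass. */
static int gf100_new(uint64_t a, uint64_t s, struct vmm_sketch **p)
{ return vmm_new_(&gf100_func_sketch, a, s, p); }
static int gp100_new(uint64_t a, uint64_t s, struct vmm_sketch **p)
{ return vmm_new_(&gp100_func_sketch, a, s, p); }
```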
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:113:11-113:32: struct panfrost_mmu *mmu,
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:124:64-124:85: static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:158:56-158:77: u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:224:57-224:78: void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:273:10-273:31: struct panfrost_mmu *mmu,
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:288:54-288:75: static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:570:27-570:48: void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
-
drivers/gpu/drm/panfrost/panfrost_mmu.c:575:43-575:64: struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
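panfrost_mmu_ctx_get()/panfrost_mmu_ctx_put() expose a refcounted MMU context; the real driver does this with a struct kref. A minimal user-space sketch of the same get/put discipline, with illustrative names:

```c
#include <stdatomic.h>
#include <stdlib.h>

struct mmu_ctx_sketch {
	atomic_int refcount; /* kref stand-in */
};

static struct mmu_ctx_sketch *ctx_get(struct mmu_ctx_sketch *mmu)
{
	atomic_fetch_add(&mmu->refcount, 1);
	return mmu; /* returning the pointer lets callers chain the get */
}

static void ctx_put(struct mmu_ctx_sketch *mmu)
{
	/* fetch_sub returns the old value; 1 means we dropped the last
	 * reference and must release the context. */
	if (atomic_fetch_sub(&mmu->refcount, 1) == 1)
		free(mmu);
}
```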
-
drivers/iommu/ipmmu-vmsa.c:150:27-150:53: static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
-
drivers/iommu/ipmmu-vmsa.c:178:23-178:49: static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
-
drivers/iommu/ipmmu-vmsa.c:183:25-183:51: static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
-
drivers/iommu/ipmmu-vmsa.c:189:35-189:61: static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:200:27-200:53: static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:206:29-206:55: static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:233:27-233:53: static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
-
drivers/iommu/ipmmu-vmsa.c:238:33-238:59: static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:244:32-244:58: static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:320:42-320:68: static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:340:39-340:65: static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
-
drivers/iommu/ipmmu-vmsa.c:875:32-875:58: static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
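The ipmmu-vmsa accessors above are layered: a raw read/write pair over the device's MMIO window, plus ipmmu_ctx_reg()-style helpers that fold in a per-context register stride before delegating. A sketch of that layering; the offsets, stride fields, and direct pointer dereference are illustrative (the kernel would use ioread32()/iowrite32()):

```c
#include <stdint.h>

struct ipmmu_sketch {
	volatile uint32_t *base;          /* mapped MMIO window */
	unsigned int ctx_offset_base;     /* start of context registers */
	unsigned int ctx_offset_stride;   /* bytes between contexts */
};

static uint32_t mmu_read(struct ipmmu_sketch *mmu, unsigned int offset)
{
	return mmu->base[offset / 4];
}

static void mmu_write(struct ipmmu_sketch *mmu, unsigned int offset,
		      uint32_t data)
{
	mmu->base[offset / 4] = data;
}

/* Per-context registers live at base + stride * context_id. */
static unsigned int ctx_reg(struct ipmmu_sketch *mmu,
			    unsigned int ctx, unsigned int reg)
{
	return mmu->ctx_offset_base + ctx * mmu->ctx_offset_stride + reg;
}

static uint32_t ctx_read(struct ipmmu_sketch *mmu,
			 unsigned int ctx, unsigned int reg)
{
	return mmu_read(mmu, ctx_reg(mmu, ctx, reg));
}
```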
-
drivers/staging/media/atomisp/include/mmu/isp_mmu.h:154:42-154:58: static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
-
drivers/staging/media/atomisp/include/mmu/isp_mmu.h:162:44-162:60: static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:79:38-79:54: static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:85:45-85:61: static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:97:37-97:53: static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:125:29-125:45: static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:142:29-142:45: static void mmu_remap_error(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:161:36-161:52: static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:177:36-177:52: static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:189:35-189:51: static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:199:23-199:39: static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:244:23-244:39: static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:317:20-317:36: static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:363:26-363:42: static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:401:26-401:42: static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:448:23-448:39: static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:474:26-474:42: static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:486:17-486:33: int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:492:20-492:36: void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:498:45-498:61: static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:506:18-506:34: int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
-
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c:539:19-539:35: void isp_mmu_exit(struct isp_mmu *mmu)
-
drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c:31:36-31:52: static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c:37:35-37:51: static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c:45:36-45:52: static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
-
drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c:64:26-64:42: static void sh_tlb_flush(struct isp_mmu *mmu)
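The atomisp pair mmu_l1_map()/mmu_l2_map() walks a classic two-level table: the L1 table points at L2 tables allocated on demand, and the L2 table holds the final page addresses. A self-contained sketch of that walk; the sizes, valid-bit encoding, and use of host pointers instead of physical addresses are simplifications, not atomisp's real layout:

```c
#include <stdint.h>
#include <stdlib.h>

#define PTRS_PER_TABLE 1024
#define PAGE_SHIFT 12

typedef uint32_t pte_t;

static pte_t *alloc_table(void)
{
	return calloc(PTRS_PER_TABLE, sizeof(pte_t)); /* zeroed = invalid */
}

static int map_one(pte_t **l1, uint32_t virt, uint32_t phys)
{
	uint32_t l1_idx = virt >> (PAGE_SHIFT + 10);          /* top 10 bits */
	uint32_t l2_idx = (virt >> PAGE_SHIFT) & (PTRS_PER_TABLE - 1);
	pte_t *l2 = l1[l1_idx];

	if (!l2) {              /* L1 miss: allocate the L2 table on demand */
		l2 = alloc_table();
		if (!l2)
			return -1;
		l1[l1_idx] = l2;
	}
	l2[l2_idx] = (phys & ~((1u << PAGE_SHIFT) - 1)) | 1;  /* valid bit */
	return 0;
}
```

Unmapping (mmu_l1_unmap()/mmu_l2_unmap() above) is the mirror image: clear the L2 entries, and free the L2 table once it holds no valid entries.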
-
drivers/staging/media/ipu3/ipu3-mmu.c:78:37-78:54: static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
-
drivers/staging/media/ipu3/ipu3-mmu.c:83:37-83:54: static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
-
drivers/staging/media/ipu3/ipu3-mmu.c:101:31-101:48: static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
-
drivers/staging/media/ipu3/ipu3-mmu.c:168:31-168:48: static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
-
drivers/staging/media/ipu3/ipu3-mmu.c:210:27-210:44: static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
-
drivers/staging/media/ipu3/ipu3-mmu.c:338:32-338:49: static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
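call_if_imgu_is_powered() above guards TLB maintenance behind the device's power state: the callback runs only if a power reference can be taken, since a powered-off IOMMU has nothing to flush. A hedged sketch of that guard; the refcount stands in for runtime PM, and all names are illustrative:

```c
#include <stdbool.h>

struct imgu_sketch {
	int power_refs; /* stands in for the runtime-PM usage count */
};

/* Take a reference only if the device is already in use; a kernel
 * driver would use a pm_runtime "get if in use" operation here. */
static bool get_if_powered(struct imgu_sketch *mmu)
{
	if (mmu->power_refs == 0)
		return false;
	mmu->power_refs++;
	return true;
}

static void put_power(struct imgu_sketch *mmu)
{
	mmu->power_refs--;
}

static void call_if_powered(struct imgu_sketch *mmu,
			    void (*func)(struct imgu_sketch *mmu))
{
	if (!get_if_powered(mmu))
		return;    /* device is off: the flush can be skipped */
	func(mmu);
	put_power(mmu);
}
```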
variable
Defined...