#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)
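
/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 */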
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
	if (r)
		return r;

	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}
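
/* Allocate a new job for @count PTE updates */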
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
				    unsigned int count)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
		: &p->vm->delayed;
	unsigned int ndw;
	int r;

	/* estimate how many dw we need */
	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	if (p->pages_addr)
		ndw += count * 2;
	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
				     ndw * 4, pool, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;
	return 0;
}
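
/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */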
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	struct amdgpu_sync sync;
	int r;

	r = amdgpu_vm_sdma_alloc_job(p, 0);
	if (r)
		return r;

	if (!resv)
		return 0;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
	if (!r)
		r = amdgpu_sync_push_to_job(&sync, p->job);
	amdgpu_sync_free(&sync);

	return r;
}
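
/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */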
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct amdgpu_ring *ring;
	struct dma_fence *f;

	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
			    sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	f = amdgpu_job_submit(p->job);

	if (p->unlocked) {
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

	if (fence && !p->immediate) {
		/*
		 * Most hw generations now have a separate queue for page table
		 * updates, but when the queue is shared with userspace we need
		 * the extra CPU round trip to correctly flush the TLB.
		 */
		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
		swap(*fence, f);
	}
	dma_fence_put(f);
	return 0;
}
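
/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Copy the PTEs from the GART.
 */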
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
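
/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic function
 * to set up the page table using the DMA.
 */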
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}
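
/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reuse the same DMA buffer for multiple updates.
 */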
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	struct amdgpu_bo *bo = &vmbo->bo;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&p->job->base, fence);
		if (r) {
			dma_fence_put(fence);
			dma_resv_iter_end(&cursor);
			return r;
		}
	}
	dma_resv_iter_end(&cursor);

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			r = amdgpu_vm_sdma_alloc_job(p, count);
			if (r)
				return r;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (vmbo->shadow)
				amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe,
							addr, count, incr,
							flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(vmbo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (vmbo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
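
/*
 * Usage sketch (illustrative only, not a verbatim call site): the VM code
 * in amdgpu_vm.c reaches these callbacks through vm->update_funcs rather
 * than calling the static functions above directly. Assuming locals named
 * adev, vm, resv, sync_mode, vmbo, pe, addr, count, incr and flags have
 * already been set up by the caller, a minimal sequence looks like:
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev,
 *		.vm = vm,
 *		.immediate = false,
 *	};
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, vmbo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 *	dma_fence_put(fence);
 */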