#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"
static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
if (kref_read(&vma->vm->ref))
assert_object_held_shared(vma->obj);
}
static struct kmem_cache *slab_vmas;
static struct i915_vma *i915_vma_alloc(void)
{
return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}
static void i915_vma_free(struct i915_vma *vma)
{
kmem_cache_free(slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
char buf[512];
if (!vma->node.stack) {
drm_dbg(vma->obj->base.dev,
"vma.node [%08llx + %08llx] %s: unknown owner\n",
vma->node.start, vma->node.size, reason);
return;
}
stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
drm_dbg(vma->obj->base.dev,
"vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
#else
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}
#endif
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
return container_of(ref, typeof(struct i915_vma), active);
}
static int __i915_vma_active(struct i915_active *ref)
{
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
}
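/*
 * vma_create - allocate a new vma for (obj, vm, view) and insert it into
 * the object's vma rb-tree and the vm's unbound list.
 *
 * Returns the new vma on success. If an equivalent vma was inserted
 * concurrently, the allocation is discarded and the existing vma is
 * returned instead; otherwise an ERR_PTR is returned.
 */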
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_gtt_view *view)
{
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
int err;
GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
if (IS_ENABLED(CONFIG_LOCKDEP)) {
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&vma->active.mutex);
fs_reclaim_release(GFP_KERNEL);
}
INIT_LIST_HEAD(&vma->closed_link);
INIT_LIST_HEAD(&vma->obj_link);
RB_CLEAR_NODE(&vma->obj_node);
if (view && view->type != I915_GTT_VIEW_NORMAL) {
vma->gtt_view = *view;
if (view->type == I915_GTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->partial.offset,
view->partial.size,
obj->base.size >> PAGE_SHIFT));
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size > obj->base.size);
} else if (view->type == I915_GTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
} else if (view->type == I915_GTT_VIEW_REMAPPED) {
vma->size = intel_remapped_info_size(&view->remapped);
vma->size <<= PAGE_SHIFT;
}
}
if (unlikely(vma->size > vm->total))
goto err_vma;
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
err = mutex_lock_interruptible(&vm->mutex);
if (err) {
pos = ERR_PTR(err);
goto err_vma;
}
vma->vm = vm;
list_add_tail(&vma->vm_link, &vm->unbound_list);
spin_lock(&obj->vma.lock);
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_unlock;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
if (unlikely(vma->fence_size < vma->size ||
vma->fence_size > vm->total))
goto err_unlock;
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
rb = NULL;
p = &obj->vma.tree.rb_node;
while (*p) {
long cmp;
rb = *p;
pos = rb_entry(rb, struct i915_vma, obj_node);
cmp = i915_vma_compare(pos, vm, view);
if (cmp < 0)
p = &rb->rb_right;
else if (cmp > 0)
p = &rb->rb_left;
else
goto err_unlock;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma.tree);
if (i915_vma_is_ggtt(vma))
list_add(&vma->obj_link, &obj->vma.list);
else
list_add_tail(&vma->obj_link, &obj->vma.list);
spin_unlock(&obj->vma.lock);
mutex_unlock(&vm->mutex);
return vma;
err_unlock:
spin_unlock(&obj->vma.lock);
list_del_init(&vma->vm_link);
mutex_unlock(&vm->mutex);
err_vma:
i915_vma_free(vma);
return pos;
}
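/* Find a vma of @obj in @vm matching @view; called under obj->vma.lock. */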
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_gtt_view *view)
{
struct rb_node *rb;
rb = obj->vma.tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
if (cmp < 0)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
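/**
 * i915_vma_instance - return the singleton vma for the (obj, vm, view) triple
 * @obj: parent object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * Looks up an existing vma of @obj in @vm with the same @view
 * characteristics, creating one if none is found. Once created, the vma
 * is kept until either the object is freed or the address space is
 * closed.
 *
 * A typical caller (sketch only, error handling and locking elided):
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 *
 * Returns the vma, or an error pointer.
 */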
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_gtt_view *view)
{
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
GEM_BUG_ON(!kref_read(&vm->ref));
spin_lock(&obj->vma.lock);
vma = i915_vma_lookup(obj, vm, view);
spin_unlock(&obj->vma.lock);
if (unlikely(!vma))
vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
return vma;
}
struct i915_vma_work {
struct dma_fence_work base;
struct i915_address_space *vm;
struct i915_vm_pt_stash stash;
struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *obj;
struct i915_sw_dma_fence_cb cb;
unsigned int pat_index;
unsigned int flags;
};
static void __vma_bind(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
struct i915_vma_resource *vma_res = vw->vma_res;
if (i915_gem_object_has_unknown_state(vw->obj))
return;
vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
vma_res, vw->pat_index, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
if (vw->obj)
i915_gem_object_put(vw->obj);
i915_vm_free_pt_stash(vw->vm, &vw->stash);
if (vw->vma_res)
i915_vma_resource_put(vw->vma_res);
}
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
.release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
{
struct i915_vma_work *vw;
vw = kzalloc(sizeof(*vw), GFP_KERNEL);
if (!vw)
return NULL;
dma_fence_work_init(&vw->base, &bind_ops);
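/* Disabled until armed by i915_vma_bind(), which clears the error. */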
vw->base.dma.error = -EAGAIN;
return vw;
}
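/*
 * Wait for any asynchronous binding to complete. The exclusive fence on
 * vma->active tracks the bind worker; no fence means the vma is already
 * bound (or was bound synchronously).
 */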
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
int err = 0;
if (rcu_access_pointer(vma->active.excl.fence)) {
struct dma_fence *fence;
rcu_read_lock();
fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
rcu_read_unlock();
if (fence) {
err = dma_fence_wait(fence, true);
dma_fence_put(fence);
}
}
return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
int err;
if (!fence)
return 0;
if (dma_fence_is_signaled(fence))
err = fence->error;
else
err = -EBUSY;
dma_fence_put(fence);
return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif
I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
obj->mm.rsgt, i915_gem_object_is_readonly(obj),
i915_gem_object_is_lmem(obj), obj->mm.region,
vma->ops, vma->private, __i915_vma_offset(vma),
__i915_vma_size(vma), vma->size, vma->guard);
}
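/**
 * i915_vma_bind - Set up PTEs for the vma in its address space.
 * @vma: vma to map
 * @pat_index: PAT index to set in the PTEs
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTEs
 * @vma_res: pointer to a preallocated vma resource. The resource is
 * either consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object.
 */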
int i915_vma_bind(struct i915_vma *vma,
unsigned int pat_index,
u32 flags,
struct i915_vma_work *work,
struct i915_vma_resource *vma_res)
{
u32 bind_flags;
u32 vma_flags;
int ret;
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > i915_vma_size(vma));
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total))) {
i915_vma_resource_free(vma_res);
return -ENODEV;
}
if (GEM_DEBUG_WARN_ON(!flags)) {
i915_vma_resource_free(vma_res);
return -EINVAL;
}
bind_flags = flags;
bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
vma_flags = atomic_read(&vma->flags);
vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
bind_flags &= ~vma_flags;
if (bind_flags == 0) {
i915_vma_resource_free(vma_res);
return 0;
}
GEM_BUG_ON(!atomic_read(&vma->pages_count));
if (work && bind_flags & vma->vm->bind_async_flags)
ret = i915_vma_resource_bind_dep_await(vma->vm,
&work->base.chain,
vma->node.start,
vma->node.size,
true,
GFP_NOWAIT |
__GFP_RETRY_MAYFAIL |
__GFP_NOWARN);
else
ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
vma->node.size, true);
if (ret) {
i915_vma_resource_free(vma_res);
return ret;
}
if (vma->resource || !vma_res) {
GEM_WARN_ON(!vma_flags);
i915_vma_resource_free(vma_res);
} else {
i915_vma_resource_init_from_vma(vma_res, vma);
vma->resource = vma_res;
}
trace_i915_vma_bind(vma, bind_flags);
if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
work->vma_res = i915_vma_resource_get(vma->resource);
work->pat_index = pat_index;
work->flags = bind_flags;
prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
if (prev) {
__i915_sw_fence_await_dma_fence(&work->base.chain,
prev,
&work->cb);
dma_fence_put(prev);
}
work->base.dma.error = 0;
work->obj = i915_gem_object_get(vma->obj);
} else {
ret = i915_gem_object_wait_moving_fence(vma->obj, true);
if (ret) {
i915_vma_resource_free(vma->resource);
vma->resource = NULL;
return ret;
}
vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
bind_flags);
}
atomic_or(bind_flags, &vma->flags);
return 0;
}
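/**
 * i915_vma_pin_iomap - map the vma for CPU access through the GTT aperture
 * @vma: vma to iomap
 *
 * The vma must already be pinned with a GLOBAL binding. For lmem objects
 * the mapping goes through the object itself; otherwise the GGTT
 * aperture io_mapping is used when the vma is map-and-fenceable, falling
 * back to a WC pin of the object's pages. The returned pointer stays
 * valid while the vma remains pinned; release it with
 * i915_vma_unpin_iomap().
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */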
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
int err;
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
return IOMEM_ERR_PTR(-EINVAL);
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
if (i915_gem_object_is_lmem(vma->obj)) {
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
} else if (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
i915_vma_offset(vma),
i915_vma_size(vma));
} else {
ptr = (void __iomem *)
i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(ptr)) {
err = PTR_ERR(ptr);
goto err;
}
ptr = page_pack_bits(ptr, 1);
}
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
if (page_unmask_bits(ptr))
__i915_gem_object_release_map(vma->obj);
else
io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
__i915_vma_pin(vma);
err = i915_vma_pin_fence(vma);
if (err)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
return page_mask_bits(ptr);
err_unpin:
__i915_vma_unpin(vma);
err:
return IOMEM_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
{
if (i915_vma_unset_ggtt_write(vma))
intel_gt_flush_ggtt_writes(vma->vm->gt);
}
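/**
 * i915_vma_unpin_iomap - unpin the mapping from i915_vma_pin_iomap()
 * @vma: vma to unpin
 *
 * Flushes any pending GGTT writes, then drops the fence and pin taken by
 * i915_vma_pin_iomap(). The iomap itself stays cached on the vma and is
 * torn down at unbind time.
 */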
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
}
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
obj = vma->obj;
GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
if (flags & I915_VMA_RELEASE_MAP)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
}
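/**
 * i915_vma_misplaced - check whether the vma's current placement is usable
 * @vma: vma to check
 * @size: minimum size the node must provide
 * @alignment: required alignment of the node's offset
 * @flags: PIN_* placement constraints to validate against
 *
 * Returns true if the vma would need to be unbound and rebound elsewhere
 * to satisfy the request.
 */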
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
if (!drm_mm_node_allocated(&vma->node))
return false;
if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
return true;
if (i915_vma_size(vma) < size)
return true;
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
return true;
if (flags & PIN_OFFSET_FIXED &&
i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
return true;
if (flags & PIN_OFFSET_GUARD &&
vma->guard < (flags & PIN_OFFSET_MASK))
return true;
return false;
}
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
bool mappable, fenceable;
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
fenceable = (i915_vma_size(vma) >= vma->fence_size &&
IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
mappable = i915_ggtt_offset(vma) + vma->fence_size <=
i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
else
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
if (!i915_vm_has_cache_coloring(vma->vm))
return true;
GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(node))
return false;
return true;
}
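/**
 * i915_vma_insert - find a slot for the vma in its address space
 * @vma: the vma
 * @ww: an optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the vma itself)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First try to allocate free space meeting the requirements; failing
 * that, if the flags permit, evict old vmas to make room. The node is
 * grown by a guard on either side according to vma->guard and
 * PIN_OFFSET_GUARD.
 *
 * Returns 0 on success, negative error code otherwise.
 */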
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
unsigned long color, guard;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
size = max(size, vma->size);
alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
if (flags & PIN_MAPPABLE) {
size = max_t(typeof(size), size, vma->fence_size);
alignment = max_t(typeof(alignment),
alignment, vma->fence_alignment);
}
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(!is_power_of_2(alignment));
guard = vma->guard;
if (flags & PIN_OFFSET_GUARD) {
GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
}
guard = ALIGN(guard, alignment);
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
if (size > end - 2 * guard) {
drm_dbg(vma->obj->base.dev,
"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
return -ENOSPC;
}
color = 0;
if (i915_vm_has_cache_coloring(vma->vm))
color = vma->obj->pat_index;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end))
return -EINVAL;
if (offset < guard || offset + size > end - guard)
return -ENOSPC;
ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
size + 2 * guard,
offset - guard,
color, flags);
if (ret)
return ret;
} else {
size += 2 * guard;
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
!HAS_64K_PAGES(vma->vm->i915)) {
u64 page_alignment =
rounddown_pow_of_two(vma->page_sizes.sg |
I915_GTT_PAGE_SIZE_2M);
GEM_BUG_ON(i915_vma_is_ggtt(vma));
alignment = max(alignment, page_alignment);
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
size, alignment, color,
start, end, flags);
if (ret)
return ret;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
vma->guard = guard;
return 0;
}
static void
i915_vma_detach(struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}
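/*
 * Quick-and-dirty pin: atomically bump the pin count iff the vma is
 * already bound with all the requested flags, avoiding vm->mutex. With
 * PIN_VALIDATE we only check the binding without taking a pin.
 */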
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
unsigned int bound;
bound = atomic_read(&vma->flags);
if (flags & PIN_VALIDATE) {
flags &= I915_VMA_BIND_MASK;
return (flags & bound) == flags;
}
flags &= I915_VMA_BIND_MASK;
do {
if (unlikely(flags & ~bound))
return false;
if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
return false;
GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
return true;
}
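/*
 * Build a rotated (column-major) view of the object's pages: for each
 * destination column, walk the source rows bottom-up emitting one
 * page-sized sg entry per tile, then pad the column to dst_stride with
 * empty (dma address 0) entries.
 */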
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
pgoff_t src_idx;
for (column = 0; column < width; column++) {
unsigned int left;
src_idx = src_stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) =
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= src_stride;
}
left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
if (!left)
continue;
st->nents++;
sg_set_page(sg, NULL, left, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = left;
sg = sg_next(sg);
}
return sg;
}
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_rotation_info_size(rot_info);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
int i;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
ret = sg_alloc_table(st, size, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].src_stride,
rot_info->plane[i].dst_stride,
st, sg);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width,
rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
static struct scatterlist *
add_padding_pages(unsigned int count,
struct sg_table *st, struct scatterlist *sg)
{
st->nents++;
sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
return sg;
}
static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
unsigned long offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
unsigned int row;
if (!width || !height)
return sg;
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
while (left) {
dma_addr_t addr;
unsigned int length;
addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
length = min(left, length);
st->nents++;
sg_set_page(sg, NULL, length, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = length;
sg = sg_next(sg);
offset += length / I915_GTT_PAGE_SIZE;
left -= length;
}
offset += src_stride - width;
left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
if (!left)
continue;
sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
}
*gtt_offset += alignment_pad + dst_stride * height;
return sg;
}
static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
pgoff_t obj_offset,
unsigned int count,
struct sg_table *st, struct scatterlist *sg)
{
struct scatterlist *iter;
unsigned int offset;
iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
GEM_BUG_ON(!iter);
do {
unsigned int len;
len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
st->nents++;
count -= len >> PAGE_SHIFT;
if (count == 0)
return sg;
sg = __sg_next(sg);
iter = __sg_next(iter);
offset = 0;
} while (1);
}
static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
pgoff_t obj_offset, unsigned int alignment_pad,
unsigned int size,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
if (!size)
return sg;
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
sg = sg_next(sg);
*gtt_offset += alignment_pad + size;
return sg;
}
static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj,
int color_plane,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
unsigned int alignment_pad = 0;
if (rem_info->plane_alignment)
alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
if (rem_info->plane[color_plane].linear)
sg = remap_linear_color_plane_pages(obj,
rem_info->plane[color_plane].offset,
alignment_pad,
rem_info->plane[color_plane].size,
st, sg,
gtt_offset);
else
sg = remap_tiled_color_plane_pages(obj,
rem_info->plane[color_plane].offset,
alignment_pad,
rem_info->plane[color_plane].width,
rem_info->plane[color_plane].height,
rem_info->plane[color_plane].src_stride,
rem_info->plane[color_plane].dst_stride,
st, sg,
gtt_offset);
return sg;
}
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_remapped_info_size(rem_info);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
unsigned int gtt_offset = 0;
int ret = -ENOMEM;
int i;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
ret = sg_alloc_table(st, size, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
i915_sg_trim(st);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rem_info->plane[0].width,
rem_info->plane[0].height, size);
return ERR_PTR(ret);
}
static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
struct scatterlist *sg;
unsigned int count = view->partial.size;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
st->nents = 0;
sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
sg_mark_end(sg);
i915_sg_trim(st);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
return ERR_PTR(ret);
}
static int
__i915_vma_get_pages(struct i915_vma *vma)
{
struct sg_table *pages;
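/*
 * vma->pages either borrows obj->mm.pages directly (normal view) or
 * points at a freshly built sg_table describing the rotated, remapped
 * or partial view; __vma_put_pages() frees the latter.
 */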
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
switch (vma->gtt_view.type) {
default:
GEM_BUG_ON(vma->gtt_view.type);
fallthrough;
case I915_GTT_VIEW_NORMAL:
pages = vma->obj->mm.pages;
break;
case I915_GTT_VIEW_ROTATED:
pages =
intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
break;
case I915_GTT_VIEW_REMAPPED:
pages =
intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
break;
case I915_GTT_VIEW_PARTIAL:
pages = intel_partial_pages(&vma->gtt_view, vma->obj);
break;
}
if (IS_ERR(pages)) {
drm_err(&vma->vm->i915->drm,
"Failed to get pages for VMA view type %u (%ld)!\n",
vma->gtt_view.type, PTR_ERR(pages));
return PTR_ERR(pages);
}
vma->pages = pages;
return 0;
}
I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
int err;
if (atomic_add_unless(&vma->pages_count, 1, 0))
return 0;
err = i915_gem_object_pin_pages(vma->obj);
if (err)
return err;
err = __i915_vma_get_pages(vma);
if (err)
goto err_unpin;
vma->page_sizes = vma->obj->mm.page_sizes;
atomic_inc(&vma->pages_count);
return 0;
err_unpin:
__i915_gem_object_unpin_pages(vma->obj);
return err;
}
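/*
 * Before the pages bound by this vma are released, any TLB entries that
 * may still reference them must be invalidated. It only needs to happen
 * once, so record the next full-invalidation seqno for each GT here; the
 * flush itself is performed before the pages are returned to the system.
 */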
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
struct intel_gt *gt;
int id;
if (!tlb)
return;
for_each_gt(gt, vm->i915, id)
WRITE_ONCE(tlb[id],
intel_gt_next_invalidate_tlb_full(gt));
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
if (atomic_sub_return(count, &vma->pages_count) == 0) {
if (vma->pages != vma->obj->mm.pages) {
sg_free_table(vma->pages);
kfree(vma->pages);
}
vma->pages = NULL;
i915_gem_object_unpin_pages(vma->obj);
}
}
I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
if (atomic_add_unless(&vma->pages_count, -1, 1))
return;
__vma_put_pages(vma, 1);
}
static void vma_unbind_pages(struct i915_vma *vma)
{
unsigned int count;
lockdep_assert_held(&vma->vm->mutex);
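/* The upper portion of pages_count is the number of bindings */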
count = atomic_read(&vma->pages_count);
count >>= I915_VMA_PAGES_BIAS;
GEM_BUG_ON(!count);
__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
assert_vma_held(vma);
GEM_BUG_ON(!ww);
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
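/* First try and grab the pin without rebinding the vma */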
if (try_qad_pin(vma, flags))
return 0;
err = i915_vma_get_pages(vma);
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
goto err_rpm;
work = i915_vma_work();
if (!work) {
err = -ENOMEM;
goto err_rpm;
}
work->vm = vma->vm;
err = i915_gem_object_get_moving_fence(vma->obj, &moving);
if (err)
goto err_rpm;
dma_fence_work_chain(&work->base, moving);
if (vma->vm->allocate_va_range) {
err = i915_vm_alloc_pt_stash(vma->vm,
&work->stash,
vma->size);
if (err)
goto err_fence;
err = i915_vm_map_pt_stash(vma->vm, &work->stash);
if (err)
goto err_fence;
}
}
vma_res = i915_vma_resource_alloc();
if (IS_ERR(vma_res)) {
err = PTR_ERR(vma_res);
goto err_fence;
}
err = mutex_lock_interruptible_nested(&vma->vm->mutex,
!(flags & PIN_GLOBAL));
if (err)
goto err_vma_res;
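/* No more allocations allowed now we hold vm->mutex */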
if (unlikely(i915_vma_is_closed(vma))) {
err = -ENOENT;
goto err_unlock;
}
bound = atomic_read(&vma->flags);
if (unlikely(bound & I915_VMA_ERROR)) {
err = -ENOMEM;
goto err_unlock;
}
if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
err = -EAGAIN;
goto err_unlock;
}
if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
if (!(flags & PIN_VALIDATE))
__i915_vma_pin(vma);
goto err_unlock;
}
err = i915_active_acquire(&vma->active);
if (err)
goto err_unlock;
if (!(bound & I915_VMA_BIND_MASK)) {
err = i915_vma_insert(vma, ww, size, alignment, flags);
if (err)
goto err_active;
if (i915_is_ggtt(vma->vm))
__i915_vma_set_map_and_fenceable(vma);
}
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
vma->obj->pat_index,
flags, work, vma_res);
vma_res = NULL;
if (err)
goto err_remove;
GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
if (!(flags & PIN_VALIDATE)) {
__i915_vma_pin(vma);
GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
err_remove:
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
i915_vma_detach(vma);
drm_mm_remove_node(&vma->node);
}
err_active:
i915_active_release(&vma->active);
err_unlock:
mutex_unlock(&vma->vm->mutex);
err_vma_res:
i915_vma_resource_free(vma_res);
err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
i915_vma_put_pages(vma);
return err;
}
static void flush_idle_contexts(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id)
intel_engine_flush_barriers(engine);
intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags)
{
struct i915_address_space *vm = vma->vm;
struct intel_gt *gt;
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
int err;
do {
err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
if (err != -ENOSPC) {
if (!err) {
err = i915_vma_wait_for_bind(vma);
if (err)
i915_vma_unpin(vma);
}
return err;
}
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
flush_idle_contexts(gt);
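/*
 * The GGTT is full; evict everything we can (a NULL ww context is
 * treated as trylock-everything) and retry the pin.
 */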
if (mutex_lock_interruptible(&vm->mutex) == 0) {
i915_gem_evict_vm(vm, NULL, NULL);
mutex_unlock(&vm->mutex);
}
} while (1);
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags)
{
struct i915_gem_ww_ctx _ww;
int err;
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
if (ww)
return __i915_ggtt_pin(vma, ww, align, flags);
lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
for_i915_gem_ww(&_ww, err, true) {
err = i915_gem_object_lock(vma->obj, &_ww);
if (!err)
err = __i915_ggtt_pin(vma, &_ww, align, flags);
}
return err;
}
void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
i915_vma_clear_scanout(vma);
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
}
spin_unlock(&obj->vma.lock);
}
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
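/*
 * We defer actually closing, unbinding and destroying the VMA until
 * the next idle point, or if the object is freed in the meantime. By
 * postponing the unbind, we allow for it to be resurrected by the
 * client, avoiding the work required to rebind the VMA. This is
 * advantageous for DRI, where the client/server pass objects
 * between themselves, temporarily opening a local VMA to the
 * object, and then closing it again. The same object is then reused
 * and eventually destroyed.
 */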
GEM_BUG_ON(i915_vma_is_closed(vma));
list_add(&vma->closed_link, &gt->closed_vma);
}
void i915_vma_close(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
unsigned long flags;
if (i915_vma_is_ggtt(vma))
return;
GEM_BUG_ON(!atomic_read(&vma->open_count));
if (atomic_dec_and_lock_irqsave(&vma->open_count,
&gt->closed_lock,
flags)) {
__vma_close(vma, gt);
spin_unlock_irqrestore(&gt->closed_lock, flags);
}
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
list_del_init(&vma->closed_link);
}
void i915_vma_reopen(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
spin_lock_irq(&gt->closed_lock);
if (i915_vma_is_closed(vma))
__i915_vma_remove_closed(vma);
spin_unlock_irq(&gt->closed_lock);
}
static void force_unbind(struct i915_vma *vma)
{
if (!drm_mm_node_allocated(&vma->node))
return;
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
static void release_references(struct i915_vma *vma, struct intel_gt *gt,
bool vm_ddestroy)
{
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(i915_vma_is_active(vma));
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
if (!RB_EMPTY_NODE(&vma->obj_node))
rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
spin_lock_irq(&gt->closed_lock);
__i915_vma_remove_closed(vma);
spin_unlock_irq(&gt->closed_lock);
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
i915_active_wait(&vma->active);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
}
void i915_vma_destroy_locked(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->mutex);
force_unbind(vma);
list_del_init(&vma->vm_link);
release_references(vma, vma->vm->gt, false);
}
void i915_vma_destroy(struct i915_vma *vma)
{
struct intel_gt *gt;
bool vm_ddestroy;
mutex_lock(&vma->vm->mutex);
force_unbind(vma);
list_del_init(&vma->vm_link);
vm_ddestroy = vma->vm_ddestroy;
vma->vm_ddestroy = false;
gt = vma->vm->gt;
mutex_unlock(&vma->vm->mutex);
release_references(vma, gt, vm_ddestroy);
}
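/*
 * i915_vma_parked - reap closed vmas once the GT is idle
 *
 * Move the closed vmas to a private list under the closed_lock, then
 * destroy each one whose object lock we can take; the rest go back onto
 * gt->closed_vma for a later attempt.
 */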
void i915_vma_parked(struct intel_gt *gt)
{
struct i915_vma *vma, *next;
LIST_HEAD(closed);
spin_lock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
if (!i915_vm_tryget(vm)) {
i915_gem_object_put(obj);
continue;
}
list_move(&vma->closed_link, &closed);
}
spin_unlock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &closed, closed_link) {
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
if (i915_gem_object_trylock(obj, NULL)) {
INIT_LIST_HEAD(&vma->closed_link);
i915_vma_destroy(vma);
i915_gem_object_unlock(obj);
} else {
spin_lock_irq(&gt->closed_lock);
list_add(&vma->closed_link, &gt->closed_vma);
spin_unlock_irq(&gt->closed_lock);
}
i915_gem_object_put(obj);
i915_vm_put(vm);
}
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
GEM_BUG_ON(i915_vma_is_pinned(vma));
if (vma->iomap == NULL)
return;
if (page_unmask_bits(vma->iomap))
__i915_gem_object_release_map(vma->obj);
else
io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
}
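/*
 * Zap any CPU mmaps of the GGTT range backing this vma, so that
 * userspace refaults (and thus revalidates the binding) on next access.
 */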
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
struct drm_vma_offset_node *node;
u64 vma_offset;
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
node = &vma->mmo->vma_node;
vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
i915_vma_unset_userfault(vma);
if (!--vma->obj->userfault_count)
list_del(&vma->obj->userfault_link);
}
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
return __i915_request_await_exclusive(rq, &vma->active);
}
static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
err = __i915_request_await_bind(rq, vma);
if (err)
return err;
return i915_active_add_request(&vma->active, rq);
}
int _i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
struct dma_fence *fence,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
int err;
assert_object_held(obj);
GEM_BUG_ON(!vma->pages);
if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
if (unlikely(err))
return err;
}
err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
struct dma_fence *curr;
int idx;
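/* Walk the (possibly composite) fence only to count its entries. */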
dma_fence_array_for_each(curr, idx, fence)
;
err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
if (unlikely(err))
return err;
}
if (flags & EXEC_OBJECT_WRITE) {
struct intel_frontbuffer *front;
front = i915_gem_object_get_frontbuffer(obj);
if (unlikely(front)) {
if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
i915_active_add_request(&front->write, rq);
intel_frontbuffer_put(front);
}
}
if (fence) {
struct dma_fence *curr;
enum dma_resv_usage usage;
int idx;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
dma_resv_add_fence(vma->obj->base.resv, curr, usage);
}
if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
i915_active_add_request(&vma->fence->active, rq);
obj->read_domains |= I915_GEM_GPU_DOMAINS;
obj->mm.dirty = true;
GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
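/*
 * Tear down the live mapping: revoke CPU mmaps and fences, unmap any
 * iomap, then unbind the PTEs through the vma resource. With @async the
 * unbind is deferred behind the returned fence; otherwise we wait for it
 * and invalidate the TLBs before the pages are released.
 */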
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
struct i915_vma_resource *vma_res = vma->resource;
struct dma_fence *unbind_fence;
GEM_BUG_ON(i915_vma_is_pinned(vma));
assert_vma_held_evict(vma);
if (i915_vma_is_map_and_fenceable(vma)) {
i915_vma_revoke_mmap(vma);
i915_vma_flush_writes(vma);
i915_vma_revoke_fence(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
__i915_vma_iounmap(vma);
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
kref_read(&vma->vm->ref);
vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
if (async)
unbind_fence = i915_vma_resource_unbind(vma_res,
vma->obj->mm.tlb);
else
unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
i915_vma_detach(vma);
if (!async) {
if (unbind_fence) {
dma_fence_wait(unbind_fence, false);
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
}
vma_unbind_pages(vma);
return unbind_fence;
}
int __i915_vma_unbind(struct i915_vma *vma)
{
int ret;
lockdep_assert_held(&vma->vm->mutex);
assert_vma_held_evict(vma);
if (!drm_mm_node_allocated(&vma->node))
return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
ret = i915_vma_sync(vma);
if (ret)
return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
__i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node);
return 0;
}
static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
struct dma_fence *fence;
lockdep_assert_held(&vma->vm->mutex);
if (!drm_mm_node_allocated(&vma->node))
return NULL;
if (i915_vma_is_pinned(vma) ||
&vma->obj->mm.rsgt->table != vma->resource->bi.pages)
return ERR_PTR(-EAGAIN);
if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
I915_ACTIVE_AWAIT_EXCL |
I915_ACTIVE_AWAIT_ACTIVE) < 0) {
return ERR_PTR(-EBUSY);
}
fence = __i915_vma_evict(vma, true);
drm_mm_remove_node(&vma->node);
return fence;
}
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref = 0;
int err;
assert_object_held_shared(vma->obj);
err = i915_vma_sync(vma);
if (err)
return err;
if (!drm_mm_node_allocated(&vma->node))
return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
if (err)
goto out_rpm;
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
out_rpm:
if (wakeref)
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return err;
}
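/**
 * i915_vma_unbind_async - unbind the vma, async version
 * @vma: vma to unbind
 * @trylock_vm: whether to only trylock the vm mutex
 *
 * Does the same as i915_vma_unbind(), except the unbind is deferred
 * behind a fence that is added to the object's reservation object.
 *
 * Return: Zero on success or negative error code on failure.
 */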
int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref = 0;
struct dma_fence *fence;
int err;
assert_object_held(obj);
if (!drm_mm_node_allocated(&vma->node))
return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
if (!obj->mm.rsgt)
return -EBUSY;
err = dma_resv_reserve_fences(obj->base.resv, 2);
if (err)
return -EBUSY;
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
if (trylock_vm && !mutex_trylock(&vm->mutex)) {
err = -EBUSY;
goto out_rpm;
} else if (!trylock_vm) {
err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
if (err)
goto out_rpm;
}
fence = __i915_vma_unbind_async(vma);
mutex_unlock(&vm->mutex);
if (IS_ERR_OR_NULL(fence)) {
err = PTR_ERR_OR_ZERO(fence);
goto out_rpm;
}
dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm:
if (wakeref)
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return err;
}
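/*
 * Convenience wrapper: take the object lock around i915_vma_unbind() for
 * callers that do not already hold it.
 */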
int i915_vma_unbind_unlocked(struct i915_vma *vma)
{
int err;
i915_gem_object_lock(vma->obj, NULL);
err = i915_vma_unbind(vma);
i915_gem_object_unlock(vma->obj);
return err;
}
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
return vma;
}
void i915_vma_make_shrinkable(struct i915_vma *vma)
{
i915_gem_object_make_shrinkable(vma->obj);
}
void i915_vma_make_purgeable(struct i915_vma *vma)
{
i915_gem_object_make_purgeable(vma->obj);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
void i915_vma_module_exit(void)
{
kmem_cache_destroy(slab_vmas);
}
int __init i915_vma_module_init(void)
{
slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!slab_vmas)
return -ENOMEM;
return 0;
}