#ifndef __AMDGPU_VCE_H__
#define __AMDGPU_VCE_H__

#include "amdgpu_ring.h"

#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_FIRMWARE_OFFSET	256

/* bits in harvest_config marking fused-off VCE instances */
#define AMDGPU_VCE_HARVEST_VCE0	(1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1	(1 << 1)

/* firmware version 53.45, packed as (major << 24) | (minor << 16) */
#define AMDGPU_VCE_FW_53_45	((53 << 24) | (45 << 16))
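
/*
 * With major and minor packed into the top bytes, a minimum-firmware check
 * reduces to a plain integer compare. A minimal sketch (illustrative; the
 * real checks live in the per-generation vce_v*_0.c files):
 *
 *	if (adev->vce.fw_version >= AMDGPU_VCE_FW_53_45)
 *		... firmware is 53.45 or newer ...
 */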

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;	/* firmware BO */
	uint64_t		gpu_addr;
	void			*cpu_addr;
	void			*saved_bo;	/* BO contents saved across suspend */
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;	/* delayed power-down */
	struct mutex		idle_mutex;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
	unsigned		harvest_config;	/* AMDGPU_VCE_HARVEST_* mask */
	struct drm_sched_entity	entity;
	uint32_t		srbm_soft_reset;
	unsigned		num_rings;
};
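
/*
 * A single instance of this struct lives in struct amdgpu_device (as
 * adev->vce). A minimal sketch of how the per-session arrays pair up
 * (illustrative, assuming the usual slot scheme): handles[i] holds an open
 * encode session handle and filp[i] the DRM client that created it, so a
 * session lookup is roughly
 *
 *	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
 *		if (atomic_read(&adev->vce.handles[i]) == handle)
 *			break;	... session slot found ...
 */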
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
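
/*
 * The setup/teardown helpers above are called from the IP-block hooks of a
 * specific hardware generation (vce_v2_0.c .. vce_v4_0.c). A rough,
 * illustrative ordering:
 *
 *	.sw_init  -> amdgpu_vce_sw_init() then amdgpu_vce_resume()
 *	.suspend  -> amdgpu_vce_suspend()
 *	.resume   -> amdgpu_vce_resume()
 *	.sw_fini  -> amdgpu_vce_sw_fini()
 *
 * amdgpu_vce_free_handles() is the postclose path: it drops any sessions a
 * DRM client left open when its file descriptor closes.
 */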
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			     struct amdgpu_ib *ib);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
			     struct amdgpu_ib *ib, uint32_t flags);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags);
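
/*
 * amdgpu_vce_ring_parse_cs() fully validates a user command stream for
 * rings running in physical address mode; amdgpu_vce_ring_parse_cs_vm() is
 * the lighter patching pass used when the ring operates in VM mode. A
 * hardware generation plugs the physical-mode variant into its ring
 * function table, e.g. (illustrative):
 *
 *	.parse_cs = amdgpu_vce_ring_parse_cs,
 */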
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
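
/*
 * The test and use-tracking helpers are meant to be wired into the ring
 * function table; begin_use/end_use bracket submissions so idle_work can
 * power the block down once no ring is busy. Sketch (illustrative; the
 * real tables live in vce_v*_0.c):
 *
 *	static const struct amdgpu_ring_funcs vce_ring_funcs = {
 *		...
 *		.test_ring = amdgpu_vce_ring_test_ring,
 *		.test_ib = amdgpu_vce_ring_test_ib,
 *		.begin_use = amdgpu_vce_ring_begin_use,
 *		.end_use = amdgpu_vce_ring_end_use,
 *	};
 */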
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring);
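
/*
 * amdgpu_vce_get_ring_prio() maps a ring index to a scheduler priority
 * level for ring initialization. A minimal sketch, assuming the modern
 * amdgpu_ring_init() signature (illustrative only):
 *
 *	enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
 *
 *	r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
 *			     hw_prio, NULL);
 */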
#endif