#include <media/drv-intf/saa7146_vv.h>
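
/* capture 720 pixels per vbi line, two bytes per pixel => 1440 bytes */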
static int vbi_pixel_to_capture = 720 * 2;
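
/*
 * The raw-VBI (BRS) acquisition of the saa7146 is buggy: when capturing
 * via port A, a short rps1 program has to be run twice after programming
 * BRS_CTRL to bring the unit into a usable state (see vbi_begin() below).
 */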
static int vbi_workaround(struct saa7146_dev *dev)
{
        struct saa7146_vv *vv = dev->vv_data;
        u32 *cpu;
        dma_addr_t dma_addr;
        int count = 0;  /* rps1 program index, advanced by WRITE_RPS1() */
        int i;
        DECLARE_WAITQUEUE(wait, current);

        DEB_VBI("dev:%p\n", dev);

        /* scratch buffer for the throw-away capture */
        cpu = dma_alloc_coherent(&dev->pci->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!cpu)
                return -ENOMEM;

        /* set up a minimal video-dma3 transfer into the scratch buffer */
        saa7146_write(dev, BASE_EVEN3, dma_addr);
        saa7146_write(dev, BASE_ODD3, dma_addr + vbi_pixel_to_capture);
        saa7146_write(dev, PROT_ADDR3, dma_addr + 4096);
        saa7146_write(dev, PITCH3, vbi_pixel_to_capture);
        saa7146_write(dev, BASE_PAGE3, 0x0);
        saa7146_write(dev, NUM_LINE_BYTE3, (2 << 16) | (vbi_pixel_to_capture << 0));
        saa7146_write(dev, MC2, MASK_04 | MASK_20);

        /* load the brs-control register */
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL / 4));
        WRITE_RPS1(0xc000008c);
        /* wait for vbi_a or vbi_b */
        if (dev->ext_vv_data->flags & SAA7146_USE_PORT_B_FOR_VBI) {
                DEB_D("...using port b\n");
                WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B);
                WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B);
        } else {
                DEB_D("...using port a\n");
                WRITE_RPS1(CMD_PAUSE | MASK_10);
        }
        /* upload brs */
        WRITE_RPS1(CMD_UPLOAD | MASK_08);
        /* load the brs-control register again */
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL / 4));
        WRITE_RPS1(((1728 - vbi_pixel_to_capture) << 7) | MASK_19);
        /* wait for brs_done */
        WRITE_RPS1(CMD_PAUSE | MASK_08);
        /* upload brs */
        WRITE_RPS1(CMD_UPLOAD | MASK_08);
        /* load video-dma3 NUM_LINE_BYTE3: 2 lines of vbi_pixel_to_capture bytes */
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3 / 4));
        WRITE_RPS1((2 << 16) | vbi_pixel_to_capture);
        /* load the brs-control register one more time */
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL / 4));
        WRITE_RPS1((540 << 7) | (5 << 19));
        /* wait for brs_done */
        WRITE_RPS1(CMD_PAUSE | MASK_08);
        /* upload brs and video-dma3 registers */
        WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04);
        /* enable video-dma3 */
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1 / 4));
        WRITE_RPS1(MASK_20 | MASK_04);
        /* generate an interrupt, then stop rps1 */
        WRITE_RPS1(CMD_INTERRUPT);
        WRITE_RPS1(CMD_STOP);

        /* the workaround has to be run twice to be effective */
        for (i = 0; i < 2; i++) {
                /* indicate to the irq handler that we run the workaround */
                saa7146_write(dev, MC2, MASK_31 | MASK_15);

                saa7146_write(dev, NUM_LINE_BYTE3, (1 << 16) | (2 << 0));
                saa7146_write(dev, MC2, MASK_04 | MASK_20);

                /* enable rps1 irqs, then sleep until the irq handler wakes us */
                SAA7146_IER_ENABLE(dev, MASK_28);
                add_wait_queue(&vv->vbi_wq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);

                /* start rps1 */
                saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
                saa7146_write(dev, MC1, (MASK_13 | MASK_29));

                schedule();

                DEB_VBI("brs bug workaround %d/1\n", i);

                remove_wait_queue(&vv->vbi_wq, &wait);
                __set_current_state(TASK_RUNNING);

                /* disable rps1 irqs and stop video-dma3 */
                SAA7146_IER_DISABLE(dev, MASK_28);
                saa7146_write(dev, MC1, MASK_20);

                if (signal_pending(current)) {
                        DEB_VBI("aborted (rps:0x%08x)\n",
                                saa7146_read(dev, RPS_ADDR1));

                        /* stop rps1 for sure */
                        saa7146_write(dev, MC1, MASK_29);

                        dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
                        return -EINTR;
                }
        }

        dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
        return 0;
}

static void saa7146_set_vbi_capture(struct saa7146_dev *dev,
                                    struct saa7146_buf *buf,
                                    struct saa7146_buf *next)
{
        struct saa7146_vv *vv = dev->vv_data;
        struct saa7146_video_dma vdma3;
        int count = 0;
        unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ?
                CMD_E_FID_A : CMD_E_FID_B;
        unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ?
                CMD_O_FID_A : CMD_O_FID_B;

        /* 16 vbi lines per field, vbi_pixel_to_capture bytes each */
        vdma3.base_even = buf->pt[2].offset;
        vdma3.base_odd = buf->pt[2].offset + 16 * vbi_pixel_to_capture;
        vdma3.prot_addr = buf->pt[2].offset + 16 * 2 * vbi_pixel_to_capture;
        vdma3.pitch = vbi_pixel_to_capture;
        vdma3.base_page = buf->pt[2].dma | ME1;
        vdma3.num_line_byte = (16 << 16) | vbi_pixel_to_capture;

        saa7146_write_out_dma(dev, 3, &vdma3);

        /* write the beginning of the rps1 program */
        count = 0;
        WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC2 / 4));
        WRITE_RPS1(MASK_28 | MASK_12);

        /* turn on video-dma3 */
        WRITE_RPS1(CMD_WR_REG_MASK | (MC1 / 4));
        WRITE_RPS1(MASK_04 | MASK_20);  /* => mask */
        WRITE_RPS1(MASK_04 | MASK_20);  /* => values */

        /* wait for the odd/even field toggles of the sync port */
        WRITE_RPS1(CMD_PAUSE | o_wait);
        WRITE_RPS1(CMD_PAUSE | e_wait);

        /* generate an interrupt, then stop rps1 */
        WRITE_RPS1(CMD_INTERRUPT);
        WRITE_RPS1(CMD_STOP);

        /* enable rps1 irqs and start the program */
        SAA7146_IER_ENABLE(dev, MASK_28);
        saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
        saa7146_write(dev, MC1, (MASK_13 | MASK_29));
}

static int buffer_activate(struct saa7146_dev *dev,
                           struct saa7146_buf *buf,
                           struct saa7146_buf *next)
{
        struct saa7146_vv *vv = dev->vv_data;

        DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next);

        saa7146_set_vbi_capture(dev, buf, next);
        mod_timer(&vv->vbi_dmaq.timeout, jiffies + BUFFER_TIMEOUT);
        return 0;
}

static int queue_setup(struct vb2_queue *q,
                       unsigned int *num_buffers, unsigned int *num_planes,
                       unsigned int sizes[], struct device *alloc_devs[])
{
        /* one buffer holds two fields of 16 vbi lines each */
        unsigned int size = 16 * 2 * vbi_pixel_to_capture;

        if (*num_planes)
                return sizes[0] < size ? -EINVAL : 0;
        *num_planes = 1;
        sizes[0] = size;

        return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct saa7146_dev *dev = vb2_get_drv_priv(vq);
        struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
        unsigned long flags;

        spin_lock_irqsave(&dev->slock, flags);
        saa7146_buffer_queue(dev, &dev->vv_data->vbi_dmaq, buf);
        spin_unlock_irqrestore(&dev->slock, flags);
}

static int buf_init(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
        struct scatterlist *list = sgt->sgl;
        int length = sgt->nents;
        struct vb2_queue *vq = vb->vb2_queue;
        struct saa7146_dev *dev = vb2_get_drv_priv(vq);
        int ret;

        buf->activate = buffer_activate;
        saa7146_pgtable_alloc(dev->pci, &buf->pt[2]);
        ret = saa7146_pgtable_build_single(dev->pci, &buf->pt[2],
                                           list, length);
        if (ret)
                saa7146_pgtable_free(dev->pci, &buf->pt[2]);
        return ret;
}

static int buf_prepare(struct vb2_buffer *vb)
{
        unsigned int size = 16 * 2 * vbi_pixel_to_capture;

        if (vb2_plane_size(vb, 0) < size)
                return -EINVAL;
        vb2_set_plane_payload(vb, 0, size);

        return 0;
}

static void buf_cleanup(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct saa7146_dev *dev = vb2_get_drv_priv(vq);

        saa7146_pgtable_free(dev->pci, &buf->pt[2]);
}

static void return_buffers(struct vb2_queue *q, int state)
{
        struct saa7146_dev *dev = vb2_get_drv_priv(q);
        struct saa7146_dmaqueue *dq = &dev->vv_data->vbi_dmaq;
        struct saa7146_buf *buf;

        if (dq->curr) {
                buf = dq->curr;
                dq->curr = NULL;
                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
        while (!list_empty(&dq->queue)) {
                buf = list_entry(dq->queue.next, struct saa7146_buf, list);
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
}

static void vbi_stop(struct saa7146_dev *dev)
{
        struct saa7146_vv *vv = dev->vv_data;
        unsigned long flags;

        DEB_VBI("dev:%p\n", dev);

        spin_lock_irqsave(&dev->slock, flags);

        /* disable rps1 */
        saa7146_write(dev, MC1, MASK_29);

        /* disable rps1 irqs and shut down video-dma3 */
        SAA7146_IER_DISABLE(dev, MASK_28);
        saa7146_write(dev, MC1, MASK_20);

        del_timer(&vv->vbi_dmaq.timeout);
        del_timer(&vv->vbi_read_timeout);

        spin_unlock_irqrestore(&dev->slock, flags);
}

static void vbi_read_timeout(struct timer_list *t)
{
        struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
        struct saa7146_dev *dev = vv->vbi_dmaq.dev;

        DEB_VBI("dev:%p\n", dev);

        vbi_stop(dev);
}

static int vbi_begin(struct saa7146_dev *dev)
{
        struct saa7146_vv *vv = dev->vv_data;
        u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);
        int ret = 0;

        DEB_VBI("dev:%p\n", dev);

        ret = saa7146_res_get(dev, RESOURCE_DMA3_BRS);
        if (!ret) {
                DEB_S("cannot get vbi RESOURCE_DMA3_BRS resource\n");
                return -EBUSY;
        }

        /* adjust arbitration control for video-dma3 */
        arbtr_ctrl &= ~0x1f0000;
        arbtr_ctrl |= 0x1d0000;
        saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);
        saa7146_write(dev, MC2, (MASK_04 | MASK_20));

        vv->vbi_read_timeout.function = vbi_read_timeout;

        /* initialize the brs */
        if (dev->ext_vv_data->flags & SAA7146_USE_PORT_B_FOR_VBI) {
                saa7146_write(dev, BRS_CTRL, MASK_30 | MASK_29 | (7 << 19));
        } else {
                saa7146_write(dev, BRS_CTRL, 0x00000001);
                ret = vbi_workaround(dev);
                if (ret)
                        DEB_VBI("vbi workaround failed!\n");
        }
        /* upload brs register */
        saa7146_write(dev, MC2, (MASK_08 | MASK_24));

        return 0;
}

static int start_streaming(struct vb2_queue *q, unsigned int count)
{
        struct saa7146_dev *dev = vb2_get_drv_priv(q);
        int ret;

        if (!vb2_is_streaming(&dev->vv_data->vbi_dmaq.q))
                dev->vv_data->seqnr = 0;
        ret = vbi_begin(dev);
        if (ret)
                return_buffers(q, VB2_BUF_STATE_QUEUED);
        return ret;
}

static void stop_streaming(struct vb2_queue *q)
{
        struct saa7146_dev *dev = vb2_get_drv_priv(q);

        vbi_stop(dev);
        return_buffers(q, VB2_BUF_STATE_ERROR);
        saa7146_res_free(dev, RESOURCE_DMA3_BRS);
}

const struct vb2_ops vbi_qops = {
        .queue_setup     = queue_setup,
        .buf_queue       = buf_queue,
        .buf_init        = buf_init,
        .buf_prepare     = buf_prepare,
        .buf_cleanup     = buf_cleanup,
        .start_streaming = start_streaming,
        .stop_streaming  = stop_streaming,
        .wait_prepare    = vb2_ops_wait_prepare,
        .wait_finish     = vb2_ops_wait_finish,
};

static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
        DEB_VBI("dev:%p\n", dev);

        INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
        timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0);
        vv->vbi_dmaq.dev = dev;

        init_waitqueue_head(&vv->vbi_wq);
}

static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status)
{
        struct saa7146_vv *vv = dev->vv_data;

        spin_lock(&dev->slock);

        if (vv->vbi_dmaq.curr) {
                DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr);
                saa7146_buffer_finish(dev, &vv->vbi_dmaq, VB2_BUF_STATE_DONE);
        } else {
                DEB_VBI("dev:%p\n", dev);
        }
        saa7146_buffer_next(dev, &vv->vbi_dmaq, 1);

        spin_unlock(&dev->slock);
}

const struct saa7146_use_ops saa7146_vbi_uops = {
        .init     = vbi_init,
        .irq_done = vbi_irq_done,
};