Diffstat (limited to 'target/linux/brcm2708/patches-4.14/950-0371-staging-bcm2835-camera-Remove-V4L2-MMAL-buffer-remap.patch')
-rw-r--r-- target/linux/brcm2708/patches-4.14/950-0371-staging-bcm2835-camera-Remove-V4L2-MMAL-buffer-remap.patch | 239
1 file changed, 239 insertions(+), 0 deletions(-)
diff --git a/target/linux/brcm2708/patches-4.14/950-0371-staging-bcm2835-camera-Remove-V4L2-MMAL-buffer-remap.patch b/target/linux/brcm2708/patches-4.14/950-0371-staging-bcm2835-camera-Remove-V4L2-MMAL-buffer-remap.patch
new file mode 100644
index 0000000000..c3d5ea7291
--- /dev/null
+++ b/target/linux/brcm2708/patches-4.14/950-0371-staging-bcm2835-camera-Remove-V4L2-MMAL-buffer-remap.patch
@@ -0,0 +1,239 @@
+From 0d3b557ef5494adf6458fe4e6f4a9b41e6e0d12a Mon Sep 17 00:00:00 2001
+From: Dave Stevenson <dave.stevenson@raspberrypi.org>
+Date: Thu, 10 May 2018 12:42:11 -0700
+Subject: [PATCH 371/454] staging: bcm2835-camera: Remove V4L2/MMAL buffer
+ remapping
+
+commit 9384167070713570a25f854d641979e94163c425 upstream
+
+The MMAL and V4L2 buffers had been disassociated and were linked on
+demand. Since both are finite and low in number, and we now have the
+same number of each, link them for the duration. This removes the
+complexity of maintaining lists: the struct mmal_buffer context comes
+back from the VPU, so we can link directly back to the relevant V4L2
+buffer.
+
+Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
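+In short: instead of the completion path taking the next free buffer
+off a per-port list under a spinlock, buffer_from_host() now stores the
+submitted buffer in the message context, and the completion callback
+reads it back directly. A minimal userspace C sketch of that pattern
+(submit(), completion_cb() and the struct names below are illustrative,
+not the driver's actual API):
+
+    #include <stdio.h>
+
+    /* Stand-in for the V4L2 buffer the driver hands to userspace. */
+    struct v4l2_buf { int index; };
+
+    /* Context that travels with a buffer-from-host message and back. */
+    struct msg_context {
+        struct v4l2_buf *buffer;   /* set once at submit time */
+    };
+
+    /* Old scheme: the completion path walked a per-port list to find
+     * a buffer for the reply.  New scheme: submit records the exact
+     * buffer, so the reply already knows where it belongs. */
+    static void submit(struct msg_context *ctx, struct v4l2_buf *buf)
+    {
+        ctx->buffer = buf;         /* valid for the whole transaction */
+    }
+
+    static void completion_cb(struct msg_context *ctx)
+    {
+        /* No list walk or lock needed: the context knows its buffer. */
+        printf("completed buffer %d\n", ctx->buffer->index);
+    }
+
+    int main(void)
+    {
+        struct v4l2_buf buf = { .index = 0 };
+        struct msg_context ctx;
+
+        submit(&ctx, &buf);
+        completion_cb(&ctx);
+        return 0;
+    }
+
+In the patch itself, port->buffers survives only as a holding queue for
+buffers submitted while the port is disabled; port_enable() then drains
+it through buffer_from_host().
+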
+ .../bcm2835-camera/bcm2835-camera.c | 7 +-
+ .../vc04_services/bcm2835-camera/mmal-vchiq.c | 109 ++++--------------
+ 2 files changed, 29 insertions(+), 87 deletions(-)
+
+--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+@@ -301,8 +301,8 @@ static int buffer_prepare(struct vb2_buf
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+
+- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+- __func__, dev);
++ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
++ __func__, dev, vb);
+
+ BUG_ON(!dev->capture.port);
+ BUG_ON(!dev->capture.fmt);
+@@ -522,7 +522,8 @@ static void buffer_queue(struct vb2_buff
+ int ret;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+- "%s: dev:%p buf:%p\n", __func__, dev, buf);
++ "%s: dev:%p buf:%p, idx %u\n",
++ __func__, dev, buf, vb2->vb2_buf.index);
+
+ ret = vchiq_mmal_submit_buffer(dev->instance, dev->capture.port, buf);
+ if (ret < 0)
+--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+@@ -329,16 +329,12 @@ static int bulk_receive(struct vchiq_mma
+ struct mmal_msg_context *msg_context)
+ {
+ unsigned long rd_len;
+- unsigned long flags = 0;
+ int ret;
+
+ rd_len = msg->u.buffer_from_host.buffer_header.length;
+
+- /* take buffer from queue */
+- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
+- if (list_empty(&msg_context->u.bulk.port->buffers)) {
+- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+- pr_err("buffer list empty trying to submit bulk receive\n");
++ if (!msg_context->u.bulk.buffer) {
++ pr_err("bulk.buffer not configured - error in buffer_from_host\n");
+
+ /* todo: this is a serious error, we should never have
+ * committed a buffer_to_host operation to the mmal
+@@ -353,13 +349,6 @@ static int bulk_receive(struct vchiq_mma
+ return -EINVAL;
+ }
+
+- msg_context->u.bulk.buffer =
+- list_entry(msg_context->u.bulk.port->buffers.next,
+- struct mmal_buffer, list);
+- list_del(&msg_context->u.bulk.buffer->list);
+-
+- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+-
+ /* ensure we do not overrun the available buffer */
+ if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
+ rd_len = msg_context->u.bulk.buffer->buffer_size;
+@@ -422,31 +411,6 @@ static int inline_receive(struct vchiq_m
+ struct mmal_msg *msg,
+ struct mmal_msg_context *msg_context)
+ {
+- unsigned long flags = 0;
+-
+- /* take buffer from queue */
+- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
+- if (list_empty(&msg_context->u.bulk.port->buffers)) {
+- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+- pr_err("buffer list empty trying to receive inline\n");
+-
+- /* todo: this is a serious error, we should never have
+- * committed a buffer_to_host operation to the mmal
+- * port without the buffer to back it up (with
+- * underflow handling) and there is no obvious way to
+- * deal with this. Less bad than the bulk case as we
+- * can just drop this on the floor but...unhelpful
+- */
+- return -EINVAL;
+- }
+-
+- msg_context->u.bulk.buffer =
+- list_entry(msg_context->u.bulk.port->buffers.next,
+- struct mmal_buffer, list);
+- list_del(&msg_context->u.bulk.buffer->list);
+-
+- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+-
+ memcpy(msg_context->u.bulk.buffer->buffer,
+ msg->u.buffer_from_host.short_data,
+ msg->u.buffer_from_host.payload_in_message);
+@@ -466,6 +430,9 @@ buffer_from_host(struct vchiq_mmal_insta
+ struct mmal_msg m;
+ int ret;
+
++ if (!port->enabled)
++ return -EINVAL;
++
+ pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
+
+ /* get context */
+@@ -479,7 +446,7 @@ buffer_from_host(struct vchiq_mmal_insta
+ /* store bulk message context for when data arrives */
+ msg_context->u.bulk.instance = instance;
+ msg_context->u.bulk.port = port;
+- msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
++ msg_context->u.bulk.buffer = buf;
+ msg_context->u.bulk.buffer_used = 0;
+
+ /* initialise work structure ready to schedule callback */
+@@ -529,43 +496,6 @@ buffer_from_host(struct vchiq_mmal_insta
+ return ret;
+ }
+
+-/* submit a buffer to the mmal sevice
+- *
+- * the buffer_from_host uses size data from the ports next available
+- * mmal_buffer and deals with there being no buffer available by
+- * incrementing the underflow for later
+- */
+-static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
+- struct vchiq_mmal_port *port)
+-{
+- int ret;
+- struct mmal_buffer *buf;
+- unsigned long flags = 0;
+-
+- if (!port->enabled)
+- return -EINVAL;
+-
+- /* peek buffer from queue */
+- spin_lock_irqsave(&port->slock, flags);
+- if (list_empty(&port->buffers)) {
+- spin_unlock_irqrestore(&port->slock, flags);
+- return -ENOSPC;
+- }
+-
+- buf = list_entry(port->buffers.next, struct mmal_buffer, list);
+-
+- spin_unlock_irqrestore(&port->slock, flags);
+-
+- /* issue buffer to mmal service */
+- ret = buffer_from_host(instance, port, buf);
+- if (ret) {
+- pr_err("adding buffer header failed\n");
+- /* todo: how should this be dealt with */
+- }
+-
+- return ret;
+-}
+-
+ /* deals with receipt of buffer to host message */
+ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg, u32 msg_len)
+@@ -1425,7 +1355,14 @@ static int port_disable(struct vchiq_mma
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
+ if (ret == 0) {
+- /* drain all queued buffers on port */
++ /*
++ * Drain all queued buffers on port. This should only
++ * apply to buffers that have been queued before the port
++ * has been enabled. If the port has been enabled and buffers
++ * passed, then the buffers should have been removed from this
++ * list, and we should get the relevant callbacks via VCHIQ
++ * to release the buffers.
++ */
+ spin_lock_irqsave(&port->slock, flags);
+
+ list_for_each_safe(buf_head, q, &port->buffers) {
+@@ -1454,7 +1391,7 @@ static int port_enable(struct vchiq_mmal
+ struct vchiq_mmal_port *port)
+ {
+ unsigned int hdr_count;
+- struct list_head *buf_head;
++ struct list_head *q, *buf_head;
+ int ret;
+
+ if (port->enabled)
+@@ -1480,7 +1417,7 @@ static int port_enable(struct vchiq_mmal
+ if (port->buffer_cb) {
+ /* send buffer headers to videocore */
+ hdr_count = 1;
+- list_for_each(buf_head, &port->buffers) {
++ list_for_each_safe(buf_head, q, &port->buffers) {
+ struct mmal_buffer *mmalbuf;
+
+ mmalbuf = list_entry(buf_head, struct mmal_buffer,
+@@ -1489,6 +1426,7 @@ static int port_enable(struct vchiq_mmal
+ if (ret)
+ goto done;
+
++ list_del(buf_head);
+ hdr_count++;
+ if (hdr_count > port->current_buffer.num)
+ break;
+@@ -1701,12 +1639,15 @@ int vchiq_mmal_submit_buffer(struct vchi
+ struct mmal_buffer *buffer)
+ {
+ unsigned long flags = 0;
++ int ret;
+
+- spin_lock_irqsave(&port->slock, flags);
+- list_add_tail(&buffer->list, &port->buffers);
+- spin_unlock_irqrestore(&port->slock, flags);
+-
+- port_buffer_from_host(instance, port);
++ ret = buffer_from_host(instance, port, buffer);
++ if (ret == -EINVAL) {
++ /* Port is disabled. Queue for when it is enabled. */
++ spin_lock_irqsave(&port->slock, flags);
++ list_add_tail(&buffer->list, &port->buffers);
++ spin_unlock_irqrestore(&port->slock, flags);
++ }
+
+ return 0;
+ }