Diffstat (limited to 'target/linux/bcm27xx/patches-4.19/950-0208-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch')
-rw-r--r--  target/linux/bcm27xx/patches-4.19/950-0208-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch  197
1 file changed, 197 insertions, 0 deletions
diff --git a/target/linux/bcm27xx/patches-4.19/950-0208-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch b/target/linux/bcm27xx/patches-4.19/950-0208-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch
new file mode 100644
index 0000000000..9d2ae1bb3a
--- /dev/null
+++ b/target/linux/bcm27xx/patches-4.19/950-0208-staging-bcm2835-camera-Do-not-bulk-receive-from-serv.patch
@@ -0,0 +1,197 @@
+From 522f1499310d389e663a4e8dd0ccbb916b768766 Mon Sep 17 00:00:00 2001
+From: Dave Stevenson <dave.stevenson@raspberrypi.org>
+Date: Wed, 14 Feb 2018 17:04:26 +0000
+Subject: [PATCH] staging: bcm2835-camera: Do not bulk receive from
+ service thread
+
+vchi_bulk_queue_receive will queue up to a default of 4
+bulk receives on a connection before blocking.
+If called from the VCHI service_callback thread, then
+that thread is unable to service the VCHI_CALLBACK_BULK_RECEIVED
+events that would enable the queue call to succeed.
+
+Add a workqueue to schedule the call to vchi_bulk_queue_receive
+in an alternate context, avoiding the lockup.
+
+Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
+---
+ .../vc04_services/bcm2835-camera/mmal-vchiq.c | 101 ++++++++++--------
+ 1 file changed, 59 insertions(+), 42 deletions(-)
+
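The fix follows a common kernel pattern: hand the blocking call off to an ordered workqueue so the event-delivery thread never has to sleep. Below is a minimal, self-contained sketch of that pattern using hypothetical names (`defer_ctx`, `deferred_rx_cb`, `example-bulk`); the driver's real code follows in the diff.

```c
// Sketch only: hypothetical names, not the mmal-vchiq implementation.
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct defer_ctx {
	struct work_struct work;
	/* per-message state (buffer pointer, length, ...) would live here */
};

static struct workqueue_struct *bulk_wq;

static void deferred_rx_cb(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);

	/* Blocking is safe here: this runs on bulk_wq, not on the VCHI
	 * service thread, so completion events can still be delivered
	 * while we wait for a bulk-receive slot to free up.
	 */
	kfree(ctx);
}

/* What a non-blockable service callback would do: hand off and return. */
static void on_service_event(void)
{
	struct defer_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return;
	INIT_WORK(&ctx->work, deferred_rx_cb);
	queue_work(bulk_wq, &ctx->work);
}

static int __init example_init(void)
{
	/* Ordered: one item runs at a time, in submission order, which
	 * preserves the buffer ordering the remote side expects.
	 */
	bulk_wq = alloc_ordered_workqueue("example-bulk", WQ_MEM_RECLAIM);
	if (!bulk_wq)
		return -ENOMEM;
	on_service_event();
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(bulk_wq); /* drains pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```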
+--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+@@ -118,8 +118,10 @@ struct mmal_msg_context {
+
+ union {
+ struct {
+- /* work struct for defered callback - must come first */
++ /* work struct for buffer_cb callback */
+ struct work_struct work;
++ /* work struct for deferred callback */
++ struct work_struct buffer_to_host_work;
+ /* mmal instance */
+ struct vchiq_mmal_instance *instance;
+ /* mmal port */
+@@ -168,6 +170,9 @@ struct vchiq_mmal_instance {
+ /* component to use next */
+ int component_idx;
+ struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
++
++ /* ordered workqueue to process all bulk operations */
++ struct workqueue_struct *bulk_wq;
+ };
+
+ static struct mmal_msg_context *
+@@ -251,7 +256,44 @@ static void buffer_work_cb(struct work_s
+ msg_context->u.bulk.mmal_flags,
+ msg_context->u.bulk.dts,
+ msg_context->u.bulk.pts);
++}
+
++/* workqueue scheduled callback to handle receiving buffers
++ *
++ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
++ * If we block in the service_callback context then we can't process the
++ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
++ * vchi_bulk_queue_receive() call to complete.
++ */
++static void buffer_to_host_work_cb(struct work_struct *work)
++{
++ struct mmal_msg_context *msg_context =
++ container_of(work, struct mmal_msg_context,
++ u.bulk.buffer_to_host_work);
++ struct vchiq_mmal_instance *instance = msg_context->instance;
++ unsigned long len = msg_context->u.bulk.buffer_used;
++ int ret;
++
++ if (!len)
++ /* Dummy receive to ensure the buffers remain in order */
++ len = 8;
++ /* queue the bulk submission */
++ vchi_service_use(instance->handle);
++ ret = vchi_bulk_queue_receive(instance->handle,
++ msg_context->u.bulk.buffer->buffer,
++ /* Actual receive needs to be a multiple
++ * of 4 bytes
++ */
++ (len + 3) & ~3,
++ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
++ VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
++ msg_context);
++
++ vchi_service_release(instance->handle);
++
++ if (ret != 0)
++ pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
++ __func__, msg_context, ret);
+ }
+
+ /* enqueue a bulk receive for a given message context */
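A side note on the arithmetic in buffer_to_host_work_cb() above: `(len + 3) & ~3` rounds the receive length up to the next multiple of 4, which is why a zero-length (dummy) transfer has to be bumped to 8 explicitly beforehand. A small illustration, equivalent to the kernel's `ALIGN(len, 4)`:

```c
/* (len + 3) & ~3 rounds up to the next multiple of 4:
 *   len = 0  -> 0   (hence the explicit bump to 8 for dummy receives)
 *   len = 1  -> 4
 *   len = 13 -> 16
 *   len = 16 -> 16
 */
static inline unsigned long round_up_to_4(unsigned long len)
{
	return (len + 3) & ~3UL;
}
```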
+@@ -260,7 +302,6 @@ static int bulk_receive(struct vchiq_mma
+ struct mmal_msg_context *msg_context)
+ {
+ unsigned long rd_len;
+- int ret;
+
+ rd_len = msg->u.buffer_from_host.buffer_header.length;
+
+@@ -294,45 +335,10 @@ static int bulk_receive(struct vchiq_mma
+ msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
+ msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
+
+- /* queue the bulk submission */
+- vchi_service_use(instance->handle);
+- ret = vchi_bulk_queue_receive(instance->handle,
+- msg_context->u.bulk.buffer->buffer,
+- /* Actual receive needs to be a multiple
+- * of 4 bytes
+- */
+- (rd_len + 3) & ~3,
+- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+- msg_context);
+-
+- vchi_service_release(instance->handle);
++ queue_work(msg_context->instance->bulk_wq,
++ &msg_context->u.bulk.buffer_to_host_work);
+
+- return ret;
+-}
+-
+-/* enque a dummy bulk receive for a given message context */
+-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
+- struct mmal_msg_context *msg_context)
+-{
+- int ret;
+-
+- /* zero length indicates this was a dummy transfer */
+- msg_context->u.bulk.buffer_used = 0;
+-
+- /* queue the bulk submission */
+- vchi_service_use(instance->handle);
+-
+- ret = vchi_bulk_queue_receive(instance->handle,
+- instance->bulk_scratch,
+- 8,
+- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+- msg_context);
+-
+- vchi_service_release(instance->handle);
+-
+- return ret;
++ return 0;
+ }
+
+ /* data in message, memcpy from packet into output buffer */
+@@ -380,6 +386,8 @@ buffer_from_host(struct vchiq_mmal_insta
+
+ /* initialise work structure ready to schedule callback */
+ INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
++ INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
++ buffer_to_host_work_cb);
+
+ atomic_inc(&port->buffers_with_vpu);
+
+@@ -465,7 +473,7 @@ static void buffer_to_host_cb(struct vch
+ if (msg->u.buffer_from_host.buffer_header.flags &
+ MMAL_BUFFER_HEADER_FLAG_EOS) {
+ msg_context->u.bulk.status =
+- dummy_bulk_receive(instance, msg_context);
++ bulk_receive(instance, msg, msg_context);
+ if (msg_context->u.bulk.status == 0)
+ return; /* successful bulk submission, bulk
+ * completion will trigger callback
+@@ -1789,6 +1797,9 @@ int vchiq_mmal_finalise(struct vchiq_mma
+
+ mutex_unlock(&instance->vchiq_mutex);
+
++ flush_workqueue(instance->bulk_wq);
++ destroy_workqueue(instance->bulk_wq);
++
+ vfree(instance->bulk_scratch);
+
+ idr_destroy(&instance->context_map);
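The teardown added above flushes the bulk workqueue so any deferred receive has finished, then destroys it, and only afterwards frees state the work callbacks may have used. On this kernel destroy_workqueue() itself drains pending work, so the explicit flush chiefly documents the ordering. A sketch of that ordering with a hypothetical instance type:

```c
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

/* Hypothetical instance for illustration only. */
struct my_instance {
	struct workqueue_struct *bulk_wq;
	void *bulk_scratch;
};

static void my_finalise(struct my_instance *inst)
{
	flush_workqueue(inst->bulk_wq);   /* let queued bulk work finish */
	destroy_workqueue(inst->bulk_wq); /* drains again, frees the wq */

	vfree(inst->bulk_scratch);        /* safe: no work item can run now */
}
```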
+@@ -1858,6 +1869,11 @@ int vchiq_mmal_init(struct vchiq_mmal_in
+
+ params.callback_param = instance;
+
++ instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
++ WQ_MEM_RECLAIM);
++ if (!instance->bulk_wq)
++ goto err_free;
++
+ status = vchi_service_open(vchi_instance, &params, &instance->handle);
+ if (status) {
+ pr_err("Failed to open VCHI service connection (status=%d)\n",
+@@ -1872,8 +1888,9 @@ int vchiq_mmal_init(struct vchiq_mmal_in
+ return 0;
+
+ err_close_services:
+-
+ vchi_service_close(instance->handle);
++ destroy_workqueue(instance->bulk_wq);
++err_free:
+ vfree(instance->bulk_scratch);
+ kfree(instance);
+ return -ENODEV;
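The init hunk threads a new err_free label beneath the existing err_close_services unwind, so the workqueue allocated before vchi_service_open() is destroyed on every later failure path. The general shape of that goto ladder, condensed with hypothetical helpers:

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_instance {
	struct workqueue_struct *bulk_wq;
};

static int my_open_service(struct my_instance *inst)
{
	return 0; /* stand-in for vchi_service_open() */
}

static int my_init(struct my_instance **out)
{
	struct my_instance *inst = kzalloc(sizeof(*inst), GFP_KERNEL);

	if (!inst)
		return -ENOMEM;

	inst->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
						WQ_MEM_RECLAIM);
	if (!inst->bulk_wq)
		goto err_free;

	if (my_open_service(inst))
		goto err_destroy_wq;

	*out = inst;
	return 0;

	/* Each label undoes only what was set up before the jump to it. */
err_destroy_wq:
	destroy_workqueue(inst->bulk_wq);
err_free:
	kfree(inst);
	return -ENODEV;
}
```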