author     Felix Fietkau <nbd@openwrt.org>    2015-09-24 16:07:34 +0000
committer  Felix Fietkau <nbd@openwrt.org>    2015-09-24 16:07:34 +0000
commit     ad5f485a4bff3f7458d80dd4775878f7081bb2b5 (patch)
tree       52c203c957e5e4c95c158290b7a2fdda2f863b09 /target
parent     d0a48c1d4292773bb676d5eb89c036a19b606bce (diff)
download   upstream-ad5f485a4bff3f7458d80dd4775878f7081bb2b5.tar.gz
           upstream-ad5f485a4bff3f7458d80dd4775878f7081bb2b5.tar.bz2
           upstream-ad5f485a4bff3f7458d80dd4775878f7081bb2b5.zip
kernel: backport some SPI layer improvements from 4.1 to 3.18, significantly improves flash speed on ramips
Signed-off-by: Felix Fietkau <nbd@openwrt.org>

Backport of r47045

git-svn-id: svn://svn.openwrt.org/openwrt/branches/chaos_calmer@47046 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target')
-rw-r--r--  target/linux/generic/patches-3.18/092-01-spi-Check-to-see-if-the-device-is-processing-a-messa.patch |  47
-rw-r--r--  target/linux/generic/patches-3.18/092-02-spi-Pump-transfers-inside-calling-context-for-spi_sy.patch | 184
-rw-r--r--  target/linux/generic/patches-3.18/092-03-spi-Only-idle-the-message-pump-in-the-worker-kthread.patch |  83
3 files changed, 314 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/patches-3.18/092-01-spi-Check-to-see-if-the-device-is-processing-a-messa.patch b/target/linux/generic/patches-3.18/092-01-spi-Check-to-see-if-the-device-is-processing-a-messa.patch
new file mode 100644
index 0000000000..fa3ab6a638
--- /dev/null
+++ b/target/linux/generic/patches-3.18/092-01-spi-Check-to-see-if-the-device-is-processing-a-messa.patch
@@ -0,0 +1,47 @@
+From: Mark Brown <broonie@kernel.org>
+Date: Tue, 9 Dec 2014 19:46:56 +0000
+Subject: [PATCH] spi: Check to see if the device is processing a message
+ before we idle
+
+cur_msg is updated under the queue lock and holds the message we are
+currently processing. Since currently we only ever do removals in the
+pump kthread it doesn't matter in what order we do things but we want
+to be able to push things out from the submitting thread so pull the
+check to see if we're currently handling a message before we check to
+see if the queue is idle.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -891,8 +891,16 @@ static void spi_pump_messages(struct kth
+ bool was_busy = false;
+ int ret;
+
+- /* Lock queue and check for queue work */
++ /* Lock queue */
+ spin_lock_irqsave(&master->queue_lock, flags);
++
++ /* Make sure we are not already running a message */
++ if (master->cur_msg) {
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return;
++ }
++
++ /* Check if the queue is idle */
+ if (list_empty(&master->queue) || !master->running) {
+ if (!master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+@@ -916,11 +924,6 @@ static void spi_pump_messages(struct kth
+ return;
+ }
+
+- /* Make sure we are not already running a message */
+- if (master->cur_msg) {
+- spin_unlock_irqrestore(&master->queue_lock, flags);
+- return;
+- }
+ /* Extract head of queue */
+ master->cur_msg =
+ list_first_entry(&master->queue, struct spi_message, queue);
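
The hunk above only reorders two existing checks, but that ordering is what makes the follow-up patches safe: every access to master->cur_msg and the message queue happens under master->queue_lock, so whichever context takes the lock first decides what runs and any other caller bails out early. Below is a minimal sketch of that check-under-lock idiom as it looks after the patch; it is illustrative only (example_pump is not a real kernel function) and elides the teardown and driver hand-off details.

#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Illustrative sketch only: mirrors the check order introduced above.
 * A message already in flight is detected before the idle path is
 * considered, so a submitting context cannot tear down a busy bus. */
static void example_pump(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	/* Someone (kthread or submitter) is already mid-message: leave. */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Nothing queued or queue stopped: the idle/teardown path runs here. */
	if (list_empty(&master->queue) || !master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Otherwise take the head of the queue, as the real code does. */
	master->cur_msg = list_first_entry(&master->queue,
					   struct spi_message, queue);
	list_del_init(&master->cur_msg->queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	/* ... hand master->cur_msg to the controller driver ... */
}
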
diff --git a/target/linux/generic/patches-3.18/092-02-spi-Pump-transfers-inside-calling-context-for-spi_sy.patch b/target/linux/generic/patches-3.18/092-02-spi-Pump-transfers-inside-calling-context-for-spi_sy.patch
new file mode 100644
index 0000000000..ddfb060629
--- /dev/null
+++ b/target/linux/generic/patches-3.18/092-02-spi-Pump-transfers-inside-calling-context-for-spi_sy.patch
@@ -0,0 +1,184 @@
+From: Mark Brown <broonie@kernel.org>
+Date: Tue, 9 Dec 2014 21:38:05 +0000
+Subject: [PATCH] spi: Pump transfers inside calling context for spi_sync()
+
+If we are using the standard SPI message pump (which all drivers should be
+transitioning over to) then special case the message enqueue and instead of
+starting the worker thread to push messages to the hardware do so in the
+context of the caller if the controller is idle. This avoids a context
+switch in the common case where the controller has a single user in a
+single thread, for short PIO transfers there may be no need to context
+switch away from the calling context to complete the transfer.
+
+The code is a bit more complex than is desirable in part due to the need
+to handle drivers not using the standard queue and in part due to handling
+the various combinations of bus locking and asynchronous submission in
+interrupt context.
+
+It is still suboptimal since it will still wake the message pump for each
+transfer in order to schedule idling of the hardware and if multiple
+contexts are using the controller simultaneously a caller may end up
+pumping a message for some random other thread rather than for itself,
+and if the thread ends up deferring due to another context idling the
+hardware then it will just busy wait. It can, however, have the benefit
+of aggregating power up and down of the hardware when a caller performs
+a series of transfers back to back without any need for the use of
+spi_async().
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -882,6 +882,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_t
+ * needs processing and if so call out to the driver to initialize hardware
+ * and transfer each message.
+ *
++ * Note that it is called both from the kthread itself and also from
++ * inside spi_sync(); the queue extraction handling at the top of the
++ * function should deal with this safely.
+ */
+ static void spi_pump_messages(struct kthread_work *work)
+ {
+@@ -900,6 +903,13 @@ static void spi_pump_messages(struct kth
+ return;
+ }
+
++ /* If another context is idling the device then defer */
++ if (master->idling) {
++ queue_kthread_work(&master->kworker, &master->pump_messages);
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return;
++ }
++
+ /* Check if the queue is idle */
+ if (list_empty(&master->queue) || !master->running) {
+ if (!master->busy) {
+@@ -907,7 +917,9 @@ static void spi_pump_messages(struct kth
+ return;
+ }
+ master->busy = false;
++ master->idling = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
++
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+@@ -921,6 +933,10 @@ static void spi_pump_messages(struct kth
+ pm_runtime_put_autosuspend(master->dev.parent);
+ }
+ trace_spi_master_idle(master);
++
++ spin_lock_irqsave(&master->queue_lock, flags);
++ master->idling = false;
++ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+@@ -1166,12 +1182,9 @@ static int spi_destroy_queue(struct spi_
+ return 0;
+ }
+
+-/**
+- * spi_queued_transfer - transfer function for queued transfers
+- * @spi: spi device which is requesting transfer
+- * @msg: spi message which is to handled is queued to driver queue
+- */
+-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
++static int __spi_queued_transfer(struct spi_device *spi,
++ struct spi_message *msg,
++ bool need_pump)
+ {
+ struct spi_master *master = spi->master;
+ unsigned long flags;
+@@ -1186,13 +1199,23 @@ static int spi_queued_transfer(struct sp
+ msg->status = -EINPROGRESS;
+
+ list_add_tail(&msg->queue, &master->queue);
+- if (!master->busy)
++ if (!master->busy && need_pump)
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return 0;
+ }
+
++/**
++ * spi_queued_transfer - transfer function for queued transfers
++ * @spi: spi device which is requesting transfer
++ * @msg: spi message which is to handled is queued to driver queue
++ */
++static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ return __spi_queued_transfer(spi, msg, true);
++}
++
+ static int spi_master_initialize_queue(struct spi_master *master)
+ {
+ int ret;
+@@ -2105,19 +2128,46 @@ static int __spi_sync(struct spi_device
+ DECLARE_COMPLETION_ONSTACK(done);
+ int status;
+ struct spi_master *master = spi->master;
++ unsigned long flags;
++
++ status = __spi_validate(spi, message);
++ if (status != 0)
++ return status;
+
+ message->complete = spi_complete;
+ message->context = &done;
++ message->spi = spi;
+
+ if (!bus_locked)
+ mutex_lock(&master->bus_lock_mutex);
+
+- status = spi_async_locked(spi, message);
++ /* If we're not using the legacy transfer method then we will
++ * try to transfer in the calling context so special case.
++ * This code would be less tricky if we could remove the
++ * support for driver implemented message queues.
++ */
++ if (master->transfer == spi_queued_transfer) {
++ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
++
++ trace_spi_message_submit(message);
++
++ status = __spi_queued_transfer(spi, message, false);
++
++ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
++ } else {
++ status = spi_async_locked(spi, message);
++ }
+
+ if (!bus_locked)
+ mutex_unlock(&master->bus_lock_mutex);
+
+ if (status == 0) {
++ /* Push out the messages in the calling context if we
++ * can.
++ */
++ if (master->transfer == spi_queued_transfer)
++ spi_pump_messages(&master->pump_messages);
++
+ wait_for_completion(&done);
+ status = message->status;
+ }
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -260,6 +260,7 @@ static inline void spi_unregister_driver
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to syncronise access to message queue
+ * @queue: message queue
++ * @idling: the device is entering idle state
+ * @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+@@ -425,6 +426,7 @@ struct spi_master {
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct spi_message *cur_msg;
++ bool idling;
+ bool busy;
+ bool running;
+ bool rt;
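
To see what this buys a caller: any spi_sync() user on a controller that uses the standard message queue can now push its own transfer instead of waking the message-pump kthread and sleeping until it runs, which is exactly the short-PIO case that dominates SPI flash access on ramips. The helpers below are a hedged sketch, not part of the patch; my_read_reg and my_write_block are made-up names, and the write helper assumes the caller passes a DMA-safe (kmalloc'd) buffer.

#include <linux/spi/spi.h>

/* Hypothetical helper (not from the patch): read one register.
 * spi_write_then_read() bottoms out in spi_sync(), so with this patch
 * the 1+1 byte transfer is pumped in the caller's context whenever the
 * controller is otherwise idle: no kthread wakeup, no context switch. */
static int my_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	return spi_write_then_read(spi, &reg, 1, val, 1);
}

/* Equally hypothetical: a write using the message API directly.
 * buf is assumed to be DMA-safe (e.g. kmalloc'd) per the usual SPI rule. */
static int my_write_block(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);
	return spi_sync(spi, &msg);	/* pumped inline if the bus is idle */
}
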
diff --git a/target/linux/generic/patches-3.18/092-03-spi-Only-idle-the-message-pump-in-the-worker-kthread.patch b/target/linux/generic/patches-3.18/092-03-spi-Only-idle-the-message-pump-in-the-worker-kthread.patch
new file mode 100644
index 0000000000..e8cbe6151e
--- /dev/null
+++ b/target/linux/generic/patches-3.18/092-03-spi-Only-idle-the-message-pump-in-the-worker-kthread.patch
@@ -0,0 +1,83 @@
+From: Mark Brown <broonie@kernel.org>
+Date: Wed, 10 Dec 2014 13:46:33 +0000
+Subject: [PATCH] spi: Only idle the message pump in the worker kthread
+
+In order to avoid the situation where the kthread is waiting for another
+context to make the hardware idle let the message pump know if it's being
+called from the worker thread context and if it isn't then defer to the
+worker thread instead of idling the hardware immediately. This will ensure
+that if this situation happens we block rather than busy waiting.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -875,8 +875,9 @@ void spi_finalize_current_transfer(struc
+ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+
+ /**
+- * spi_pump_messages - kthread work function which processes spi message queue
+- * @work: pointer to kthread work struct contained in the master struct
++ * __spi_pump_messages - function which processes spi message queue
++ * @master: master to process queue for
++ * @in_kthread: true if we are in the context of the message pump thread
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so call out to the driver to initialize hardware
+@@ -886,10 +887,8 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_t
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
+ */
+-static void spi_pump_messages(struct kthread_work *work)
++static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+ {
+- struct spi_master *master =
+- container_of(work, struct spi_master, pump_messages);
+ unsigned long flags;
+ bool was_busy = false;
+ int ret;
+@@ -916,6 +915,15 @@ static void spi_pump_messages(struct kth
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
++
++ /* Only do teardown in the thread */
++ if (!in_kthread) {
++ queue_kthread_work(&master->kworker,
++ &master->pump_messages);
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return;
++ }
++
+ master->busy = false;
+ master->idling = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+@@ -1004,6 +1012,18 @@ static void spi_pump_messages(struct kth
+ }
+ }
+
++/**
++ * spi_pump_messages - kthread work function which processes spi message queue
++ * @work: pointer to kthread work struct contained in the master struct
++ */
++static void spi_pump_messages(struct kthread_work *work)
++{
++ struct spi_master *master =
++ container_of(work, struct spi_master, pump_messages);
++
++ __spi_pump_messages(master, true);
++}
++
+ static int spi_init_queue(struct spi_master *master)
+ {
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+@@ -2166,7 +2186,7 @@ static int __spi_sync(struct spi_device
+ * can.
+ */
+ if (master->transfer == spi_queued_transfer)
+- spi_pump_messages(&master->pump_messages);
++ __spi_pump_messages(master, false);
+
+ wait_for_completion(&done);
+ status = message->status;
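
For context on the "worker kthread" all three patches refer to: the SPI core drives its message pump through the generic kthread_worker machinery, and this last patch merely routes the teardown-only path back onto that worker so a non-kthread caller blocks instead of busy-waiting. The sketch below shows how such a pump is wired up with the 3.18-era API (init_kthread_worker() and friends were renamed in later kernels); it loosely mirrors spi_init_queue() and assumes it sits inside drivers/spi/spi.c where spi_pump_messages() is visible, with demo_start_pump being an illustrative name only.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/spi/spi.h>

/* Condensed illustration of the message-pump plumbing, using the 3.18
 * kthread_worker API names. demo_start_pump is not a real function; it
 * loosely mirrors what spi_init_queue() does. */
static int demo_start_pump(struct spi_master *master)
{
	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task))
		return PTR_ERR(master->kworker_task);

	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/* queue_kthread_work(&master->kworker, &master->pump_messages) is
	 * how other contexts wake the pump; after this patch, a non-kthread
	 * caller that finds the queue empty defers the hardware teardown
	 * back to this worker instead of idling the controller itself. */
	return 0;
}
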