author     Felix Fietkau <nbd@nbd.name>  2021-03-14 23:53:35 +0100
committer  Felix Fietkau <nbd@nbd.name>  2021-03-15 14:05:43 +0100
commit     4a79c32fee81aba5ae3a7efda9db7819a66ac838 (patch)
tree       b82934f0c3edb736b975f492f72162a89e0cb1b4
parent     12b5f898f9222d35b80058fee5bd2ae935226070 (diff)
kernel: replace threaded NAPI implementation in 5.10 with upstream backport
This uses a kthread per NAPI instance instead of the workqueue approach.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
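For orientation, this is how a driver would opt in under the backported API below. A minimal sketch, collapsed into .ndo_open for brevity (the mydrv_* names are hypothetical; netif_napi_add() and napi_enable() behave as in the patch):

#include <linux/netdevice.h>

/* Hypothetical driver private struct, for illustration only. */
struct mydrv_priv {
	struct napi_struct napi;
};

static int mydrv_poll(struct napi_struct *napi, int budget); /* driver-defined */

static int mydrv_open(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* Request a kthread per NAPI instance (the bit this patch adds). */
	dev->threaded = 1;

	/* netif_napi_add() spawns the "napi/<dev>-<id>" kthread here and
	 * clears dev->threaded again if kthread_run() fails.
	 */
	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);

	/* With dev->threaded set and the kthread alive, napi_enable() sets
	 * NAPI_STATE_THREADED, so napi_schedule() wakes the kthread instead
	 * of raising NET_RX_SOFTIRQ.
	 */
	napi_enable(&priv->napi);
	return 0;
}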
Diffstat (limited to 'target/linux/generic/backport-5.10/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch')
-rw-r--r--  target/linux/generic/backport-5.10/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch  |  261
1 file changed, 261 insertions(+), 0 deletions(-)
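The backport changes where the poll loop runs, not the driver contract: the same poll callback is invoked from net_rx_action() in softirq context and from napi_threaded_poll() in the kthread. A sketch of such a callback, continuing the hypothetical driver above (the mydrv_* helpers are invented stand-ins for real RX-ring handling):

static int mydrv_clean_rx_ring(struct mydrv_priv *priv, int budget);
static void mydrv_enable_rx_irq(struct mydrv_priv *priv);

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work = mydrv_clean_rx_ring(priv, budget);

	/* Same contract in both contexts: report under-budget completion
	 * via napi_complete_done() and only then re-enable the device IRQ.
	 */
	if (work < budget && napi_complete_done(napi, work))
		mydrv_enable_rx_irq(priv);

	return work;
}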
diff --git a/target/linux/generic/backport-5.10/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch b/target/linux/generic/backport-5.10/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch
new file mode 100644
index 0000000000..22075cb9bc
--- /dev/null
+++ b/target/linux/generic/backport-5.10/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch
@@ -0,0 +1,261 @@
+From: Wei Wang <weiwan@google.com>
+Date: Mon, 8 Feb 2021 11:34:09 -0800
+Subject: [PATCH] net: implement threaded-able napi poll loop support
+
+This patch allows running each napi poll loop inside its own
+kernel thread.
+The kthread is created during netif_napi_add() if dev->threaded
+is set, and threaded mode is enabled in napi_enable(). We will
+provide a way to set dev->threaded and enable threaded mode
+without a device up/down in the following patch.
+
+Once threaded mode is enabled and the kthread is
+started, napi_schedule() will wake up that thread instead
+of raising the softirq.
+
+The threaded poll loop behaves much like net_rx_action,
+but it does not have to manipulate local irqs and uses
+an explicit scheduling point based on netdev_budget.
+
+Co-developed-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Co-developed-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Co-developed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Wei Wang <weiwan@google.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -347,6 +347,7 @@ struct napi_struct {
+ struct list_head dev_list;
+ struct hlist_node napi_hash_node;
+ unsigned int napi_id;
++ struct task_struct *thread;
+ };
+
+ enum {
+@@ -357,6 +358,7 @@ enum {
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
++ NAPI_STATE_THREADED, /* The poll is performed inside its own thread */
+ };
+
+ enum {
+@@ -367,6 +369,7 @@ enum {
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
++ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ };
+
+ enum gro_result {
+@@ -497,20 +500,7 @@ static inline bool napi_complete(struct
+ */
+ void napi_disable(struct napi_struct *n);
+
+-/**
+- * napi_enable - enable NAPI scheduling
+- * @n: NAPI context
+- *
+- * Resume NAPI from being scheduled on this context.
+- * Must be paired with napi_disable.
+- */
+-static inline void napi_enable(struct napi_struct *n)
+-{
+- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+- smp_mb__before_atomic();
+- clear_bit(NAPI_STATE_SCHED, &n->state);
+- clear_bit(NAPI_STATE_NPSVC, &n->state);
+-}
++void napi_enable(struct napi_struct *n);
+
+ /**
+ * napi_synchronize - wait until NAPI is not running
+@@ -1835,6 +1825,8 @@ enum netdev_priv_flags {
+ *
+ * @wol_enabled: Wake-on-LAN is enabled
+ *
++ * @threaded: napi threaded mode is enabled
++ *
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+@@ -2152,6 +2144,7 @@ struct net_device {
+ struct lock_class_key *qdisc_running_key;
+ bool proto_down;
+ unsigned wol_enabled:1;
++ unsigned threaded:1;
+
+ struct list_head net_notifier_list;
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -91,6 +91,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/skbuff.h>
++#include <linux/kthread.h>
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
+ #include <net/net_namespace.h>
+@@ -1488,6 +1489,27 @@ void netdev_notify_peers(struct net_devi
+ }
+ EXPORT_SYMBOL(netdev_notify_peers);
+
++static int napi_threaded_poll(void *data);
++
++static int napi_kthread_create(struct napi_struct *n)
++{
++ int err = 0;
++
++ /* Create and wake up the kthread once to put it in
++ * TASK_INTERRUPTIBLE mode to avoid the blocked task
++ * warning and work with loadavg.
++ */
++ n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
++ n->dev->name, n->napi_id);
++ if (IS_ERR(n->thread)) {
++ err = PTR_ERR(n->thread);
++ pr_err("kthread_run failed with err %d\n", err);
++ n->thread = NULL;
++ }
++
++ return err;
++}
++
+ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+@@ -4242,6 +4264,21 @@ int gro_normal_batch __read_mostly = 8;
+ static inline void ____napi_schedule(struct softnet_data *sd,
+ struct napi_struct *napi)
+ {
++ struct task_struct *thread;
++
++ if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
++ /* Paired with smp_mb__before_atomic() in
++ * napi_enable(). Use READ_ONCE() to guarantee
++ * a complete read on napi->thread. Only call
++ * wake_up_process() when it's not NULL.
++ */
++ thread = READ_ONCE(napi->thread);
++ if (thread) {
++ wake_up_process(thread);
++ return;
++ }
++ }
++
+ list_add_tail(&napi->poll_list, &sd->poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+@@ -6693,6 +6730,12 @@ void netif_napi_add(struct net_device *d
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
+ napi_hash_add(napi);
++ /* Create kthread for this napi if dev->threaded is set.
++ * Clear dev->threaded if kthread creation failed so that
++ * threaded mode will not be enabled in napi_enable().
++ */
++ if (dev->threaded && napi_kthread_create(napi))
++ dev->threaded = 0;
+ }
+ EXPORT_SYMBOL(netif_napi_add);
+
+@@ -6709,9 +6752,28 @@ void napi_disable(struct napi_struct *n)
+ hrtimer_cancel(&n->timer);
+
+ clear_bit(NAPI_STATE_DISABLE, &n->state);
++ clear_bit(NAPI_STATE_THREADED, &n->state);
+ }
+ EXPORT_SYMBOL(napi_disable);
+
++/**
++ * napi_enable - enable NAPI scheduling
++ * @n: NAPI context
++ *
++ * Resume NAPI from being scheduled on this context.
++ * Must be paired with napi_disable.
++ */
++void napi_enable(struct napi_struct *n)
++{
++ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
++ smp_mb__before_atomic();
++ clear_bit(NAPI_STATE_SCHED, &n->state);
++ clear_bit(NAPI_STATE_NPSVC, &n->state);
++ if (n->dev->threaded && n->thread)
++ set_bit(NAPI_STATE_THREADED, &n->state);
++}
++EXPORT_SYMBOL(napi_enable);
++
+ static void flush_gro_hash(struct napi_struct *napi)
+ {
+ int i;
+@@ -6737,6 +6799,11 @@ void __netif_napi_del(struct napi_struct
+
+ flush_gro_hash(napi);
+ napi->gro_bitmask = 0;
++
++ if (napi->thread) {
++ kthread_stop(napi->thread);
++ napi->thread = NULL;
++ }
+ }
+ EXPORT_SYMBOL(__netif_napi_del);
+
+@@ -6818,6 +6885,51 @@ static int napi_poll(struct napi_struct
+ return work;
+ }
+
++static int napi_thread_wait(struct napi_struct *napi)
++{
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ while (!kthread_should_stop() && !napi_disable_pending(napi)) {
++ if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
++ WARN_ON(!list_empty(&napi->poll_list));
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return -1;
++}
++
++static int napi_threaded_poll(void *data)
++{
++ struct napi_struct *napi = data;
++ void *have;
++
++ while (!napi_thread_wait(napi)) {
++ for (;;) {
++ bool repoll = false;
++
++ local_bh_disable();
++
++ have = netpoll_poll_lock(napi);
++ __napi_poll(napi, &repoll);
++ netpoll_poll_unlock(have);
++
++ __kfree_skb_flush();
++ local_bh_enable();
++
++ if (!repoll)
++ break;
++
++ cond_resched();
++ }
++ }
++ return 0;
++}
++
+ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);