-rw-r--r--  target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch  1412
1 file changed, 1412 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch b/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch
new file mode 100644
index 0000000000..e1541e90a9
--- /dev/null
+++ b/target/linux/generic/backport-4.9/090-net-generalize-napi_complete_done.patch
@@ -0,0 +1,1412 @@
+From 6ad20165d376fa07919a70e4f43dfae564601829 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 30 Jan 2017 08:22:01 -0800
+Subject: drivers: net: generalize napi_complete_done()
+
+napi_complete_done() allows drivers to opt in to gro_flush_timeout,
+added back in linux-3.19 by commit 3b47d30396ba
+("net: gro: add a per device gro flush timer").
+
+This allows for more efficient GRO aggregation without
+sacrificing latency.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/can/at91_can.c | 2 +-
+ drivers/net/can/c_can/c_can.c | 2 +-
+ drivers/net/can/flexcan.c | 2 +-
+ drivers/net/can/ifi_canfd/ifi_canfd.c | 2 +-
+ drivers/net/can/janz-ican3.c | 2 +-
+ drivers/net/can/m_can/m_can.c | 2 +-
+ drivers/net/can/rcar/rcar_can.c | 2 +-
+ drivers/net/can/rcar/rcar_canfd.c | 2 +-
+ drivers/net/can/xilinx_can.c | 2 +-
+ drivers/net/ethernet/3com/typhoon.c | 2 +-
+ drivers/net/ethernet/adi/bfin_mac.c | 2 +-
+ drivers/net/ethernet/agere/et131x.c | 2 +-
+ drivers/net/ethernet/altera/altera_tse_main.c | 2 +-
+ drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +-
+ drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 2 +-
+ drivers/net/ethernet/arc/emac_main.c | 2 +-
+ drivers/net/ethernet/atheros/alx/main.c | 2 +-
+ drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2 +-
+ drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +-
+ drivers/net/ethernet/atheros/atlx/atl1.c | 2 +-
+ drivers/net/ethernet/broadcom/b44.c | 2 +-
+ drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2 +-
+ drivers/net/ethernet/broadcom/bgmac.c | 2 +-
+ drivers/net/ethernet/broadcom/bnx2.c | 4 ++--
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ drivers/net/ethernet/broadcom/sb1250-mac.c | 2 +-
+ drivers/net/ethernet/brocade/bna/bnad.c | 2 +-
+ drivers/net/ethernet/cadence/macb.c | 2 +-
+ drivers/net/ethernet/calxeda/xgmac.c | 2 +-
+ drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +-
+ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +-
+ drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +-
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +-
+ drivers/net/ethernet/chelsio/cxgb/sge.c | 2 +-
+ drivers/net/ethernet/chelsio/cxgb3/sge.c | 4 ++--
+ drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +-
+ drivers/net/ethernet/cisco/enic/enic_main.c | 4 ++--
+ drivers/net/ethernet/dec/tulip/interrupt.c | 6 +++---
+ drivers/net/ethernet/dnet.c | 2 +-
+ drivers/net/ethernet/emulex/benet/be_main.c | 2 +-
+ drivers/net/ethernet/ethoc.c | 2 +-
+ drivers/net/ethernet/ezchip/nps_enet.c | 2 +-
+ drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +-
+ drivers/net/ethernet/freescale/fec_main.c | 2 +-
+ .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 2 +-
+ drivers/net/ethernet/freescale/gianfar.c | 4 ++--
+ drivers/net/ethernet/freescale/ucc_geth.c | 2 +-
+ drivers/net/ethernet/hisilicon/hip04_eth.c | 2 +-
+ drivers/net/ethernet/hisilicon/hisi_femac.c | 2 +-
+ drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2 +-
+ drivers/net/ethernet/ibm/ibmveth.c | 2 +-
+ drivers/net/ethernet/ibm/ibmvnic.c | 2 +-
+ drivers/net/ethernet/intel/e100.c | 2 +-
+ drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2 +-
+ drivers/net/ethernet/korina.c | 2 +-
+ drivers/net/ethernet/lantiq_etop.c | 21 +++++++++------------
+ drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +-
+ drivers/net/ethernet/marvell/mvneta.c | 6 ++----
+ drivers/net/ethernet/marvell/mvpp2.c | 2 +-
+ drivers/net/ethernet/marvell/pxa168_eth.c | 2 +-
+ drivers/net/ethernet/moxa/moxart_ether.c | 2 +-
+ drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +-
+ drivers/net/ethernet/natsemi/natsemi.c | 2 +-
+ drivers/net/ethernet/neterion/s2io.c | 4 ++--
+ drivers/net/ethernet/neterion/vxge/vxge-main.c | 6 +++---
+ drivers/net/ethernet/nvidia/forcedeth.c | 2 +-
+ drivers/net/ethernet/nxp/lpc_eth.c | 2 +-
+ .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +-
+ drivers/net/ethernet/pasemi/pasemi_mac.c | 2 +-
+ .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +-
+ drivers/net/ethernet/qlogic/qede/qede_fp.c | 2 +-
+ drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10 +++++-----
+ drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2 +-
+ drivers/net/ethernet/qualcomm/emac/emac.c | 2 +-
+ drivers/net/ethernet/realtek/r8169.c | 2 +-
+ drivers/net/ethernet/rocker/rocker_main.c | 2 +-
+ drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2 +-
+ drivers/net/ethernet/sfc/efx.c | 2 +-
+ drivers/net/ethernet/sfc/falcon/efx.c | 2 +-
+ drivers/net/ethernet/smsc/smsc9420.c | 2 +-
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +-
+ drivers/net/ethernet/sun/niu.c | 2 +-
+ drivers/net/ethernet/sun/sungem.c | 2 +-
+ drivers/net/ethernet/sun/sunvnet_common.c | 2 +-
+ drivers/net/ethernet/tehuti/tehuti.c | 2 +-
+ drivers/net/ethernet/ti/cpsw.c | 2 +-
+ drivers/net/ethernet/ti/davinci_emac.c | 2 +-
+ drivers/net/ethernet/ti/netcp_core.c | 2 +-
+ drivers/net/ethernet/tile/tilegx.c | 2 +-
+ drivers/net/ethernet/tile/tilepro.c | 2 +-
+ drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2 +-
+ drivers/net/ethernet/toshiba/spider_net.c | 2 +-
+ drivers/net/ethernet/toshiba/tc35815.c | 2 +-
+ drivers/net/ethernet/tundra/tsi108_eth.c | 2 +-
+ drivers/net/ethernet/via/via-rhine.c | 2 +-
+ drivers/net/ethernet/via/via-velocity.c | 2 +-
+ drivers/net/ethernet/wiznet/w5100.c | 2 +-
+ drivers/net/ethernet/wiznet/w5300.c | 2 +-
+ drivers/net/fjes/fjes_main.c | 2 +-
+ drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++--
+ drivers/net/wan/fsl_ucc_hdlc.c | 2 +-
+ drivers/net/wan/hd64572.c | 2 +-
+ drivers/net/wireless/ath/ath10k/pci.c | 2 +-
+ drivers/net/wireless/ath/wil6210/netdev.c | 2 +-
+ drivers/net/xen-netback/interface.c | 2 +-
+ drivers/net/xen-netfront.c | 2 +-
+ drivers/staging/octeon/ethernet-rx.c | 2 +-
+ drivers/staging/unisys/visornic/visornic_main.c | 2 +-
+ 109 files changed, 132 insertions(+), 137 deletions(-)
+
+--- a/drivers/net/can/at91_can.c
++++ b/drivers/net/can/at91_can.c
+@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct
+ u32 reg_ier = AT91_IRQ_ERR_FRAME;
+ reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ at91_write(priv, AT91_IER, reg_ier);
+ }
+
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct
+
+ end:
+ if (work_done < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* enable all IRQs if we are not in bus off state */
+ if (priv->can.state != CAN_STATE_BUS_OFF)
+ c_can_irq_control(priv, true);
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_stru
+ work_done += flexcan_poll_bus_err(dev, reg_esr);
+
+ if (work_done < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* enable IRQs */
+ flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_st
+ work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
+
+ if (work_done < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ ifi_canfd_irq_enable(ndev, 1);
+ }
+
+--- a/drivers/net/can/janz-ican3.c
++++ b/drivers/net/can/janz-ican3.c
+@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct
+ /* We have processed all packets that the adapter had, but it
+ * was less than our budget, stop polling */
+ if (received < budget)
+- napi_complete(napi);
++ napi_complete_done(napi, received);
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct
+ work_done += m_can_do_rx_poll(dev, (quota - work_done));
+
+ if (work_done < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ m_can_enable_all_interrupts(priv);
+ }
+
+--- a/drivers/net/can/rcar/rcar_can.c
++++ b/drivers/net/can/rcar/rcar_can.c
+@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_
+ }
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, num_pkts);
+ priv->ier |= RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ }
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct nap
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, num_pkts);
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_stru
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
+ if (work_done < quota) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+--- a/drivers/net/ethernet/3com/typhoon.c
++++ b/drivers/net/ethernet/3com/typhoon.c
+@@ -1748,7 +1748,7 @@ typhoon_poll(struct napi_struct *napi, i
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ iowrite32(TYPHOON_INTR_NONE,
+ tp->ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(tp->ioaddr);
+--- a/drivers/net/ethernet/adi/bfin_mac.c
++++ b/drivers/net/ethernet/adi/bfin_mac.c
+@@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_str
+ }
+
+ if (i < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, i);
+ if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
+ enable_irq(IRQ_MAC_RX);
+ }
+--- a/drivers/net/ethernet/agere/et131x.c
++++ b/drivers/net/ethernet/agere/et131x.c
+@@ -3573,7 +3573,7 @@ static int et131x_poll(struct napi_struc
+ et131x_handle_send_pkts(adapter);
+
+ if (work_done < budget) {
+- napi_complete(&adapter->napi);
++ napi_complete_done(&adapter->napi, work_done);
+ et131x_enable_interrupts(adapter);
+ }
+
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -491,7 +491,7 @@ static int tse_poll(struct napi_struct *
+
+ if (rxcomplete < budget) {
+
+- napi_complete(napi);
++ napi_complete_done(napi, rxcomplete);
+
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -651,7 +651,7 @@ static int xgene_enet_napi(struct napi_s
+ processed = xgene_enet_process_ring(ring, budget);
+
+ if (processed != budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, processed);
+ enable_irq(ring->irq);
+ }
+
+--- a/drivers/net/ethernet/arc/emac_main.c
++++ b/drivers/net/ethernet/arc/emac_main.c
+@@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_str
+
+ work_done = arc_emac_rx(ndev, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
+ }
+
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -292,7 +292,7 @@ static int alx_poll(struct napi_struct *
+ if (!tx_complete || work == budget)
+ return budget;
+
+- napi_complete(&alx->napi);
++ napi_complete_done(&alx->napi, work);
+
+ /* enable interrupt */
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -1885,7 +1885,7 @@ static int atl1c_clean(struct napi_struc
+
+ if (work_done < budget) {
+ quit_polling:
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ adapter->hw.intr_mask |= ISR_RX_PKT;
+ AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ }
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1532,7 +1532,7 @@ static int atl1e_clean(struct napi_struc
+ /* If no Tx and not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ quit_polling:
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
+ /* test debug */
+--- a/drivers/net/ethernet/atheros/atlx/atl1.c
++++ b/drivers/net/ethernet/atheros/atlx/atl1.c
+@@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_
+ if (work_done >= budget)
+ return work_done;
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* re-enable Interrupt */
+ if (likely(adapter->int_enabled))
+ atlx_imr_set(adapter, IMR_NORMAL_MASK);
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ b44_enable_ints(bp);
+ }
+
+--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
++++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_str
+
+ /* no more packet in rx/tx queue, remove device from poll
+ * queue */
+- napi_complete(napi);
++ napi_complete_done(napi, rx_work_done);
+
+ /* restore rx/tx interrupt */
+ enet_dmac_writel(priv, priv->dma_chan_int_mask,
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1145,7 +1145,7 @@ static int bgmac_poll(struct napi_struct
+ return weight;
+
+ if (handled < weight) {
+- napi_complete(napi);
++ napi_complete_done(napi, handled);
+ bgmac_chip_intrs_on(bgmac);
+ }
+
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -3522,7 +3522,7 @@ static int bnx2_poll_msix(struct napi_st
+ rmb();
+ if (likely(!bnx2_has_fast_work(bnapi))) {
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+@@ -3559,7 +3559,7 @@ static int bnx2_poll(struct napi_struct
+
+ rmb();
+ if (likely(!bnx2_has_work(bnapi))) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3229,7 +3229,7 @@ static int bnx2x_poll(struct napi_struct
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_work_done);
+ } else {
+ bnx2x_update_fpsb_idx(fp);
+ /* bnx2x_has_rx_work() reads the status block,
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1768,7 +1768,7 @@ static int bnxt_poll_nitroa0(struct napi
+ }
+
+ if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_pkts);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+ return rx_pkts;
+--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
++++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
+@@ -2545,7 +2545,7 @@ static int sbmac_poll(struct napi_struct
+ sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ #ifdef CONFIG_SBMAC_COALESCE
+ __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *na
+ return rcvd;
+
+ poll_exit:
+- napi_complete(napi);
++ napi_complete_done(napi, rcvd);
+
+ rx_ctrl->rx_complete++;
+
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -1069,7 +1069,7 @@ static int macb_poll(struct napi_struct
+
+ work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+--- a/drivers/net/ethernet/calxeda/xgmac.c
++++ b/drivers/net/ethernet/calxeda/xgmac.c
+@@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct
+ work_done = xgmac_rx(priv, budget);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+@@ -2433,7 +2433,7 @@ static int liquidio_napi_poll(struct nap
+ }
+
+ if ((work_done < budget) && (tx_done)) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct
+
+ if (work_done < budget) {
+ /* We stopped because no more packets were available. */
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ octeon_mgmt_enable_rx_irq(p);
+ }
+ octeon_mgmt_update_rx_stats(netdev);
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -737,7 +737,7 @@ static int nicvf_poll(struct napi_struct
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
+@@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, in
+ int work_done = process_responses(adapter, budget);
+
+ if (likely(work_done < budget)) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ writel(adapter->sge->respQ.cidx,
+ adapter->regs + A_SG_SLEEPING);
+ }
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ spin_unlock_irq(&q->lock);
+ return work_done;
+ }
+@@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_s
+ int work_done = process_responses(adap, qs, budget);
+
+ if (likely(work_done < budget)) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ /*
+ * Because we don't atomically flush the following
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_s
+ u32 val;
+
+ if (likely(work_done < budget)) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ intr_params = rspq->next_intr_params;
+ rspq->next_intr_params = rspq->intr_params;
+ } else
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1339,7 +1339,7 @@ static int enic_poll(struct napi_struct
+ * exit polling
+ */
+
+- napi_complete(napi);
++ napi_complete_done(napi, rq_work_done);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[0]);
+ vnic_intr_unmask(&enic->intr[intr]);
+@@ -1496,7 +1496,7 @@ static int enic_poll_msix_rq(struct napi
+ * exit polling
+ */
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[rq]);
+ vnic_intr_unmask(&enic->intr[intr]);
+--- a/drivers/net/ethernet/dec/tulip/interrupt.c
++++ b/drivers/net/ethernet/dec/tulip/interrupt.c
+@@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi,
+
+ /* Remove us from polling list and enable RX intr. */
+
+- napi_complete(napi);
+- iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
++ napi_complete_done(napi, work_done);
++ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+
+ /* The last op happens after poll completion. Which means the following:
+ * 1. it can race with disabling irqs in irq handler
+@@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi,
+ * before we did napi_complete(). See? We would lose it. */
+
+ /* remove ourselves from the polling list */
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ return work_done;
+ }
+--- a/drivers/net/ethernet/dnet.c
++++ b/drivers/net/ethernet/dnet.c
+@@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct
+ /* We processed all packets available. Tell NAPI it can
+ * stop polling then re-enable rx interrupts.
+ */
+- napi_complete(napi);
++ napi_complete_done(napi, npackets);
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
+ dnet_writel(bp, int_enable, INTR_ENB);
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3346,7 +3346,7 @@ int be_poll(struct napi_struct *napi, in
+ be_process_mcc(adapter);
+
+ if (max_work < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, max_work);
+
+ /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+ * delay via a delay multiplier encoding value
+--- a/drivers/net/ethernet/ethoc.c
++++ b/drivers/net/ethernet/ethoc.c
+@@ -614,7 +614,7 @@ static int ethoc_poll(struct napi_struct
+ tx_work_done = ethoc_tx(priv->netdev, budget);
+
+ if (rx_work_done < budget && tx_work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_work_done);
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ }
+
+--- a/drivers/net/ethernet/ezchip/nps_enet.c
++++ b/drivers/net/ethernet/ezchip/nps_enet.c
+@@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_str
+ if (work_done < budget) {
+ u32 buf_int_enable_value = 0;
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ /* set tx_done and rx_rdy bits */
+ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_
+ fec_enet_tx(ndev);
+
+ if (pkts < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, pkts);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+ return pkts;
+--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+@@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_stru
+
+ if (received < budget && tx_left) {
+ /* done */
+- napi_complete(napi);
++ napi_complete_done(napi, received);
+ (*fep->ops->napi_enable)(dev);
+
+ return received;
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -3186,7 +3186,7 @@ static int gfar_poll_rx_sq(struct napi_s
+
+ if (work_done < budget) {
+ u32 imask;
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+@@ -3275,7 +3275,7 @@ static int gfar_poll_rx(struct napi_stru
+
+ if (!num_act_queues) {
+ u32 imask;
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_str
+ howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+
+ if (howmany < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, howmany);
+ setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ }
+
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -555,7 +555,7 @@ refill:
+ priv->reg_inten |= RCV_INT;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+ }
+- napi_complete(napi);
++ napi_complete_done(napi, rx);
+ done:
+ /* clean up tx descriptors and start a new timer if necessary */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
++++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
+@@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_s
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ hisi_femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+@@ -578,7 +578,7 @@ static int hix5hd2_poll(struct napi_stru
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ hix5hd2_irq_enable(priv);
+ }
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1320,7 +1320,7 @@ restart_poll:
+ ibmveth_replenish_task(adapter);
+
+ if (frames_processed < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, frames_processed);
+
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -999,7 +999,7 @@ restart_poll:
+
+ if (frames_processed < budget) {
+ enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+- napi_complete(napi);
++ napi_complete_done(napi, frames_processed);
+ if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
+ napi_reschedule(napi)) {
+ disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ e100_enable_irq(nic);
+ }
+
+--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+@@ -1825,7 +1825,7 @@ ixgb_clean(struct napi_struct *napi, int
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ if (!test_bit(__IXGB_DOWN, &adapter->flags))
+ ixgb_irq_enable(adapter);
+ }
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -464,7 +464,7 @@ static int korina_poll(struct napi_struc
+
+ work_done = korina_rx(dev, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ writel(readl(&lp->rx_dma_regs->dmasm) &
+ ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *nap
+ {
+ struct ltq_etop_chan *ch = container_of(napi,
+ struct ltq_etop_chan, napi);
+- int rx = 0;
+- int complete = 0;
++ int work_done = 0;
+
+- while ((rx < budget) && !complete) {
++ while (work_done < budget) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+- if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+- ltq_etop_hw_receive(ch);
+- rx++;
+- } else {
+- complete = 1;
+- }
++ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
++ break;
++ ltq_etop_hw_receive(ch);
++ work_done++;
+ }
+- if (complete || !rx) {
+- napi_complete(&ch->napi);
++ if (work_done < budget) {
++ napi_complete_done(&ch->napi, work_done);
+ ltq_dma_ack_irq(&ch->dma);
+ }
+- return rx;
++ return work_done;
+ }
+
+ static int
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2312,7 +2312,7 @@ static int mv643xx_eth_poll(struct napi_
+ if (work_done < budget) {
+ if (mp->oom)
+ mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ wrlp(mp, INT_MASK, mp->int_mask);
+ }
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2696,11 +2696,9 @@ static int mvneta_poll(struct napi_struc
+ rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ }
+
+- budget -= rx_done;
+-
+- if (budget > 0) {
++ if (rx_done < budget) {
+ cause_rx_tx = 0;
+- napi_complete(&port->napi);
++ napi_complete_done(&port->napi, rx_done);
+ enable_percpu_irq(pp->dev->irq, 0);
+ }
+
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct
+
+ if (budget > 0) {
+ cause_rx = 0;
+- napi_complete(napi);
++ napi_complete_done(napi, rx_done);
+
+ mvpp2_interrupts_enable(port);
+ }
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1264,7 +1264,7 @@ static int pxa168_rx_poll(struct napi_st
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -269,7 +269,7 @@ rx_next:
+ }
+
+ if (rx < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx);
+ }
+
+ priv->reg_imr |= RPKT_FINISH_M;
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -1678,7 +1678,7 @@ static int myri10ge_poll(struct napi_str
+
+ myri10ge_ss_unlock_napi(ss);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ put_be32(htonl(3), ss->irq_claim);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/natsemi/natsemi.c
++++ b/drivers/net/ethernet/natsemi/natsemi.c
+@@ -2261,7 +2261,7 @@ static int natsemi_poll(struct napi_stru
+ np->intr_status = readl(ioaddr + IntrStatus);
+ } while (np->intr_status);
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ /* Reenable interrupts providing nothing is trying to shut
+ * the chip down. */
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_st
+ s2io_chk_rx_buffers(nic, ring);
+
+ if (pkts_processed < budget_org) {
+- napi_complete(napi);
++ napi_complete_done(napi, pkts_processed);
+ /*Re Enable MSI-Rx Vector*/
+ addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
+ addr += 7 - ring->ring_no;
+@@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_st
+ break;
+ }
+ if (pkts_processed < budget_org) {
+- napi_complete(napi);
++ napi_complete_done(napi, pkts_processed);
+ /* Re enable the Rx interrupts for the ring */
+ writeq(0, &bar0->rx_traffic_mask);
+ readl(&bar0->rx_traffic_mask);
+--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
+@@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_st
+ vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
+
+- if (ring->pkts_processed < budget_org) {
+- napi_complete(napi);
++ if (pkts_processed < budget_org) {
++ napi_complete_done(napi, pkts_processed);
+
+ /* Re enable the Rx interrupts for the vpath */
+ vxge_hw_channel_msix_unmask(
+@@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_st
+ VXGE_COMPLETE_ALL_TX(vdev);
+
+ if (pkts_processed < budget_org) {
+- napi_complete(napi);
++ napi_complete_done(napi, pkts_processed);
+ /* Re enable the Rx interrupts for the ring */
+ vxge_hw_device_unmask_all(hldev);
+ vxge_hw_device_flush_io(hldev);
+--- a/drivers/net/ethernet/nvidia/forcedeth.c
++++ b/drivers/net/ethernet/nvidia/forcedeth.c
+@@ -3756,7 +3756,7 @@ static int nv_napi_poll(struct napi_stru
+ if (rx_work < budget) {
+ /* re-enable interrupts
+ (msix not enabled in napi) */
+- napi_complete(napi);
++ napi_complete_done(napi, rx_work);
+
+ writel(np->irqmask, base + NvRegIrqMask);
+ }
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_stru
+ rx_done = __lpc_handle_recv(ndev, budget);
+
+ if (rx_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_done);
+ lpc_eth_enable_int(pldat->net_base);
+ }
+
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -2391,7 +2391,7 @@ static int pch_gbe_napi_poll(struct napi
+ poll_end_flag = true;
+
+ if (poll_end_flag) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ pch_gbe_irq_enable(adapter);
+ }
+
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_s
+ pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
+ if (pkts < budget) {
+ /* all done, no more packets present */
+- napi_complete(napi);
++ napi_complete_done(napi, pkts);
+
+ pasemi_mac_restart_rx_intr(mac);
+ pasemi_mac_restart_tx_intr(mac);
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -2391,7 +2391,7 @@ static int netxen_nic_poll(struct napi_s
+ work_done = budget;
+
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
+ }
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struc
+ work_done = budget;
+
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+@@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_st
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+@@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_pol
+ work_done = budget;
+
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+@@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_
+ work_done = budget;
+
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+@@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct na
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+- napi_complete(&sds_ring->napi);
++ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ ql_enable_completion_interrupt(qdev, rx_ring->irq);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_str
+ emac_mac_rx_process(adpt, rx_q, &work_done, budget);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ irq->mask |= rx_q->intr;
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7579,7 +7579,7 @@ static int rtl8169_poll(struct napi_stru
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ rtl_irq_enable(tp, enable_mask);
+ mmiowb();
+--- a/drivers/net/ethernet/rocker/rocker_main.c
++++ b/drivers/net/ethernet/rocker/rocker_main.c
+@@ -2480,7 +2480,7 @@ static int rocker_port_poll_rx(struct na
+ }
+
+ if (credits < budget)
+- napi_complete(napi);
++ napi_complete_done(napi, credits);
+
+ rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
+
+--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+@@ -1578,7 +1578,7 @@ static int sxgbe_poll(struct napi_struct
+
+ work_done = sxgbe_rx(priv, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
+ }
+
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -332,7 +332,7 @@ static int efx_poll(struct napi_struct *
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+- napi_complete(napi);
++ napi_complete_done(napi, spent);
+ efx_nic_eventq_read_ack(channel);
+ }
+
+--- a/drivers/net/ethernet/smsc/smsc9420.c
++++ b/drivers/net/ethernet/smsc/smsc9420.c
+@@ -869,7 +869,7 @@ static int smsc9420_rx_poll(struct napi_
+ smsc9420_pci_flush_write(pd);
+
+ if (work_done < budget) {
+- napi_complete(&pd->napi);
++ napi_complete_done(&pd->napi, work_done);
+
+ /* re-enable RX DMA interrupts */
+ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2677,7 +2677,7 @@ static int stmmac_poll(struct napi_struc
+
+ work_done = stmmac_rx(priv, budget);
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ stmmac_enable_dma_irq(priv);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *
+ work_done = niu_poll_core(np, lp, budget);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ niu_ldg_rearm(np, lp, 1);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *
+ gp->status = readl(gp->regs + GREG_STAT);
+ } while (gp->status & GREG_STAT_NAPI);
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ gem_enable_ints(gp);
+
+ return work_done;
+--- a/drivers/net/ethernet/sun/sunvnet_common.c
++++ b/drivers/net/ethernet/sun/sunvnet_common.c
+@@ -850,7 +850,7 @@ int sunvnet_poll_common(struct napi_stru
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, processed);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *
+ * device lock and allow waiting tasks (eg rmmod) to advance) */
+ priv->napi_stop = 0;
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ bdx_enable_interrupts(priv);
+ }
+ return work_done;
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -839,7 +839,7 @@ static int cpsw_rx_poll(struct napi_stru
+ }
+
+ if (num_rx < budget) {
+- napi_complete(napi_rx);
++ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct
+ &emac_rxhost_errcodes[cause][0], ch);
+ }
+ } else if (num_rx_pkts < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, num_rx_pkts);
+ emac_int_enable(priv);
+ }
+
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -939,7 +939,7 @@ static int netcp_rx_poll(struct napi_str
+
+ netcp_rxpool_refill(netcp);
+ if (packets < budget) {
+- napi_complete(&netcp->rx_napi);
++ napi_complete_done(&netcp->rx_napi, packets);
+ knav_queue_enable_notify(netcp->rx_queue);
+ }
+
+--- a/drivers/net/ethernet/tile/tilegx.c
++++ b/drivers/net/ethernet/tile/tilegx.c
+@@ -678,7 +678,7 @@ static int tile_net_poll(struct napi_str
+ }
+
+ /* There are no packets left. */
+- napi_complete(&info_mpipe->napi);
++ napi_complete_done(&info_mpipe->napi, work);
+
+ md = &mpipe_data[instance];
+ /* Re-enable hypervisor interrupts. */
+--- a/drivers/net/ethernet/tile/tilepro.c
++++ b/drivers/net/ethernet/tile/tilepro.c
+@@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_str
+ }
+ }
+
+- napi_complete(&info->napi);
++ napi_complete_done(&info->napi, work);
+
+ if (!priv->active)
+ goto done;
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+@@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_st
+ }
+
+ if (packets_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, packets_done);
+ gelic_card_rx_irq_on(card);
+ }
+ return packets_done;
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_s
+ /* if all packets are in the stack, enable interrupts and return 0 */
+ /* if not, return 1 */
+ if (packets_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, packets_done);
+ spider_net_rx_irq_on(card);
+ card->ignore_rx_ramfull = 0;
+ }
+--- a/drivers/net/ethernet/toshiba/tc35815.c
++++ b/drivers/net/ethernet/toshiba/tc35815.c
+@@ -1639,7 +1639,7 @@ static int tc35815_poll(struct napi_stru
+ spin_unlock(&lp->rx_lock);
+
+ if (received < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, received);
+ /* enable interrupts */
+ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
+ }
+--- a/drivers/net/ethernet/tundra/tsi108_eth.c
++++ b/drivers/net/ethernet/tundra/tsi108_eth.c
+@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struc
+
+ if (num_received < budget) {
+ data->rxpending = 0;
+- napi_complete(napi);
++ napi_complete_done(napi, num_received);
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ(TSI108_EC_INTMASK)
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_st
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ iowrite16(enable_mask, ioaddr + IntrEnable);
+ mmiowb();
+ }
+--- a/drivers/net/ethernet/via/via-velocity.c
++++ b/drivers/net/ethernet/via/via-velocity.c
+@@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_str
+ velocity_tx_srv(vptr);
+ /* If budget not fully consumed, exit the polling mode */
+ if (rx_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_done);
+ mac_enable_int(vptr->mac_regs);
+ }
+ spin_unlock_irqrestore(&vptr->lock, flags);
+--- a/drivers/net/ethernet/wiznet/w5100.c
++++ b/drivers/net/ethernet/wiznet/w5100.c
+@@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_s
+ }
+
+ if (rx_count < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_count);
+ w5100_enable_intr(priv);
+ }
+
+--- a/drivers/net/ethernet/wiznet/w5300.c
++++ b/drivers/net/ethernet/wiznet/w5300.c
+@@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_s
+ }
+
+ if (rx_count < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rx_count);
+ w5300_write(priv, W5300_IMR, IR_S0);
+ mmiowb();
+ }
+--- a/drivers/net/fjes/fjes_main.c
++++ b/drivers/net/fjes/fjes_main.c
+@@ -1122,7 +1122,7 @@ static int fjes_poll(struct napi_struct
+ }
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ if (adapter->unset_rx_last) {
+ adapter->rx_last_jiffies = jiffies;
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, i
+ rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
+
+ if (rxd_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rxd_done);
+ vmxnet3_enable_all_intrs(rx_queue->adapter);
+ }
+ return rxd_done;
+@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct
+ rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
+
+ if (rxd_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, rxd_done);
+ vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
+ }
+ return rxd_done;
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_str
+ howmany += hdlc_rx_done(priv, budget - howmany);
+
+ if (howmany < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, howmany);
+ qe_setbits32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ }
+--- a/drivers/net/wan/hd64572.c
++++ b/drivers/net/wan/hd64572.c
+@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *
+ received = sca_rx_done(port, budget);
+
+ if (received < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, received);
+ enable_intr(port);
+ }
+
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -2800,7 +2800,7 @@ static int ath10k_pci_napi_poll(struct n
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+- napi_complete(ctx);
++ napi_complete_done(ctx, done);
+ /* In case of MSI, it is possible that interrupts are received
+ * while NAPI poll is inprogress. So pending interrupts that are
+ * received after processing all copy engine pipes by NAPI poll
+--- a/drivers/net/wireless/ath/wil6210/netdev.c
++++ b/drivers/net/wireless/ath/wil6210/netdev.c
+@@ -84,7 +84,7 @@ static int wil6210_netdev_poll_rx(struct
+ done = budget - quota;
+
+ if (done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struc
+ work_done = xenvif_tx_action(queue, budget);
+
+ if (work_done < budget) {
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+ /* If the queue is rate-limited, it shall be
+ * rescheduled in the timer callback.
+ */
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1059,7 +1059,7 @@ err:
+ if (work_done < budget) {
+ int more_to_do = 0;
+
+- napi_complete(napi);
++ napi_complete_done(napi, work_done);
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
+ if (more_to_do)
+--- a/drivers/staging/octeon/ethernet-rx.c
++++ b/drivers/staging/octeon/ethernet-rx.c
+@@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi
+
+ if (rx_count < budget) {
+ /* No more work */
+- napi_complete(napi);
++ napi_complete_done(napi, rx_count);
+ enable_irq(rx_group->irq);
+ }
+ return rx_count;
+--- a/drivers/staging/unisys/visornic/visornic_main.c
++++ b/drivers/staging/unisys/visornic/visornic_main.c
+@@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_str
+
+ /* If there aren't any more packets to receive stop the poll */
+ if (rx_count < budget)
+- napi_complete(napi);
++ napi_complete_done(napi, rx_count);
+
+ return rx_count;
+ }
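
For reference, every hunk in this patch applies the same conversion: a NAPI poll handler that used to call napi_complete(napi) now reports how much work it actually completed via napi_complete_done(napi, work_done), which lets the networking core honor a non-zero gro_flush_timeout before flushing GRO. A minimal sketch of the resulting pattern, using a hypothetical driver "foo" (struct foo_priv, foo_clean_rx() and foo_enable_irq() are illustrative names, not taken from this patch):

    #include <linux/netdevice.h>

    /* hypothetical per-device state; only the embedded napi_struct matters here */
    struct foo_priv {
            struct napi_struct napi;
            /* ... device registers, rings, etc. ... */
    };

    /* hypothetical helpers provided elsewhere in the driver */
    int foo_clean_rx(struct foo_priv *priv, int budget);
    void foo_enable_irq(struct foo_priv *priv);

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int work_done;

            /* process at most 'budget' RX packets */
            work_done = foo_clean_rx(priv, budget);

            if (work_done < budget) {
                    /* Report the actual work count so the core can decide,
                     * based on gro_flush_timeout, whether to flush GRO now
                     * or keep the flush timer armed a little longer.
                     */
                    napi_complete_done(napi, work_done);
                    /* re-enable the device interrupt, as each driver above does */
                    foo_enable_irq(priv);
            }

            return work_done;
    }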