about summary refs log tree commit diff stats
path: root/linux-2.6-xen-sparse
diff options
context:
space:
mode:
authorkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-03-07 16:00:47 +0000
committerkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-03-07 16:00:47 +0000
commitef9e4decc4643da1cd77641a28689dd1b74129a8 (patch)
tree074c5e0783acbd8777a5b9bd261fc3f0cb88648b /linux-2.6-xen-sparse
parentb67783e2f4a17ddafa85c951b461283ad03e6736 (diff)
downloadxen-ef9e4decc4643da1cd77641a28689dd1b74129a8.tar.gz
xen-ef9e4decc4643da1cd77641a28689dd1b74129a8.tar.bz2
xen-ef9e4decc4643da1cd77641a28689dd1b74129a8.zip
linux: Use fake carrier flag for netfront/netback rather than the real
netif_carrier_XXX() functions. This makes network bringup much faster. Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'linux-2.6-xen-sparse')
-rw-r--r--	linux-2.6-xen-sparse/drivers/xen/netback/common.h	| 15
-rw-r--r--	linux-2.6-xen-sparse/drivers/xen/netback/interface.c	| 18
-rw-r--r--	linux-2.6-xen-sparse/drivers/xen/netback/netback.c	| 12
-rw-r--r--	linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	| 4
-rw-r--r--	linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	| 64
5 files changed, 69 insertions(+), 44 deletions(-)
diff --git a/linux-2.6-xen-sparse/drivers/xen/netback/common.h b/linux-2.6-xen-sparse/drivers/xen/netback/common.h
index 170d2772cf..861c5faeac 100644
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h
@@ -99,9 +99,21 @@ typedef struct netif_st {
struct net_device *dev;
struct net_device_stats stats;
+ unsigned int carrier;
+
wait_queue_head_t waiting_to_free;
} netif_t;
+/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss; also the etherbridge
+ * can be rather lazy in activating its port).
+ */
+#define netback_carrier_on(netif) ((netif)->carrier = 1)
+#define netback_carrier_off(netif) ((netif)->carrier = 0)
+#define netback_carrier_ok(netif) ((netif)->carrier)
+
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
@@ -120,7 +132,8 @@ int netif_map(netif_t *netif, unsigned long tx_ring_ref,
void netif_xenbus_init(void);
-#define netif_schedulable(dev) (netif_running(dev) && netif_carrier_ok(dev))
+#define netif_schedulable(netif) \
+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
void netif_schedule_work(netif_t *netif);
void netif_deschedule_work(netif_t *netif);
diff --git a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c
index 8bf951117f..610891fc29 100644
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c
@@ -66,16 +66,19 @@ static void __netif_down(netif_t *netif)
static int net_open(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (netback_carrier_ok(netif)) {
__netif_up(netif);
+ netif_start_queue(dev);
+ }
return 0;
}
static int net_close(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (netback_carrier_ok(netif))
__netif_down(netif);
+ netif_stop_queue(dev);
return 0;
}
@@ -138,8 +141,6 @@ netif_t *netif_alloc(domid_t domid, unsigned int handle)
return ERR_PTR(-ENOMEM);
}
- netif_carrier_off(dev);
-
netif = netdev_priv(dev);
memset(netif, 0, sizeof(*netif));
netif->domid = domid;
@@ -148,6 +149,8 @@ netif_t *netif_alloc(domid_t domid, unsigned int handle)
init_waitqueue_head(&netif->waiting_to_free);
netif->dev = dev;
+ netback_carrier_off(netif);
+
netif->credit_bytes = netif->remaining_credit = ~0UL;
netif->credit_usec = 0UL;
init_timer(&netif->credit_timeout);
@@ -285,7 +288,7 @@ int netif_map(netif_t *netif, unsigned long tx_ring_ref,
netif_get(netif);
rtnl_lock();
- netif_carrier_on(netif->dev);
+ netback_carrier_on(netif);
if (netif_running(netif->dev))
__netif_up(netif);
rtnl_unlock();
@@ -302,9 +305,10 @@ err_rx:
void netif_disconnect(netif_t *netif)
{
- if (netif_carrier_ok(netif->dev)) {
+ if (netback_carrier_ok(netif)) {
rtnl_lock();
- netif_carrier_off(netif->dev);
+ netback_carrier_off(netif);
+ netif_carrier_off(netif->dev); /* discard queued packets */
if (netif_running(netif->dev))
__netif_down(netif);
rtnl_unlock();
diff --git a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c
index 837dd260ab..df72a6433a 100644
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c
@@ -38,7 +38,7 @@
#include <xen/balloon.h>
#include <xen/interface/memory.h>
-/*#define NETBE_DEBUG_INTERRUPT*/
+#define NETBE_DEBUG_INTERRUPT
/* extra field used in struct page */
#define netif_page_index(pg) (*(long *)&(pg)->mapping)
@@ -234,7 +234,7 @@ static inline int netbk_queue_full(netif_t *netif)
static void tx_queue_callback(unsigned long data)
{
netif_t *netif = (netif_t *)data;
- if (netif_schedulable(netif->dev))
+ if (netif_schedulable(netif))
netif_wake_queue(netif->dev);
}
@@ -245,7 +245,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(skb->dev != dev);
/* Drop the packet if the target domain has no receive buffers. */
- if (unlikely(!netif_schedulable(dev) || netbk_queue_full(netif)))
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
goto drop;
/*
@@ -684,7 +684,7 @@ static void net_rx_action(unsigned long unused)
}
if (netif_queue_stopped(netif->dev) &&
- netif_schedulable(netif->dev) &&
+ netif_schedulable(netif) &&
!netbk_queue_full(netif))
netif_wake_queue(netif->dev);
@@ -742,7 +742,7 @@ static void add_to_net_schedule_list_tail(netif_t *netif)
spin_lock_irq(&net_schedule_list_lock);
if (!__on_net_schedule_list(netif) &&
- likely(netif_schedulable(netif->dev))) {
+ likely(netif_schedulable(netif))) {
list_add_tail(&netif->list, &net_schedule_list);
netif_get(netif);
}
@@ -1340,7 +1340,7 @@ irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
add_to_net_schedule_list_tail(netif);
maybe_schedule_tx_action();
- if (netif_schedulable(netif->dev) && !netbk_queue_full(netif))
+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
netif_wake_queue(netif->dev);
return IRQ_HANDLED;
diff --git a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
index 12a3dc26f8..50bc909309 100644
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
@@ -338,9 +338,7 @@ static void connect(struct backend_info *be)
xenbus_switch_state(dev, XenbusStateConnected);
- /* May not get a kick from the frontend, so start the tx_queue now. */
- if (!netbk_can_queue(be->netif->dev))
- netif_wake_queue(be->netif->dev);
+ netif_wake_queue(be->netif->dev);
}
diff --git a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
index ea503adfd0..17d1daf658 100644
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
@@ -154,6 +154,7 @@ struct netfront_info {
unsigned int irq;
unsigned int copying_receiver;
+ unsigned int carrier;
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
@@ -193,6 +194,15 @@ struct netfront_rx_info {
};
/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss).
+ */
+#define netfront_carrier_on(netif) ((netif)->carrier = 1)
+#define netfront_carrier_off(netif) ((netif)->carrier = 0)
+#define netfront_carrier_ok(netif) ((netif)->carrier)
+
+/*
* Access macros for acquiring freeing slots in tx_skbs[].
*/
@@ -590,6 +600,22 @@ static int send_fake_arp(struct net_device *dev)
return dev_queue_xmit(skb);
}
+static inline int netfront_tx_slot_available(struct netfront_info *np)
+{
+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static inline void network_maybe_wake_tx(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (unlikely(netif_queue_stopped(dev)) &&
+ netfront_tx_slot_available(np) &&
+ likely(netif_running(dev)))
+ netif_wake_queue(dev);
+}
+
static int network_open(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
@@ -597,7 +623,7 @@ static int network_open(struct net_device *dev)
memset(&np->stats, 0, sizeof(np->stats));
spin_lock(&np->rx_lock);
- if (netif_carrier_ok(dev)) {
+ if (netfront_carrier_ok(np)) {
network_alloc_rx_buffers(dev);
np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -605,27 +631,11 @@ static int network_open(struct net_device *dev)
}
spin_unlock(&np->rx_lock);
- netif_start_queue(dev);
+ network_maybe_wake_tx(dev);
return 0;
}
-static inline int netfront_tx_slot_available(struct netfront_info *np)
-{
- return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
-}
-
-static inline void network_maybe_wake_tx(struct net_device *dev)
-{
- struct netfront_info *np = netdev_priv(dev);
-
- if (unlikely(netif_queue_stopped(dev)) &&
- netfront_tx_slot_available(np) &&
- likely(netif_running(dev)))
- netif_wake_queue(dev);
-}
-
static void network_tx_buf_gc(struct net_device *dev)
{
RING_IDX cons, prod;
@@ -633,7 +643,7 @@ static void network_tx_buf_gc(struct net_device *dev)
struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
- BUG_ON(!netif_carrier_ok(dev));
+ BUG_ON(!netfront_carrier_ok(np));
do {
prod = np->tx.sring->rsp_prod;
@@ -703,7 +713,7 @@ static void network_alloc_rx_buffers(struct net_device *dev)
int nr_flips;
netif_rx_request_t *req;
- if (unlikely(!netif_carrier_ok(dev)))
+ if (unlikely(!netfront_carrier_ok(np)))
return;
/*
@@ -934,7 +944,7 @@ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&np->tx_lock);
- if (unlikely(!netif_carrier_ok(dev) ||
+ if (unlikely(!netfront_carrier_ok(np) ||
(frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(dev, skb))) {
spin_unlock_irq(&np->tx_lock);
@@ -1024,7 +1034,7 @@ static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
spin_lock_irqsave(&np->tx_lock, flags);
- if (likely(netif_carrier_ok(dev))) {
+ if (likely(netfront_carrier_ok(np))) {
network_tx_buf_gc(dev);
/* Under tx_lock: protects access to rx shared-ring indexes. */
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -1299,7 +1309,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
spin_lock(&np->rx_lock);
- if (unlikely(!netif_carrier_ok(dev))) {
+ if (unlikely(!netfront_carrier_ok(np))) {
spin_unlock(&np->rx_lock);
return 0;
}
@@ -1317,7 +1327,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
work_done = 0;
while ((i != rp) && (work_done < budget)) {
memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
- memset(extras, 0, sizeof(extras));
+ memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(np, &rinfo, rp, &tmpq,
&pages_flipped);
@@ -1744,7 +1754,7 @@ static int network_connect(struct net_device *dev)
* domain a kick because we've probably just requeued some
* packets.
*/
- netif_carrier_on(dev);
+ netfront_carrier_on(np);
notify_remote_via_irq(np->irq);
network_tx_buf_gc(dev);
network_alloc_rx_buffers(dev);
@@ -1989,7 +1999,7 @@ static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
np->netdev = netdev;
- netif_carrier_off(netdev);
+ netfront_carrier_off(np);
return netdev;
@@ -2023,7 +2033,7 @@ static void netif_disconnect_backend(struct netfront_info *info)
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_irq(&info->tx_lock);
spin_lock(&info->rx_lock);
- netif_carrier_off(info->netdev);
+ netfront_carrier_off(info);
spin_unlock(&info->rx_lock);
spin_unlock_irq(&info->tx_lock);