author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>   2003-11-13 16:41:02 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>   2003-11-13 16:41:02 +0000
commit     7f2d3ddab5f2b7edb80a7d04e97414929945b156 (patch)
tree       71d8a32694bb731e36bde2aa829dd84adfc5a33a
parent     d4ca1c16e2867c461b3a9f37797d3234bffe56c9 (diff)
download   xen-7f2d3ddab5f2b7edb80a7d04e97414929945b156.tar.gz
           xen-7f2d3ddab5f2b7edb80a7d04e97414929945b156.tar.bz2
           xen-7f2d3ddab5f2b7edb80a7d04e97414929945b156.zip
bitkeeper revision 1.596.1.3 (3fb3b41eWUoRU0H8A0jEX5roXjxKkA)
Many files: Greatly simplified Xen softirqs. They are now executed only in the outermost Xen activation; they are never called from within an irq context.
-rw-r--r--  xen/arch/i386/apic.c                     |  44
-rw-r--r--  xen/arch/i386/irq.c                      |   3
-rw-r--r--  xen/arch/i386/smp.c                      |  12
-rw-r--r--  xen/arch/i386/time.c                     |   2
-rw-r--r--  xen/common/ac_timer.c                    |  97
-rw-r--r--  xen/common/softirq.c                     |  42
-rw-r--r--  xen/drivers/scsi/aacraid/aacraid.h       |  10
-rw-r--r--  xen/drivers/scsi/aacraid/commsup.c       |  95
-rw-r--r--  xen/drivers/scsi/aacraid/rx.c            |  14
-rw-r--r--  xen/drivers/scsi/aic7xxx/aic7xxx_osm.c   |   2
-rw-r--r--  xen/drivers/scsi/scsi.c                  |   1
-rw-r--r--  xen/include/asm-i386/softirq.h           |  36
-rw-r--r--  xen/include/xeno/ac_timer.h              |   1
-rw-r--r--  xen/include/xeno/netdevice.h             |  13
14 files changed, 54 insertions(+), 318 deletions(-)
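
[Annotation] Before the per-file hunks, it may help to restate the model this patch moves to: interrupt handlers (the APIC timer ISR, the PIT fallback, the TLB-flush IPI) now only mark work pending via __cpu_raise_softirq(), and do_softirq() runs once, from the outermost Xen activation, never nested inside irq context. The following standalone sketch is not Xen code; the single pending word, NR_SOFTIRQS and the helper names are assumptions made purely to illustrate the raise/dispatch split.

    /* Illustrative sketch of the raise/dispatch split, single CPU assumed. */
    #include <stdio.h>

    #define NR_SOFTIRQS      32
    #define AC_TIMER_SOFTIRQ 0

    static unsigned int softirq_pending;            /* per-CPU word in Xen    */
    static void (*softirq_vec[NR_SOFTIRQS])(void);  /* registered actions     */

    static void raise_softirq(unsigned int nr)      /* called from "irq" code */
    {
        softirq_pending |= 1u << nr;                /* just mark it pending   */
    }

    static void do_softirq(void)                    /* outermost path only    */
    {
        unsigned int pending, nr;

        while ((pending = softirq_pending) != 0) {
            softirq_pending = 0;                    /* Xen uses xchg() here   */
            for (nr = 0; pending != 0; nr++, pending >>= 1)
                if ((pending & 1) && softirq_vec[nr] != NULL)
                    softirq_vec[nr]();
        }
    }

    static void ac_timer_action(void) { puts("ac_timer softirq ran"); }

    int main(void)
    {
        softirq_vec[AC_TIMER_SOFTIRQ] = ac_timer_action; /* open_softirq() analogue   */
        raise_softirq(AC_TIMER_SOFTIRQ);                 /* what the timer ISR now does */
        do_softirq();                                    /* run once, outside irq ctxt  */
        return 0;
    }
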
diff --git a/xen/arch/i386/apic.c b/xen/arch/i386/apic.c
index 0f4b9a3538..8a3a6b5cf8 100644
--- a/xen/arch/i386/apic.c
+++ b/xen/arch/i386/apic.c
@@ -711,58 +711,18 @@ int reprogram_ac_timer(s_time_t timeout)
return 1;
}
-/*
- * Local timer interrupt handler.
- * Here the programmable, accurate timers are executed.
- */
-inline void smp_local_timer_interrupt(struct pt_regs *regs)
-{
- do_ac_timer();
-}
-
-/*
- * Local APIC timer interrupt. This is the most natural way for doing
- * local interrupts, but local timer interrupts can be emulated by
- * broadcast interrupts too. [in case the hw doesnt support APIC timers]
- *
- * [ if a single-CPU system runs an SMP kernel then we call the local
- * interrupt as well. Thus we cannot inline the local irq ... ]
- */
unsigned int apic_timer_irqs [NR_CPUS];
void smp_apic_timer_interrupt(struct pt_regs * regs)
{
int cpu = smp_processor_id();
-#ifndef NDEBUG
- u32 cc_start, cc_end;
- rdtscl(cc_start);
-#endif
- /*
- * the NMI deadlock-detector uses this.
- */
- apic_timer_irqs[cpu]++;
-
- /*
- * NOTE! We'd better ACK the irq immediately, because timer handling can
- * be slow. XXX is this save?
- */
ack_APIC_irq();
- /* call the local handler */
- irq_enter(cpu, 0);
+ apic_timer_irqs[cpu]++;
perfc_incrc(apic_timer);
- smp_local_timer_interrupt(regs);
- irq_exit(cpu, 0);
- if (softirq_pending(cpu))
- do_softirq();
-
-#ifndef NDEBUG
- rdtscl(cc_end);
- if ( (cc_end - cc_start) > (cpu_khz * 100) )
- printk("APIC Long ISR on CPU=%02d %08X -> %08X\n",cpu,cc_start,cc_end);
-#endif
+ __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
}
/*
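
[Annotation] For readability, here is the approximate post-patch shape of the APIC timer handler, reassembled from the context and added lines of the hunk above: it ACKs the interrupt, bumps the per-CPU counter used by the NMI deadlock detector, and defers all timer-list processing to AC_TIMER_SOFTIRQ instead of calling do_ac_timer() and do_softirq() inline.

    void smp_apic_timer_interrupt(struct pt_regs * regs)
    {
        int cpu = smp_processor_id();

        ack_APIC_irq();                              /* ACK early; handling is deferred   */

        apic_timer_irqs[cpu]++;                      /* used by the NMI deadlock detector */
        perfc_incrc(apic_timer);

        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);  /* real work runs in the softirq     */
    }
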
diff --git a/xen/arch/i386/irq.c b/xen/arch/i386/irq.c
index 504c7261d4..2793eba3d7 100644
--- a/xen/arch/i386/irq.c
+++ b/xen/arch/i386/irq.c
@@ -535,9 +535,6 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
desc->handler->end(irq);
spin_unlock(&desc->lock);
- if (softirq_pending(cpu))
- do_softirq();
-
rdtscl(cc_end);
if ( !action || (!(action->flags & SA_NOPROFILE)) )
diff --git a/xen/arch/i386/smp.c b/xen/arch/i386/smp.c
index baf88b5fba..b1dfe64d4f 100644
--- a/xen/arch/i386/smp.c
+++ b/xen/arch/i386/smp.c
@@ -261,17 +261,11 @@ static volatile unsigned long flush_cpumask;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL 0xffffffff
-asmlinkage void smp_invalidate_interrupt (void)
+asmlinkage void smp_invalidate_interrupt(void)
{
- unsigned long cpu = smp_processor_id();
-
- if (!test_bit(cpu, &flush_cpumask))
- return;
-
- local_flush_tlb();
-
ack_APIC_irq();
- clear_bit(cpu, &flush_cpumask);
+ if (test_and_clear_bit(smp_processor_id(), &flush_cpumask))
+ local_flush_tlb();
}
void flush_tlb_others(unsigned long cpumask)
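
[Annotation] The rewritten IPI handler above leans on test_and_clear_bit() to fold the "was this CPU asked to flush?" check and the acknowledgement into one atomic step, so the early return and the separate clear_bit() are no longer needed. A standalone sketch of that pattern follows; it is not Xen code, and the test_and_clear_bit() helper here is an assumption built on GCC's __atomic builtins.

    #include <stdio.h>

    static int test_and_clear_bit(int nr, volatile unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        unsigned long old  = __atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST);
        return (old & mask) != 0;                   /* was the bit set before we cleared it? */
    }

    static volatile unsigned long flush_cpumask = 0x5;  /* CPUs 0 and 2 requested */

    static void ipi_handler(int cpu)
    {
        /* ack_APIC_irq() comes first in the real handler */
        if (test_and_clear_bit(cpu, &flush_cpumask))
            printf("cpu%d: flushing local TLB\n", cpu);
        else
            printf("cpu%d: spurious invalidate IPI, nothing to do\n", cpu);
    }

    int main(void)
    {
        ipi_handler(0);   /* bit set   -> flush */
        ipi_handler(1);   /* bit clear -> skip  */
        ipi_handler(2);   /* bit set   -> flush */
        return 0;
    }
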
diff --git a/xen/arch/i386/time.c b/xen/arch/i386/time.c
index ddacbca63d..1328f31fd4 100644
--- a/xen/arch/i386/time.c
+++ b/xen/arch/i386/time.c
@@ -96,7 +96,7 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Rough hack to allow accurate timers to sort-of-work with no APIC. */
if ( do_timer_lists_from_pit )
- do_ac_timer();
+ __cpu_raise_softirq(smp_processor_id(), AC_TIMER_SOFTIRQ);
}
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0,
diff --git a/xen/common/ac_timer.c b/xen/common/ac_timer.c
index f4752ca3fb..e456e83624 100644
--- a/xen/common/ac_timer.c
+++ b/xen/common/ac_timer.c
@@ -241,88 +241,35 @@ void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
}
-void do_ac_timer(void)
+static void ac_timer_softirq_action(struct softirq_action *a)
{
int cpu = smp_processor_id();
- unsigned long flags;
struct ac_timer *t, **heap;
- s_time_t diff, now = NOW();
- long max;
-
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
-
- do_timer_again:
- TRC(printk("ACT [%02d] do(): now=%lld\n", cpu, NOW()));
-
- heap = ac_timers[cpu].heap;
-
- while ( (GET_HEAP_SIZE(heap) != 0) &&
- ((t = heap[1])->expires < (NOW() + TIMER_SLOP)) )
- {
- remove_entry(heap, t);
-
- /* Do some stats collection. */
- diff = (now - t->expires);
- if ( diff > 0x7fffffff )
- diff = 0x7fffffff; /* THIS IS BAD! */
- max = perfc_valuea(ac_timer_max, cpu);
- if ( diff > max )
- perfc_seta(ac_timer_max, cpu, diff);
-
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
- if ( t->function != NULL )
- t->function(t->data);
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
-
- /* Heap may have grown while the lock was released. */
- heap = ac_timers[cpu].heap;
- }
-
- if ( GET_HEAP_SIZE(heap) != 0 )
- {
- if ( !reprogram_ac_timer(heap[1]->expires) )
- goto do_timer_again;
- }
- else
- {
- reprogram_ac_timer(0);
- }
-
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
- TRC(printk("ACT [%02d] do(): end\n", cpu));
-}
-
-
-static void ac_timer_softirq_action(struct softirq_action *a)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
- struct ac_timer *t;
- int process_timer_list = 0;
+ s_time_t now;
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+ spin_lock_irq(&ac_timers[cpu].lock);
- if ( GET_HEAP_SIZE(ac_timers[cpu].heap) != 0 )
- {
- /*
- * Reprogram timer with earliest deadline. If that has already passed
- * then we will process the timer list as soon as we release the lock.
- */
- t = ac_timers[cpu].heap[1];
- if ( (t->expires < (NOW() + TIMER_SLOP)) ||
- !reprogram_ac_timer(t->expires) )
- process_timer_list = 1;
- }
- else
- {
- /* No deadline to program the timer with.*/
- reprogram_ac_timer((s_time_t)0);
+ do {
+ heap = ac_timers[cpu].heap;
+ now = NOW();
+
+ while ( (GET_HEAP_SIZE(heap) != 0) &&
+ ((t = heap[1])->expires < (now + TIMER_SLOP)) )
+ {
+ remove_entry(heap, t);
+
+ spin_unlock_irq(&ac_timers[cpu].lock);
+ if ( t->function != NULL )
+ t->function(t->data);
+ spin_lock_irq(&ac_timers[cpu].lock);
+
+ /* Heap may have grown while the lock was released. */
+ heap = ac_timers[cpu].heap;
+ }
}
+ while ( !reprogram_ac_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-
- if ( process_timer_list )
- do_ac_timer();
+ spin_unlock_irq(&ac_timers[cpu].lock);
}
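
[Annotation] Reassembled from the hunk above, the resulting softirq handler reads as follows: it pops and runs every timer whose deadline falls within TIMER_SLOP, dropping the per-CPU lock around each callback, then loops until reprogram_ac_timer() accepts the next deadline. If the head of the heap expires while the hardware timer is being reprogrammed, the do/while simply takes another pass, replacing the old goto do_timer_again and the process_timer_list hand-off to do_ac_timer().

    static void ac_timer_softirq_action(struct softirq_action *a)
    {
        int cpu = smp_processor_id();
        struct ac_timer *t, **heap;
        s_time_t now;

        spin_lock_irq(&ac_timers[cpu].lock);

        do {
            heap = ac_timers[cpu].heap;
            now  = NOW();

            while ( (GET_HEAP_SIZE(heap) != 0) &&
                    ((t = heap[1])->expires < (now + TIMER_SLOP)) )
            {
                remove_entry(heap, t);

                spin_unlock_irq(&ac_timers[cpu].lock);
                if ( t->function != NULL )
                    t->function(t->data);
                spin_lock_irq(&ac_timers[cpu].lock);

                /* Heap may have grown while the lock was released. */
                heap = ac_timers[cpu].heap;
            }
        }
        while ( !reprogram_ac_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

        spin_unlock_irq(&ac_timers[cpu].lock);
    }
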
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 951fb91395..f209c6668d 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -12,9 +12,7 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
-//#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
-//#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/tqueue.h>
@@ -51,48 +49,30 @@ asmlinkage void do_softirq()
int cpu = smp_processor_id();
struct softirq_action *h;
__u32 pending;
- long flags;
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- pending = xchg(&softirq_pending(cpu), 0);
- if ( !pending ) goto out;
+ if ( in_interrupt() )
+ BUG();
local_bh_disable();
- do {
- local_irq_enable();
-
+ while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
+ {
h = softirq_vec;
-
- do {
+ while ( pending )
+ {
if (pending & 1)
h->action(h);
h++;
pending >>= 1;
- } while (pending);
-
- local_irq_disable();
-
- pending = xchg(&softirq_pending(cpu), 0);
- } while ( pending );
+ }
+ }
__local_bh_enable();
-
-out:
- local_irq_restore(flags);
}
-/*
- * This function must run with irq disabled!
- */
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
__cpu_raise_softirq(cpu, nr);
-
#ifdef CONFIG_SMP
if ( cpu != smp_processor_id() )
smp_send_event_check_cpu(cpu);
@@ -101,11 +81,7 @@ inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
void raise_softirq(unsigned int nr)
{
- long flags;
-
- local_irq_save(flags);
cpu_raise_softirq(smp_processor_id(), nr);
- local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
@@ -224,7 +200,7 @@ void tasklet_init(struct tasklet_struct *t,
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
- printk("Attempt to kill tasklet from interrupt\n");
+ BUG();
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
while (test_bit(TASKLET_STATE_SCHED, &t->state))
do_softirq();
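
[Annotation] Put back together, the simplified do_softirq() is short enough to quote in full. It now assumes it can never be entered from irq context (hence the BUG()), so the local_irq_save/restore choreography and the out: label disappear; the whole body is one loop that keeps claiming the pending mask with xchg() and dispatching until it stays empty.

    asmlinkage void do_softirq()
    {
        int cpu = smp_processor_id();
        struct softirq_action *h;
        __u32 pending;

        if ( in_interrupt() )
            BUG();                 /* never from irq context any more */

        local_bh_disable();

        while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
        {
            h = softirq_vec;
            while ( pending )
            {
                if (pending & 1)
                    h->action(h);
                h++;
                pending >>= 1;
            }
        }

        __local_bh_enable();
    }
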
diff --git a/xen/drivers/scsi/aacraid/aacraid.h b/xen/drivers/scsi/aacraid/aacraid.h
index eb38cd1361..ecf56413cd 100644
--- a/xen/drivers/scsi/aacraid/aacraid.h
+++ b/xen/drivers/scsi/aacraid/aacraid.h
@@ -3,11 +3,8 @@
# define dprintk(x)
#endif
-/* Start of Xen additions XXX */
#include <asm/byteorder.h>
#include <xeno/interrupt.h>
-#define TRY_TASKLET
-/* End of Xen additions XXX */
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -1416,12 +1413,7 @@ int aac_rx_init(struct aac_dev *dev, unsigned long devNumber);
int aac_sa_init(struct aac_dev *dev, unsigned long devNumber);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
-#ifdef TRY_TASKLET
-extern struct tasklet_struct aac_command_tasklet;
-void aac_command_thread(unsigned long data);
-#else
-int aac_command_thread(struct aac_dev * dev);
-#endif
+void aac_command_thread(struct aac_dev * dev);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
int fib_adapter_complete(struct fib * fibptr, unsigned short size);
struct aac_driver_ident* aac_get_driver_ident(int devtype);
diff --git a/xen/drivers/scsi/aacraid/commsup.c b/xen/drivers/scsi/aacraid/commsup.c
index 84c77464f5..4450dcbe8b 100644
--- a/xen/drivers/scsi/aacraid/commsup.c
+++ b/xen/drivers/scsi/aacraid/commsup.c
@@ -523,24 +523,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
-#if 0
- down(&fibptr->event_wait);
- if(fibptr->done == 0)
- BUG();
-#endif
-#ifdef TRY_TASKLET
- while (!fibptr->done) {
- tasklet_schedule(&aac_command_tasklet);
- do_softirq(); /* force execution */
- }
-#else
- while (!fibptr->done) {
- mdelay(100);
- aac_command_thread(dev);
- }
-#endif
-
-
+ while (!fibptr->done)
+ aac_command_thread(dev);
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
return -ETIMEDOUT;
else
@@ -924,62 +908,27 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
* until the queue is empty. When the queue is empty it will wait for
* more FIBs.
*/
-#ifndef TRY_TASKLET
-int aac_command_thread(struct aac_dev * dev)
-{
-#else
-DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
-void aac_command_thread(unsigned long data)
-#define return(_x) return
+void aac_command_thread(struct aac_dev * dev)
{
- struct aac_dev *dev = (struct aac_dev *)data;
-#endif
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
-#if 0
- DECLARE_WAITQUEUE(wait, current);
-#endif
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+
+ spin_lock_irqsave(&lock, flags);
- /*
- * We can only have one thread per adapter for AIF's.
- */
- if (dev->aif_thread)
- return(-EINVAL);
-#if 0
- /*
- * Set up the name that will appear in 'ps'
- * stored in task_struct.comm[16].
- */
- sprintf(current->comm, "aacraid");
- daemonize();
-#endif
- /*
- * Let the DPC know it has a place to send the AIF's to.
- */
- dev->aif_thread = 1;
-#if 0
- add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- dprintk ((KERN_INFO "aac_command_thread start\n"));
- while(1)
-#endif
{
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+ spin_lock(queues->queue[HostNormCmdQueue].lock);
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
-#if 0
- set_current_state(TASK_RUNNING);
-#endif
-
entry = queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+ spin_unlock(queues->queue[HostNormCmdQueue].lock);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a
@@ -1074,13 +1023,6 @@ void aac_command_thread(unsigned long data)
*/
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
fibctx->count++;
-#if 0
- /*
- * Set the event to wake up the
- * thread that will waiting.
- */
- up(&fibctx->wait_sem);
-#endif
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
if(newfib)
@@ -1097,27 +1039,14 @@ void aac_command_thread(unsigned long data)
fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+ spin_lock(queues->queue[HostNormCmdQueue].lock);
kfree(fib);
}
/*
* There are no more AIF's
*/
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
-#if 0
- schedule();
-
- if(signal_pending(current))
- break;
- set_current_state(TASK_INTERRUPTIBLE);
-#endif
+ spin_unlock(queues->queue[HostNormCmdQueue].lock);
}
-#if 0
- remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
- dev->aif_thread = 0;
- complete_and_exit(&dev->aif_completion, 0);
-#else
- mdelay(50);
- dev->aif_thread = 0;
-#endif
+
+ spin_unlock_irqrestore(&lock, flags);
}
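
[Annotation] Condensed from the hunk above, the new aac_command_thread() is no longer a kernel thread or a tasklet at all: it is a plain function the waiter calls directly, serialized by a function-local spinlock, with the queue lock taken via spin_lock() because the outer spin_lock_irqsave() has already disabled interrupts. The skeleton below is reassembled from the diff with the queue-drain details elided.

    void aac_command_thread(struct aac_dev * dev)
    {
        struct aac_queue_block *queues = dev->queues;
        unsigned long flags;
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;

        spin_lock_irqsave(&lock, flags);            /* one poller at a time */

        spin_lock(queues->queue[HostNormCmdQueue].lock);
        while (!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
            /* ... dequeue and handle one AIF FIB, as in the hunk above ... */
        }
        spin_unlock(queues->queue[HostNormCmdQueue].lock);

        spin_unlock_irqrestore(&lock, flags);
    }
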
diff --git a/xen/drivers/scsi/aacraid/rx.c b/xen/drivers/scsi/aacraid/rx.c
index ebb797041e..0cb17691d1 100644
--- a/xen/drivers/scsi/aacraid/rx.c
+++ b/xen/drivers/scsi/aacraid/rx.c
@@ -406,20 +406,6 @@ int aac_rx_init(struct aac_dev *dev, unsigned long num)
if (aac_init_adapter(dev) == NULL)
return -1;
-#ifdef TRY_TASKLET
- aac_command_tasklet.data = (unsigned long)dev;
- tasklet_enable(&aac_command_tasklet);
-#else
- /*
- * Start any kernel threads needed
- */
- dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
- if(dev->thread_pid < 0)
- {
- printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
- return -1;
- }
-#endif
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
diff --git a/xen/drivers/scsi/aic7xxx/aic7xxx_osm.c b/xen/drivers/scsi/aic7xxx/aic7xxx_osm.c
index e868f32bbf..4f4ce01c4a 100644
--- a/xen/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/xen/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1903,7 +1903,6 @@ ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
if (next_dev) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
tasklet_schedule(&ahc->platform_data->runq_tasklet);
- do_softirq();
#else
ahc_runq_tasklet((unsigned long)ahc);
#endif
@@ -2535,7 +2534,6 @@ ahc_linux_release_sim_queue(u_long arg)
scsi_unblock_requests(ahc->platform_data->host);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
tasklet_schedule(&ahc->platform_data->runq_tasklet);
- do_softirq();
#else
ahc_runq_tasklet((unsigned long)ahc);
#endif
diff --git a/xen/drivers/scsi/scsi.c b/xen/drivers/scsi/scsi.c
index 3826d4ed1e..ed869dd6aa 100644
--- a/xen/drivers/scsi/scsi.c
+++ b/xen/drivers/scsi/scsi.c
@@ -840,6 +840,7 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
if it hasn't been done already. This is not the correct behaviour
in xen ... hmm .. how to fix? */
while(wait) {
+ do_softirq(); /* XXX KAF: this is safe, and necessary!! */
udelay(500);
usecs += 500;
if(usecs > 1000000) {
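
[Annotation] The one-line addition above is the flip side of removing do_softirq() from the irq exit path: a busy-wait that used to get softirqs run "for free" on every interrupt return must now pump them itself, or the completion it is spinning on may never arrive. A minimal standalone illustration follows; it is not Xen code, and every name in it is invented for the example.

    #include <stdio.h>

    static int pending_work = 1;     /* stands in for softirq_pending(cpu) */
    static int request_done = 0;     /* stands in for the request completing */

    static void run_deferred_work(void)        /* do_softirq() analogue */
    {
        if (pending_work) {
            pending_work = 0;
            request_done = 1;                  /* completion happens in deferred work */
        }
    }

    int main(void)
    {
        int spins = 0;

        while (!request_done) {                /* scsi_wait_req()-style busy wait */
            run_deferred_work();               /* without this call: spins forever */
            spins++;
        }
        printf("completed after %d iteration(s)\n", spins);
        return 0;
    }
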
diff --git a/xen/include/asm-i386/softirq.h b/xen/include/asm-i386/softirq.h
index 254224411b..4f37cfdb7d 100644
--- a/xen/include/asm-i386/softirq.h
+++ b/xen/include/asm-i386/softirq.h
@@ -9,40 +9,10 @@
#define cpu_bh_disable(cpu) \
do { local_bh_count(cpu)++; barrier(); } while (0)
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
+#define local_bh_disable() cpu_bh_disable(smp_processor_id())
+#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
+#define local_bh_enable() __local_bh_enable()
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-/*
- * NOTE: this assembly code assumes:
- *
- * (char *)&local_bh_count - 8 == (char *)&softirq_pending
- *
- * If you change the offsets in irq_stat then you have to
- * update this code as well.
- */
-#define local_bh_enable() \
-do { \
- unsigned int *ptr = &local_bh_count(smp_processor_id()); \
- \
- barrier(); \
- if (!--*ptr) \
- __asm__ __volatile__ ( \
- "cmpl $0, -8(%0);" \
- "jnz 2f;" \
- "1:;" \
- \
- ".section .text.lock,\"ax\";" \
- "2: pushl %%eax; pushl %%ecx; pushl %%edx;" \
- "call %c1;" \
- "popl %%edx; popl %%ecx; popl %%eax;" \
- "jmp 1b;" \
- ".previous;" \
- \
- : /* no output */ \
- : "r" (ptr), "i" (do_softirq) \
- /* no registers clobbered */ ); \
-} while (0)
-
#endif /* __ASM_SOFTIRQ_H */
diff --git a/xen/include/xeno/ac_timer.h b/xen/include/xeno/ac_timer.h
index 78b380d76f..987fbeed33 100644
--- a/xen/include/xeno/ac_timer.h
+++ b/xen/include/xeno/ac_timer.h
@@ -67,6 +67,5 @@ static __inline__ int active_ac_timer(struct ac_timer *timer)
/* interface used by programmable timer, implemented hardware dependent */
extern int reprogram_ac_timer(s_time_t timeout);
-extern void do_ac_timer(void);
#endif /* _AC_TIMER_H_ */
diff --git a/xen/include/xeno/netdevice.h b/xen/include/xeno/netdevice.h
index e06faaaf64..de639a884d 100644
--- a/xen/include/xeno/netdevice.h
+++ b/xen/include/xeno/netdevice.h
@@ -466,19 +466,6 @@ extern int dev_ioctl(unsigned int cmd, void *);
extern int dev_change_flags(struct net_device *, unsigned);
extern void dev_init(void);
-extern int netdev_nit;
-
-/* Post buffer to the network code from _non interrupt_ context.
- * see net/core/dev.c for netif_rx description.
- */
-static inline int netif_rx_ni(struct sk_buff *skb)
-{
- int err = netif_rx(skb);
- if (softirq_pending(smp_processor_id()))
- do_softirq();
- return err;
-}
-
extern int netdev_finish_unregister(struct net_device *dev);
static inline void dev_put(struct net_device *dev)