path: root/package/busybox/config
Commit message (author, date, files changed, lines -removed/+added)
* update busybox to 1.18.4, patch from Peter Wagner (Florian Fainelli, 2011-04-03, 17 files, -196/+432)
* busybox: enable cmp by default (#9133) (Felix Fietkau, 2011-03-29, 1 file, -1/+1)
* block-mount busybox: Removed busybox versions of blkid and swap-utils since t... (Daniel Dickinson, 2011-03-20, 1 file, -25/+25)
* busybox: enable mount helpers by default (#8946) (Jo-Philipp Wich, 2011-03-01, 1 file, -1/+1)
* busybox: disable killall5 - nothing uses it (Felix Fietkau, 2011-02-13, 1 file, -1/+1)
* busybox: disable nfs mount support by default - people that want to mount nfs... (Felix Fietkau, 2011-02-13, 2 files, -2/+2)
* busybox: disable diff by default - apparently it was necessary for ipkg once,... (Felix Fietkau, 2011-02-13, 1 file, -1/+1)
* package/busybox: add missing bits from 1.17.1 update (mostly config stuff), r... (Nicolas Thill, 2010-08-15, 19 files, -439/+810)
* package/busybox: enable blkid, mkswap & swaponoff applets, enable uuid/volume... (Nicolas Thill, 2010-04-06, 1 file, -15/+15)
* busybox: update to v1.16.1 (based on v1.16.0 update patch from Peter Wagner) (Nicolas Thill, 2010-04-03, 13 files, -69/+255)
* busybox: disable httpd applet (Jo-Philipp Wich, 2010-03-25, 1 file, -10/+10)
* busybox: fix default value for BUSYBOX_CONFIG_DEFAULT_SETFONT_DIR (unused) op... (Nicolas Thill, 2010-03-10, 1 file, -1/+1)
* busybox: adjust hush default configuration (Felix Fietkau, 2009-12-09, 1 file, -10/+10)
* update busybox to 1.15.2 (#5926) (Florian Fainelli, 2009-12-07, 17 files, -122/+295)
* busybox: support 64 bit math in shell tests. this does not seem to increase t... (Felix Fietkau, 2009-10-03, 1 file, -2/+2)
* busybox: update to v1.14.4 (closes: #5619) (Nicolas Thill, 2009-09-28, 11 files, -106/+234)
* busybox: update to v1.13.4 (closes: #4279) (Nicolas Thill, 2009-05-25, 20 files, -247/+338)
* busybox: update to v1.12.4 (partially closes: #4279) (Nicolas Thill, 2009-05-25, 19 files, -337/+416)
* busybox brctl does not support showmacs or showstp (#4206) (Florian Fainelli, 2009-01-18, 1 file, -2/+2)
* busybox: Enabled FEATURE_HTTPD_PROXY by default (cost is 580 bytes) (Rod Whitby, 2008-12-07, 1 file, -1/+1)
* bridge (brctl) changes: - enable brctl in busybox - disable bridge-utils - bu... (Nicolas Thill, 2008-11-11, 1 file, -3/+3)
* remove ipkg from busybox (Felix Fietkau, 2008-09-02, 1 file, -9/+0)
* remove awx from busybox, refresh patches (Felix Fietkau, 2008-09-02, 1 file, -7/+0)
* upgrade busybox to v1.11.1 and add current upstream fixes (Imre Kaloz, 2008-08-20, 20 files, -259/+1117)
* make opkg the default package manager, disable busybox ipkg from building by ... (Travis Kemen, 2008-08-16, 1 file, -1/+1)
* make busybox have hwclock tool enabled by default (John Crispin, 2008-01-25, 1 file, -1/+1)
* disable use of sendfile() for httpd (Travis Kemen, 2008-01-15, 1 file, -1/+1)
* Make ipkg depend on tar and gunzip (#2821) (Florian Fainelli, 2007-12-17, 1 file, -0/+2)
* upgrade busybox to 1.8.1 (Felix Fietkau, 2007-11-10, 8 files, -59/+199)
* enable nice (Felix Fietkau, 2007-10-07, 1 file, -1/+1)
* Upgrade busybox to 1.7.2 - clean up insmod crap - add some lineno/programname... (Felix Fietkau, 2007-10-05, 20 files, -275/+859)
* enable ps -w by default (Felix Fietkau, 2007-09-22, 1 file, -1/+1)
* enable start-stop-daemon by default, i want to use this to clean up a few ini... (Felix Fietkau, 2007-09-06, 1 file, -1/+1)
* Fix the CONFIG_LFS option (#2276) (Florian Fainelli, 2007-08-28, 1 file, -1/+1)
* busybox: Ensure that diff is available for ipkg upgrade requirements (Rod Whitby, 2007-07-13, 1 file, -0/+1)
* update to busybox 1.4.2 (fixes #1653) (Mike Baker, 2007-05-10, 1 file, -0/+1)
* add awx (awk web extension) - experimental core for a new web interface frame... (Felix Fietkau, 2007-03-11, 1 file, -0/+7)
* enable httpd config option for running scripts through an interpreter (Felix Fietkau, 2007-03-11, 1 file, -1/+1)
* nuke mdev and replace it with hotplug2 :) (Felix Fietkau, 2007-03-04, 1 file, -2/+2)
* R.I.P. devfs (Imre Kaloz, 2007-03-03, 2 files, -3/+3)
* enable httpd sighup config reload by default (Felix Fietkau, 2007-01-31, 1 file, -1/+1)
* upgrade busybox to 1.4.0 (Felix Fietkau, 2007-01-25, 9 files, -50/+145)
* enable pretty lsmod output for linux 2.6 (Felix Fietkau, 2007-01-10, 1 file, -1/+1)
* make telnetd standalone (Felix Fietkau, 2007-01-01, 1 file, -1/+1)
* update busybox menuconfig for the latest version (Felix Fietkau, 2007-01-01, 16 files, -112/+349)
* finally move buildroot-ng to trunk (Felix Fietkau, 2006-03-20, 19 files, -0/+4627)
diff -pruN ../orig-linux-2.6.16.29/arch/i386/Kconfig ./arch/i386/Kconfig
--- ../orig-linux-2.6.16.29/arch/i386/Kconfig	2006-09-12 19:02:10.000000000 +0100
+++ ./arch/i386/Kconfig	2006-09-19 14:05:48.000000000 +0100
@@ -202,6 +202,19 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
+config SMP_ALTERNATIVES
+	bool "SMP alternatives support (EXPERIMENTAL)"
+	depends on SMP && EXPERIMENTAL
+	help
+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
+	  host slightly by replacing certain key instruction sequences
+	  according to whether we currently have more than one CPU available.
+	  This should provide a noticeable boost to performance when
+	  running SMP kernels on UP machines, and have negligible impact
+	  when running on a true SMP host.
+
+	  If unsure, say N.
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-255)"
 	range 2 255
diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile ./arch/i386/kernel/Makefile
--- ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile	2006-09-12 19:02:10.000000000 +0100
+++ ./arch/i386/kernel/Makefile	2006-09-19 14:05:48.000000000 +0100
@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
 obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault.o
 obj-$(CONFIG_VM86)		+= vm86.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
 
 EXTRA_AFLAGS   := -traditional
 
diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c ./arch/i386/kernel/smpalts.c
--- ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
+++ ./arch/i386/kernel/smpalts.c	2006-09-19 14:05:48.000000000 +0100
@@ -0,0 +1,85 @@
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/smp_alt.h>
+#include <asm/processor.h>
+#include <asm/string.h>
+
+struct smp_replacement_record {
+	unsigned char targ_size;
+	unsigned char smp1_size;
+	unsigned char smp2_size;
+	unsigned char up_size;
+	unsigned char feature;
+	unsigned char data[0];
+};
+
+struct smp_alternative_record {
+	void *targ_start;
+	struct smp_replacement_record *repl;
+};
+
+extern struct smp_alternative_record __start_smp_alternatives_table,
+  __stop_smp_alternatives_table;
+extern unsigned long __init_begin, __init_end;
+
+void prepare_for_smp(void)
+{
+	struct smp_alternative_record *r;
+	printk(KERN_INFO "Enabling SMP...\n");
+	for (r = &__start_smp_alternatives_table;
+	     r != &__stop_smp_alternatives_table;
+	     r++) {
+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
+		BUG_ON(r->repl->targ_size < r->repl->up_size);
+		if (system_state == SYSTEM_RUNNING &&
+		    r->targ_start >= (void *)&__init_begin &&
+		    r->targ_start < (void *)&__init_end)
+			continue;
+		if (r->repl->feature != (unsigned char)-1 &&
+		    boot_cpu_has(r->repl->feature)) {
+			memcpy(r->targ_start,
+			       r->repl->data + r->repl->smp1_size,
+			       r->repl->smp2_size);
+			memset(r->targ_start + r->repl->smp2_size,
+			       0x90,
+			       r->repl->targ_size - r->repl->smp2_size);
+		} else {
+			memcpy(r->targ_start,
+			       r->repl->data,
+			       r->repl->smp1_size);
+			memset(r->targ_start + r->repl->smp1_size,
+			       0x90,
+			       r->repl->targ_size - r->repl->smp1_size);
+		}
+	}
+	/* Paranoia */
+	asm volatile ("jmp 1f\n1:");
+	mb();
+}
+
+void unprepare_for_smp(void)
+{
+	struct smp_alternative_record *r;
+	printk(KERN_INFO "Disabling SMP...\n");
+	for (r = &__start_smp_alternatives_table;
+	     r != &__stop_smp_alternatives_table;
+	     r++) {
+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
+		BUG_ON(r->repl->targ_size < r->repl->up_size);
+		if (system_state == SYSTEM_RUNNING &&
+		    r->targ_start >= (void *)&__init_begin &&
+		    r->targ_start < (void *)&__init_end)
+			continue;
+		memcpy(r->targ_start,
+		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
+		       r->repl->up_size);
+		memset(r->targ_start + r->repl->up_size,
+		       0x90,
+		       r->repl->targ_size - r->repl->up_size);
+	}
+	/* Paranoia */
+	asm volatile ("jmp 1f\n1:");
+	mb();
+}
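
The two loops above pick one of three byte sequences stored back to back in
each record's data[] (smp1, then smp2, then up), copy the winner over the
patch site, and pad the remainder with 0x90 (NOP); prepare_for_smp() prefers
the smp2 variant when the CPU has the record's feature bit. A minimal
user-space sketch of the same table walk, with plain byte buffers standing in
for kernel text (struct and function names here are made up for the example):

#include <stdio.h>
#include <string.h>

/* Mirrors struct smp_replacement_record: data[] holds the smp1 bytes,
 * then the smp2 bytes, then the up bytes, back to back. */
struct repl {
	unsigned char targ_size;   /* bytes reserved at the patch site    */
	unsigned char smp1_size;   /* generic SMP sequence                */
	unsigned char smp2_size;   /* SMP sequence gated on a CPU feature */
	unsigned char up_size;     /* uniprocessor sequence               */
	unsigned char data[8];
};

static void patch(unsigned char *site, const struct repl *r,
		  int smp, int have_feature)
{
	const unsigned char *src;
	unsigned char len;

	if (!smp) {                        /* the unprepare_for_smp() case */
		src = r->data + r->smp1_size + r->smp2_size;
		len = r->up_size;
	} else if (have_feature) {         /* feature present: use smp2    */
		src = r->data + r->smp1_size;
		len = r->smp2_size;
	} else {                           /* plain SMP: use smp1          */
		src = r->data;
		len = r->smp1_size;
	}
	memcpy(site, src, len);
	memset(site + len, 0x90, r->targ_size - len);   /* pad with NOPs */
}

int main(void)
{
	/* The LOCK macro case below: lock prefix (0xf0) on SMP, nop on UP */
	struct repl r = { 1, 1, 0, 1, { 0xf0, 0x90 } };
	unsigned char site[1] = { 0x90 };

	patch(site, &r, 1, 0);
	printf("SMP: %02x\n", site[0]);    /* f0 */
	patch(site, &r, 0, 0);
	printf("UP:  %02x\n", site[0]);    /* 90 */
	return 0;
}
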
diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c ./arch/i386/kernel/smpboot.c
--- ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c	2006-09-12 19:02:10.000000000 +0100
+++ ./arch/i386/kernel/smpboot.c	2006-09-19 14:05:48.000000000 +0100
@@ -1218,6 +1218,11 @@ static void __init smp_boot_cpus(unsigne
 		if (max_cpus <= cpucount+1)
 			continue;
 
+#ifdef CONFIG_SMP_ALTERNATIVES
+		if (kicked == 1)
+			prepare_for_smp();
+#endif
+
 		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
 			printk("CPU #%d not responding - cannot use it.\n",
 								apicid);
@@ -1396,6 +1401,11 @@ int __devinit __cpu_up(unsigned int cpu)
 		return -EIO;
 	}
 
+#ifdef CONFIG_SMP_ALTERNATIVES
+	if (num_online_cpus() == 1)
+		prepare_for_smp();
+#endif
+
 	local_irq_enable();
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Unleash the CPU! */
diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
--- ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S	2006-09-12 19:02:10.000000000 +0100
+++ ./arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:05:48.000000000 +0100
@@ -34,6 +34,13 @@ SECTIONS
   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
   __stop___ex_table = .;
 
+  . = ALIGN(16);
+  __start_smp_alternatives_table = .;
+  __smp_alternatives : { *(__smp_alternatives) }
+  __stop_smp_alternatives_table = .;
+
+  __smp_replacements : { *(__smp_replacements) }
+
   RODATA
 
   /* writeable */
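
The linker-script change above only brackets whatever the linker collected
into __smp_alternatives with start/stop symbols, so smpalts.c can walk the
entries as one contiguous array. The same idiom works in user space: for any
ELF section whose name is a valid C identifier, GNU ld synthesizes
__start_<name> and __stop_<name> automatically. A sketch (the section and
entry names are invented for the example):

#include <stdio.h>

struct entry {
	const char *name;
};

/* Drop one entry into a custom section per use; the linker concatenates
 * them in link order, exactly as *(__smp_alternatives) does above. */
#define REGISTER(n) \
	static const struct entry entry_##n \
	__attribute__((section("mytable"), used)) = { #n }

REGISTER(foo);
REGISTER(bar);

/* Bounds provided by GNU ld for C-identifier section names. */
extern const struct entry __start_mytable[], __stop_mytable[];

int main(void)
{
	for (const struct entry *e = __start_mytable; e != __stop_mytable; e++)
		printf("registered: %s\n", e->name);
	return 0;
}
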
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/atomic.h ./include/asm-i386/atomic.h
--- ../orig-linux-2.6.16.29/include/asm-i386/atomic.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/atomic.h	2006-09-19 14:05:48.000000000 +0100
@@ -4,18 +4,13 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 #include <asm/processor.h>
+#include <asm/smp_alt.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/bitops.h ./include/asm-i386/bitops.h
--- ../orig-linux-2.6.16.29/include/asm-i386/bitops.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/bitops.h	2006-09-19 14:05:48.000000000 +0100
@@ -7,6 +7,7 @@
 
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/smp_alt.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
 #define ADDR (*(volatile long *) addr)
 
 /**
@@ -41,7 +36,7 @@
  */
 static inline void set_bit(int nr, volatile unsigned long * addr)
 {
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btsl %1,%0"
 		:"+m" (ADDR)
 		:"Ir" (nr));
@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
  */
 static inline void clear_bit(int nr, volatile unsigned long * addr)
 {
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btrl %1,%0"
 		:"+m" (ADDR)
 		:"Ir" (nr));
@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
  */
 static inline void change_bit(int nr, volatile unsigned long * addr)
 {
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btcl %1,%0"
 		:"+m" (ADDR)
 		:"Ir" (nr));
@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
 {
 	int oldbit;
 
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btsl %2,%1\n\tsbbl %0,%0"
 		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
 {
 	int oldbit;
 
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btrl %2,%1\n\tsbbl %0,%0"
 		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
 {
 	int oldbit;
 
-	__asm__ __volatile__( LOCK_PREFIX
+	__asm__ __volatile__( LOCK
 		"btcl %2,%1\n\tsbbl %0,%0"
 		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/futex.h ./include/asm-i386/futex.h
--- ../orig-linux-2.6.16.29/include/asm-i386/futex.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/futex.h	2006-09-19 14:05:48.000000000 +0100
@@ -28,7 +28,7 @@
 "1:	movl	%2, %0\n\
 	movl	%0, %3\n"					\
 	insn "\n"						\
-"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
+"2:	" LOCK "cmpxchgl %3, %2\n\
 	jnz	1b\n\
 3:	.section .fixup,\"ax\"\n\
 4:	mov	%5, %1\n\
@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, 
 #endif
 		switch (op) {
 		case FUTEX_OP_ADD:
-			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
+			__futex_atomic_op1(LOCK "xaddl %0, %2", ret,
 					   oldval, uaddr, oparg);
 			break;
 		case FUTEX_OP_OR:
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h ./include/asm-i386/rwsem.h
--- ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/rwsem.h	2006-09-19 14:05:48.000000000 +0100
@@ -40,6 +40,7 @@
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <asm/smp_alt.h>
 
 struct rwsem_waiter;
 
@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
 {
 	__asm__ __volatile__(
 		"# beginning down_read\n\t"
-LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
+LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
 		"  js        2f\n\t" /* jump if we weren't granted the lock */
 		"1:\n\t"
 		LOCK_SECTION_START("")
@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
 		"  movl	     %1,%2\n\t"
 		"  addl      %3,%2\n\t"
 		"  jle	     2f\n\t"
-LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
+LOCK	        "  cmpxchgl  %2,%0\n\t"
 		"  jnz	     1b\n\t"
 		"2:\n\t"
 		"# ending __down_read_trylock\n\t"
@@ -150,7 +151,7 @@ static inline void __down_write(struct r
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	__asm__ __volatile__(
 		"# beginning down_write\n\t"
-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
 		"1:\n\t"
@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
 	__asm__ __volatile__(
 		"# beginning __up_read\n\t"
-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
 		"  js        2f\n\t" /* jump if the lock is being waited upon */
 		"1:\n\t"
 		LOCK_SECTION_START("")
@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
 	__asm__ __volatile__(
 		"# beginning __up_write\n\t"
 		"  movl      %2,%%edx\n\t"
-LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
+LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
 		"1:\n\t"
 		LOCK_SECTION_START("")
@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
 {
 	__asm__ __volatile__(
 		"# beginning __downgrade_write\n\t"
-LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
 		"  js        2f\n\t" /* jump if the lock is being waited upon */
 		"1:\n\t"
 		LOCK_SECTION_START("")
@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
 	__asm__ __volatile__(
-LOCK_PREFIX	"addl %1,%0"
+LOCK	          "addl %1,%0"
 		: "=m"(sem->count)
 		: "ir"(delta), "m"(sem->count));
 }
@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
 	int tmp = delta;
 
 	__asm__ __volatile__(
-LOCK_PREFIX	"xadd %0,(%2)"
+LOCK  	          "xadd %0,(%2)"
 		: "+r"(tmp), "=m"(sem->count)
 		: "r"(sem), "m"(sem->count)
 		: "memory");
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h ./include/asm-i386/smp_alt.h
--- ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
+++ ./include/asm-i386/smp_alt.h	2006-09-19 14:05:48.000000000 +0100
@@ -0,0 +1,32 @@
+#ifndef __ASM_SMP_ALT_H__
+#define __ASM_SMP_ALT_H__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
+#define LOCK \
+        "6677: nop\n" \
+	".section __smp_alternatives,\"a\"\n" \
+	".long 6677b\n" \
+	".long 6678f\n" \
+	".previous\n" \
+	".section __smp_replacements,\"a\"\n" \
+	"6678: .byte 1\n" \
+	".byte 1\n" \
+	".byte 0\n" \
+        ".byte 1\n" \
+	".byte -1\n" \
+	"lock\n" \
+	"nop\n" \
+	".previous\n"
+void prepare_for_smp(void);
+void unprepare_for_smp(void);
+#else
+#define LOCK "lock ; "
+#endif
+#else
+#define LOCK ""
+#endif
+
+#endif /* __ASM_SMP_ALT_H__ */
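
Read against struct smp_replacement_record in smpalts.c, the record the LOCK
macro emits decodes byte for byte as follows (an illustration in C, not part
of the patch):

/* What "6678: .byte 1 / .byte 1 / .byte 0 / .byte 1 / .byte -1 / lock / nop"
 * lays down in __smp_replacements, written out as a C array: */
static const unsigned char lock_record[] = {
	1,    /* targ_size: one byte reserved at the site (the "6677: nop") */
	1,    /* smp1_size: the SMP sequence is the one-byte lock prefix    */
	0,    /* smp2_size: no feature-dependent alternative                */
	1,    /* up_size:   the UP sequence is a one-byte nop               */
	0xff, /* feature:   .byte -1, i.e. skip the boot_cpu_has() test     */
	0xf0, /* data[0]:   "lock"                                          */
	0x90, /* data[1]:   "nop"                                           */
};

The matching __smp_alternatives entry is just the two .long pointers: the
patch site (6677b) and this record (6678f).
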
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h ./include/asm-i386/spinlock.h
--- ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/spinlock.h	2006-09-19 14:05:48.000000000 +0100
@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/smp_alt.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -23,7 +24,8 @@
 
 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	LOCK \
+	"decb %0\n\t" \
 	"jns 3f\n" \
 	"2:\t" \
 	"rep;nop\n\t" \
@@ -34,7 +36,8 @@
 
 #define __raw_spin_lock_string_flags \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	LOCK \
+	"decb %0\n\t" \
 	"jns 4f\n\t" \
 	"2:\t" \
 	"testl $0x200, %1\n\t" \
@@ -65,10 +68,34 @@ static inline void __raw_spin_lock_flags
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
+#ifdef CONFIG_SMP_ALTERNATIVES
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
+		"1:movb %1,%b0\n"
+		"movb $0,%1\n"
+		"2:"
+		".section __smp_alternatives,\"a\"\n"
+		".long 1b\n"
+		".long 3f\n"
+		".previous\n"
+		".section __smp_replacements,\"a\"\n"
+		"3: .byte 2b - 1b\n"
+		".byte 5f-4f\n"
+		".byte 0\n"
+		".byte 6f-5f\n"
+		".byte -1\n"
+		"4: xchgb %b0,%1\n"
+		"5: movb %1,%b0\n"
+		"movb $0,%1\n"
+		"6:\n"
+		".previous\n"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
+#else
+	__asm__ __volatile__(
+		"xchgb %b0,%1\n"
+		:"=q" (oldval), "=m" (lock->slock)
+		:"0" (0) : "memory");
+#endif
 	return oldval > 0;
 }
 
@@ -178,12 +205,12 @@ static inline int __raw_write_trylock(ra
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0"
 				 : "=m" (rw->lock) : : "memory");
 }
 
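
The __raw_spin_trylock hunk above swaps whole instruction sequences rather
than a bare lock prefix: on SMP an atomic xchgb claims the lock, while the UP
replacement is a plain movb/movb load-store pair, which is sufficient with a
single CPU since nothing can interleave between the two instructions except
an interrupt handler, and one that takes the lock must release it again
before returning. A C sketch of the two variants, with a GCC builtin standing
in for the inline assembly (names invented for the example):

#include <stdio.h>

static volatile char slock = 1;     /* 1 = unlocked, <= 0 = held */

/* SMP variant: the atomic xchgb */
static int trylock_smp(volatile char *l)
{
	return __atomic_exchange_n(l, 0, __ATOMIC_ACQUIRE) > 0;
}

/* UP replacement: the movb/movb pair - not atomic, single CPU only */
static int trylock_up(volatile char *l)
{
	char old = *l;
	*l = 0;
	return old > 0;
}

int main(void)
{
	printf("first:  %d\n", trylock_smp(&slock));  /* 1 - acquired   */
	printf("second: %d\n", trylock_up(&slock));   /* 0 - still held */
	return 0;
}
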
diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/system.h ./include/asm-i386/system.h
--- ../orig-linux-2.6.16.29/include/asm-i386/system.h	2006-09-12 19:02:10.000000000 +0100
+++ ./include/asm-i386/system.h	2006-09-19 14:05:48.000000000 +0100
@@ -5,7 +5,7 @@
 #include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
+#include <asm/smp_alt.h>
 
 #ifdef __KERNEL__
 
@@ -271,19 +271,19 @@ static inline unsigned long __cmpxchg(vo
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
 				     : "=a"(prev)
 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
 				     : "=a"(prev)
 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
 				     : "=a"(prev)
 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
@@ -336,7 +336,7 @@ static inline unsigned long long __cmpxc
 				      unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+	__asm__ __volatile__(LOCK "cmpxchg8b %3"
 			     : "=A"(prev)
 			     : "b"((unsigned long)new),
 			       "c"((unsigned long)(new >> 32)),
@@ -503,11 +503,55 @@ struct alt_instr { 
 #endif
 
 #ifdef CONFIG_SMP
+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
+#define smp_alt_mb(instr)                                           \
+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
+		     ".section __smp_alternatives,\"a\"\n"          \
+		     ".long 6667b\n"                                \
+                     ".long 6673f\n"                                \
+		     ".previous\n"                                  \
+		     ".section __smp_replacements,\"a\"\n"          \
+		     "6673:.byte 6668b-6667b\n"                     \
+		     ".byte 6670f-6669f\n"                          \
+		     ".byte 6671f-6670f\n"                          \
+                     ".byte 0\n"                                    \
+		     ".byte %c0\n"                                  \
+		     "6669:lock;addl $0,0(%%esp)\n"                 \
+		     "6670:" instr "\n"                             \
+		     "6671:\n"                                      \
+		     ".previous\n"                                  \
+		     :                                              \
+		     : "i" (X86_FEATURE_XMM2)                       \
+		     : "memory")
+#define smp_rmb() smp_alt_mb("lfence")
+#define smp_mb()  smp_alt_mb("mfence")
+#define set_mb(var, value) do {                                     \
+unsigned long __set_mb_temp;                                        \
+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
+		     ".section __smp_alternatives,\"a\"\n"          \
+		     ".long 6667b\n"                                \
+		     ".long 6673f\n"                                \
+		     ".previous\n"                                  \
+		     ".section __smp_replacements,\"a\"\n"          \
+		     "6673: .byte 6668b-6667b\n"                    \
+		     ".byte 6670f-6669f\n"                          \
+		     ".byte 0\n"                                    \
+		     ".byte 6671f-6670f\n"                          \
+		     ".byte -1\n"                                   \
+		     "6669: xchg %1, %0\n"                          \
+		     "6670:movl %1, %0\n"                           \
+		     "6671:\n"                                      \
+		     ".previous\n"                                  \
+		     : "=m" (var), "=r" (__set_mb_temp)             \
+		     : "1" (value)                                  \
+		     : "memory"); } while (0)
+#else
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#endif
 #define smp_wmb()	wmb()
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
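
The smp_alt_mb() macro above stores three alternatives for one six-byte NOP
site: "lock; addl $0,0(%esp)" for SMP processors without SSE2, the real fence
instruction (gated on X86_FEATURE_XMM2) for those with it, and nothing at all
for UP, where the barrier degenerates to NOPs. The selection rule is the one
prepare_for_smp()/unprepare_for_smp() apply to every record; a sketch, with a
stubbed feature test standing in for boot_cpu_has() (names and the feature
stub are assumptions for illustration):

#include <stdio.h>

enum variant { SMP1_LOCK_ADDL, SMP2_FENCE, UP_NOPS };

/* Stand-in for boot_cpu_has(); assume an SSE2-capable CPU here. */
static int cpu_has(unsigned char feature)
{
	(void)feature;
	return 1;
}

static enum variant pick(unsigned char feature, int nr_cpus)
{
	if (nr_cpus == 1)
		return UP_NOPS;          /* up_size == 0: barrier vanishes */
	if (feature != 0xff && cpu_has(feature))
		return SMP2_FENCE;       /* mfence (smp_mb) / lfence (smp_rmb) */
	return SMP1_LOCK_ADDL;           /* lock; addl $0,0(%esp) */
}

int main(void)
{
	/* 26 = X86_FEATURE_XMM2 (0*32+26) in 2.6.16's cpufeature.h */
	printf("variant: %d\n", pick(26, 2));   /* prints 1 (SMP2_FENCE) */
	return 0;
}
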