author     Keir Fraser <keir@xen.org>  2010-12-24 08:46:46 +0000
committer  Keir Fraser <keir@xen.org>  2010-12-24 08:46:46 +0000
commit     abf60d361232f53ad3c51fab74ff3b6f084d7f8e
tree       04a72e02795a011a2ecb865dea94cc1704d6bab5
parent     98592ae8b75bb54243ebe54d1300cb0e745fbe85
make sort() generally available
Rather than having this general library function only on ia64, move it into common code so that it can also be used by the x86 exception table sorting.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
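For illustration, a caller such as the x86 exception table setup can now use the common sort() along these lines (a minimal sketch only: the ex_entry layout and the sort_extable() helper are hypothetical, not Xen's actual code):

    /* Hypothetical record type, for illustration only. */
    struct ex_entry {
        unsigned long insn;    /* faulting instruction address */
        unsigned long fixup;   /* address of the recovery code */
    };

    /* Order entries by faulting address so lookups can use binary search. */
    static int cmp_ex(const void *a, const void *b)
    {
        const struct ex_entry *l = a, *r = b;

        return (l->insn > r->insn) - (l->insn < r->insn);
    }

    static void sort_extable(struct ex_entry *start, struct ex_entry *end)
    {
        /* Passing NULL lets sort() choose its own swap routine. */
        sort(start, end - start, sizeof(*start), cmp_ex, NULL);
    }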
Diffstat (limited to 'xen/common/sort.c')
 -rw-r--r--  xen/common/sort.c  82
 1 file changed, 82 insertions, 0 deletions
diff --git a/xen/common/sort.c b/xen/common/sort.c
new file mode 100644
index 0000000000..d96fc2af18
--- /dev/null
+++ b/xen/common/sort.c
@@ -0,0 +1,82 @@
+/*
+ * A fast, small, non-recursive O(n log n) sort for the Linux kernel
+ *
+ * Jan 23 2005 Matt Mackall <mpm@selenic.com>
+ */
+
+#include <xen/types.h>
+
+/* Fast path for 4-byte elements; the size argument (always 4 here) only
+ * keeps the two swap routines' signatures uniform. */
+static void u32_swap(void *a, void *b, int size)
+{
+    u32 t = *(u32 *)a;
+    *(u32 *)a = *(u32 *)b;
+    *(u32 *)b = t;
+}
+
+/* Fallback: swap two elements of arbitrary size one byte at a time. */
+static void generic_swap(void *a, void *b, int size)
+{
+    char t;
+
+    do {
+        t = *(char *)a;
+        *(char *)a++ = *(char *)b;
+        *(char *)b++ = t;
+    } while ( --size > 0 );
+}
+
+/*
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+
+void sort(void *base, size_t num, size_t size,
+          int (*cmp)(const void *, const void *),
+          void (*swap)(void *, void *, int size))
+{
+    /* Pre-scale counters for performance: i, n, c and r are byte offsets. */
+    int i = (num/2) * size, n = num * size, c, r;
+
+    /* With no swap routine supplied, pick one based on the element size. */
+    if ( !swap )
+        swap = (size == 4 ? u32_swap : generic_swap);
+
+    /* heapify */
+    for ( ; i >= 0; i -= size )
+    {
+        for ( r = i; r * 2 < n; r = c )
+        {
+            c = r * 2;
+            if ( (c < n - size) && (cmp(base + c, base + c + size) < 0) )
+                c += size;
+            if ( cmp(base + r, base + c) >= 0 )
+                break;
+            swap(base + r, base + c, size);
+        }
+    }
+
+    /* sort */
+    for ( i = n - size; i >= 0; i -= size )
+    {
+        swap(base, base + i, size);
+        for ( r = 0; r * 2 < i; r = c )
+        {
+            c = r * 2;
+            if ( (c < i - size) && (cmp(base + c, base + c + size) < 0) )
+                c += size;
+            if ( cmp(base + r, base + c) >= 0 )
+                break;
+            swap(base + r, base + c, size);
+        }
+    }
+}
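Usage, per the comment block above, might look like this (a minimal sketch; it assumes sort()'s declaration is visible, e.g. from an include/xen/sort.h header accompanying this file):

    #include <xen/types.h>
    #include <xen/sort.h>    /* assumed location of sort()'s declaration */

    static int u32_cmp(const void *a, const void *b)
    {
        u32 l = *(const u32 *)a, r = *(const u32 *)b;

        /* Avoid returning l - r: the difference can overflow an int. */
        return (l > r) - (l < r);
    }

    static void demo(void)
    {
        u32 vals[] = { 42, 7, 19, 3 };

        /* size == 4 and swap == NULL, so sort() picks the u32 fast path. */
        sort(vals, sizeof(vals) / sizeof(vals[0]), sizeof(vals[0]),
             u32_cmp, NULL);
    }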