Diffstat (limited to 'tboot/include/atomic.h')
-rw-r--r--  tboot/include/atomic.h  |  181
1 file changed, 181 insertions(+), 0 deletions(-)
diff --git a/tboot/include/atomic.h b/tboot/include/atomic.h
new file mode 100644
index 0000000..502cc36
--- /dev/null
+++ b/tboot/include/atomic.h
@@ -0,0 +1,181 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/include/atomic.h,v 1.47.2.2.2.1 2010/02/10 00:26:20 kensmith Exp $
+ */
+/*
+ * Portions copyright (c) 2010, Intel Corporation
+ */
+
+#ifndef __ATOMIC_H__
+#define __ATOMIC_H__
+
+/*
+ * Various simple operations on memory, each of which is atomic in the
+ * presence of interrupts and multiple processors.
+ *
+ * atomic_set_char(P, V) (*(u_char *)(P) |= (V))
+ * atomic_clear_char(P, V) (*(u_char *)(P) &= ~(V))
+ * atomic_add_char(P, V) (*(u_char *)(P) += (V))
+ * atomic_subtract_char(P, V) (*(u_char *)(P) -= (V))
+ *
+ * atomic_set_short(P, V) (*(u_short *)(P) |= (V))
+ * atomic_clear_short(P, V) (*(u_short *)(P) &= ~(V))
+ * atomic_add_short(P, V) (*(u_short *)(P) += (V))
+ * atomic_subtract_short(P, V) (*(u_short *)(P) -= (V))
+ *
+ * atomic_set_int(P, V) (*(u_int *)(P) |= (V))
+ * atomic_clear_int(P, V) (*(u_int *)(P) &= ~(V))
+ * atomic_add_int(P, V) (*(u_int *)(P) += (V))
+ * atomic_subtract_int(P, V) (*(u_int *)(P) -= (V))
+ * atomic_readandclear_int(P) (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
+ *
+ * atomic_set_long(P, V) (*(u_long *)(P) |= (V))
+ * atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V))
+ * atomic_add_long(P, V) (*(u_long *)(P) += (V))
+ * atomic_subtract_long(P, V) (*(u_long *)(P) -= (V))
+ * atomic_readandclear_long(P) (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
+ */
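+
+/*
+ * Illustrative sketch (not part of the original header; 'flags' and the
+ * bit masks below are hypothetical): manipulating a shared flags word
+ * with the operations described above.
+ *
+ *     static volatile u_int flags;
+ *     #define FLAG_READY 0x1
+ *     #define FLAG_BUSY  0x2
+ *
+ *     atomic_set_int(&flags, FLAG_READY);     flags |= FLAG_READY, atomically
+ *     atomic_clear_int(&flags, FLAG_BUSY);    flags &= ~FLAG_BUSY, atomically
+ */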
+
+/*
+ * The above functions are expanded inline in the statically-linked
+ * kernel. Lock prefixes are generated if an SMP kernel is being
+ * built.
+ *
+ * Kernel modules call real functions which are built into the kernel.
+ * This allows kernel modules to be portable between UP and SMP systems.
+ */
+
+/* all operations are defined only for 'int's */
+#define atomic_t u_int
+
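+/*
+ * Prefixed to an instruction mnemonic below, this expands to e.g.
+ * "lock ; addl ...", making the read-modify-write atomic across CPUs.
+ */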
+#define MPLOCKED "lock ; "
+
+/*
+ * The asm statements are marked volatile so the compiler cannot discard
+ * them as dead code.  GCC also reorders operations aggressively, so a
+ * "memory" clobber is needed wherever barrier semantics are required.
+ */
+#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
+static __inline void \
+atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile(MPLOCKED OP \
+ : "=m" (*p) \
+ : CONS (V), "m" (*p)); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile(MPLOCKED OP \
+ : "=m" (*p) \
+ : CONS (V), "m" (*p) \
+ : "memory"); \
+} \
+struct __hack
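+
+/*
+ * The dangling 'struct __hack' above exists so that every
+ * ATOMIC_ASM(...) instantiation must be terminated with a semicolon.
+ */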
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_int
+atomic_fetchadd_int(volatile u_int *p, u_int v)
+{
+
+ __asm __volatile(
+ " " MPLOCKED " "
+ " xaddl %0, %1 ; "
+ "# atomic_fetchadd_int"
+ : "+r" (v), /* 0 (result) */
+ "=m" (*p) /* 1 */
+ : "m" (*p)); /* 2 */
+
+ return (v);
+}
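+
+/*
+ * Usage sketch (illustrative; 'ticket' is hypothetical): xaddl returns
+ * the pre-increment value, so each caller gets a unique ticket number.
+ *
+ *     static volatile u_int ticket;
+ *     u_int mine = atomic_fetchadd_int(&ticket, 1);
+ */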
+
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ u_##TYPE res; \
+ \
+ __asm __volatile(MPLOCKED LOP \
+ : "=a" (res), /* 0 */ \
+ "=m" (*p) /* 1 */ \
+ : "m" (*p) /* 2 */ \
+ : "memory"); \
+ \
+ return (res); \
+} \
+ \
+/* \
+ * The XCHG instruction asserts LOCK automagically. \
+ */ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile(SOP \
+ : "=m" (*p), /* 0 */ \
+ "+r" (v) /* 1 */ \
+ : "m" (*p) /* 2 */ \
+ : "memory"); \
+} \
+struct __hack
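+
+/*
+ * Illustrative sketch (names hypothetical), using the 'int' instance
+ * generated below: the release store publishes 'data' before 'ready'
+ * becomes visible, and the acquire load keeps the read of 'data' from
+ * being hoisted above the test.
+ *
+ *     static u_int data;
+ *     static volatile u_int ready;
+ *
+ *     producer:
+ *         data = compute();
+ *         atomic_store_rel_int(&ready, 1);
+ *
+ *     consumer:
+ *         while (atomic_load_acq_int(&ready) == 0)
+ *             ;
+ *         consume(data);
+ */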
+
+ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
+ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
+ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
+ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);
+
+ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0");
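+
+/*
+ * The instantiations above generate atomic_set_int(),
+ * atomic_clear_int(), atomic_add_int(), atomic_subtract_int() (and
+ * their _barr_ variants), plus atomic_load_acq_int() and
+ * atomic_store_rel_int().  For illustration,
+ * ATOMIC_ASM(set, int, "orl %1,%0", "ir", v) expands roughly to:
+ *
+ *     static __inline void
+ *     atomic_set_int(volatile u_int *p, u_int v)
+ *     {
+ *         __asm __volatile("lock ; orl %1,%0"
+ *             : "=m" (*p)
+ *             : "ir" (v), "m" (*p));
+ *     }
+ */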
+
+#undef ATOMIC_ASM
+#undef ATOMIC_STORE_LOAD
+
+/* Read the current value and store a zero in the destination. */
+static __inline u_int
+atomic_readandclear_int(volatile u_int *addr)
+{
+ u_int res;
+
+ res = 0;
+ __asm __volatile(
+ " xchgl %1,%0 ; "
+ "# atomic_readandclear_int"
+ : "+r" (res), /* 0 */
+ "=m" (*addr) /* 1 */
+ : "m" (*addr));
+
+ return (res);
+}
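+
+/*
+ * Usage sketch (illustrative; 'pending' is hypothetical): drain a
+ * pending-events word in a single atomic step, so no bit set between
+ * the read and the clear can be lost.
+ *
+ *     static volatile u_int pending;
+ *     u_int events = atomic_readandclear_int(&pending);
+ */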
+
+#define atomic_read(atom) atomic_load_acq_int(atom)
+#define atomic_inc(atom) atomic_add_int((atom), 1)
+#define atomic_dec(atom) atomic_subtract_int((atom), 1)
+#define atomic_set(atom, val) atomic_set_int((atom), (val))
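+
+/*
+ * Usage sketch (illustrative; 'refcnt' is hypothetical): a simple
+ * counter built on the wrappers above.
+ *
+ *     static atomic_t refcnt;
+ *
+ *     atomic_set(&refcnt, 1);      initialize to one reference
+ *     atomic_inc(&refcnt);         take another reference
+ *     atomic_dec(&refcnt);         drop one
+ *     n = atomic_read(&refcnt);    acquire-load of the current value
+ *
+ * Note that atomic_dec() returns nothing, so a drop-to-zero test must
+ * use atomic_fetchadd_int(&refcnt, -1) == 1 rather than a separate read.
+ */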
+
+#endif /* __ATOMIC_H__ */