From 00dd85d8f61db55d6e08d224ed30f077075e9766 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Sun, 24 Jul 2011 14:17:58 +0000
Subject: ocf-linux: version bump to 20110720

Fixes a problem with TFM allocation in cryptosoft.c

Signed-off-by: Philip Prindeville <philipp@redfish-solutions.com>

Hauke:
 * remove ubsec_ssb package and take it from ocf-linux
 * use patches from ocf-linux package
 * refresh all patches
 * re-add some build fixes for OpenWrt
 * re-add CRYPTO_MANAGER dependency

SVN-Revision: 27753
---
 .../generic/files/crypto/ocf/ubsec_ssb/Makefile    |   12 +
 .../generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h  |  527 +++++
 .../generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c | 2220 ++++++++++++++++++++
 .../generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h  |  233 ++
 .../generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h  |  228 ++
 5 files changed, 3220 insertions(+)
 create mode 100644 target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile
 create mode 100644 target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h
 create mode 100644 target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c
 create mode 100644 target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h
 create mode 100644 target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h

diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile b/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile
new file mode 100644
index 0000000000..f973efd7b0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h
new file mode 100644
index 0000000000..601055267d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h
@@ -0,0 +1,527 @@
+/*  $OpenBSD: queue.h,v 1.32 2007/04/30 18:42:34 pedro Exp $    */
+/*  $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $   */
+
+/*
+ * Copyright (c) 1991, 1993
+ *  The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *  @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _BSD_SYS_QUEUE_H_
+#define _BSD_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists, 
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
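+
+/*
+ * Example usage (an illustrative sketch; 'struct pkt' and process() are
+ * hypothetical):
+ *
+ *  struct pkt { int len; BSD_TAILQ_ENTRY(pkt) link; };
+ *  BSD_TAILQ_HEAD(pktq, pkt) head = BSD_TAILQ_HEAD_INITIALIZER(head);
+ *  struct pkt *p;
+ *
+ *  BSD_TAILQ_INSERT_TAIL(&head, p, link);
+ *  BSD_TAILQ_FOREACH(p, &head, link)
+ *      process(p);
+ */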
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define BSD_SLIST_HEAD(name, type)                      \
+struct name {                               \
+    struct type *slh_first; /* first element */         \
+}
+ 
+#define BSD_SLIST_HEAD_INITIALIZER(head)                    \
+    { NULL }
+ 
+#define BSD_SLIST_ENTRY(type)                       \
+struct {                                \
+    struct type *sle_next;  /* next element */          \
+}
+ 
+/*
+ * Singly-linked List access methods.
+ */
+#define BSD_SLIST_FIRST(head)   ((head)->slh_first)
+#define BSD_SLIST_END(head)     NULL
+#define BSD_SLIST_EMPTY(head)   (BSD_SLIST_FIRST(head) == BSD_SLIST_END(head))
+#define BSD_SLIST_NEXT(elm, field)  ((elm)->field.sle_next)
+
+#define BSD_SLIST_FOREACH(var, head, field)                 \
+    for((var) = BSD_SLIST_FIRST(head);                  \
+        (var) != BSD_SLIST_END(head);                   \
+        (var) = BSD_SLIST_NEXT(var, field))
+
+#define BSD_SLIST_FOREACH_PREVPTR(var, varp, head, field)           \
+    for ((varp) = &BSD_SLIST_FIRST((head));             \
+        ((var) = *(varp)) != BSD_SLIST_END(head);           \
+        (varp) = &BSD_SLIST_NEXT((var), field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define BSD_SLIST_INIT(head) {                      \
+    BSD_SLIST_FIRST(head) = BSD_SLIST_END(head);                \
+}
+
+#define BSD_SLIST_INSERT_AFTER(slistelm, elm, field) do {           \
+    (elm)->field.sle_next = (slistelm)->field.sle_next;     \
+    (slistelm)->field.sle_next = (elm);             \
+} while (0)
+
+#define BSD_SLIST_INSERT_HEAD(head, elm, field) do {            \
+    (elm)->field.sle_next = (head)->slh_first;          \
+    (head)->slh_first = (elm);                  \
+} while (0)
+
+#define BSD_SLIST_REMOVE_NEXT(head, elm, field) do {            \
+    (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next;  \
+} while (0)
+
+#define BSD_SLIST_REMOVE_HEAD(head, field) do {             \
+    (head)->slh_first = (head)->slh_first->field.sle_next;      \
+} while (0)
+
+#define BSD_SLIST_REMOVE(head, elm, type, field) do {           \
+    if ((head)->slh_first == (elm)) {               \
+        BSD_SLIST_REMOVE_HEAD((head), field);           \
+    } else {                            \
+        struct type *curelm = (head)->slh_first;        \
+                                    \
+        while (curelm->field.sle_next != (elm))         \
+            curelm = curelm->field.sle_next;        \
+        curelm->field.sle_next =                \
+            curelm->field.sle_next->field.sle_next;     \
+        _Q_INVALIDATE((elm)->field.sle_next);           \
+    }                               \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define BSD_LIST_HEAD(name, type)                       \
+struct name {                               \
+    struct type *lh_first;  /* first element */         \
+}
+
+#define BSD_LIST_HEAD_INITIALIZER(head)                 \
+    { NULL }
+
+#define BSD_LIST_ENTRY(type)                        \
+struct {                                \
+    struct type *le_next;   /* next element */          \
+    struct type **le_prev;  /* address of previous next element */  \
+}
+
+/*
+ * List access methods
+ */
+#define BSD_LIST_FIRST(head)        ((head)->lh_first)
+#define BSD_LIST_END(head)          NULL
+#define BSD_LIST_EMPTY(head)        (BSD_LIST_FIRST(head) == BSD_LIST_END(head))
+#define BSD_LIST_NEXT(elm, field)       ((elm)->field.le_next)
+
+#define BSD_LIST_FOREACH(var, head, field)                  \
+    for((var) = BSD_LIST_FIRST(head);                   \
+        (var) != BSD_LIST_END(head);                    \
+        (var) = BSD_LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define BSD_LIST_INIT(head) do {                        \
+    BSD_LIST_FIRST(head) = BSD_LIST_END(head);              \
+} while (0)
+
+#define BSD_LIST_INSERT_AFTER(listelm, elm, field) do {         \
+    if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)  \
+        (listelm)->field.le_next->field.le_prev =       \
+            &(elm)->field.le_next;              \
+    (listelm)->field.le_next = (elm);               \
+    (elm)->field.le_prev = &(listelm)->field.le_next;       \
+} while (0)
+
+#define BSD_LIST_INSERT_BEFORE(listelm, elm, field) do {            \
+    (elm)->field.le_prev = (listelm)->field.le_prev;        \
+    (elm)->field.le_next = (listelm);               \
+    *(listelm)->field.le_prev = (elm);              \
+    (listelm)->field.le_prev = &(elm)->field.le_next;       \
+} while (0)
+
+#define BSD_LIST_INSERT_HEAD(head, elm, field) do {             \
+    if (((elm)->field.le_next = (head)->lh_first) != NULL)      \
+        (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+    (head)->lh_first = (elm);                   \
+    (elm)->field.le_prev = &(head)->lh_first;           \
+} while (0)
+
+#define BSD_LIST_REMOVE(elm, field) do {                    \
+    if ((elm)->field.le_next != NULL)               \
+        (elm)->field.le_next->field.le_prev =           \
+            (elm)->field.le_prev;               \
+    *(elm)->field.le_prev = (elm)->field.le_next;           \
+    _Q_INVALIDATE((elm)->field.le_prev);                \
+    _Q_INVALIDATE((elm)->field.le_next);                \
+} while (0)
+
+#define BSD_LIST_REPLACE(elm, elm2, field) do {             \
+    if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+        (elm2)->field.le_next->field.le_prev =          \
+            &(elm2)->field.le_next;             \
+    (elm2)->field.le_prev = (elm)->field.le_prev;           \
+    *(elm2)->field.le_prev = (elm2);                \
+    _Q_INVALIDATE((elm)->field.le_prev);                \
+    _Q_INVALIDATE((elm)->field.le_next);                \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define BSD_SIMPLEQ_HEAD(name, type)                    \
+struct name {                               \
+    struct type *sqh_first; /* first element */         \
+    struct type **sqh_last; /* addr of last next element */     \
+}
+
+#define BSD_SIMPLEQ_HEAD_INITIALIZER(head)                  \
+    { NULL, &(head).sqh_first }
+
+#define BSD_SIMPLEQ_ENTRY(type)                     \
+struct {                                \
+    struct type *sqe_next;  /* next element */          \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define BSD_SIMPLEQ_FIRST(head)     ((head)->sqh_first)
+#define BSD_SIMPLEQ_END(head)       NULL
+#define BSD_SIMPLEQ_EMPTY(head)     (BSD_SIMPLEQ_FIRST(head) == BSD_SIMPLEQ_END(head))
+#define BSD_SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
+
+#define BSD_SIMPLEQ_FOREACH(var, head, field)               \
+    for((var) = BSD_SIMPLEQ_FIRST(head);                \
+        (var) != BSD_SIMPLEQ_END(head);                 \
+        (var) = BSD_SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define BSD_SIMPLEQ_INIT(head) do {                     \
+    (head)->sqh_first = NULL;                   \
+    (head)->sqh_last = &(head)->sqh_first;              \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_HEAD(head, elm, field) do {          \
+    if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)    \
+        (head)->sqh_last = &(elm)->field.sqe_next;      \
+    (head)->sqh_first = (elm);                  \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_TAIL(head, elm, field) do {          \
+    (elm)->field.sqe_next = NULL;                   \
+    *(head)->sqh_last = (elm);                  \
+    (head)->sqh_last = &(elm)->field.sqe_next;          \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {        \
+    if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+        (head)->sqh_last = &(elm)->field.sqe_next;      \
+    (listelm)->field.sqe_next = (elm);              \
+} while (0)
+
+#define BSD_SIMPLEQ_REMOVE_HEAD(head, field) do {           \
+    if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+        (head)->sqh_last = &(head)->sqh_first;          \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define BSD_TAILQ_HEAD(name, type)                      \
+struct name {                               \
+    struct type *tqh_first; /* first element */         \
+    struct type **tqh_last; /* addr of last next element */     \
+}
+
+#define BSD_TAILQ_HEAD_INITIALIZER(head)                    \
+    { NULL, &(head).tqh_first }
+
+#define BSD_TAILQ_ENTRY(type)                       \
+struct {                                \
+    struct type *tqe_next;  /* next element */          \
+    struct type **tqe_prev; /* address of previous next element */  \
+}
+
+/* 
+ * tail queue access methods 
+ */
+#define BSD_TAILQ_FIRST(head)       ((head)->tqh_first)
+#define BSD_TAILQ_END(head)         NULL
+#define BSD_TAILQ_NEXT(elm, field)      ((elm)->field.tqe_next)
+#define BSD_TAILQ_LAST(head, headname)                  \
+    (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define BSD_TAILQ_PREV(elm, headname, field)                \
+    (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define BSD_TAILQ_EMPTY(head)                       \
+    (BSD_TAILQ_FIRST(head) == BSD_TAILQ_END(head))
+
+#define BSD_TAILQ_FOREACH(var, head, field)                 \
+    for((var) = BSD_TAILQ_FIRST(head);                  \
+        (var) != BSD_TAILQ_END(head);                   \
+        (var) = BSD_TAILQ_NEXT(var, field))
+
+#define BSD_TAILQ_FOREACH_REVERSE(var, head, headname, field)       \
+    for((var) = BSD_TAILQ_LAST(head, headname);             \
+        (var) != BSD_TAILQ_END(head);                   \
+        (var) = BSD_TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define BSD_TAILQ_INIT(head) do {                       \
+    (head)->tqh_first = NULL;                   \
+    (head)->tqh_last = &(head)->tqh_first;              \
+} while (0)
+
+#define BSD_TAILQ_INSERT_HEAD(head, elm, field) do {            \
+    if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)    \
+        (head)->tqh_first->field.tqe_prev =         \
+            &(elm)->field.tqe_next;             \
+    else                                \
+        (head)->tqh_last = &(elm)->field.tqe_next;      \
+    (head)->tqh_first = (elm);                  \
+    (elm)->field.tqe_prev = &(head)->tqh_first;         \
+} while (0)
+
+#define BSD_TAILQ_INSERT_TAIL(head, elm, field) do {            \
+    (elm)->field.tqe_next = NULL;                   \
+    (elm)->field.tqe_prev = (head)->tqh_last;           \
+    *(head)->tqh_last = (elm);                  \
+    (head)->tqh_last = &(elm)->field.tqe_next;          \
+} while (0)
+
+#define BSD_TAILQ_INSERT_AFTER(head, listelm, elm, field) do {      \
+    if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+        (elm)->field.tqe_next->field.tqe_prev =         \
+            &(elm)->field.tqe_next;             \
+    else                                \
+        (head)->tqh_last = &(elm)->field.tqe_next;      \
+    (listelm)->field.tqe_next = (elm);              \
+    (elm)->field.tqe_prev = &(listelm)->field.tqe_next;     \
+} while (0)
+
+#define BSD_TAILQ_INSERT_BEFORE(listelm, elm, field) do {           \
+    (elm)->field.tqe_prev = (listelm)->field.tqe_prev;      \
+    (elm)->field.tqe_next = (listelm);              \
+    *(listelm)->field.tqe_prev = (elm);             \
+    (listelm)->field.tqe_prev = &(elm)->field.tqe_next;     \
+} while (0)
+
+#define BSD_TAILQ_REMOVE(head, elm, field) do {             \
+    if (((elm)->field.tqe_next) != NULL)                \
+        (elm)->field.tqe_next->field.tqe_prev =         \
+            (elm)->field.tqe_prev;              \
+    else                                \
+        (head)->tqh_last = (elm)->field.tqe_prev;       \
+    *(elm)->field.tqe_prev = (elm)->field.tqe_next;         \
+    _Q_INVALIDATE((elm)->field.tqe_prev);               \
+    _Q_INVALIDATE((elm)->field.tqe_next);               \
+} while (0)
+
+#define BSD_TAILQ_REPLACE(head, elm, elm2, field) do {          \
+    if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL)   \
+        (elm2)->field.tqe_next->field.tqe_prev =        \
+            &(elm2)->field.tqe_next;                \
+    else                                \
+        (head)->tqh_last = &(elm2)->field.tqe_next;     \
+    (elm2)->field.tqe_prev = (elm)->field.tqe_prev;         \
+    *(elm2)->field.tqe_prev = (elm2);               \
+    _Q_INVALIDATE((elm)->field.tqe_prev);               \
+    _Q_INVALIDATE((elm)->field.tqe_next);               \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define BSD_CIRCLEQ_HEAD(name, type)                    \
+struct name {                               \
+    struct type *cqh_first;     /* first element */     \
+    struct type *cqh_last;      /* last element */      \
+}
+
+#define BSD_CIRCLEQ_HEAD_INITIALIZER(head)                  \
+    { BSD_CIRCLEQ_END(&head), BSD_CIRCLEQ_END(&head) }
+
+#define BSD_CIRCLEQ_ENTRY(type)                     \
+struct {                                \
+    struct type *cqe_next;      /* next element */      \
+    struct type *cqe_prev;      /* previous element */      \
+}
+
+/*
+ * Circular queue access methods 
+ */
+#define BSD_CIRCLEQ_FIRST(head)     ((head)->cqh_first)
+#define BSD_CIRCLEQ_LAST(head)      ((head)->cqh_last)
+#define BSD_CIRCLEQ_END(head)       ((void *)(head))
+#define BSD_CIRCLEQ_NEXT(elm, field)    ((elm)->field.cqe_next)
+#define BSD_CIRCLEQ_PREV(elm, field)    ((elm)->field.cqe_prev)
+#define BSD_CIRCLEQ_EMPTY(head)                     \
+    (BSD_CIRCLEQ_FIRST(head) == BSD_CIRCLEQ_END(head))
+
+#define BSD_CIRCLEQ_FOREACH(var, head, field)               \
+    for((var) = BSD_CIRCLEQ_FIRST(head);                \
+        (var) != BSD_CIRCLEQ_END(head);                 \
+        (var) = BSD_CIRCLEQ_NEXT(var, field))
+
+#define BSD_CIRCLEQ_FOREACH_REVERSE(var, head, field)           \
+    for((var) = BSD_CIRCLEQ_LAST(head);                 \
+        (var) != BSD_CIRCLEQ_END(head);                 \
+        (var) = BSD_CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define BSD_CIRCLEQ_INIT(head) do {                     \
+    (head)->cqh_first = BSD_CIRCLEQ_END(head);              \
+    (head)->cqh_last = BSD_CIRCLEQ_END(head);               \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {        \
+    (elm)->field.cqe_next = (listelm)->field.cqe_next;      \
+    (elm)->field.cqe_prev = (listelm);              \
+    if ((listelm)->field.cqe_next == BSD_CIRCLEQ_END(head))     \
+        (head)->cqh_last = (elm);               \
+    else                                \
+        (listelm)->field.cqe_next->field.cqe_prev = (elm);  \
+    (listelm)->field.cqe_next = (elm);              \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {       \
+    (elm)->field.cqe_next = (listelm);              \
+    (elm)->field.cqe_prev = (listelm)->field.cqe_prev;      \
+    if ((listelm)->field.cqe_prev == BSD_CIRCLEQ_END(head))     \
+        (head)->cqh_first = (elm);              \
+    else                                \
+        (listelm)->field.cqe_prev->field.cqe_next = (elm);  \
+    (listelm)->field.cqe_prev = (elm);              \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_HEAD(head, elm, field) do {          \
+    (elm)->field.cqe_next = (head)->cqh_first;          \
+    (elm)->field.cqe_prev = BSD_CIRCLEQ_END(head);          \
+    if ((head)->cqh_last == BSD_CIRCLEQ_END(head))          \
+        (head)->cqh_last = (elm);               \
+    else                                \
+        (head)->cqh_first->field.cqe_prev = (elm);      \
+    (head)->cqh_first = (elm);                  \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_TAIL(head, elm, field) do {          \
+    (elm)->field.cqe_next = BSD_CIRCLEQ_END(head);          \
+    (elm)->field.cqe_prev = (head)->cqh_last;           \
+    if ((head)->cqh_first == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_first = (elm);              \
+    else                                \
+        (head)->cqh_last->field.cqe_next = (elm);       \
+    (head)->cqh_last = (elm);                   \
+} while (0)
+
+#define BSD_CIRCLEQ_REMOVE(head, elm, field) do {               \
+    if ((elm)->field.cqe_next == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_last = (elm)->field.cqe_prev;       \
+    else                                \
+        (elm)->field.cqe_next->field.cqe_prev =         \
+            (elm)->field.cqe_prev;              \
+    if ((elm)->field.cqe_prev == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_first = (elm)->field.cqe_next;      \
+    else                                \
+        (elm)->field.cqe_prev->field.cqe_next =         \
+            (elm)->field.cqe_next;              \
+    _Q_INVALIDATE((elm)->field.cqe_prev);               \
+    _Q_INVALIDATE((elm)->field.cqe_next);               \
+} while (0)
+
+#define BSD_CIRCLEQ_REPLACE(head, elm, elm2, field) do {            \
+    if (((elm2)->field.cqe_next = (elm)->field.cqe_next) ==     \
+        BSD_CIRCLEQ_END(head))                      \
+        (head).cqh_last = (elm2);               \
+    else                                \
+        (elm2)->field.cqe_next->field.cqe_prev = (elm2);    \
+    if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) ==     \
+        BSD_CIRCLEQ_END(head))                      \
+        (head).cqh_first = (elm2);              \
+    else                                \
+        (elm2)->field.cqe_prev->field.cqe_next = (elm2);    \
+    _Q_INVALIDATE((elm)->field.cqe_prev);               \
+    _Q_INVALIDATE((elm)->field.cqe_next);               \
+} while (0)
+
+#endif  /* !_BSD_SYS_QUEUE_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c
new file mode 100644
index 0000000000..aa0733f08b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c
@@ -0,0 +1,2220 @@
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
+ * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
+ * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+#undef UBSEC_DEBUG
+#undef UBSEC_VERBOSE_DEBUG
+
+#ifdef UBSEC_VERBOSE_DEBUG
+#define UBSEC_DEBUG
+#endif
+
+/*
+ * uBsec BCM5365 hardware crypto accelerator
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+
+#include <linux/ssb/ssb.h>
+
+/*
+ * BSD queue
+ */
+//#include "bsdqueue.h"
+
+/* 
+ * OCF
+ */
+#include <cryptodev.h>
+#include <uio.h>
+
+#define HMAC_HACK 1
+
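+/*
+ * With HMAC_HACK enabled the driver carries private MD5/SHA1 copies
+ * (taken from the safe driver) so it can precompute the HMAC inner and
+ * outer hash state in software; see ubsec_setup_mackey() below.
+ */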
+#ifdef HMAC_HACK
+#include <safe/hmachack.h>
+#include <safe/md5.h>
+#include <safe/md5.c>
+#include <safe/sha1.h>
+#include <safe/sha1.c>
+#endif
+
+#include "bsdqueue.h"
+#include "ubsecreg.h"
+#include "ubsecvar.h"
+
+#define DRV_MODULE_NAME     "ubsec_ssb"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION  "0.02"
+#define DRV_MODULE_RELDATE  "Feb 21, 2009"
+
+#if 1
+#define DPRINTF(a...) \
+    do { \
+        if (debug) \
+            printk(DRV_MODULE_NAME ": " a); \
+    } while (0)
+#else
+#define DPRINTF(a...)
+#endif
+
+/*
+ * Prototypes 
+ */
+static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
+static int __devinit ubsec_ssb_probe(struct ssb_device *sdev,
+    const struct ssb_device_id *ent);
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
+int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent, 
+    struct device *self);
+static void ubsec_setup_mackey(struct ubsec_session *ses, int algo, 
+    caddr_t key, int klen);
+static int dma_map_skb(struct ubsec_softc *sc, 
+    struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen);
+static int dma_map_uio(struct ubsec_softc *sc, 
+    struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
+static void dma_unmap(struct ubsec_softc *sc, 
+    struct ubsec_dma_alloc *q_map, int mlen);
+static int ubsec_dmamap_aligned(struct ubsec_softc *sc, 
+    const struct ubsec_dma_alloc *q_map, int mlen);
+
+#ifdef UBSEC_DEBUG
+static int proc_read(char *buf, char **start, off_t offset,
+    int size, int *peof, void *data);
+#endif
+
+void ubsec_reset_board(struct ubsec_softc *);
+void ubsec_init_board(struct ubsec_softc *);
+void ubsec_cleanchip(struct ubsec_softc *);
+void ubsec_totalreset(struct ubsec_softc *);
+int  ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);
+
+static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int ubsec_freesession(device_t, u_int64_t);
+static int ubsec_process(device_t, struct cryptop *, int);
+
+void    ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
+void    ubsec_feed(struct ubsec_softc *);
+void    ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
+void    ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
+int     ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
+        size_t, int);
+
+/* DEBUG crap... */
+void ubsec_dump_pb(struct ubsec_pktbuf *);
+void ubsec_dump_mcr(struct ubsec_mcr *);
+
+#define READ_REG(sc,r) \
+    ssb_read32((sc)->sdev, (r))
+#define WRITE_REG(sc,r,val) \
+    ssb_write32((sc)->sdev, (r), (val))
+#define READ_REG_SDEV(sdev,r) \
+    ssb_read32((sdev), (r))
+#define WRITE_REG_SDEV(sdev,r,val) \
+    ssb_write32((sdev), (r), (val))
+
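+/*
+ * SWAP32 rewrites a big-endian (network order) word in place into the
+ * little-endian layout the chip expects; HTOLE32 does the same for a
+ * host-order word.
+ */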
+#define SWAP32(x) (x) = htole32(ntohl((x)))
+#define HTOLE32(x) (x) = htole32(x)
+
+#ifdef __LITTLE_ENDIAN
+#define letoh16(x) (x)
+#define letoh32(x) (x)
+#endif
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+#define UBSEC_SSB_MAX_CHIPS 1
+static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
+static struct ubsec_stats ubsecstats;
+
+#ifdef UBSEC_DEBUG
+static struct proc_dir_entry *procdebug;
+#endif
+
+static struct ssb_device_id ubsec_ssb_tbl[] = {
+    /* Broadcom BCM5365P IPSec Core */
+    SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
+    SSB_DEVTABLE_END
+};
+
+static struct ssb_driver ubsec_ssb_driver = {
+    .name       = DRV_MODULE_NAME,
+    .id_table   = ubsec_ssb_tbl,
+    .probe      = ubsec_ssb_probe,
+    .remove     = __devexit_p(ubsec_ssb_remove),
+    /*
+    .suspend    = ubsec_ssb_suspend,
+    .resume     = ubsec_ssb_resume
+    */
+};
+
+static device_method_t ubsec_ssb_methods = {
+    /* crypto device methods */
+    DEVMETHOD(cryptodev_newsession, ubsec_newsession),
+    DEVMETHOD(cryptodev_freesession, ubsec_freesession),
+    DEVMETHOD(cryptodev_process,    ubsec_process),
+};
+
+#ifdef UBSEC_DEBUG
+static int 
+proc_read(char *buf, char **start, off_t offset,
+    int size, int *peof, void *data)
+{
+    int i = 0, byteswritten = 0, ret;
+    unsigned int stat, ctrl;
+#ifdef UBSEC_VERBOSE_DEBUG
+    struct ubsec_q *q;
+    struct ubsec_dma *dmap;
+#endif
+   
+    while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
+    {
+        struct ubsec_softc *sc = ubsec_chip_idx[i];
+        
+        stat = READ_REG(sc, BS_STAT);
+        ctrl = READ_REG(sc, BS_CTRL);
+        ret = snprintf(buf + byteswritten,
+            size - byteswritten,
+            "DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+        byteswritten += ret;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        printf("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+        /* Dump all queues MCRs */
+        if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+            BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
+            {
+                dmap = q->q_dma;
+                ubsec_dump_mcr(&dmap->d_dma->d_mcr);
+            }
+        }
+#endif
+
+        i++;
+    }
+
+    *peof = 1;
+
+    return byteswritten;
+}
+#endif
+
+/*
+ * Map a given sk_buff for DMA: one mapping for the linear head and one
+ * per page fragment.  *mlen returns the number of segments mapped.
+ */
+static int
+dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen)
+{
+    int i = 0;
+    dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    /*
+     * We support only a limited number of fragments.
+     */
+    if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
+    {
+        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+        return (-ENOMEM);
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
+#endif
+
+    /* first segment: the linear skb data */
+    tmp = dma_map_single(sc->sc_dv,
+                         skb->data,
+                         skb_headlen(skb),
+                         DMA_BIDIRECTIONAL);
+    
+    q_map[i].dma_paddr = tmp;
+    q_map[i].dma_vaddr = skb->data;
+    q_map[i].dma_size = skb_headlen(skb);
+
+    if (unlikely(tmp == 0))
+    {
+        printk(KERN_ERR "Could not map memory region for dma.\n");
+        return (-EINVAL);
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
+#endif
+
+
+    /* remaining segments: the page fragments */
+    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1, 
+            (unsigned int)page_address(skb_shinfo(skb)->frags[i].page) +
+            skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
+#endif
+
+        tmp = dma_map_single(sc->sc_dv,
+                             page_address(skb_shinfo(skb)->frags[i].page) +
+                                 skb_shinfo(skb)->frags[i].page_offset, 
+                             skb_shinfo(skb)->frags[i].size,
+                             DMA_BIDIRECTIONAL);
+
+        q_map[i + 1].dma_paddr = tmp;
+        q_map[i + 1].dma_vaddr = (void*)(page_address(skb_shinfo(skb)->frags[i].page) +
+                                  skb_shinfo(skb)->frags[i].page_offset);
+        q_map[i + 1].dma_size = skb_shinfo(skb)->frags[i].size;
+
+        if (unlikely(tmp == 0))
+        {
+            printk(KERN_ERR "Could not map memory region for dma.\n");
+            return (-EINVAL);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
+#endif
+
+    }
+    *mlen = i + 1;
+
+    return(0);
+}
+
+/*
+ * Map a given uio buffer for DMA: one mapping per iovec segment.
+ */
+
+static int
+dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
+{
+    struct iovec *iov = uio->uio_iov;
+    int n;
+    dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    /*
+     * We support only a limited number of fragments.
+     */
+    if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
+    {
+        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+        return (-ENOMEM);
+    }
+
+    for (n = 0; n < uio->uio_iovcnt; n++) {
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
+#endif
+        tmp = dma_map_single(sc->sc_dv,
+                             iov->iov_base,
+                             iov->iov_len,
+                             DMA_BIDIRECTIONAL);
+
+        q_map[n].dma_paddr = tmp;
+        q_map[n].dma_vaddr = iov->iov_base;
+        q_map[n].dma_size = iov->iov_len;
+
+        if (unlikely(tmp == 0))
+        {
+            printk(KERN_ERR "Could not map memory region for dma.\n");
+            return (-EINVAL);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);
+#endif
+
+        iov++;
+    }
+    *mlen = n;
+
+    return(0);
+}
+
+static void
+dma_unmap(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, int mlen)
+{
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for(i = 0; i < mlen; i++)
+    {
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, (unsigned int)q_map[i].dma_paddr, q_map[i].dma_size);
+#endif
+        dma_unmap_single(sc->sc_dv,
+                         q_map[i].dma_paddr,
+                         q_map[i].dma_size,
+                         DMA_BIDIRECTIONAL);
+    }
+    return;
+}
+
+/*
+ * Is the operand suitably aligned for direct DMA?  Each
+ * segment must be aligned on a 32-bit boundary and all
+ * but the last segment must be a multiple of 4 bytes.
+ */
+static int
+ubsec_dmamap_aligned(struct ubsec_softc *sc, const struct ubsec_dma_alloc *q_map, int mlen)
+{
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for (i = 0; i < mlen; i++) {
+        if (q_map[i].dma_paddr & 3)
+            return (0);
+        if (i != (mlen - 1) && (q_map[i].dma_size & 3))
+            return (0);
+    }
+    return (1);
+}
+
+
+#define N(a)    (sizeof(a) / sizeof (a[0]))
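+/*
+ * Precompute the HMAC inner and outer hash state: the key is XORed with
+ * the ipad/opad values, one full block is hashed, and the intermediate
+ * digest state is stored in the session for the hardware to continue.
+ */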
+static void
+ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
+{
+#ifdef HMAC_HACK
+    MD5_CTX md5ctx;
+    SHA1_CTX sha1ctx;
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= HMAC_IPAD_VAL;
+
+    if (algo == CRYPTO_MD5_HMAC) {
+        MD5Init(&md5ctx);
+        MD5Update(&md5ctx, key, klen);
+        MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+        bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
+    } else {
+        SHA1Init(&sha1ctx);
+        SHA1Update(&sha1ctx, key, klen);
+        SHA1Update(&sha1ctx, hmac_ipad_buffer,
+            SHA1_HMAC_BLOCK_LEN - klen);
+        bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+    }
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+    if (algo == CRYPTO_MD5_HMAC) {
+        MD5Init(&md5ctx);
+        MD5Update(&md5ctx, key, klen);
+        MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+        bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
+    } else {
+        SHA1Init(&sha1ctx);
+        SHA1Update(&sha1ctx, key, klen);
+        SHA1Update(&sha1ctx, hmac_opad_buffer,
+            SHA1_HMAC_BLOCK_LEN - klen);
+        bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+    }
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= HMAC_OPAD_VAL;
+
+#else /* HMAC_HACK */
+    DPRINTF("md5/sha not implemented\n");
+#endif /* HMAC_HACK */
+}
+#undef N
+
+static int 
+__devinit ubsec_ssb_probe(struct ssb_device *sdev, 
+    const struct ssb_device_id *ent) 
+{
+    int err;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    err = ssb_bus_powerup(sdev->bus, 0);
+    if (err) {
+        dev_err(sdev->dev, "Failed to powerup the bus\n");
+        goto err_out;
+    }
+
+    err = request_irq(sdev->irq, (irq_handler_t)ubsec_ssb_isr, 
+        IRQF_DISABLED | IRQF_SHARED, DRV_MODULE_NAME, sdev);
+    if (err) {
+        dev_err(sdev->dev, "Could not request irq\n");
+        goto err_out_powerdown;
+    }
+
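+    /* ssb_dma_set_mask() is gone from kernels >= 2.6.36; use the generic
+     * DMA API on the device's dma_dev there instead. */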
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+    err = dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(32)) ||
+          dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(32));
+#else
+    err = ssb_dma_set_mask(sdev, DMA_32BIT_MASK);
+#endif
+    if (err) {
+        dev_err(sdev->dev,
+            "Required 32-bit DMA mask unsupported by the system.\n");
+        goto err_out_free_irq;
+    }
+
+    printk(KERN_INFO "Sentry5(tm) ROBOGateway(tm) IPSec Core at IRQ %u\n",
+        sdev->irq);
+
+    DPRINTF("Vendor: %x, core id: %x, revision: %x\n",
+        sdev->id.vendor, sdev->id.coreid, sdev->id.revision);
+
+    ssb_device_enable(sdev, 0);
+
+    if (ubsec_attach(sdev, ent, sdev->dev) != 0)
+        goto err_out_disable;
+
+#ifdef UBSEC_DEBUG
+    procdebug = create_proc_entry(DRV_MODULE_NAME, S_IRUSR, NULL);
+    if (procdebug)
+    {
+        procdebug->read_proc = proc_read;
+        procdebug->data = NULL;
+    } else 
+        DPRINTF("Unable to create proc file.\n");
+#endif
+
+    return 0;
+
+err_out_disable:
+    ssb_device_disable(sdev, 0);
+
+err_out_free_irq:
+    free_irq(sdev->irq, sdev);
+
+err_out_powerdown:
+    ssb_bus_may_powerdown(sdev->bus);
+
+err_out:
+    return err;
+}
+
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev)
+{
+    struct ubsec_softc *sc;
+    unsigned int ctrlflgs;
+    struct ubsec_dma *dmap;
+    u_int32_t i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    ctrlflgs = READ_REG_SDEV(sdev, BS_CTRL);
+    /* disable all IPSec Core interrupts globally */
+    ctrlflgs ^= (BS_CTRL_MCR1INT | BS_CTRL_MCR2INT |
+        BS_CTRL_DMAERR);
+    WRITE_REG_SDEV(sdev, BS_CTRL, ctrlflgs);
+
+    free_irq(sdev->irq, sdev);
+
+    sc = (struct ubsec_softc *)ssb_get_drvdata(sdev);
+
+    /* unregister all crypto algorithms */
+    crypto_unregister_all(sc->sc_cid);
+
+    /* Free queue / dma memory */
+    for (i = 0; i < UBS_MAX_NQUEUE; i++) {
+        struct ubsec_q *q;
+
+        q = sc->sc_queuea[i];
+        if (q != NULL)
+        {
+            dmap = q->q_dma;
+            if (dmap != NULL)
+            {
+                ubsec_dma_free(sc, &dmap->d_alloc);
+                q->q_dma = NULL;
+            }
+            kfree(q);
+        }
+        sc->sc_queuea[i] = NULL;
+    }
+
+    ssb_device_disable(sdev, 0);
+    ssb_bus_may_powerdown(sdev->bus);
+    ssb_set_drvdata(sdev, NULL);
+
+#ifdef UBSEC_DEBUG
+    if (procdebug)
+        remove_proc_entry(DRV_MODULE_NAME, NULL);
+#endif
+}
+
+int
+ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent, 
+    struct device *self)
+{
+    struct ubsec_softc *sc = NULL;
+    struct ubsec_dma *dmap;
+    u_int32_t i;
+    static int num_chips = 0;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    sc = (struct ubsec_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+    if (!sc)
+        return(-ENOMEM);
+    memset(sc, 0, sizeof(*sc));
+
+    sc->sc_dv = sdev->dev;
+    sc->sdev = sdev;
+
+    spin_lock_init(&sc->sc_ringmtx);
+
+    softc_device_init(sc, "ubsec_ssb", num_chips, ubsec_ssb_methods);
+
+    /* Maybe someday there will be boards with more than one chip available */
+    if (num_chips < UBSEC_SSB_MAX_CHIPS) {
+        ubsec_chip_idx[device_get_unit(sc->sc_dev)] = sc;
+        num_chips++;
+    }
+
+    ssb_set_drvdata(sdev, sc);
+
+    BSD_SIMPLEQ_INIT(&sc->sc_queue);
+    BSD_SIMPLEQ_INIT(&sc->sc_qchip);
+    BSD_SIMPLEQ_INIT(&sc->sc_queue2);
+    BSD_SIMPLEQ_INIT(&sc->sc_qchip2);
+    BSD_SIMPLEQ_INIT(&sc->sc_q2free);
+
+    sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
+
+    sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
+    if (sc->sc_cid < 0) {
+        device_printf(sc->sc_dev, "could not get crypto driver id\n");
+        return -1;
+    }
+
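+    /* Preallocate all request structures and their DMA-able MCR chunks
+     * up front; every request starts out on the free queue. */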
+    BSD_SIMPLEQ_INIT(&sc->sc_freequeue);
+    dmap = sc->sc_dmaa;
+    for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
+        struct ubsec_q *q;
+
+        q = (struct ubsec_q *)kmalloc(sizeof(struct ubsec_q), GFP_KERNEL);
+        if (q == NULL) {
+            printf(": can't allocate queue buffers\n");
+            break;
+        }
+
+        if (ubsec_dma_malloc(sc, &dmap->d_alloc, sizeof(struct ubsec_dmachunk),0)) {
+            printf(": can't allocate dma buffers\n");
+            kfree(q);
+            break;
+        }
+        dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
+
+        q->q_dma = dmap;
+        sc->sc_queuea[i] = q;
+
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+    }
+
+    /*
+     * Reset Broadcom chip
+     */
+    ubsec_reset_board(sc);
+
+    /*
+     * Init Broadcom chip
+     */
+    ubsec_init_board(sc);
+
+    /* supported crypto algorithms */
+    crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+    crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+
+    if (sc->sc_flags & UBS_FLAGS_AES) {
+        crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES AES128 AES192 AES256 MD5_HMAC SHA1_HMAC\n");
+    }
+    else
+        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES MD5_HMAC SHA1_HMAC\n");
+
+    crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+    crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+
+    return 0;
+}
+
+/*
+ * UBSEC Interrupt routine
+ */
+static irqreturn_t 
+ubsec_ssb_isr(int irq, void *arg, struct pt_regs *regs) 
+{
+    struct ubsec_softc *sc = NULL;
+    volatile u_int32_t stat;
+    struct ubsec_q *q;
+    struct ubsec_dma *dmap;
+    int npkts = 0, i;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    sc = (struct ubsec_softc *)ssb_get_drvdata(arg);
+
+    stat = READ_REG(sc, BS_STAT);
+
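+    /* The IRQ line is shared; only handle interrupt causes we enabled. */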
+    stat &= sc->sc_statmask;
+    if (stat == 0)
+        return IRQ_NONE;
+
+    WRITE_REG(sc, BS_STAT, stat);       /* IACK */
+
+    /*
+     * Check to see if we have any packets waiting for us
+     */
+    if ((stat & BS_STAT_MCR1_DONE)) {
+        while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+            q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+            dmap = q->q_dma;
+
+            if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
+            {
+                DPRINTF("error while processing MCR. Flags = %x\n", dmap->d_dma->d_mcr.mcr_flags);
+                break;
+            }
+
+            BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+
+            npkts = q->q_nstacked_mcrs;
+            /*
+             * search for further sc_qchip ubsec_q's that share
+             * the same MCR, and complete them too; they must be
+             * at the top.
+             */
+            for (i = 0; i < npkts; i++) {
+                if(q->q_stacked_mcr[i])
+                    ubsec_callback(sc, q->q_stacked_mcr[i]);
+                else
+                    break;
+            }
+            ubsec_callback(sc, q);
+        }
+
+        /*
+         * Don't send any more packets to the chip if there has been
+         * a DMAERR.
+         */
+        if (likely(!(stat & BS_STAT_DMAERR)))
+            ubsec_feed(sc);
+        else
+            DPRINTF("DMA error occurred. Stop feeding crypto chip.\n");
+    }
+
+    /*
+     * Check to see if we got any DMA Error
+     */
+    if (stat & BS_STAT_DMAERR) {
+        volatile u_int32_t a = READ_REG(sc, BS_ERR);
+
+        printf(KERN_ERR "%s: dmaerr %s@%08x\n", DRV_MODULE_NAME,
+            (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);
+
+        ubsecstats.hst_dmaerr++;
+        ubsec_totalreset(sc);
+        ubsec_feed(sc);
+    }
+
+    return IRQ_HANDLED;
+}
+
+/*
+ * ubsec_feed() - aggregate and post requests to chip
+ *        It is assumed that the caller set splnet()
+ */
+void
+ubsec_feed(struct ubsec_softc *sc)
+{
+#ifdef UBSEC_VERBOSE_DEBUG
+    static int max;
+#endif 
+    struct ubsec_q *q, *q2;
+    int npkts, i;
+    void *v;
+    u_int32_t stat;
+
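+    /* Aggregate up to UBS_MAX_AGGR queued requests into one MCR; a
+     * single request takes the simple path at feed1 below. */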
+    npkts = sc->sc_nqueue;
+    if (npkts > UBS_MAX_AGGR)
+        npkts = UBS_MAX_AGGR;
+    if (npkts < 2)
+        goto feed1;
+
+    stat = READ_REG(sc, BS_STAT);
+
+    if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+        if(stat & BS_STAT_DMAERR) {
+            ubsec_totalreset(sc);
+            ubsecstats.hst_dmaerr++;
+        }
+        return;
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("merging %d records\n", npkts);
+
+    /* XXX temporary aggregation statistics reporting code */
+    if (max < npkts) {
+        max = npkts;
+        DPRINTF("%s: new max aggregate %d\n", DRV_MODULE_NAME, max);
+    }
+#endif /* UBSEC_VERBOSE_DEBUG */
+
+    q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+    --sc->sc_nqueue;
+
+#if 0
+    /* 
+     * XXX 
+     * We use dma_map_single() - no sync required!
+     */
+
+    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+    if (q->q_dst_map != NULL)
+        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+
+    q->q_nstacked_mcrs = npkts - 1;     /* Number of packets stacked */
+
+    for (i = 0; i < q->q_nstacked_mcrs; i++) {
+        q2 = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+        bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
+            0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+        if (q2->q_dst_map != NULL)
+            bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
+                0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+        --sc->sc_nqueue;
+
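+        /* Copy the per-packet tail (struct ubsec_mcr_add) of q2's MCR
+         * into slot i of q's MCR so one command describes all packets. */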
+        v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
+            sizeof(struct ubsec_mcr_add);
+        bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
+        q->q_stacked_mcr[i] = q2;
+    }
+    q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+#if 0
+    bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+        0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+    WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("feed (1): q->chip %p %08x %08x\n", q,
+        (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+        (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+    return;
+
+feed1:
+    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_queue)) {
+        stat = READ_REG(sc, BS_STAT);
+
+        if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+            if(stat & BS_STAT_DMAERR) {
+                ubsec_totalreset(sc);
+                ubsecstats.hst_dmaerr++;
+            }
+            break;
+        }
+
+        q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+        bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+            0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+        if (q->q_dst_map != NULL)
+            bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+                0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+        bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+            0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+
+        WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("feed (2): q->chip %p %08x %08x\n", q, 
+            (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+            (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+        --sc->sc_nqueue;
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+    }
+}
+
+/*
+ * Allocate a new 'session' and return an encoded session id.  'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+    struct cryptoini *c, *encini = NULL, *macini = NULL;
+    struct ubsec_softc *sc = NULL;
+    struct ubsec_session *ses = NULL;
+    int sesn, i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (sidp == NULL || cri == NULL)
+        return (EINVAL);
+
+    sc = device_get_softc(dev);
+
+    if (sc == NULL)
+        return (EINVAL);
+
+    for (c = cri; c != NULL; c = c->cri_next) {
+        if (c->cri_alg == CRYPTO_MD5_HMAC ||
+            c->cri_alg == CRYPTO_SHA1_HMAC) {
+            if (macini)
+                return (EINVAL);
+            macini = c;
+        } else if (c->cri_alg == CRYPTO_DES_CBC ||
+            c->cri_alg == CRYPTO_3DES_CBC ||
+            c->cri_alg == CRYPTO_AES_CBC) {
+            if (encini)
+                return (EINVAL);
+            encini = c;
+        } else
+            return (EINVAL);
+    }
+    if (encini == NULL && macini == NULL)
+        return (EINVAL);
+
+    if (sc->sc_sessions == NULL) {
+        ses = sc->sc_sessions = (struct ubsec_session *)kmalloc(
+            sizeof(struct ubsec_session), SLAB_ATOMIC);
+        if (ses == NULL)
+            return (ENOMEM);
+        memset(ses, 0, sizeof(struct ubsec_session));
+        sesn = 0;
+        sc->sc_nsessions = 1;
+    } else {
+        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+            if (sc->sc_sessions[sesn].ses_used == 0) {
+                ses = &sc->sc_sessions[sesn];
+                break;
+            }
+        }
+
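+        /* No free slot found: grow the session array by one entry and
+         * migrate the old sessions over (a kmalloc-based realloc). */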
+        if (ses == NULL) {
+            sesn = sc->sc_nsessions;
+            ses = (struct ubsec_session *)kmalloc((sesn + 1) *
+                sizeof(struct ubsec_session), SLAB_ATOMIC);
+            if (ses == NULL)
+                return (ENOMEM);
+            memset(ses, 0, (sesn + 1) * sizeof(struct ubsec_session));
+            bcopy(sc->sc_sessions, ses, sesn *
+                sizeof(struct ubsec_session));
+            bzero(sc->sc_sessions, sesn *
+                sizeof(struct ubsec_session));
+            kfree(sc->sc_sessions);
+            sc->sc_sessions = ses;
+            ses = &sc->sc_sessions[sesn];
+            sc->sc_nsessions++;
+        }
+    }
+
+    bzero(ses, sizeof(struct ubsec_session));
+    ses->ses_used = 1;
+    if (encini) {
+        /* get an IV */
+        /* XXX may read fewer than requested */
+        read_random(ses->ses_iv, sizeof(ses->ses_iv));
+
+        /* Go ahead and compute key in ubsec's byte order */
+        if (encini->cri_alg == CRYPTO_DES_CBC) {
+            /* DES uses the same key three times:
+             * 1st encrypt -> 2nd decrypt -> 3rd encrypt */
+            bcopy(encini->cri_key, &ses->ses_key[0], 8);
+            bcopy(encini->cri_key, &ses->ses_key[2], 8);
+            bcopy(encini->cri_key, &ses->ses_key[4], 8);
+            ses->ses_keysize = 192; /* Fake! Actually it's only 64 bits,
+                                       and effectively just 56 bits. */
+        } else if(encini->cri_alg == CRYPTO_3DES_CBC) {
+            bcopy(encini->cri_key, ses->ses_key, 24);
+            ses->ses_keysize = 192;
+        } else if(encini->cri_alg == CRYPTO_AES_CBC) {
+            ses->ses_keysize = encini->cri_klen;
+
+            if (ses->ses_keysize != 128 &&
+                ses->ses_keysize != 192 &&
+                ses->ses_keysize != 256)
+            {
+                DPRINTF("unsupported AES key size: %d\n", ses->ses_keysize);
+                return (EINVAL);
+            }
+            bcopy(encini->cri_key, ses->ses_key, (ses->ses_keysize / 8));
+        }
+
+        /* Hardware requires the keys in little endian byte order */
+        for (i=0; i < (ses->ses_keysize / 32); i++)
+            SWAP32(ses->ses_key[i]);
+    }
+
+    if (macini) {
+        ses->ses_mlen = macini->cri_mlen;
+
+        if (ses->ses_mlen == 0 ||
+            ses->ses_mlen > SHA1_HASH_LEN) {
+
+            if (macini->cri_alg == CRYPTO_MD5_HMAC ||
+                macini->cri_alg == CRYPTO_SHA1_HMAC)
+            {
+                ses->ses_mlen = DEFAULT_HMAC_LEN;
+            } else
+            {
+                /*
+                 * Reserved for future usage. MD5/SHA1 calculations have
+                 * different hash sizes.
+                 */
+                printk(KERN_ERR DRV_MODULE_NAME ": unsupported hash operation with mac/hash len: %d\n", ses->ses_mlen);
+                return (EINVAL);
+            }
+            
+        }
+
+        if (macini->cri_key != NULL) {
+            ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key,
+                macini->cri_klen / 8);
+        }
+    }
+
+    *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
+    return (0);
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+ubsec_freesession(device_t dev, u_int64_t tid)
+{
+    struct ubsec_softc *sc = device_get_softc(dev);
+    int session;
+    u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (sc == NULL)
+        return (EINVAL);
+
+    session = UBSEC_SESSION(sid);
+    if (session < sc->sc_nsessions) {
+        bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
+        return (0);
+    } else
+        return (EINVAL);
+}
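+
+/*
+ * Note: freeing a session only zeroes its slot, which clears ses_used
+ * so ubsec_newsession() can hand the slot out again; the session table
+ * itself is never shrunk.
+ */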
+
+static int
+ubsec_process(device_t dev, struct cryptop *crp, int hint)
+{
+    struct ubsec_q *q = NULL;
+    int err = 0, i, j, nicealign;
+    struct ubsec_softc *sc = device_get_softc(dev);
+    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+    int encoffset = 0, macoffset = 0, cpskip, cpoffset;
+    int sskip, dskip, stheend, dtheend, ivsize = 8;
+    int16_t coffset;
+    struct ubsec_session *ses;
+    struct ubsec_generic_ctx ctx;
+    struct ubsec_dma *dmap = NULL;
+    unsigned long flags;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (unlikely(crp == NULL || crp->crp_callback == NULL)) {
+        ubsecstats.hst_invalid++;
+        return (EINVAL);
+    }
+
+    if (unlikely(sc == NULL))
+        return (EINVAL);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_lock_irqsave\n");
+#endif
+    spin_lock_irqsave(&sc->sc_ringmtx, flags);
+    //spin_lock_irq(&sc->sc_ringmtx);
+
+    if (BSD_SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
+        ubsecstats.hst_queuefull++;
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("spin_unlock_irqrestore\n");
+#endif
+        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+        //spin_unlock_irq(&sc->sc_ringmtx);
+        err = ENOMEM;
+        goto errout2;
+    }
+
+    q = BSD_SIMPLEQ_FIRST(&sc->sc_freequeue);
+    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_unlock_irqrestore\n");
+#endif
+    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+    //spin_unlock_irq(&sc->sc_ringmtx);
+
+    dmap = q->q_dma; /* Save dma pointer */
+    bzero(q, sizeof(struct ubsec_q));
+    bzero(&ctx, sizeof(ctx));
+
+    q->q_sesn = UBSEC_SESSION(crp->crp_sid);
+    q->q_dma = dmap;
+    ses = &sc->sc_sessions[q->q_sesn];
+
+    if (crp->crp_flags & CRYPTO_F_SKBUF) {
+        q->q_src_m = (struct sk_buff *)crp->crp_buf;
+        q->q_dst_m = (struct sk_buff *)crp->crp_buf;
+    } else if (crp->crp_flags & CRYPTO_F_IOV) {
+        q->q_src_io = (struct uio *)crp->crp_buf;
+        q->q_dst_io = (struct uio *)crp->crp_buf;
+    } else {
+        err = EINVAL;
+        goto errout;    /* XXX we don't handle contiguous blocks! */
+    }
+
+    bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
+
+    dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
+    dmap->d_dma->d_mcr.mcr_flags = 0;
+    q->q_crp = crp;
+
+    crd1 = crp->crp_desc;
+    if (crd1 == NULL) {
+        err = EINVAL;
+        goto errout;
+    }
+    crd2 = crd1->crd_next;
+
+    if (crd2 == NULL) {
+        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+            crd1->crd_alg == CRYPTO_SHA1_HMAC) {
+            maccrd = crd1;
+            enccrd = NULL;
+        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+            crd1->crd_alg == CRYPTO_3DES_CBC || 
+            crd1->crd_alg == CRYPTO_AES_CBC) {
+            maccrd = NULL;
+            enccrd = crd1;
+        } else {
+            err = EINVAL;
+            goto errout;
+        }
+    } else {
+        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+            crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
+            (crd2->crd_alg == CRYPTO_DES_CBC ||
+            crd2->crd_alg == CRYPTO_3DES_CBC ||
+            crd2->crd_alg == CRYPTO_AES_CBC) &&
+            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+            maccrd = crd1;
+            enccrd = crd2;
+        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+            crd1->crd_alg == CRYPTO_3DES_CBC ||
+            crd1->crd_alg == CRYPTO_AES_CBC) &&
+            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+            crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
+            (crd1->crd_flags & CRD_F_ENCRYPT)) {
+            enccrd = crd1;
+            maccrd = crd2;
+        } else {
+            /*
+             * We cannot order the ubsec as requested
+             */
+            printk(KERN_ERR DRV_MODULE_NAME ": got wrong algorithm/signature order.\n");
+            err = EINVAL;
+            goto errout;
+        }
+    }
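+
+    /*
+     * At this point at most two descriptors are supported: a single
+     * cipher or MAC operation, or the combinations MAC + decrypt
+     * (inbound) and encrypt + MAC (outbound); any other ordering was
+     * rejected above with EINVAL.
+     */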
+
+    /* Encryption/Decryption requested */
+    if (enccrd) {
+        encoffset = enccrd->crd_skip;
+
+        if (enccrd->crd_alg == CRYPTO_DES_CBC ||
+            enccrd->crd_alg == CRYPTO_3DES_CBC)
+        {
+            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
+            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_DES);
+            ivsize = 8;     /* [3]DES uses 64bit IVs */
+        } else {
+            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_AES);
+            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
+            ivsize = 16;    /* AES uses 128bit IVs */
+
+            switch(ses->ses_keysize)
+            {
+                case 128:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES128);
+                    break;
+                case 192:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES192);
+                    break;
+                case 256:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES256);
+                    break;
+                default:
+                    DPRINTF("invalid AES key size: %d\n", ses->ses_keysize);
+                    err = EINVAL;
+                    goto errout;
+            }
+        }
+
+        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+            /* Direction: Outbound */
+
+            q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
+
+            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+            } else {
+                for(i=0; i < (ivsize / 4); i++)
+                    ctx.pc_iv[i] = ses->ses_iv[i];
+            }
+
+            /* If the IV is not already present in the buffer, copy it in */
+            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+                if (crp->crp_flags & CRYPTO_F_SKBUF)
+                    /*
+                    m_copyback(q->q_src_m,
+                        enccrd->crd_inject,
+                        8, ctx.pc_iv);
+                    */
+                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_m,
+                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+                else if (crp->crp_flags & CRYPTO_F_IOV)
+                    /*
+                    cuio_copyback(q->q_src_io,
+                        enccrd->crd_inject,
+                        8, ctx.pc_iv);
+                    */
+                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_io,
+                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+            }
+        } else {
+            /* Direction: Inbound */
+
+            ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
+
+            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+            else if (crp->crp_flags & CRYPTO_F_SKBUF)
+                /*
+                m_copydata(q->q_src_m, enccrd->crd_inject,
+                    8, (caddr_t)ctx.pc_iv);
+                */
+                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_m,
+                    enccrd->crd_inject, ivsize,
+                    (caddr_t)ctx.pc_iv);
+            else if (crp->crp_flags & CRYPTO_F_IOV)
+                /*
+                cuio_copydata(q->q_src_io,
+                    enccrd->crd_inject, 8,
+                    (caddr_t)ctx.pc_iv);
+                */
+                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_io,
+                    enccrd->crd_inject, ivsize,
+                    (caddr_t)ctx.pc_iv);
+
+        }
+
+        /* Even though key and IV sizes differ from cipher to cipher,
+         * copy/swap the full array lengths and let the compiler unroll
+         * the fixed-length loops for better CPU pipeline utilization. */
+        for(i=0; i < 8; i++)
+            ctx.pc_key[i] = ses->ses_key[i];
+        for(i=0; i < 4; i++)
+            SWAP32(ctx.pc_iv[i]);
+    }
+
+    /* Authentication requested */
+    if (maccrd) {
+        macoffset = maccrd->crd_skip;
+
+        if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
+            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
+        else
+            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
+
+        for (i = 0; i < 5; i++) {
+            ctx.pc_hminner[i] = ses->ses_hminner[i];
+            ctx.pc_hmouter[i] = ses->ses_hmouter[i];
+
+            HTOLE32(ctx.pc_hminner[i]);
+            HTOLE32(ctx.pc_hmouter[i]);
+        }
+    }
+
+    if (enccrd && maccrd) {
+        /*
+         * ubsec cannot handle packets where the end of encryption
+         * and authentication are not the same, or where the
+         * encrypted part begins before the authenticated part.
+         */
+        if (((encoffset + enccrd->crd_len) !=
+            (macoffset + maccrd->crd_len)) ||
+            (enccrd->crd_skip < maccrd->crd_skip)) {
+            err = EINVAL;
+            goto errout;
+        }
+        sskip = maccrd->crd_skip;
+        cpskip = dskip = enccrd->crd_skip;
+        stheend = maccrd->crd_len;
+        dtheend = enccrd->crd_len;
+        coffset = enccrd->crd_skip - maccrd->crd_skip;
+        cpoffset = cpskip + dtheend;
+#ifdef UBSEC_DEBUG
+        DPRINTF("mac: skip %d, len %d, inject %d\n",
+            maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
+        DPRINTF("enc: skip %d, len %d, inject %d\n",
+            enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
+        DPRINTF("src: skip %d, len %d\n", sskip, stheend);
+        DPRINTF("dst: skip %d, len %d\n", dskip, dtheend);
+        DPRINTF("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
+            coffset, stheend, cpskip, cpoffset);
+#endif
+    } else {
+        cpskip = dskip = sskip = macoffset + encoffset;
+        dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
+        cpoffset = cpskip + dtheend;
+        coffset = 0;
+    }
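+    /*
+     * pc_offset appears to be measured in 32-bit words (hence the
+     * shift by two): coffset is the byte distance from the start of
+     * the authenticated region to the start of the encrypted region,
+     * so it is expected to be a multiple of four.
+     */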
+    ctx.pc_offset = htole16(coffset >> 2);
+
+#if 0
+    if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
+        0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
+        err = ENOMEM;
+        goto errout;
+    }
+#endif
+
+    if (crp->crp_flags & CRYPTO_F_SKBUF) {
+#if 0
+        if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
+            q->q_src_m, BUS_DMA_NOWAIT) != 0) {
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+            q->q_src_map = NULL;
+            err = ENOMEM;
+            goto errout;
+        }
+#endif
+        err = dma_map_skb(sc, q->q_src_map, q->q_src_m, &q->q_src_len);
+        if (unlikely(err != 0))
+            goto errout;
+
+    } else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+        if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
+            q->q_src_io, BUS_DMA_NOWAIT) != 0) {
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+            q->q_src_map = NULL;
+            err = ENOMEM;
+            goto errout;
+        }
+#endif
+        err = dma_map_uio(sc, q->q_src_map, q->q_src_io, &q->q_src_len);
+        if (unlikely(err != 0))
+            goto errout;
+    }
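+
+    /*
+     * Note: the Linux port replaces the BSD bus_dma(9) maps shown in
+     * the #if 0 blocks above with plain arrays of {dma_paddr, dma_size}
+     * fragments (see q_src_map/q_dst_map in ubsecvar.h).
+     */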
+
+    /* 
+     * Check alignment 
+     */
+    nicealign = ubsec_dmamap_aligned(sc, q->q_src_map, q->q_src_len);
+
+    dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("src skip: %d\n", sskip);
+#endif
+    for (i = j = 0; i < q->q_src_len; i++) {
+        struct ubsec_pktbuf *pb;
+        size_t packl = q->q_src_map[i].dma_size;
+        dma_addr_t packp = q->q_src_map[i].dma_paddr;
+
+        if (sskip >= packl) {
+            sskip -= packl;
+            continue;
+        }
+
+        packl -= sskip;
+        packp += sskip;
+        sskip = 0;
+
+        /* maximum fragment size is 0xfffc */
+        if (packl > 0xfffc) {
+            DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+            err = EIO;
+            goto errout;
+        }
+
+        if (j == 0)
+            pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
+        else
+            pb = &dmap->d_dma->d_sbuf[j - 1];
+
+        pb->pb_addr = htole32(packp);
+
+        if (stheend) {
+            if (packl > stheend) {
+                pb->pb_len = htole32(stheend);
+                stheend = 0;
+            } else {
+                pb->pb_len = htole32(packl);
+                stheend -= packl;
+            }
+        } else
+            pb->pb_len = htole32(packl);
+
+        if ((i + 1) == q->q_src_len)
+            pb->pb_next = 0;
+        else
+            pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                offsetof(struct ubsec_dmachunk, d_sbuf[j]));
+        j++;
+    }
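+
+    /*
+     * The loop above builds the input scatter chain: the first usable
+     * fragment is described by mcr_ipktbuf, each following one by
+     * d_sbuf[], linked through physical addresses in pb_next.  The
+     * first sskip bytes are stepped over, and the fragment that crosses
+     * the end of the stheend-byte payload is clipped to it.
+     */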
+
+    if (enccrd == NULL && maccrd != NULL) {
+        /* Authentication only */
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
+            htole32(dmap->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+#ifdef UBSEC_DEBUG
+        DPRINTF("opkt: %x %x %x\n",
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
+#endif
+    } else {
+        if (crp->crp_flags & CRYPTO_F_IOV) {
+            if (!nicealign) {
+                err = EINVAL;
+                goto errout;
+            }
+#if 0
+            if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+                UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+                &q->q_dst_map) != 0) {
+                err = ENOMEM;
+                goto errout;
+            }
+            if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
+                q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
+                bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+                q->q_dst_map = NULL;
+                goto errout;
+            }
+#endif
+
+            /* The HW writes the result back into the source memory (in place) */
+            for(i = 0; i < q->q_src_len; i++)
+                q->q_dst_map[i] = q->q_src_map[i];
+
+            q->q_dst_len = q->q_src_len;
+            q->q_has_dst = 0;
+
+        } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
+            if (nicealign) {
+
+                /* The HW writes the result back into the source memory (in place) */
+                q->q_dst_m = q->q_src_m;
+                for(i = 0; i < q->q_src_len; i++)
+                    q->q_dst_map[i] = q->q_src_map[i];
+
+                q->q_dst_len = q->q_src_len;
+                q->q_has_dst = 0;
+
+            } else {
+#ifdef NOTYET
+                int totlen, len;
+                struct sk_buff *m, *top, **mp;
+
+                totlen = q->q_src_map->dm_mapsize;
+                if (q->q_src_m->m_flags & M_PKTHDR) {
+                    len = MHLEN;
+                    MGETHDR(m, M_DONTWAIT, MT_DATA);
+                } else {
+                    len = MLEN;
+                    MGET(m, M_DONTWAIT, MT_DATA);
+                }
+                if (m == NULL) {
+                    err = ENOMEM;
+                    goto errout;
+                }
+                if (len == MHLEN)
+                    M_DUP_PKTHDR(m, q->q_src_m);
+                if (totlen >= MINCLSIZE) {
+                    MCLGET(m, M_DONTWAIT);
+                    if (m->m_flags & M_EXT)
+                        len = MCLBYTES;
+                }
+                m->m_len = len;
+                top = NULL;
+                mp = &top;
+
+                while (totlen > 0) {
+                    if (top) {
+                        MGET(m, M_DONTWAIT, MT_DATA);
+                        if (m == NULL) {
+                            m_freem(top);
+                            err = ENOMEM;
+                            goto errout;
+                        }
+                        len = MLEN;
+                    }
+                    if (top && totlen >= MINCLSIZE) {
+                        MCLGET(m, M_DONTWAIT);
+                        if (m->m_flags & M_EXT)
+                            len = MCLBYTES;
+                    }
+                    m->m_len = len = min(totlen, len);
+                    totlen -= len;
+                    *mp = m;
+                    mp = &m->m_next;
+                }
+                q->q_dst_m = top;
+                ubsec_mcopy(q->q_src_m, q->q_dst_m,
+                    cpskip, cpoffset);
+                if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+                    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+                    &q->q_dst_map) != 0) {
+                    err = ENOMEM;
+                    goto errout;
+                }
+                if (bus_dmamap_load_mbuf(sc->sc_dmat,
+                    q->q_dst_map, q->q_dst_m,
+                    BUS_DMA_NOWAIT) != 0) {
+                    bus_dmamap_destroy(sc->sc_dmat,
+                    q->q_dst_map);
+                    q->q_dst_map = NULL;
+                    err = ENOMEM;
+                    goto errout;
+                }
+#else
+                device_printf(sc->sc_dev,
+                    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
+                    __FILE__, __LINE__);
+                err = EINVAL;
+                goto errout;
+#endif
+            }
+        } else {
+            err = EINVAL;
+            goto errout;
+        }
+
+#ifdef UBSEC_DEBUG
+        DPRINTF("dst skip: %d\n", dskip);
+#endif
+        for (i = j = 0; i < q->q_dst_len; i++) {
+            struct ubsec_pktbuf *pb;
+            size_t packl = q->q_dst_map[i].dma_size;
+            dma_addr_t packp = q->q_dst_map[i].dma_paddr;
+
+            if (dskip >= packl) {
+                dskip -= packl;
+                continue;
+            }
+
+            packl -= dskip;
+            packp += dskip;
+            dskip = 0;
+
+            if (packl > 0xfffc) {
+                DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+                err = EIO;
+                goto errout;
+            }
+
+            if (j == 0)
+                pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
+            else
+                pb = &dmap->d_dma->d_dbuf[j - 1];
+
+            pb->pb_addr = htole32(packp);
+
+            if (dtheend) {
+                if (packl > dtheend) {
+                    pb->pb_len = htole32(dtheend);
+                    dtheend = 0;
+                } else {
+                    pb->pb_len = htole32(packl);
+                    dtheend -= packl;
+                }
+            } else
+                pb->pb_len = htole32(packl);
+
+            if ((i + 1) == q->q_dst_len) {
+                if (maccrd)
+                    /* Authentication:
+                     * The last fragment of the output buffer 
+                     * contains the HMAC. */
+                    pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                        offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+                else
+                    pb->pb_next = 0;
+            } else
+                pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
+            j++;
+        }
+    }
+
+    dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_ctx));
+
+    if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
+        /* new Broadcom cards with dynamic long command context structure */
+
+        if (enccrd != NULL &&
+            enccrd->crd_alg == CRYPTO_AES_CBC)
+        {
+            struct ubsec_pktctx_aes128 *ctxaes128;    
+            struct ubsec_pktctx_aes192 *ctxaes192;    
+            struct ubsec_pktctx_aes256 *ctxaes256;    
+
+            switch(ses->ses_keysize)
+            {
+                /* AES 128bit */
+                case 128:
+                ctxaes128 = (struct ubsec_pktctx_aes128 *)
+                    (dmap->d_alloc.dma_vaddr + 
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes128->pc_len = htole16(sizeof(struct ubsec_pktctx_aes128));
+                ctxaes128->pc_type = ctx.pc_type;
+                ctxaes128->pc_flags = ctx.pc_flags;
+                ctxaes128->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 4; i++)
+                    ctxaes128->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes128->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes128->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes128->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+                /* AES 192bit */
+                case 192:
+                ctxaes192 = (struct ubsec_pktctx_aes192 *)
+                    (dmap->d_alloc.dma_vaddr + 
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes192->pc_len = htole16(sizeof(struct ubsec_pktctx_aes192));
+                ctxaes192->pc_type = ctx.pc_type;
+                ctxaes192->pc_flags = ctx.pc_flags;
+                ctxaes192->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 6; i++)
+                    ctxaes192->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes192->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes192->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes192->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+                /* AES 256bit */
+                case 256:
+                ctxaes256 = (struct ubsec_pktctx_aes256 *)
+                    (dmap->d_alloc.dma_vaddr + 
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes256->pc_len = htole16(sizeof(struct ubsec_pktctx_aes256));
+                ctxaes256->pc_type = ctx.pc_type;
+                ctxaes256->pc_flags = ctx.pc_flags;
+                ctxaes256->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 8; i++)
+                    ctxaes256->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes256->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes256->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes256->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+            }
+        } else {
+            /* 
+             * [3]DES / MD5_HMAC / SHA1_HMAC
+             *
+             * MD5_HMAC / SHA1_HMAC can use the IPSEC 3DES operation without
+             * encryption.
+             */
+            struct ubsec_pktctx_des *ctxdes;
+
+            ctxdes = (struct ubsec_pktctx_des *)(dmap->d_alloc.dma_vaddr +
+                offsetof(struct ubsec_dmachunk, d_ctx));
+            
+            ctxdes->pc_len = htole16(sizeof(struct ubsec_pktctx_des));
+            ctxdes->pc_type = ctx.pc_type;
+            ctxdes->pc_flags = ctx.pc_flags;
+            ctxdes->pc_offset = ctx.pc_offset;
+            for (i = 0; i < 6; i++)
+                ctxdes->pc_deskey[i] = ctx.pc_key[i];
+            for (i = 0; i < 5; i++)
+                ctxdes->pc_hminner[i] = ctx.pc_hminner[i];
+            for (i = 0; i < 5; i++)
+                ctxdes->pc_hmouter[i] = ctx.pc_hmouter[i];   
+            ctxdes->pc_iv[0] = ctx.pc_iv[0];
+            ctxdes->pc_iv[1] = ctx.pc_iv[1];
+        }
+    } else
+    {
+        /* old Broadcom card with fixed small command context structure */
+
+        /*
+         * [3]DES / MD5_HMAC / SHA1_HMAC
+         */
+        struct ubsec_pktctx *ctxs;
+
+        ctxs = (struct ubsec_pktctx *)(dmap->d_alloc.dma_vaddr +
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+ 
+        /* transform generic context into small context */
+        for (i = 0; i < 6; i++)
+            ctxs->pc_deskey[i] = ctx.pc_key[i];
+        for (i = 0; i < 5; i++)
+            ctxs->pc_hminner[i] = ctx.pc_hminner[i];
+        for (i = 0; i < 5; i++)
+            ctxs->pc_hmouter[i] = ctx.pc_hmouter[i];
+        ctxs->pc_iv[0] = ctx.pc_iv[0];
+        ctxs->pc_iv[1] = ctx.pc_iv[1];
+        ctxs->pc_flags = ctx.pc_flags;
+        ctxs->pc_offset = ctx.pc_offset;
+    }
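+
+    /*
+     * Note the difference between the two context layouts: the long
+     * contexts (ubsec_pktctx_des/_aes*) carry an explicit pc_len and
+     * pc_type header, while the old fixed-size ubsec_pktctx has neither
+     * and only supports [3]DES (see ubsecreg.h).
+     */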
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_lock_irqsave\n");
+#endif
+    spin_lock_irqsave(&sc->sc_ringmtx, flags);
+    //spin_lock_irq(&sc->sc_ringmtx);
+
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
+    sc->sc_nqueue++;
+    ubsecstats.hst_ipackets++;
+    ubsecstats.hst_ibytes += stheend;
+    ubsec_feed(sc);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_unlock_irqrestore\n");
+#endif
+    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+    //spin_unlock_irq(&sc->sc_ringmtx);
+    
+    return (0);
+
+errout:
+    if (q != NULL) {
+#ifdef NOTYET
+        if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+            m_freem(q->q_dst_m);
+#endif
+
+        if ((q->q_has_dst == 1) && q->q_dst_len > 0) {
+#if 0
+            bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+            bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+#endif
+            dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+        }
+        if (q->q_src_len > 0) {
+#if 0
+            bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+            dma_unmap(sc, q->q_src_map, q->q_src_len);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("spin_lock_irqsave\n");
+#endif
+        spin_lock_irqsave(&sc->sc_ringmtx, flags);
+        //spin_lock_irq(&sc->sc_ringmtx);
+
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("spin_unlock_irqrestore\n");
+#endif
+        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+        //spin_unlock_irq(&sc->sc_ringmtx);
+
+    }
+    if (err == EINVAL)
+        ubsecstats.hst_invalid++;
+    else
+        ubsecstats.hst_nomem++;
+errout2:
+    crp->crp_etype = err;
+    crypto_done(crp);
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s() err = %x\n", __FUNCTION__, err);
+#endif
+
+    return (0);
+}
+
+void
+ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+    struct cryptop *crp = (struct cryptop *)q->q_crp;
+    struct cryptodesc *crd;
+    struct ubsec_dma *dmap = q->q_dma;
+    int ivsize = 8;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    ubsecstats.hst_opackets++;
+    ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
+
+#if 0
+    bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
+        dmap->d_alloc.dma_map->dm_mapsize,
+        BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+    if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
+        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+        bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+        bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+    }
+    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+    bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+    bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+
+    if ((q->q_has_dst == 1) && q->q_dst_len > 0)
+        dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+
+    dma_unmap(sc, q->q_src_map, q->q_src_len);
+
+#ifdef NOTYET
+    if ((crp->crp_flags & CRYPTO_F_SKBUF) && (q->q_src_m != q->q_dst_m)) {
+        m_freem(q->q_src_m);
+        crp->crp_buf = (caddr_t)q->q_dst_m;
+    }
+#endif
+
+    /* copy out IV for future use */
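+    /* (The last ivsize bytes of ciphertext become the session IV,
+     * presumably so that consecutive CBC requests chain correctly.) */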
+    if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+            if (crd->crd_alg != CRYPTO_DES_CBC &&
+                crd->crd_alg != CRYPTO_3DES_CBC &&
+                crd->crd_alg != CRYPTO_AES_CBC)
+                continue;
+
+            if (crd->crd_alg == CRYPTO_AES_CBC)
+                ivsize = 16;
+            else
+                ivsize = 8;
+
+            if (crp->crp_flags & CRYPTO_F_SKBUF)
+#if 0
+                m_copydata((struct sk_buff *)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - 8, 8,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+
+            else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+                cuio_copydata((struct uio *)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - 8, 8,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+                    
+            }
+            break;
+        }
+    }
+
+    for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+        if (crd->crd_alg != CRYPTO_MD5_HMAC &&
+            crd->crd_alg != CRYPTO_SHA1_HMAC)
+            continue;
+#if 0
+        if (crp->crp_flags & CRYPTO_F_SKBUF)
+            m_copyback((struct sk_buff *)crp->crp_buf,
+                crd->crd_inject, 12,
+                dmap->d_dma->d_macbuf);
+#endif
+#if 0
+            /* BUG? it does not honor the mac len.. */
+            crypto_copyback(crp->crp_flags, crp->crp_buf,
+                crd->crd_inject, 12,
+                (caddr_t)dmap->d_dma->d_macbuf);
+#endif
+            crypto_copyback(crp->crp_flags, crp->crp_buf,
+                crd->crd_inject, 
+                sc->sc_sessions[q->q_sesn].ses_mlen,
+                (caddr_t)dmap->d_dma->d_macbuf);
+#if 0
+        else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
+            bcopy((caddr_t)dmap->d_dma->d_macbuf,
+                crp->crp_mac, 12);
+#endif
+        break;
+    }
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+    crypto_done(crp);
+}
+
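+/*
+ * Copy everything outside the byte range [hoffset, toffset) from the
+ * src skb chain to the dst chain, walking both fragment lists.  Kept
+ * from the BSD driver; its only caller sits in the disabled NOTYET
+ * path above.
+ */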
+void
+ubsec_mcopy(struct sk_buff *srcm, struct sk_buff *dstm, int hoffset, int toffset)
+{
+    int i, j, dlen, slen;
+    caddr_t dptr, sptr;
+
+    j = 0;
+    sptr = srcm->data;
+    slen = srcm->len;
+    dptr = dstm->data;
+    dlen = dstm->len;
+
+    while (1) {
+        for (i = 0; i < min(slen, dlen); i++) {
+            if (j < hoffset || j >= toffset)
+                *dptr++ = *sptr++;
+            slen--;
+            dlen--;
+            j++;
+        }
+        if (slen == 0) {
+            srcm = srcm->next;
+            if (srcm == NULL)
+                return;
+            sptr = srcm->data;
+            slen = srcm->len;
+        }
+        if (dlen == 0) {
+            dstm = dstm->next;
+            if (dstm == NULL)
+                return;
+            dptr = dstm->data;
+            dlen = dstm->len;
+        }
+    }
+}
+
+int
+ubsec_dma_malloc(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma, 
+    size_t size, int mapflags)
+{
+    dma->dma_vaddr = dma_alloc_coherent(sc->sc_dv, 
+        size, &dma->dma_paddr, GFP_KERNEL);
+
+    if (likely(dma->dma_vaddr))
+    {
+        dma->dma_size = size;
+        return (0);
+    }
+
+    DPRINTF("could not allocate %d bytes of coherent memory.\n", size);
+
+    return (1);
+}
+
+void
+ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
+{
+    dma_free_coherent(sc->sc_dv, dma->dma_size, dma->dma_vaddr, 
+        dma->dma_paddr);
+}
+
+/*
+ * Resets the board.  Values in the registers are left as-is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+void
+ubsec_reset_board(struct ubsec_softc *sc)
+{
+    volatile u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Send reset signal to chip.\n");
+
+    ctrl = READ_REG(sc, BS_CTRL);
+    ctrl |= BS_CTRL_RESET;
+    WRITE_REG(sc, BS_CTRL, ctrl);
+
+    /*
+     * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
+     */
+    DELAY(10);
+}
+
+/*
+ * Init Broadcom registers
+ */
+void
+ubsec_init_board(struct ubsec_softc *sc)
+{
+    u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Initialize chip.\n");
+
+    ctrl = READ_REG(sc, BS_CTRL);
+    ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
+    ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT | BS_CTRL_DMAERR;
+
+    WRITE_REG(sc, BS_CTRL, ctrl);
+
+    /* Set chip capabilities (BCM5365P) */
+    sc->sc_flags |= UBS_FLAGS_LONGCTX | UBS_FLAGS_AES;
+}
+
+/*
+ * Clean up after a chip crash.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_cleanchip(struct ubsec_softc *sc)
+{
+    struct ubsec_q *q;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Clean up queues after chip crash.\n");
+
+    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+        q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+        ubsec_free_q(sc, q);
+    }
+}
+
+/*
+ * free a ubsec_q
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+int
+ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+    struct ubsec_q *q2;
+    struct cryptop *crp;
+    int npkts;
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    npkts = q->q_nstacked_mcrs;
+
+    for (i = 0; i < npkts; i++) {
+        if(q->q_stacked_mcr[i]) {
+            q2 = q->q_stacked_mcr[i];
+
+            if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) 
+#ifdef NOTYET
+                m_freem(q2->q_dst_m);
+#else
+                printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+            crp = (struct cryptop *)q2->q_crp;
+            
+            BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
+            
+            crp->crp_etype = EFAULT;
+            crypto_done(crp);
+        } else {
+            break;
+        }
+    }
+
+    /*
+     * Free header MCR
+     */
+    if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+#ifdef NOTYET
+        m_freem(q->q_dst_m);
+#else
+        printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+    crp = (struct cryptop *)q->q_crp;
+    
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+    
+    crp->crp_etype = EFAULT;
+    crypto_done(crp);
+    return(0);
+}
+
+/*
+ * Routine to reset the chip and clean up.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_totalreset(struct ubsec_softc *sc)
+{
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("initiate total chip reset.. \n");
+    ubsec_reset_board(sc);
+    ubsec_init_board(sc);
+    ubsec_cleanchip(sc);
+}
+
+void
+ubsec_dump_pb(struct ubsec_pktbuf *pb)
+{
+    printf("addr 0x%x (0x%x) next 0x%x\n",
+        pb->pb_addr, pb->pb_len, pb->pb_next);
+}
+
+void
+ubsec_dump_mcr(struct ubsec_mcr *mcr)
+{
+    struct ubsec_mcr_add *ma;
+    int i;
+
+    printf("MCR:\n");
+    printf(" pkts: %u, flags 0x%x\n",
+        letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
+    ma = (struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
+    for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
+        printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
+            letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
+            letoh16(ma->mcr_reserved));
+        printf(" %d: ipkt ", i);
+        ubsec_dump_pb(&ma->mcr_ipktbuf);
+        printf(" %d: opkt ", i);
+        ubsec_dump_pb(&ma->mcr_opktbuf);
+        ma++;
+    }
+    printf("END MCR\n");
+}
+
+static int __init mod_init(void) {
+        return ssb_driver_register(&ubsec_ssb_driver);
+}
+
+static void __exit mod_exit(void) {
+        ssb_driver_unregister(&ubsec_ssb_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+// Meta information
+MODULE_AUTHOR("Daniel Mueller <daniel@danm.de>");
+MODULE_LICENSE("BSD");
+MODULE_DESCRIPTION("OCF driver for BCM5365P IPSec Core");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h
new file mode 100644
index 0000000000..dafac5b413
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/*
+ * Register definitions for 5601 BlueSteel Networks Ubiquitous Broadband
+ * Security "uBSec" chip.  Definitions from revision 2.8 of the product
+ * datasheet.
+ */
+
+#define BS_BAR          0x10    /* DMA base address register */
+#define BS_TRDY_TIMEOUT     0x40    /* TRDY timeout */
+#define BS_RETRY_TIMEOUT    0x41    /* DMA retry timeout */
+
+#define UBS_PCI_RTY_SHIFT           8
+#define UBS_PCI_RTY_MASK            0xff
+#define UBS_PCI_RTY(misc) \
+    (((misc) >> UBS_PCI_RTY_SHIFT) & UBS_PCI_RTY_MASK)
+
+#define UBS_PCI_TOUT_SHIFT          0
+#define UBS_PCI_TOUT_MASK           0xff
+#define UBS_PCI_TOUT(misc) \
+    (((misc) >> UBS_PCI_TOUT_SHIFT) & UBS_PCI_TOUT_MASK)
+
+/*
+ * DMA Control & Status Registers (offset from BS_BAR)
+ */
+#define BS_MCR1     0x20    /* DMA Master Command Record 1 */
+#define BS_CTRL     0x24    /* DMA Control */
+#define BS_STAT     0x28    /* DMA Status */
+#define BS_ERR      0x2c    /* DMA Error Address */
+#define BS_DEV_ID   0x34    /* IPSec Device ID */
+
+/* BS_CTRL - DMA Control */
+#define BS_CTRL_RESET       0x80000000  /* hardware reset, 5805/5820 */
+#define BS_CTRL_MCR2INT     0x40000000  /* enable intr MCR for MCR2 */
+#define BS_CTRL_MCR1INT     0x20000000  /* enable intr MCR for MCR1 */
+#define BS_CTRL_OFM     0x10000000  /* Output fragment mode */
+#define BS_CTRL_BE32        0x08000000  /* big-endian, 32bit bytes */
+#define BS_CTRL_BE64        0x04000000  /* big-endian, 64bit bytes */
+#define BS_CTRL_DMAERR      0x02000000  /* enable intr DMA error */
+#define BS_CTRL_RNG_M       0x01800000  /* RNG mode */
+#define BS_CTRL_RNG_1       0x00000000  /* 1bit rn/one slow clock */
+#define BS_CTRL_RNG_4       0x00800000  /* 1bit rn/four slow clocks */
+#define BS_CTRL_RNG_8       0x01000000  /* 1bit rn/eight slow clocks */
+#define BS_CTRL_RNG_16      0x01800000  /* 1bit rn/16 slow clocks */
+#define BS_CTRL_SWNORM      0x00400000  /* 582[01], sw normalization */
+#define BS_CTRL_FRAG_M      0x0000ffff  /* output fragment size mask */
+#define BS_CTRL_LITTLE_ENDIAN   (BS_CTRL_BE32 | BS_CTRL_BE64)
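+/* Note: "little endian" mode is apparently selected by setting both
+ * byte-swap bits; ubsec_init_board() relies on this. */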
+
+/* BS_STAT - DMA Status */
+#define BS_STAT_MCR1_BUSY   0x80000000  /* MCR1 is busy */
+#define BS_STAT_MCR1_FULL   0x40000000  /* MCR1 is full */
+#define BS_STAT_MCR1_DONE   0x20000000  /* MCR1 is done */
+#define BS_STAT_DMAERR      0x10000000  /* DMA error */
+#define BS_STAT_MCR2_FULL   0x08000000  /* MCR2 is full */
+#define BS_STAT_MCR2_DONE   0x04000000  /* MCR2 is done */
+#define BS_STAT_MCR1_ALLEMPTY   0x02000000  /* 5821, MCR1 is empty */
+#define BS_STAT_MCR2_ALLEMPTY   0x01000000  /* 5821, MCR2 is empty */
+
+/* BS_ERR - DMA Error Address */
+#define BS_ERR_ADDR     0xfffffffc  /* error address mask */
+#define BS_ERR_READ     0x00000002  /* fault was on read */
+
+struct ubsec_pktctx {
+    u_int32_t   pc_deskey[6];       /* 3DES key */
+    u_int32_t   pc_hminner[5];      /* hmac inner state */
+    u_int32_t   pc_hmouter[5];      /* hmac outer state */
+    u_int32_t   pc_iv[2];       /* [3]DES iv */
+    u_int16_t   pc_flags;       /* flags, below */
+    u_int16_t   pc_offset;      /* crypto offset */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_ENC_3DES 0x8000      /* use 3des */
+#define UBS_PKTCTX_ENC_AES  0x8000      /* use aes */
+#define UBS_PKTCTX_ENC_NONE 0x0000      /* no encryption */
+#define UBS_PKTCTX_INBOUND  0x4000      /* inbound packet */
+#define UBS_PKTCTX_AUTH     0x3000      /* authentication mask */
+#define UBS_PKTCTX_AUTH_NONE    0x0000      /* no authentication */
+#define UBS_PKTCTX_AUTH_MD5 0x1000      /* use hmac-md5 */
+#define UBS_PKTCTX_AUTH_SHA1    0x2000      /* use hmac-sha1 */
+#define UBS_PKTCTX_AES128   0x0         /* AES 128bit keys */
+#define UBS_PKTCTX_AES192   0x100       /* AES 192bit keys */
+#define UBS_PKTCTX_AES256   0x200       /* AES 256bit keys */
+
+struct ubsec_pktctx_des {
+    volatile u_int16_t  pc_len;     /* length of ctx struct */
+    volatile u_int16_t  pc_type;    /* context type */
+    volatile u_int16_t  pc_flags;   /* flags, same as above */
+    volatile u_int16_t  pc_offset;  /* crypto/auth offset */
+    volatile u_int32_t  pc_deskey[6];   /* 3DES key */
+    volatile u_int32_t  pc_iv[2];   /* [3]DES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes128 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[4];   /* AES 128bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes192 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[6];   /* AES 192bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes256 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[8];   /* AES 256bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_TYPE_IPSEC_DES   0x0000
+#define UBS_PKTCTX_TYPE_IPSEC_AES   0x0040
+
+struct ubsec_pktbuf {
+    volatile u_int32_t  pb_addr;    /* address of buffer start */
+    volatile u_int32_t  pb_next;    /* pointer to next pktbuf */
+    volatile u_int32_t  pb_len;     /* packet length */
+} __attribute__ ((packed));
+#define UBS_PKTBUF_LEN      0x0000ffff  /* length mask */
+
+struct ubsec_mcr {
+    volatile u_int16_t  mcr_pkts;   /* #pkts in this mcr */
+    volatile u_int16_t  mcr_flags;  /* mcr flags (below) */
+    volatile u_int32_t  mcr_cmdctxp;    /* command ctx pointer */
+    struct ubsec_pktbuf mcr_ipktbuf;    /* input chain header */
+    volatile u_int16_t  mcr_reserved;
+    volatile u_int16_t  mcr_pktlen;
+    struct ubsec_pktbuf mcr_opktbuf;    /* output chain header */
+} __attribute__ ((packed));
+
+struct ubsec_mcr_add {
+    volatile u_int32_t  mcr_cmdctxp;    /* command ctx pointer */
+    struct ubsec_pktbuf mcr_ipktbuf;    /* input chain header */
+    volatile u_int16_t  mcr_reserved;
+    volatile u_int16_t  mcr_pktlen;
+    struct ubsec_pktbuf mcr_opktbuf;    /* output chain header */
+} __attribute__ ((packed));
+
+#define UBS_MCR_DONE        0x0001      /* mcr has been processed */
+#define UBS_MCR_ERROR       0x0002      /* error in processing */
+#define UBS_MCR_ERRORCODE   0xff00      /* error type */
+
+struct ubsec_ctx_keyop {
+    volatile u_int16_t  ctx_len;    /* command length */
+    volatile u_int16_t  ctx_op;     /* operation code */
+    volatile u_int8_t   ctx_pad[60];    /* padding */
+} __attribute__ ((packed));
+#define UBS_CTXOP_DHPKGEN   0x01        /* dh public key generation */
+#define UBS_CTXOP_DHSSGEN   0x02        /* dh shared secret gen. */
+#define UBS_CTXOP_RSAPUB    0x03        /* rsa public key op */
+#define UBS_CTXOP_RSAPRIV   0x04        /* rsa private key op */
+#define UBS_CTXOP_DSASIGN   0x05        /* dsa signing op */
+#define UBS_CTXOP_DSAVRFY   0x06        /* dsa verification */
+#define UBS_CTXOP_RNGBYPASS 0x41        /* rng direct test mode */
+#define UBS_CTXOP_RNGSHA1   0x42        /* rng sha1 test mode */
+#define UBS_CTXOP_MODADD    0x43        /* modular addition */
+#define UBS_CTXOP_MODSUB    0x44        /* modular subtraction */
+#define UBS_CTXOP_MODMUL    0x45        /* modular multiplication */
+#define UBS_CTXOP_MODRED    0x46        /* modular reduction */
+#define UBS_CTXOP_MODEXP    0x47        /* modular exponentiation */
+#define UBS_CTXOP_MODINV    0x48        /* modular inverse */
+
+struct ubsec_ctx_rngbypass {
+    volatile u_int16_t  rbp_len;    /* command length, 64 */
+    volatile u_int16_t  rbp_op;     /* rng bypass, 0x41 */
+    volatile u_int8_t   rbp_pad[60];    /* padding */
+} __attribute__ ((packed));
+
+/* modexp: C = (M ^ E) mod N */
+struct ubsec_ctx_modexp {
+    volatile u_int16_t  me_len;     /* command length */
+    volatile u_int16_t  me_op;      /* modexp, 0x47 */
+    volatile u_int16_t  me_E_len;   /* E (bits) */
+    volatile u_int16_t  me_N_len;   /* N (bits) */
+    u_int8_t        me_N[2048/8];   /* N */
+} __attribute__ ((packed));
+
+struct ubsec_ctx_rsapriv {
+    volatile u_int16_t  rpr_len;    /* command length */
+    volatile u_int16_t  rpr_op;     /* rsaprivate, 0x04 */
+    volatile u_int16_t  rpr_q_len;  /* q (bits) */
+    volatile u_int16_t  rpr_p_len;  /* p (bits) */
+    u_int8_t        rpr_buf[5 * 1024 / 8];  /* parameters: */
+                        /* p, q, dp, dq, pinv */
+} __attribute__ ((packed));
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h
new file mode 100644
index 0000000000..c808f955b4
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h
@@ -0,0 +1,228 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/* Maximum queue length */
+#ifndef UBS_MAX_NQUEUE
+#define UBS_MAX_NQUEUE      60
+#endif
+
+#define UBS_MAX_SCATTER     64  /* Maximum scatter/gather depth */
+
+#ifndef UBS_MAX_AGGR
+#define UBS_MAX_AGGR        5   /* Maximum aggregation count */
+#endif
+
+#define UBSEC_CARD(sid)     (((sid) & 0xf0000000) >> 28)
+#define UBSEC_SESSION(sid)  ( (sid) & 0x0fffffff)
+#define UBSEC_SID(crd, sesn)    (((crd) << 28) | ((sesn) & 0x0fffffff))
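+/*
+ * Example: UBSEC_SID(2, 5) yields 0x20000005; UBSEC_CARD() recovers 2
+ * and UBSEC_SESSION() recovers 5 from it.
+ */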
+
+#define UBS_DEF_RTY     0xff    /* PCI Retry Timeout */
+#define UBS_DEF_TOUT        0xff    /* PCI TRDY Timeout */
+#define UBS_DEF_CACHELINE   0x01    /* Cache Line setting */
+
+#define DEFAULT_HMAC_LEN     12
+
+struct ubsec_dma_alloc {
+    dma_addr_t      dma_paddr;
+    void            *dma_vaddr;
+    /*
+    bus_dmamap_t            dma_map;
+    bus_dma_segment_t       dma_seg;
+    */
+    size_t          dma_size;
+    /*
+    int             dma_nseg;
+    */
+};
+
+struct ubsec_q2 {
+    BSD_SIMPLEQ_ENTRY(ubsec_q2)     q_next;
+    struct ubsec_dma_alloc      q_mcr;
+    struct ubsec_dma_alloc      q_ctx;
+    u_int               q_type;
+};
+
+struct ubsec_q2_rng {
+    struct ubsec_q2         rng_q;
+    struct ubsec_dma_alloc      rng_buf;
+    int             rng_used;
+};
+
+/* C = (M ^ E) mod N */
+#define UBS_MODEXP_PAR_M    0
+#define UBS_MODEXP_PAR_E    1
+#define UBS_MODEXP_PAR_N    2
+struct ubsec_q2_modexp {
+    struct ubsec_q2         me_q;
+    struct cryptkop *       me_krp;
+    struct ubsec_dma_alloc      me_M;
+    struct ubsec_dma_alloc      me_E;
+    struct ubsec_dma_alloc      me_C;
+    struct ubsec_dma_alloc      me_epb;
+    int             me_modbits;
+    int             me_shiftbits;
+    int             me_normbits;
+};
+
+#define UBS_RSAPRIV_PAR_P   0
+#define UBS_RSAPRIV_PAR_Q   1
+#define UBS_RSAPRIV_PAR_DP  2
+#define UBS_RSAPRIV_PAR_DQ  3
+#define UBS_RSAPRIV_PAR_PINV    4
+#define UBS_RSAPRIV_PAR_MSGIN   5
+#define UBS_RSAPRIV_PAR_MSGOUT  6
+struct ubsec_q2_rsapriv {
+    struct ubsec_q2         rpr_q;
+    struct cryptkop *       rpr_krp;
+    struct ubsec_dma_alloc      rpr_msgin;
+    struct ubsec_dma_alloc      rpr_msgout;
+};
+
+#define UBSEC_RNG_BUFSIZ    16      /* measured in 32bit words */
+
+struct ubsec_dmachunk {
+    struct ubsec_mcr    d_mcr;
+    struct ubsec_mcr_add    d_mcradd[UBS_MAX_AGGR-1];
+    struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1];
+    struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1];
+    u_int32_t       d_macbuf[5];
+    union {
+        struct ubsec_pktctx_aes256 ctxaes256;
+        struct ubsec_pktctx_aes192 ctxaes192;
+        struct ubsec_pktctx_des ctxdes;
+        struct ubsec_pktctx_aes128 ctxaes128;
+        struct ubsec_pktctx     ctx;
+    } d_ctx;
+};
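+
+/*
+ * ubsec_dmachunk is the per-request DMA block: ubsec_process() fills
+ * d_mcr and the d_sbuf/d_dbuf scatter lists, points mcr_cmdctxp at
+ * d_ctx via offsetof(), and the chip writes the HMAC into d_macbuf.
+ */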
+
+struct ubsec_dma {
+    BSD_SIMPLEQ_ENTRY(ubsec_dma)    d_next;
+    struct ubsec_dmachunk       *d_dma;
+    struct ubsec_dma_alloc      d_alloc;
+};
+
+#define UBS_FLAGS_KEY       0x01        /* has key accelerator */
+#define UBS_FLAGS_LONGCTX   0x02        /* uses long ipsec ctx */
+#define UBS_FLAGS_BIGKEY    0x04        /* 2048bit keys */
+#define UBS_FLAGS_HWNORM    0x08        /* hardware normalization */
+#define UBS_FLAGS_RNG       0x10        /* hardware rng */
+#define UBS_FLAGS_AES       0x20        /* hardware AES support */
+
+struct ubsec_q {
+    BSD_SIMPLEQ_ENTRY(ubsec_q)      q_next;
+    int             q_nstacked_mcrs;
+    struct ubsec_q          *q_stacked_mcr[UBS_MAX_AGGR-1];
+    struct cryptop          *q_crp;
+    struct ubsec_dma        *q_dma;
+
+    //struct mbuf           *q_src_m, *q_dst_m;
+    struct sk_buff      *q_src_m, *q_dst_m;
+    struct uio          *q_src_io, *q_dst_io;
+
+    /*
+    bus_dmamap_t            q_src_map;
+    bus_dmamap_t            q_dst_map;
+    */
+
+    /* DMA scatter/gather state for input/output packets */
+    int q_src_len;
+    int q_dst_len;
+    struct ubsec_dma_alloc  q_src_map[UBS_MAX_SCATTER];
+    struct ubsec_dma_alloc  q_dst_map[UBS_MAX_SCATTER];
+    int q_has_dst;
+
+    int             q_sesn;
+    int             q_flags;
+};
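+
+/*
+ * Each queue entry carries up to UBS_MAX_SCATTER pre-translated DMA
+ * fragments per direction; q_has_dst is 0 when the result is written
+ * back in place over the source fragments.
+ */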
+
+struct ubsec_softc {
+    softc_device_decl   sc_dev;
+    struct ssb_device   *sdev;      /* device backpointer */
+
+    struct device       *sc_dv;     /* generic device */
+    void                *sc_ih;     /* interrupt handler cookie */
+    int                 sc_flags;   /* device specific flags */
+    u_int32_t           sc_statmask;    /* interrupt status mask */
+    int32_t             sc_cid;     /* crypto tag */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_queue;   /* packet queue, mcr1 */
+    int                 sc_nqueue;  /* count enqueued, mcr1 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_qchip;   /* on chip, mcr1 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_freequeue;   /* list of free queue elements */
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_queue2;  /* packet queue, mcr2 */
+    int                 sc_nqueue2; /* count enqueued, mcr2 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2;  /* on chip, mcr2 */
+    int                 sc_nsessions;   /* # of sessions */
+    struct ubsec_session        *sc_sessions;   /* sessions */
+    int                 sc_rnghz;   /* rng poll time */
+    struct ubsec_q2_rng sc_rng;
+    struct ubsec_dma    sc_dmaa[UBS_MAX_NQUEUE];
+    struct ubsec_q      *sc_queuea[UBS_MAX_NQUEUE];
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_q2free;  /* free list */
+    spinlock_t          sc_ringmtx; /* PE ring lock */
+};
+
+#define UBSEC_QFLAGS_COPYOUTIV      0x1
+
+struct ubsec_session {
+    u_int32_t   ses_used;
+    u_int32_t   ses_key[8];         /* 3DES/AES key */
+    u_int32_t   ses_hminner[5];     /* hmac inner state */
+    u_int32_t   ses_hmouter[5];     /* hmac outer state */
+    u_int32_t   ses_iv[4];          /* [3]DES/AES iv */
+    u_int32_t   ses_keysize;        /* AES key size */
+    u_int32_t   ses_mlen;           /* hmac/hash length */
+};
+
+struct ubsec_stats {
+    u_int64_t hst_ibytes;
+    u_int64_t hst_obytes;
+    u_int32_t hst_ipackets;
+    u_int32_t hst_opackets;
+    u_int32_t hst_invalid;
+    u_int32_t hst_nomem;
+    u_int32_t hst_queuefull;
+    u_int32_t hst_dmaerr;
+    u_int32_t hst_mcrerr;
+    u_int32_t hst_nodmafree;
+};
+
+struct ubsec_generic_ctx {
+    u_int32_t   pc_key[8];      /* [3]DES/AES key */
+    u_int32_t   pc_hminner[5];  /* hmac inner state */
+    u_int32_t   pc_hmouter[5];  /* hmac outer state */
+    u_int32_t   pc_iv[4];       /* [3]DES/AES iv */
+    u_int16_t   pc_flags;       /* flags, below */
+    u_int16_t   pc_offset;      /* crypto offset */
+    u_int16_t   pc_type;        /* Cryptographic operation */
+};
+
-- 
cgit v1.2.3