/* $Id: $ */
/*
* Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
* Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
* Copyright (c) 2000 Jason L. Wright (jason@thought.net)
* Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
* Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Effort sponsored in part by the Defense Advanced Research Projects
* Agency (DARPA) and Air Force Research Laboratory, Air Force
* Materiel Command, USAF, under agreement number F30602-01-2-0537.
*
*/
#undef UBSEC_DEBUG
#undef UBSEC_VERBOSE_DEBUG
#ifdef UBSEC_VERBOSE_DEBUG
#define UBSEC_DEBUG
#endif
/*
* uBsec BCM5365 hardware crypto accelerator
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/stat.h>
#include <asm/io.h>
#include <linux/ssb/ssb.h>
/*
* BSD queue
*/
#include "bsdqueue.h"
/*
* OCF
*/
#include "cryptodev.h"
#include "uio.h"
#define HMAC_HACK 1
#ifdef HMAC_HACK
#include "hmachack.h"
#include "md5.h"
#include "md5.c"
#include "sha1.h"
#include "sha1.c"
#endif
#include "ubsecreg.h"
#include "ubsecvar.h"
#define DRV_MODULE_NAME "ubsec_ssb"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.02"
#define DRV_MODULE_RELDATE "Feb 21, 2009"
#if 1
#define DPRINTF(a...) \
if (debug) \
{ \
printk(DRV_MODULE_NAME ": " a); \
}
#else
#define DPRINTF(a...)
#endif
/*
* Prototypes
*/
static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
static int __devinit ubsec_ssb_probe(struct ssb_device *sdev,
const struct ssb_device_id *ent);
static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
struct device *self);
static void ubsec_setup_mackey(struct ubsec_session *ses, int algo,
caddr_t key, int klen);
static int dma_map_skb(struct ubsec_softc *sc,
struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen);
static int dma_map_uio(struct ubsec_softc *sc,
struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
static void dma_unmap(struct ubsec_softc *sc,
struct ubsec_dma_alloc *q_map, int mlen);
static int ubsec_dmamap_aligned(struct ubsec_softc *sc,
const struct ubsec_dma_alloc *q_map, int mlen);
#ifdef UBSEC_DEBUG
static int proc_read(char *buf, char **start, off_t offset,
int size, int *peof, void *data);
#endif
void ubsec_reset_board(struct ubsec_softc *);
void ubsec_init_board(struct ubsec_softc *);
void ubsec_cleanchip(struct ubsec_softc *);
void ubsec_totalreset(struct ubsec_softc *);
int ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);
static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ubsec_freesession(device_t, u_int64_t);
static int ubsec_process(device_t, struct cryptop *, int);
void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
void ubsec_feed(struct ubsec_softc *);
void ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
int ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
size_t, int);
/* DEBUG crap... */
void ubsec_dump_pb(struct ubsec_pktbuf *);
void ubsec_dump_mcr(struct ubsec_mcr *);
/*
 * SSB register access helpers.
 *
 * NOTE(review): the previous definitions ended in a semicolon, which
 * made the macros unusable in expression context (e.g.
 * "if (READ_REG(sc, r) & flag)") and silently emitted empty statements
 * after every use.  The semicolons are dropped here; every call site
 * supplies its own terminating ";" as usual.
 */
#define READ_REG(sc,r) \
	ssb_read32((sc)->sdev, (r))
#define WRITE_REG(sc,r,val) \
	ssb_write32((sc)->sdev, (r), (val))
#define READ_REG_SDEV(sdev,r) \
	ssb_read32((sdev), (r))
#define WRITE_REG_SDEV(sdev,r,val) \
	ssb_write32((sdev), (r), (val))
#define SWAP32(x) (x) = htole32(ntohl((x)))
#define HTOLE32(x) (x) = htole32(x)
#ifdef __LITTLE_ENDIAN
#define letoh16(x) (x)
#define letoh32(x) (x)
#endif
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug output");
#define UBSEC_SSB_MAX_CHIPS 1
static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
static struct ubsec_stats ubsecstats;
#ifdef UBSEC_DEBUG
static struct proc_dir_entry *procdebug;
#endif
/*
 * SSB device-ID table: this driver binds to the Broadcom IPSec core
 * (any revision) found on the Sonics Silicon Backplane.
 */
static struct ssb_device_id ubsec_ssb_tbl[] = {
    /* Broadcom BCM5365P IPSec Core */
    SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
    SSB_DEVTABLE_END
};
/*
 * SSB bus-driver glue: connects the probe/remove entry points above to
 * the SSB core.  Suspend/resume handlers are not implemented (left
 * commented out by the original author).
 */
static struct ssb_driver ubsec_ssb_driver = {
    .name = DRV_MODULE_NAME,
    .id_table = ubsec_ssb_tbl,
    .probe = ubsec_ssb_probe,
    .remove = __devexit_p(ubsec_ssb_remove),
    /*
    .suspend = ubsec_ssb_suspend,
    .resume = ubsec_ssb_resume
    */
};
/*
 * OCF (OpenBSD Cryptographic Framework) device methods: session
 * creation/teardown and request processing callbacks registered with
 * the crypto layer.
 */
static device_method_t ubsec_ssb_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, ubsec_newsession),
    DEVMETHOD(cryptodev_freesession,ubsec_freesession),
    DEVMETHOD(cryptodev_process, ubsec_process),
};
#ifdef UBSEC_DEBUG
static int
proc_read(char *buf, char **start, off_t offset,
        int size, int *peof, void *data)
{
	/*
	 * /proc read handler (debug builds only): dump the DMA status and
	 * control registers of every attached chip into 'buf'.
	 *
	 * Returns the number of bytes written into 'buf' (never more than
	 * 'size') and sets *peof, i.e. the whole report is produced in a
	 * single read.  'start', 'offset' and 'data' are unused.
	 */
	int i = 0, byteswritten = 0, ret;
	unsigned int stat, ctrl;
#ifdef UBSEC_VERBOSE_DEBUG
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
#endif

	while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
	{
		struct ubsec_softc *sc = ubsec_chip_idx[i];

		stat = READ_REG(sc, BS_STAT);
		ctrl = READ_REG(sc, BS_CTRL);

		ret = snprintf((buf + byteswritten),
				(size - byteswritten),
				"DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
		/*
		 * BUGFIX: snprintf() returns the length that WOULD have been
		 * written.  On truncation the old code let 'byteswritten'
		 * exceed 'size', so the next iteration passed a negative
		 * length (converted to a huge size_t) to snprintf and the
		 * function returned more bytes than the buffer holds.
		 */
		if (ret < 0)
			break;
		if (ret >= (size - byteswritten)) {
			byteswritten = size;
			break;
		}
		byteswritten += ret;

#ifdef UBSEC_VERBOSE_DEBUG
		/* BUGFIX: was userspace printf(), which does not exist in the
		 * kernel and broke verbose-debug builds. */
		printk("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);

		/* Dump all queues MCRs */
		if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
			{
				dmap = q->q_dma;
				ubsec_dump_mcr(&dmap->d_dma->d_mcr);
			}
		}
#endif

		i++;
	}

	*peof = 1;

	return byteswritten;
}
#endif
/*
* map in a given sk_buff
*/
/*
 * Map every data segment of an sk_buff (linear head + paged fragments)
 * for bidirectional DMA.
 *
 * On success each segment's bus address, virtual address and length are
 * recorded in q_map[0..*mlen-1] and 0 is returned with *mlen set to the
 * number of segments.  On failure a negative errno is returned, any
 * segments mapped so far are unmapped again, and *mlen is untouched.
 */
static int
dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen)
{
	int i = 0;
	dma_addr_t tmp;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	/*
	 * We support only a limited number of fragments.
	 */
	if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
	{
		printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
		return (-ENOMEM);
	}

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
#endif

	/* first data package: the linear skb head */
	tmp = dma_map_single(sc->sc_dv,
			skb->data,
			skb_headlen(skb),
			DMA_BIDIRECTIONAL);
	if (unlikely(tmp == 0))
	{
		printk(KERN_ERR "Could not map memory region for dma.\n");
		return (-EINVAL);
	}
	q_map[i].dma_paddr = tmp;
	q_map[i].dma_vaddr = skb->data;
	q_map[i].dma_size = skb_headlen(skb);

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
#endif

	/* all other data packages: the paged fragments */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1,
			(unsigned int)page_address(skb_shinfo(skb)->frags[i].page) +
			skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
#endif

		tmp = dma_map_single(sc->sc_dv,
				page_address(skb_shinfo(skb)->frags[i].page) +
				skb_shinfo(skb)->frags[i].page_offset,
				skb_shinfo(skb)->frags[i].size,
				DMA_BIDIRECTIONAL);
		if (unlikely(tmp == 0))
		{
			printk(KERN_ERR "Could not map memory region for dma.\n");
			/*
			 * BUGFIX: the old code returned here without undoing
			 * the head + fragments 0..i-1 mappings, leaking DMA
			 * mappings on every partial failure.  q_map[0..i] holds
			 * exactly the i+1 segments mapped so far.
			 */
			dma_unmap(sc, q_map, i + 1);
			return (-EINVAL);
		}
		q_map[i + 1].dma_paddr = tmp;
		q_map[i + 1].dma_vaddr = (void*)(page_address(skb_shinfo(skb)->frags[i].page) +
			skb_shinfo(skb)->frags[i].page_offset);
		q_map[i + 1].dma_size = skb_shinfo(skb)->frags[i].size;

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
#endif
	}
	*mlen = i + 1;

	return(0);
}
/*
* map in a given uio buffer
*/
static int
dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
{
struct iovec *iov = uio->uio_iov;
int n;
dma_addr_t tmp;
#ifdef UBSEC_DEBUG
DPRINTF("%s()\n", __FUNCTION__);
#endif
/*
* We support only a limited number of fragments.
*/
if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
{
printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
return (-ENOMEM);
}
for (n = 0; n < uio->uio_iovcnt; n++) {
#ifdef UBSEC_VERBOSE_DEBUG
DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
#endif
tmp = dma_map_single(sc->sc_dv,
iov->iov_base,
iov->iov_len,
DMA_BIDIRECTIONAL);
q_map[n].dma_paddr = tmp;
q_map[n].dma_vaddr = iov->iov_base;
q_map[n].dma_size = iov->iov_len;
if (unlikely(tmp == 0))
{
printk(KERN_ERR "Could not map memory region for dma.\n");
return (-EINVAL);
}
#ifdef UBSEC_VERBOSE_DEBUG
DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);