| author | blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73> | 2012-10-05 10:12:53 +0000 | 
|---|---|---|
| committer | blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73> | 2012-10-05 10:12:53 +0000 | 
| commit | 5c105d9f3fd086aff195d3849dcf847d6b0bd927 (patch) | |
| tree | 1229a11f725bfa58aa7c57a76898553bb5f6654a /target/linux/generic/files/crypto/ocf/cryptocteon | |
| download | openwrt-5c105d9f3fd086aff195d3849dcf847d6b0bd927.tar.gz openwrt-5c105d9f3fd086aff195d3849dcf847d6b0bd927.zip | |
branch Attitude Adjustment
git-svn-id: svn://svn.openwrt.org/openwrt/branches/attitude_adjustment@33625 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/generic/files/crypto/ocf/cryptocteon')
4 files changed, 2887 insertions, 0 deletions
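The bulk of this commit is cavium_crypto.c, an OCF driver that talks to the Octeon COP2 hash and cipher units directly. Its session setup, octo_calc_hash() in the diff below, relies on the standard HMAC precomputation: XOR the key into blocks filled with the 0x36 (ipad) and 0x5c (opad) constants, run each block through MD5 or SHA-1 once, and keep the intermediate digest states as the per-session octo_hminner/octo_hmouter values, so per-packet hashing resumes from those states instead of rehashing the key. The sketch below shows that pad construction in plain C for reference only; hmac_build_pads() is a hypothetical helper and not part of the commit.

```c
#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_LEN 64	/* MD5 and SHA-1 both use a 64-byte block */

/*
 * Illustrative sketch only (not part of the commit): build the HMAC
 * ipad/opad blocks the way octo_calc_hash() does before handing them
 * to the Octeon hash unit.
 */
void hmac_build_pads(const uint8_t *key, size_t key_len,
		     uint8_t ipad[HMAC_BLOCK_LEN],
		     uint8_t opad[HMAC_BLOCK_LEN])
{
	size_t i;

	/* The driver only ever copies 16 (MD5) or 20 (SHA-1) key bytes;
	 * the rest of the block stays zero. */
	memset(ipad, 0, HMAC_BLOCK_LEN);
	memcpy(ipad, key, key_len);
	memcpy(opad, ipad, HMAC_BLOCK_LEN);

	for (i = 0; i < HMAC_BLOCK_LEN; i++) {
		ipad[i] ^= 0x36;	/* the 0x3636...ULL constant in the code below */
		opad[i] ^= 0x5c;	/* the 0x5c5c...ULL constant in the code below */
	}

	/*
	 * cavium_crypto.c then feeds each 64-byte block to the COP2 hash
	 * unit eight bytes at a time (CVMX_MT_HSH_DAT followed by
	 * CVMX_MT_HSH_STARTMD5/STARTSHA) and reads the intermediate
	 * digest back with CVMX_MF_HSH_IV into octo_hminner/octo_hmouter.
	 */
}
```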
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile b/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile
new file mode 100644
index 000000000..eeed0d641
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile
@@ -0,0 +1,17 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef CONFIG_OCF_CRYPTOCTEON
+# you need the cavium crypto component installed
+EXTRA_CFLAGS += -I$(ROOTDIR)/prop/include
+endif
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt b/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt
new file mode 100644
index 000000000..807b2e518
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt
@@ -0,0 +1,11 @@
+
+You will need the CRYPTO package installed to build this driver,  and
+potentially the ADK.
+
+cavium_crypto sourced from:
+
+	adk/components/source/cavium_ipsec_kame/cavium_ipsec.c
+
+and significantly modified to suit use with OCF.  All original
+copyright/ownership headers retained.
+
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c b/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c
new file mode 100644
index 000000000..ceaf77c5c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c
@@ -0,0 +1,2283 @@
+/*
+ * Copyright (c) 2009 David McCullough <david.mccullough@securecomputing.com>
+ *
+ * Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Cavium Networks
+ * 4. Cavium Networks' name may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import regulations
+ * in other countries. You warrant that You will comply strictly in all
+ * respects with all such regulations and acknowledge that you have the
+ * responsibility to obtain licenses to export, re-export or import the
+ * Software.
+ *  + * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" AND + * WITH ALL FAULTS AND CAVIUM MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, + * EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE + * SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR + * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM + * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, + * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF + * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR + * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR + * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. +*/ +/****************************************************************************/ + +#include <linux/scatterlist.h> +#include <asm/octeon/octeon.h> +#include "octeon-asm.h" + +/****************************************************************************/ + +extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *); +extern void octeon_crypto_disable(struct octeon_cop2_state *, unsigned long); + +#define SG_INIT(s, p, i, l) \ +	{ \ +	    (i) = 0; \ +	    (l) = (s)[0].length; \ +	    (p) = (typeof(p)) sg_virt((s)); \ +		CVMX_PREFETCH0((p)); \ +	} + +#define SG_CONSUME(s, p, i, l) \ +	{ \ +		(p)++; \ +		(l) -= sizeof(*(p)); \ +		if ((l) < 0) { \ +			dprintk("%s, %d: l = %d\n", __FILE__, __LINE__, l); \ +		} else if ((l) == 0) { \ +		    (i)++; \ +		    (l) = (s)[0].length; \ +		    (p) = (typeof(p)) sg_virt(s); \ +			CVMX_PREFETCH0((p)); \ +		} \ +	} + +#define ESP_HEADER_LENGTH     8 +#define DES_CBC_IV_LENGTH     8 +#define AES_CBC_IV_LENGTH     16 +#define ESP_HMAC_LEN          12 + +#define ESP_HEADER_LENGTH 8 +#define DES_CBC_IV_LENGTH 8 + +/****************************************************************************/ + +#define CVM_LOAD_SHA_UNIT(dat, next)  { \ +   if (next == 0) {                     \ +      next = 1;                         \ +      CVMX_MT_HSH_DAT (dat, 0);         \ +   } else if (next == 1) {              \ +      next = 2;                         \ +      CVMX_MT_HSH_DAT (dat, 1);         \ +   } else if (next == 2) {              \ +      next = 3;                    \ +      CVMX_MT_HSH_DAT (dat, 2);         \ +   } else if (next == 3) {              \ +      next = 4;                         \ +      CVMX_MT_HSH_DAT (dat, 3);         \ +   } else if (next == 4) {              \ +      next = 5;                           \ +      CVMX_MT_HSH_DAT (dat, 4);         \ +   } else if (next == 5) {              \ +      next = 6;                         \ +      CVMX_MT_HSH_DAT (dat, 5);         \ +   } else if (next == 6) {              \ +      next = 7;                         \ +      CVMX_MT_HSH_DAT (dat, 6);         \ +   } else {                             \ +     CVMX_MT_HSH_STARTSHA (dat);        \ +     next = 0;                          \ +   }                                    \ +} + +#define CVM_LOAD2_SHA_UNIT(dat1, dat2, next)  { \ +   if (next == 0) {                      \ +      CVMX_MT_HSH_DAT (dat1, 0);         \ +      CVMX_MT_HSH_DAT (dat2, 1);         \ +      next = 2;                          \ +   } else if (next == 1) {               \ +      CVMX_MT_HSH_DAT (dat1, 1);         \ +      CVMX_MT_HSH_DAT (dat2, 2);         \ +      next = 3;                          \ +   } else if (next == 2) {               \ +      CVMX_MT_HSH_DAT (dat1, 2);         \ +      CVMX_MT_HSH_DAT (dat2, 3);         \ +  
    next = 4;                          \ +   } else if (next == 3) {               \ +      CVMX_MT_HSH_DAT (dat1, 3);         \ +      CVMX_MT_HSH_DAT (dat2, 4);         \ +      next = 5;                          \ +   } else if (next == 4) {               \ +      CVMX_MT_HSH_DAT (dat1, 4);         \ +      CVMX_MT_HSH_DAT (dat2, 5);         \ +      next = 6;                          \ +   } else if (next == 5) {               \ +      CVMX_MT_HSH_DAT (dat1, 5);         \ +      CVMX_MT_HSH_DAT (dat2, 6);         \ +      next = 7;                          \ +   } else if (next == 6) {               \ +      CVMX_MT_HSH_DAT (dat1, 6);         \ +      CVMX_MT_HSH_STARTSHA (dat2);       \ +      next = 0;                          \ +   } else {                              \ +     CVMX_MT_HSH_STARTSHA (dat1);        \ +     CVMX_MT_HSH_DAT (dat2, 0);          \ +     next = 1;                           \ +   }                                     \ +} + +/****************************************************************************/ + +#define CVM_LOAD_MD5_UNIT(dat, next)  { \ +   if (next == 0) {                     \ +      next = 1;                         \ +      CVMX_MT_HSH_DAT (dat, 0);         \ +   } else if (next == 1) {              \ +      next = 2;                         \ +      CVMX_MT_HSH_DAT (dat, 1);         \ +   } else if (next == 2) {              \ +      next = 3;                    \ +      CVMX_MT_HSH_DAT (dat, 2);         \ +   } else if (next == 3) {              \ +      next = 4;                         \ +      CVMX_MT_HSH_DAT (dat, 3);         \ +   } else if (next == 4) {              \ +      next = 5;                           \ +      CVMX_MT_HSH_DAT (dat, 4);         \ +   } else if (next == 5) {              \ +      next = 6;                         \ +      CVMX_MT_HSH_DAT (dat, 5);         \ +   } else if (next == 6) {              \ +      next = 7;                         \ +      CVMX_MT_HSH_DAT (dat, 6);         \ +   } else {                             \ +     CVMX_MT_HSH_STARTMD5 (dat);        \ +     next = 0;                          \ +   }                                    \ +} + +#define CVM_LOAD2_MD5_UNIT(dat1, dat2, next)  { \ +   if (next == 0) {                      \ +      CVMX_MT_HSH_DAT (dat1, 0);         \ +      CVMX_MT_HSH_DAT (dat2, 1);         \ +      next = 2;                          \ +   } else if (next == 1) {               \ +      CVMX_MT_HSH_DAT (dat1, 1);         \ +      CVMX_MT_HSH_DAT (dat2, 2);         \ +      next = 3;                          \ +   } else if (next == 2) {               \ +      CVMX_MT_HSH_DAT (dat1, 2);         \ +      CVMX_MT_HSH_DAT (dat2, 3);         \ +      next = 4;                          \ +   } else if (next == 3) {               \ +      CVMX_MT_HSH_DAT (dat1, 3);         \ +      CVMX_MT_HSH_DAT (dat2, 4);         \ +      next = 5;                          \ +   } else if (next == 4) {               \ +      CVMX_MT_HSH_DAT (dat1, 4);         \ +      CVMX_MT_HSH_DAT (dat2, 5);         \ +      next = 6;                          \ +   } else if (next == 5) {               \ +      CVMX_MT_HSH_DAT (dat1, 5);         \ +      CVMX_MT_HSH_DAT (dat2, 6);         \ +      next = 7;                          \ +   } else if (next == 6) {               \ +      CVMX_MT_HSH_DAT (dat1, 6);         \ +      CVMX_MT_HSH_STARTMD5 (dat2);       \ +      next = 0;                          \ +   } else {                              \ +     CVMX_MT_HSH_STARTMD5 (dat1);        \ +     
CVMX_MT_HSH_DAT (dat2, 0);          \ +     next = 1;                           \ +   }                                     \ +} + +/****************************************************************************/ + +static inline uint64_t +swap64(uint64_t a) +{ +    return ((a >> 56) | +       (((a >> 48) & 0xfful) << 8) | +       (((a >> 40) & 0xfful) << 16) | +       (((a >> 32) & 0xfful) << 24) | +       (((a >> 24) & 0xfful) << 32) | +       (((a >> 16) & 0xfful) << 40) | +       (((a >> 8) & 0xfful) << 48) | (((a >> 0) & 0xfful) << 56)); +} + +/****************************************************************************/ + +void +octo_calc_hash(__u8 auth, unsigned char *key, uint64_t *inner, uint64_t *outer) +{ +    uint8_t hash_key[64]; +    uint64_t *key1; +    register uint64_t xor1 = 0x3636363636363636ULL; +    register uint64_t xor2 = 0x5c5c5c5c5c5c5c5cULL; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    memset(hash_key, 0, sizeof(hash_key)); +    memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16)); +    key1 = (uint64_t *) hash_key; +    flags = octeon_crypto_enable(&state); +    if (auth) { +       CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0); +       CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1); +       CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2); +    } else { +       CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0); +       CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1); +    } + +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 0); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 1); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 2); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 3); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 4); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 5); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor1), 6); +    key1++; +    if (auth) +	CVMX_MT_HSH_STARTSHA((*key1 ^ xor1)); +    else +	CVMX_MT_HSH_STARTMD5((*key1 ^ xor1)); + +    CVMX_MF_HSH_IV(inner[0], 0); +    CVMX_MF_HSH_IV(inner[1], 1); +    if (auth) { +	inner[2] = 0; +	CVMX_MF_HSH_IV(((uint64_t *) inner)[2], 2); +    } + +    memset(hash_key, 0, sizeof(hash_key)); +    memcpy(hash_key, (uint8_t *) key, (auth ? 
20 : 16)); +    key1 = (uint64_t *) hash_key; +    if (auth) { +      CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0); +      CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1); +      CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2); +    } else { +      CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0); +      CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1); +    } + +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 0); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 1); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 2); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 3); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 4); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 5); +    key1++; +    CVMX_MT_HSH_DAT((*key1 ^ xor2), 6); +    key1++; +    if (auth) +       CVMX_MT_HSH_STARTSHA((*key1 ^ xor2)); +    else  +       CVMX_MT_HSH_STARTMD5((*key1 ^ xor2)); + +    CVMX_MF_HSH_IV(outer[0], 0); +    CVMX_MF_HSH_IV(outer[1], 1); +    if (auth) { +      outer[2] = 0; +      CVMX_MF_HSH_IV(outer[2], 2); +    } +    octeon_crypto_disable(&state, flags); +    return; +} + +/****************************************************************************/ +/* DES functions */ + +int +octo_des_cbc_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    uint64_t *data; +    int data_i, data_l; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    while (crypt_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_off -= 8; +    } + +    while (crypt_len > 0) { +	CVMX_MT_3DES_ENC_CBC(*data); +	CVMX_MF_3DES_RESULT(*data); +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_len -= 8; +    } + +    octeon_crypto_disable(&state, flags); +    return 0; +} + + +int +octo_des_cbc_decrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    uint64_t *data; +    int data_i, data_l; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d 
ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    while (crypt_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_off -= 8; +    } + +    while (crypt_len > 0) { +	CVMX_MT_3DES_DEC_CBC(*data); +	CVMX_MF_3DES_RESULT(*data); +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_len -= 8; +    } + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* AES functions */ + +int +octo_aes_cbc_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    uint64_t *data, *pdata; +    int data_i, data_l; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    while (crypt_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_off -= 8; +    } + +    while (crypt_len > 0) { +	pdata = data; +	CVMX_MT_AES_ENC_CBC0(*data); +	SG_CONSUME(sg, data, data_i, data_l); +	CVMX_MT_AES_ENC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_len -= 16; +    } + +    octeon_crypto_disable(&state, flags); +    return 0; +} + + +int +octo_aes_cbc_decrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int 
auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    uint64_t *data, *pdata; +    int data_i, data_l; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    while (crypt_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_off -= 8; +    } + +    while (crypt_len > 0) { +	pdata = data; +	CVMX_MT_AES_DEC_CBC0(*data); +	SG_CONSUME(sg, data, data_i, data_l); +	CVMX_MT_AES_DEC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	SG_CONSUME(sg, data, data_i, data_l); +	crypt_len -= 16; +    } + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* MD5 */ + +int +octo_null_md5_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    uint64_t *data; +    uint64_t tmp1, tmp2; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || +	    (auth_off & 0x7) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    flags = octeon_crypto_enable(&state); + +    /* Load MD5 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); + +    while (auth_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	auth_off -= 8; +    } + +    while (auth_len > 0) { +	CVM_LOAD_MD5_UNIT(*data, next); +	auth_len -= 8; +	SG_CONSUME(sg, data, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    
inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVMX_ES64(tmp1, ((alen + 64) << 3)); +    CVM_LOAD_MD5_UNIT(tmp1, next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_ES64(tmp1, ((64 + 16) << 3)); +    CVMX_MT_HSH_STARTMD5(tmp1); + +    /* save the HMAC */ +    SG_INIT(sg, data, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	icv_off -= 8; +    } +    CVMX_MF_HSH_IV(*data, 0); +    SG_CONSUME(sg, data, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *(uint32_t *)data = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* SHA1 */ + +int +octo_null_sha1_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    uint64_t *data; +    uint64_t tmp1, tmp2, tmp3; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || +	    (auth_off & 0x7) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data, data_i, data_l); + +    flags = octeon_crypto_enable(&state); + +    /* Load SHA1 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); +    CVMX_MT_HSH_IV(od->octo_hminner[2], 2); + +    while (auth_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	auth_off -= 8; +    } + +    while (auth_len > 0) { +	CVM_LOAD_SHA_UNIT(*data, next); +	auth_len -= 8; +	SG_CONSUME(sg, data, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); +    tmp3 = 0; +    CVMX_MF_HSH_IV(tmp3, 2); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    
CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); +    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    tmp3 |= 0x0000000080000000; +    CVMX_MT_HSH_DAT(tmp3, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3)); + +    /* save the HMAC */ +    SG_INIT(sg, data, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data, data_i, data_l); +	icv_off -= 8; +    } +    CVMX_MF_HSH_IV(*data, 0); +    SG_CONSUME(sg, data, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *(uint32_t *)data = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* DES MD5 */ + +int +octo_des_cbc_md5_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata; +    uint64_t *data = &mydata.data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    /* Load MD5 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    while (crypt_len > 0 || auth_len > 0) { +    	uint32_t *first = data32; +	mydata.data32[0] = *first; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata.data32[1] = *data32; +    	if (crypt_off <= 0) { +	    if (crypt_len > 0) { +		CVMX_MT_3DES_ENC_CBC(*data); +		CVMX_MF_3DES_RESULT(*data); +		crypt_len -= 8; +	    } +	} else +	    crypt_off -= 8; +    	if (auth_off <= 0) { +	    if (auth_len > 0) { +		CVM_LOAD_MD5_UNIT(*data, next); +		auth_len -= 8; +	    } +	} else +	    auth_off -= 8; +	*first = mydata.data32[0]; +	*data32 = mydata.data32[1]; +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); 
+#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVMX_ES64(tmp1, ((alen + 64) << 3)); +    CVM_LOAD_MD5_UNIT(tmp1, next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_ES64(tmp1, ((64 + 16) << 3)); +    CVMX_MT_HSH_STARTMD5(tmp1); + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +int +octo_des_cbc_md5_decrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata; +    uint64_t *data = &mydata.data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    /* Load MD5 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	
crypt_off -= 4; +	auth_off -= 4; +    } + +    while (crypt_len > 0 || auth_len > 0) { +    	uint32_t *first = data32; +	mydata.data32[0] = *first; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata.data32[1] = *data32; +    	if (auth_off <= 0) { +	    if (auth_len > 0) { +		CVM_LOAD_MD5_UNIT(*data, next); +		auth_len -= 8; +	    } +	} else +	    auth_off -= 8; +    	if (crypt_off <= 0) { +	    if (crypt_len > 0) { +		CVMX_MT_3DES_DEC_CBC(*data); +		CVMX_MF_3DES_RESULT(*data); +		crypt_len -= 8; +	    } +	} else +	    crypt_off -= 8; +	*first = mydata.data32[0]; +	*data32 = mydata.data32[1]; +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVMX_ES64(tmp1, ((alen + 64) << 3)); +    CVM_LOAD_MD5_UNIT(tmp1, next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_ES64(tmp1, ((64 + 16) << 3)); +    CVMX_MT_HSH_STARTMD5(tmp1); + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* DES SHA */ + +int +octo_des_cbc_sha1_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata; +    uint64_t *data = &mydata.data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2, tmp3; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = 
octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    /* Load SHA1 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); +    CVMX_MT_HSH_IV(od->octo_hminner[2], 2); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    while (crypt_len > 0 || auth_len > 0) { +    	uint32_t *first = data32; +	mydata.data32[0] = *first; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata.data32[1] = *data32; +    	if (crypt_off <= 0) { +	    if (crypt_len > 0) { +		CVMX_MT_3DES_ENC_CBC(*data); +		CVMX_MF_3DES_RESULT(*data); +		crypt_len -= 8; +	    } +	} else +	    crypt_off -= 8; +    	if (auth_off <= 0) { +	    if (auth_len > 0) { +		CVM_LOAD_SHA_UNIT(*data, next); +		auth_len -= 8; +	    } +	} else +	    auth_off -= 8; +	*first = mydata.data32[0]; +	*data32 = mydata.data32[1]; +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_SHA_UNIT(tmp, next); +    } else { +	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next); +    } +	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); +    tmp3 = 0; +    CVMX_MF_HSH_IV(tmp3, 2); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); +    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    tmp3 |= 0x0000000080000000; +    CVMX_MT_HSH_DAT(tmp3, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3)); + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +int +octo_des_cbc_sha1_decrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata; +    uint64_t *data = 
&mydata.data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2, tmp3; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load 3DES Key */ +    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    if (od->octo_encklen == 24) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +    } else if (od->octo_encklen == 8) { +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1); +	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } + +    CVMX_MT_3DES_IV(* (uint64_t *) ivp); + +    /* Load SHA1 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); +    CVMX_MT_HSH_IV(od->octo_hminner[2], 2); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    while (crypt_len > 0 || auth_len > 0) { +    	uint32_t *first = data32; +	mydata.data32[0] = *first; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata.data32[1] = *data32; +    	if (auth_off <= 0) { +	    if (auth_len > 0) { +		CVM_LOAD_SHA_UNIT(*data, next); +		auth_len -= 8; +	    } +	} else +	    auth_off -= 8; +    	if (crypt_off <= 0) { +	    if (crypt_len > 0) { +		CVMX_MT_3DES_DEC_CBC(*data); +		CVMX_MF_3DES_RESULT(*data); +		crypt_len -= 8; +	    } +	} else +	    crypt_off -= 8; +	*first = mydata.data32[0]; +	*data32 = mydata.data32[1]; +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_SHA_UNIT(tmp, next); +    } else { +	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); +    tmp3 = 0; +    CVMX_MF_HSH_IV(tmp3, 2); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); +    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    tmp3 |= 0x0000000080000000; +    CVMX_MT_HSH_DAT(tmp3, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    
CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3)); +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* AES MD5 */ + +int +octo_aes_cbc_md5_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata[2]; +    uint64_t *pdata = &mydata[0].data64[0]; +    uint64_t *data =  &mydata[1].data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    /* Load MD5 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    /* align auth and crypt */ +    while (crypt_off > 0 && auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_MD5_UNIT(*pdata, next); +	crypt_off -= 8; +	auth_len -= 8; +    } + +    while (crypt_len > 0) { +    	uint32_t *pdata32[3]; + +	pdata32[0] = data32; +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); + +	pdata32[1] = data32; +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); + +	pdata32[2] = data32; +	
mydata[1].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); + +	mydata[1].data32[1] = *data32; + +	CVMX_MT_AES_ENC_CBC0(*pdata); +	CVMX_MT_AES_ENC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	crypt_len -= 16; + +	if (auth_len > 0) { +	    CVM_LOAD_MD5_UNIT(*pdata, next); +	    auth_len -= 8; +	} +	if (auth_len > 0) { +	    CVM_LOAD_MD5_UNIT(*data, next); +	    auth_len -= 8; +	} + +	*pdata32[0] = mydata[0].data32[0]; +	*pdata32[1] = mydata[0].data32[1]; +	*pdata32[2] = mydata[1].data32[0]; +	*data32     = mydata[1].data32[1]; + +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish any left over hashing */ +    while (auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_MD5_UNIT(*pdata, next); +	auth_len -= 8; +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVMX_ES64(tmp1, ((alen + 64) << 3)); +    CVM_LOAD_MD5_UNIT(tmp1, next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_ES64(tmp1, ((64 + 16) << 3)); +    CVMX_MT_HSH_STARTMD5(tmp1); + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +int +octo_aes_cbc_md5_decrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata[2]; +    uint64_t *pdata = &mydata[0].data64[0]; +    uint64_t *data =  &mydata[1].data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s()\n", __FUNCTION__); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		
auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    /* Load MD5 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    /* align auth and crypt */ +    while (crypt_off > 0 && auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_MD5_UNIT(*pdata, next); +	crypt_off -= 8; +	auth_len -= 8; +    } + +    while (crypt_len > 0) { +    	uint32_t *pdata32[3]; + +	pdata32[0] = data32; +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[1] = data32; +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[2] = data32; +	mydata[1].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[1].data32[1] = *data32; + +	if (auth_len > 0) { +	    CVM_LOAD_MD5_UNIT(*pdata, next); +	    auth_len -= 8; +	} + +	if (auth_len > 0) { +	    CVM_LOAD_MD5_UNIT(*data, next); +	    auth_len -= 8; +	} + +	CVMX_MT_AES_DEC_CBC0(*pdata); +	CVMX_MT_AES_DEC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	crypt_len -= 16; + +	*pdata32[0] = mydata[0].data32[0]; +	*pdata32[1] = mydata[0].data32[1]; +	*pdata32[2] = mydata[1].data32[0]; +	*data32     = mydata[1].data32[1]; + +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish left over hash if any */ +    while (auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_MD5_UNIT(*pdata, next); +	auth_len -= 8; +    } + + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVMX_ES64(tmp1, ((alen + 64) << 3)); +    CVM_LOAD_MD5_UNIT(tmp1, next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); + +    /* Initialize hash 
unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_ES64(tmp1, ((64 + 16) << 3)); +    CVMX_MT_HSH_STARTMD5(tmp1); + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ +/* AES SHA1 */ + +int +octo_aes_cbc_sha1_encrypt( +    struct octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata[2]; +    uint64_t *pdata = &mydata[0].data64[0]; +    uint64_t *data =  &mydata[1].data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2, tmp3; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n", +			__FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    /* Load SHA IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); +    CVMX_MT_HSH_IV(od->octo_hminner[2], 2); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    /* align auth and crypt */ +    while (crypt_off > 0 && auth_len > 0) { +	mydata[0].data32[0] = *data32; +	
SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_SHA_UNIT(*pdata, next); +	crypt_off -= 8; +	auth_len -= 8; +    } + +    while (crypt_len > 0) { +    	uint32_t *pdata32[3]; + +	pdata32[0] = data32; +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[1] = data32; +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[2] = data32; +	mydata[1].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[1].data32[1] = *data32; + +	CVMX_MT_AES_ENC_CBC0(*pdata); +	CVMX_MT_AES_ENC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	crypt_len -= 16; + +	if (auth_len > 0) { +	    CVM_LOAD_SHA_UNIT(*pdata, next); +	    auth_len -= 8; +	} +	if (auth_len > 0) { +	    CVM_LOAD_SHA_UNIT(*data, next); +	    auth_len -= 8; +	} + +	*pdata32[0] = mydata[0].data32[0]; +	*pdata32[1] = mydata[0].data32[1]; +	*pdata32[2] = mydata[1].data32[0]; +	*data32     = mydata[1].data32[1]; + +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish and hashing */ +    while (auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_SHA_UNIT(*pdata, next); +	auth_len -= 8; +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_SHA_UNIT(tmp, next); +    } else { +	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next); +    } +    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); +    tmp3 = 0; +    CVMX_MF_HSH_IV(tmp3, 2); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); +    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    tmp3 |= 0x0000000080000000; +    CVMX_MT_HSH_DAT(tmp3, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3)); + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +int +octo_aes_cbc_sha1_decrypt( +    struct 
octo_sess *od, +    struct scatterlist *sg, int sg_len, +    int auth_off, int auth_len, +    int crypt_off, int crypt_len, +    int icv_off, uint8_t *ivp) +{ +    register int next = 0; +    union { +	uint32_t data32[2]; +	uint64_t data64[1]; +    } mydata[2]; +    uint64_t *pdata = &mydata[0].data64[0]; +    uint64_t *data =  &mydata[1].data64[0]; +    uint32_t *data32; +    uint64_t tmp1, tmp2, tmp3; +    int data_i, data_l, alen = auth_len; +    struct octeon_cop2_state state; +    unsigned long flags; + +    dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n", +			__FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off); + +    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL || +	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) || +	    (crypt_len  & 0x7) || +	    (auth_len  & 0x7) || +	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) { +	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d " +		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d " +		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len, +		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	return -EINVAL; +    } + +    SG_INIT(sg, data32, data_i, data_l); + +    CVMX_PREFETCH0(ivp); +    CVMX_PREFETCH0(od->octo_enckey); + +    flags = octeon_crypto_enable(&state); + +    /* load AES Key */ +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0); +    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1); + +    if (od->octo_encklen == 16) { +	CVMX_MT_AES_KEY(0x0, 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 24) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(0x0, 3); +    } else if (od->octo_encklen == 32) { +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2); +	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3); +    } else { +	octeon_crypto_disable(&state, flags); +	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen); +	return -EINVAL; +    } +    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1); + +    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0); +    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1); + +    /* Load SHA1 IV */ +    CVMX_MT_HSH_IV(od->octo_hminner[0], 0); +    CVMX_MT_HSH_IV(od->octo_hminner[1], 1); +    CVMX_MT_HSH_IV(od->octo_hminner[2], 2); + +    while (crypt_off > 0 && auth_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	crypt_off -= 4; +	auth_off -= 4; +    } + +    /* align auth and crypt */ +    while (crypt_off > 0 && auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_SHA_UNIT(*pdata, next); +	crypt_off -= 8; +	auth_len -= 8; +    } + +    while (crypt_len > 0) { +    	uint32_t *pdata32[3]; + +	pdata32[0] = data32; +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[1] = data32; +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	pdata32[2] = data32; +	mydata[1].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[1].data32[1] = *data32; + +	if (auth_len > 0) { +	    CVM_LOAD_SHA_UNIT(*pdata, next); +	    auth_len -= 8; +	} +	if (auth_len > 0) { +	    CVM_LOAD_SHA_UNIT(*data, next); +	    auth_len -= 8; +	} + +	CVMX_MT_AES_DEC_CBC0(*pdata); +	CVMX_MT_AES_DEC_CBC1(*data); +	CVMX_MF_AES_RESULT(*pdata, 0); +	CVMX_MF_AES_RESULT(*data, 1); +	crypt_len -= 16; + +	*pdata32[0] = mydata[0].data32[0]; +	*pdata32[1] = mydata[0].data32[1]; +	*pdata32[2] = mydata[1].data32[0]; +	
*data32     = mydata[1].data32[1]; + +	SG_CONSUME(sg, data32, data_i, data_l); +    } + +    /* finish and leftover hashing */ +    while (auth_len > 0) { +	mydata[0].data32[0] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	mydata[0].data32[1] = *data32; +	SG_CONSUME(sg, data32, data_i, data_l); +	CVM_LOAD_SHA_UNIT(*pdata, next); +	auth_len -= 8; +    } + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_SHA_UNIT(tmp, next); +    } else { +	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next); +#endif + +    /* Finish Inner hash */ +    while (next != 7) { +	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next); +    } +	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next); + +    /* Get the inner hash of HMAC */ +    CVMX_MF_HSH_IV(tmp1, 0); +    CVMX_MF_HSH_IV(tmp2, 1); +    tmp3 = 0; +    CVMX_MF_HSH_IV(tmp3, 2); + +    /* Initialize hash unit */ +    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0); +    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1); +    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2); + +    CVMX_MT_HSH_DAT(tmp1, 0); +    CVMX_MT_HSH_DAT(tmp2, 1); +    tmp3 |= 0x0000000080000000; +    CVMX_MT_HSH_DAT(tmp3, 2); +    CVMX_MT_HSH_DATZ(3); +    CVMX_MT_HSH_DATZ(4); +    CVMX_MT_HSH_DATZ(5); +    CVMX_MT_HSH_DATZ(6); +    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3)); + +    /* finish the hash */ +    CVMX_PREFETCH0(od->octo_hmouter); +#if 0 +    if (unlikely(inplen)) { +	uint64_t tmp = 0; +	uint8_t *p = (uint8_t *) & tmp; +	p[inplen] = 0x80; +	do { +	    inplen--; +	    p[inplen] = ((uint8_t *) data)[inplen]; +	} while (inplen); +	CVM_LOAD_MD5_UNIT(tmp, next); +    } else { +	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +    } +#else +    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next); +#endif + +    /* save the HMAC */ +    SG_INIT(sg, data32, data_i, data_l); +    while (icv_off > 0) { +	SG_CONSUME(sg, data32, data_i, data_l); +	icv_off -= 4; +    } +    CVMX_MF_HSH_IV(tmp1, 0); +    *data32 = (uint32_t) (tmp1 >> 32); +    SG_CONSUME(sg, data32, data_i, data_l); +    *data32 = (uint32_t) tmp1; +    SG_CONSUME(sg, data32, data_i, data_l); +    CVMX_MF_HSH_IV(tmp1, 1); +    *data32 = (uint32_t) (tmp1 >> 32); + +    octeon_crypto_disable(&state, flags); +    return 0; +} + +/****************************************************************************/ diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c b/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c new file mode 100644 index 000000000..0168ad321 --- /dev/null +++ b/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c @@ -0,0 +1,576 @@ +/* + * Octeon Crypto for OCF + * + * Written by David McCullough <david_mccullough@mcafee.com> + * Copyright (C) 2009-2010 David McCullough + * + * LICENSE TERMS + * + * The free distribution and use of this software in both source and binary + * form is allowed (with or without changes) provided that: + * + *   1. distributions of this source code include the above copyright + *      notice, this list of conditions and the following disclaimer; + * + *   2. distributions in binary form include the above copyright + *      notice, this list of conditions and the following disclaimer + *      in the documentation and/or other associated materials; + * + *   3. 
the copyright holder's name is not used to endorse products + *      built using this software without specific written permission. + * + * DISCLAIMER + * + * This software is provided 'as is' with no explicit or implied warranties + * in respect of its properties, including, but not limited to, correctness + * and/or fitness for purpose. + * --------------------------------------------------------------------------- + */ + +#include <linux/version.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) +#include <linux/config.h> +#endif +#include <linux/module.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/crypto.h> +#include <linux/mm.h> +#include <linux/skbuff.h> +#include <linux/random.h> +#include <linux/scatterlist.h> + +#include <cryptodev.h> +#include <uio.h> + +struct { +	softc_device_decl	sc_dev; +} octo_softc; + +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) + +struct octo_sess { +	int					 octo_encalg; +	#define MAX_CIPHER_KEYLEN	64 +	char				 octo_enckey[MAX_CIPHER_KEYLEN]; +	int					 octo_encklen; + +	int					 octo_macalg; +	#define MAX_HASH_KEYLEN	64 +	char				 octo_mackey[MAX_HASH_KEYLEN]; +	int					 octo_macklen; +	int					 octo_mackey_set; + +	int					 octo_mlen; +	int					 octo_ivsize; + +	int					(*octo_encrypt)(struct octo_sess *od, +	                      struct scatterlist *sg, int sg_len, +						  int auth_off, int auth_len, +						  int crypt_off, int crypt_len, +						  int icv_off, uint8_t *ivp); +	int					(*octo_decrypt)(struct octo_sess *od, +	                      struct scatterlist *sg, int sg_len, +						  int auth_off, int auth_len, +						  int crypt_off, int crypt_len, +						  int icv_off, uint8_t *ivp); + +	uint64_t			 octo_hminner[3]; +	uint64_t			 octo_hmouter[3]; +}; + +int32_t octo_id = -1; +module_param(octo_id, int, 0444); +MODULE_PARM_DESC(octo_id, "Read-Only OCF ID for cryptocteon driver"); + +static struct octo_sess **octo_sessions = NULL; +static u_int32_t octo_sesnum = 0; + +static	int octo_process(device_t, struct cryptop *, int); +static	int octo_newsession(device_t, u_int32_t *, struct cryptoini *); +static	int octo_freesession(device_t, u_int64_t); + +static device_method_t octo_methods = { +	/* crypto device methods */ +	DEVMETHOD(cryptodev_newsession,	octo_newsession), +	DEVMETHOD(cryptodev_freesession,octo_freesession), +	DEVMETHOD(cryptodev_process,	octo_process), +}; + +#define debug octo_debug +int octo_debug = 0; +module_param(octo_debug, int, 0644); +MODULE_PARM_DESC(octo_debug, "Enable debug"); + + +#include "cavium_crypto.c" + + +/* + * Generate a new octo session.  We artifically limit it to a single + * hash/cipher or hash-cipher combo just to make it easier, most callers + * do not expect more than this anyway. + */ +static int +octo_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) +{ +	struct cryptoini *c, *encini = NULL, *macini = NULL; +	struct octo_sess **ocd; +	int i; + +	dprintk("%s()\n", __FUNCTION__); +	if (sid == NULL || cri == NULL) { +		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__); +		return EINVAL; +	} + +	/* +	 * To keep it simple, we only handle hash, cipher or hash/cipher in a +	 * session,  you cannot currently do multiple ciphers/hashes in one +	 * session even though it would be possibel to code this driver to +	 * handle it. 
+	 */ +	for (i = 0, c = cri; c && i < 2; i++) { +		if (c->cri_alg == CRYPTO_MD5_HMAC || +				c->cri_alg == CRYPTO_SHA1_HMAC || +				c->cri_alg == CRYPTO_NULL_HMAC) { +			if (macini) { +				break; +			} +			macini = c; +		} +		if (c->cri_alg == CRYPTO_DES_CBC || +				c->cri_alg == CRYPTO_3DES_CBC || +				c->cri_alg == CRYPTO_AES_CBC || +				c->cri_alg == CRYPTO_NULL_CBC) { +			if (encini) { +				break; +			} +			encini = c; +		} +		c = c->cri_next; +	} +	if (!macini && !encini) { +		dprintk("%s,%d - EINVAL bad cipher/hash or combination\n", +				__FILE__, __LINE__); +		return EINVAL; +	} +	if (c) { +		dprintk("%s,%d - EINVAL cannot handle chained cipher/hash combos\n", +				__FILE__, __LINE__); +		return EINVAL; +	} + +	/* +	 * So we have something we can do, lets setup the session +	 */ + +	if (octo_sessions) { +		for (i = 1; i < octo_sesnum; i++) +			if (octo_sessions[i] == NULL) +				break; +	} else +		i = 1;		/* NB: to silence compiler warning */ + +	if (octo_sessions == NULL || i == octo_sesnum) { +		if (octo_sessions == NULL) { +			i = 1; /* We leave octo_sessions[0] empty */ +			octo_sesnum = CRYPTO_SW_SESSIONS; +		} else +			octo_sesnum *= 2; + +		ocd = kmalloc(octo_sesnum * sizeof(struct octo_sess *), SLAB_ATOMIC); +		if (ocd == NULL) { +			/* Reset session number */ +			if (octo_sesnum == CRYPTO_SW_SESSIONS) +				octo_sesnum = 0; +			else +				octo_sesnum /= 2; +			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); +			return ENOBUFS; +		} +		memset(ocd, 0, octo_sesnum * sizeof(struct octo_sess *)); + +		/* Copy existing sessions */ +		if (octo_sessions) { +			memcpy(ocd, octo_sessions, +			    (octo_sesnum / 2) * sizeof(struct octo_sess *)); +			kfree(octo_sessions); +		} + +		octo_sessions = ocd; +	} + +	ocd = &octo_sessions[i]; +	*sid = i; + + +	*ocd = (struct octo_sess *) kmalloc(sizeof(struct octo_sess), SLAB_ATOMIC); +	if (*ocd == NULL) { +		octo_freesession(NULL, i); +		dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); +		return ENOBUFS; +	} +	memset(*ocd, 0, sizeof(struct octo_sess)); + +	if (encini && encini->cri_key) { +		(*ocd)->octo_encklen = (encini->cri_klen + 7) / 8; +		memcpy((*ocd)->octo_enckey, encini->cri_key, (*ocd)->octo_encklen); +	} + +	if (macini && macini->cri_key) { +		(*ocd)->octo_macklen = (macini->cri_klen + 7) / 8; +		memcpy((*ocd)->octo_mackey, macini->cri_key, (*ocd)->octo_macklen); +	} + +	(*ocd)->octo_mlen = 0; +	if (encini && encini->cri_mlen) +		(*ocd)->octo_mlen = encini->cri_mlen; +	else if (macini && macini->cri_mlen) +		(*ocd)->octo_mlen = macini->cri_mlen; +	else +		(*ocd)->octo_mlen = 12; + +	/* +	 * point c at the enc if it exists, otherwise the mac +	 */ +	c = encini ? encini : macini; + +	switch (c->cri_alg) { +	case CRYPTO_DES_CBC: +	case CRYPTO_3DES_CBC: +		(*ocd)->octo_ivsize  = 8; +		switch (macini ? 
macini->cri_alg : -1) { +		case CRYPTO_MD5_HMAC: +			(*ocd)->octo_encrypt = octo_des_cbc_md5_encrypt; +			(*ocd)->octo_decrypt = octo_des_cbc_md5_decrypt; +			octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, +					(*ocd)->octo_hmouter); +			break; +		case CRYPTO_SHA1_HMAC: +			(*ocd)->octo_encrypt = octo_des_cbc_sha1_encrypt; +			(*ocd)->octo_decrypt = octo_des_cbc_sha1_decrypt; +			octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, +					(*ocd)->octo_hmouter); +			break; +		case -1: +			(*ocd)->octo_encrypt = octo_des_cbc_encrypt; +			(*ocd)->octo_decrypt = octo_des_cbc_decrypt; +			break; +		default: +			octo_freesession(NULL, i); +			dprintk("%s,%d: EINVALn", __FILE__, __LINE__); +			return EINVAL; +		} +		break; +	case CRYPTO_AES_CBC: +		(*ocd)->octo_ivsize  = 16; +		switch (macini ? macini->cri_alg : -1) { +		case CRYPTO_MD5_HMAC: +			(*ocd)->octo_encrypt = octo_aes_cbc_md5_encrypt; +			(*ocd)->octo_decrypt = octo_aes_cbc_md5_decrypt; +			octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, +					(*ocd)->octo_hmouter); +			break; +		case CRYPTO_SHA1_HMAC: +			(*ocd)->octo_encrypt = octo_aes_cbc_sha1_encrypt; +			(*ocd)->octo_decrypt = octo_aes_cbc_sha1_decrypt; +			octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, +					(*ocd)->octo_hmouter); +			break; +		case -1: +			(*ocd)->octo_encrypt = octo_aes_cbc_encrypt; +			(*ocd)->octo_decrypt = octo_aes_cbc_decrypt; +			break; +		default: +			octo_freesession(NULL, i); +			dprintk("%s,%d: EINVALn", __FILE__, __LINE__); +			return EINVAL; +		} +		break; +	case CRYPTO_MD5_HMAC: +		(*ocd)->octo_encrypt = octo_null_md5_encrypt; +		(*ocd)->octo_decrypt = octo_null_md5_encrypt; /* encrypt == decrypt */ +		octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, +				(*ocd)->octo_hmouter); +		break; +	case CRYPTO_SHA1_HMAC: +		(*ocd)->octo_encrypt = octo_null_sha1_encrypt; +		(*ocd)->octo_decrypt = octo_null_sha1_encrypt; /* encrypt == decrypt */ +		octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, +				(*ocd)->octo_hmouter); +		break; +	default: +		octo_freesession(NULL, i); +		dprintk("%s,%d: EINVALn", __FILE__, __LINE__); +		return EINVAL; +	} + +	(*ocd)->octo_encalg = encini ? encini->cri_alg : -1; +	(*ocd)->octo_macalg = macini ? macini->cri_alg : -1; + +	return 0; +} + +/* + * Free a session. + */ +static int +octo_freesession(device_t dev, u_int64_t tid) +{ +	u_int32_t sid = CRYPTO_SESID2LID(tid); + +	dprintk("%s()\n", __FUNCTION__); +	if (sid > octo_sesnum || octo_sessions == NULL || +			octo_sessions[sid] == NULL) { +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); +		return(EINVAL); +	} + +	/* Silently accept and return */ +	if (sid == 0) +		return(0); + +	if (octo_sessions[sid]) +		kfree(octo_sessions[sid]); +	octo_sessions[sid] = NULL; +	return 0; +} + +/* + * Process a request. 
+ */ +static int +octo_process(device_t dev, struct cryptop *crp, int hint) +{ +	struct cryptodesc *crd; +	struct octo_sess *od; +	u_int32_t lid; +#define SCATTERLIST_MAX 16 +	struct scatterlist sg[SCATTERLIST_MAX]; +	int sg_num, sg_len; +	struct sk_buff *skb = NULL; +	struct uio *uiop = NULL; +	struct cryptodesc *enccrd = NULL, *maccrd = NULL; +	unsigned char *ivp = NULL; +	unsigned char iv_data[HASH_MAX_LEN]; +	int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0; + +	dprintk("%s()\n", __FUNCTION__); +	/* Sanity check */ +	if (crp == NULL) { +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); +		return EINVAL; +	} + +	crp->crp_etype = 0; + +	if (crp->crp_desc == NULL || crp->crp_buf == NULL) { +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); +		crp->crp_etype = EINVAL; +		goto done; +	} + +	lid = crp->crp_sid & 0xffffffff; +	if (lid >= octo_sesnum || lid == 0 || octo_sessions == NULL || +			octo_sessions[lid] == NULL) { +		crp->crp_etype = ENOENT; +		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__); +		goto done; +	} +	od = octo_sessions[lid]; + +	/* +	 * do some error checking outside of the loop for SKB and IOV processing +	 * this leaves us with valid skb or uiop pointers for later +	 */ +	if (crp->crp_flags & CRYPTO_F_SKBUF) { +		skb = (struct sk_buff *) crp->crp_buf; +		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) { +			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__, +					skb_shinfo(skb)->nr_frags); +			goto done; +		} +	} else if (crp->crp_flags & CRYPTO_F_IOV) { +		uiop = (struct uio *) crp->crp_buf; +		if (uiop->uio_iovcnt > SCATTERLIST_MAX) { +			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__, +					uiop->uio_iovcnt); +			goto done; +		} +	} + +	/* point our enccrd and maccrd appropriately */ +	crd = crp->crp_desc; +	if (crd->crd_alg == od->octo_encalg) enccrd = crd; +	if (crd->crd_alg == od->octo_macalg) maccrd = crd; +	crd = crd->crd_next; +	if (crd) { +		if (crd->crd_alg == od->octo_encalg) enccrd = crd; +		if (crd->crd_alg == od->octo_macalg) maccrd = crd; +		crd = crd->crd_next; +	} +	if (crd) { +		crp->crp_etype = EINVAL; +		dprintk("%s,%d: ENOENT - descriptors do not match session\n", +				__FILE__, __LINE__); +		goto done; +	} + +	if (enccrd) { +		if (enccrd->crd_flags & CRD_F_ENCRYPT) { +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) +				ivp = enccrd->crd_iv; +			else +				read_random((ivp = iv_data), od->octo_ivsize); +			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) +				crypto_copyback(crp->crp_flags, crp->crp_buf, +						enccrd->crd_inject, od->octo_ivsize, ivp); +		} else { +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { +				ivp = enccrd->crd_iv; +			} else { +				ivp = iv_data; +				crypto_copydata(crp->crp_flags, crp->crp_buf, +						enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp); +			} +		} + +		if (maccrd) { +			auth_off = maccrd->crd_skip; +			auth_len = maccrd->crd_len; +			icv_off  = maccrd->crd_inject; +		} + +		crypt_off = enccrd->crd_skip; +		crypt_len = enccrd->crd_len; +	} else { /* if (maccrd) */ +		auth_off = maccrd->crd_skip; +		auth_len = maccrd->crd_len; +		icv_off  = maccrd->crd_inject; +	} + + +	/* +	 * setup the SG list to cover the buffer +	 */ +	memset(sg, 0, sizeof(sg)); +	if (crp->crp_flags & CRYPTO_F_SKBUF) { +		int i, len; + +		sg_num = 0; +		sg_len = 0; + +		len = skb_headlen(skb); +		sg_set_page(&sg[sg_num], virt_to_page(skb->data), len, +				offset_in_page(skb->data)); +		sg_len += len; +		sg_num++; + +		for (i = 0; i < skb_shinfo(skb)->nr_frags && sg_num < 
SCATTERLIST_MAX; +				i++) { +			len = skb_shinfo(skb)->frags[i].size; +			sg_set_page(&sg[sg_num], skb_frag_page(&skb_shinfo(skb)->frags[i]), +					len, skb_shinfo(skb)->frags[i].page_offset); +			sg_len += len; +			sg_num++; +		} +	} else if (crp->crp_flags & CRYPTO_F_IOV) { +		int len; + +		sg_len = 0; +		for (sg_num = 0; sg_len < crp->crp_ilen && +				sg_num < uiop->uio_iovcnt && +				sg_num < SCATTERLIST_MAX; sg_num++) { +			len = uiop->uio_iov[sg_num].iov_len; +			sg_set_page(&sg[sg_num], +					virt_to_page(uiop->uio_iov[sg_num].iov_base), len, +					offset_in_page(uiop->uio_iov[sg_num].iov_base)); +			sg_len += len; +		} +	} else { +		sg_len = crp->crp_ilen; +		sg_set_page(&sg[0], virt_to_page(crp->crp_buf), sg_len, +				offset_in_page(crp->crp_buf)); +		sg_num = 1; +	} +	if (sg_num > 0) +		sg_mark_end(&sg[sg_num-1]); + +	/* +	 * setup a new explicit key +	 */ +	if (enccrd) { +		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { +			od->octo_encklen = (enccrd->crd_klen + 7) / 8; +			memcpy(od->octo_enckey, enccrd->crd_key, od->octo_encklen); +		} +	} +	if (maccrd) { +		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) { +			od->octo_macklen = (maccrd->crd_klen + 7) / 8; +			memcpy(od->octo_mackey, maccrd->crd_key, od->octo_macklen); +			od->octo_mackey_set = 0; +		} +		if (!od->octo_mackey_set) { +			octo_calc_hash(maccrd->crd_alg == CRYPTO_MD5_HMAC ? 0 : 1, +				maccrd->crd_key, od->octo_hminner, od->octo_hmouter); +			od->octo_mackey_set = 1; +		} +	} + + +	if (!enccrd || (enccrd->crd_flags & CRD_F_ENCRYPT)) +		(*od->octo_encrypt)(od, sg, sg_len, +				auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); +	else +		(*od->octo_decrypt)(od, sg, sg_len, +				auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); + +done: +	crypto_done(crp); +	return 0; +} + +static int +cryptocteon_init(void) +{ +	dprintk("%s(%p)\n", __FUNCTION__, cryptocteon_init); + +	softc_device_init(&octo_softc, "cryptocteon", 0, octo_methods); + +	octo_id = crypto_get_driverid(softc_get_device(&octo_softc), +			CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); +	if (octo_id < 0) { +		printk("Cryptocteon device cannot initialize!"); +		return -ENODEV; +	} + +	crypto_register(octo_id, CRYPTO_MD5_HMAC, 0,0); +	crypto_register(octo_id, CRYPTO_SHA1_HMAC, 0,0); +	//crypto_register(octo_id, CRYPTO_MD5, 0,0); +	//crypto_register(octo_id, CRYPTO_SHA1, 0,0); +	crypto_register(octo_id, CRYPTO_DES_CBC, 0,0); +	crypto_register(octo_id, CRYPTO_3DES_CBC, 0,0); +	crypto_register(octo_id, CRYPTO_AES_CBC, 0,0); + +	return(0); +} + +static void +cryptocteon_exit(void) +{ +	dprintk("%s()\n", __FUNCTION__); +	crypto_unregister_all(octo_id); +	octo_id = -1; +} + +module_init(cryptocteon_init); +module_exit(cryptocteon_exit); + +MODULE_LICENSE("BSD"); +MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>"); +MODULE_DESCRIPTION("Cryptocteon (OCF module for Cavium OCTEON crypto)"); | 
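The HMAC handling in the routines above never feeds the MAC key to the hash unit: octo_hminner and octo_hmouter are assumed to hold the precomputed hash states of the key XORed with HMAC's ipad and opad (one 64-byte block each, presumably what octo_calc_hash produces). That is why the inner finish loads a length word of (alen + 64) << 3 and the outer finish uses (64 + 16) << 3 for MD5 or (64 + 20) << 3 for SHA-1: 64 bytes account for the prepended key block, 16 or 20 bytes for the inner digest, and << 3 converts bytes to bits. A minimal stand-alone sketch of that key padding and length arithmetic in plain C (hmac_pad_key is a hypothetical helper, not the driver's octo_calc_hash, and no CVMX hardware is involved):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/*
 * Hypothetical helper, for illustration only: pad the HMAC key to one
 * 64-byte block and XOR it with ipad/opad.  The driver is assumed to hash
 * each padded block once and keep the resulting states in octo_hminner
 * and octo_hmouter.
 */
static void hmac_pad_key(const uint8_t *key, size_t klen,
                         uint8_t ipad[64], uint8_t opad[64])
{
    size_t i;

    /* keys longer than one block would normally be hashed down first */
    memset(ipad, 0, 64);
    memcpy(ipad, key, klen > 64 ? 64 : klen);
    memcpy(opad, ipad, 64);
    for (i = 0; i < 64; i++) {
        ipad[i] ^= 0x36;
        opad[i] ^= 0x5c;
    }
}

int main(void)
{
    uint8_t key[20] = { 0x0b }, ipad[64], opad[64];
    unsigned alen = 40;                /* example auth_len in bytes */

    hmac_pad_key(key, sizeof(key), ipad, opad);
    printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);

    /* the length words the finishing code feeds to the hash unit */
    printf("inner hash covers %u bits\n", (alen + 64) << 3);
    printf("outer MD5 pass covers %u bits\n", (64 + 16) << 3);
    printf("outer SHA-1 pass covers %u bits\n", (64 + 20) << 3);
    return 0;
}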
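The main loops in these routines walk the packet one 32-bit word at a time via SG_INIT/SG_CONSUME, remember where each word came from, pack pairs of words into the two 64-bit lanes the COP2 AES and hash units consume, and then scatter the AES results back through the saved pointers so the buffer is transformed in place while the same 64-bit words are streamed into the hash unit. A stand-alone sketch of that bookkeeping over a flat buffer, with a hypothetical transform64() standing in for the CVMX AES instructions and explicit shifts standing in for the driver's union overlay:

#include <stdint.h>
#include <stdio.h>

/* stand-in for one 64-bit lane of the hardware unit (not real CVMX) */
static uint64_t transform64(uint64_t in)
{
    return ~in;   /* placeholder "cipher" */
}

int main(void)
{
    uint32_t buf[8] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff,
                        0x01234567, 0x89abcdef, 0xfedcba98, 0x76543210 };
    uint32_t *w = buf;
    int left = (int)sizeof(buf);

    /* mirror the driver's loop: save a pointer to each 32-bit word, pack
     * two words per 64-bit lane, transform both lanes, then write the
     * results back so the buffer is rewritten in place */
    while (left >= 16) {
        uint32_t *p[4] = { &w[0], &w[1], &w[2], &w[3] };
        uint64_t lane0 = ((uint64_t)*p[0] << 32) | *p[1];
        uint64_t lane1 = ((uint64_t)*p[2] << 32) | *p[3];

        lane0 = transform64(lane0);
        lane1 = transform64(lane1);

        *p[0] = (uint32_t)(lane0 >> 32);
        *p[1] = (uint32_t)lane0;
        *p[2] = (uint32_t)(lane1 >> 32);
        *p[3] = (uint32_t)lane1;

        w += 4;
        left -= 16;
    }

    printf("buf[0] after transform: 0x%08x\n", (unsigned)buf[0]);
    return 0;
}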
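On the OCF side, octo_newsession accepts at most one cipher plus one HMAC per session, points (*ocd)->octo_encrypt and (*ocd)->octo_decrypt at the matching routine from cavium_crypto.c, and octo_process then flattens the skbuff, iovec, or contiguous buffer into a scatterlist before calling through those pointers. A hedged sketch of the descriptor chain a consumer would build for such a combined session; the struct layout and algorithm identifiers below are illustrative stand-ins, not the real definitions from OCF's cryptodev.h:

#include <stddef.h>
#include <stdio.h>

/* stand-in definitions for illustration only -- field names mirror the
 * ones octo_newsession reads, the numeric ids are placeholders */
#define CRYPTO_AES_CBC   11
#define CRYPTO_SHA1_HMAC 19

struct cryptoini {
    int               cri_alg;
    int               cri_klen;   /* key length in bits */
    int               cri_mlen;   /* requested ICV length, 0 = default */
    void             *cri_key;
    struct cryptoini *cri_next;
};

int main(void)
{
    unsigned char aes_key[16] = { 0 }, hmac_key[20] = { 0 };
    struct cryptoini enc = { 0 }, mac = { 0 };

    /* cipher descriptor chained to the HMAC descriptor: the one
     * cipher + one hash combination octo_newsession accepts */
    enc.cri_alg  = CRYPTO_AES_CBC;
    enc.cri_klen = (int)sizeof(aes_key) * 8;
    enc.cri_key  = aes_key;
    enc.cri_next = &mac;

    mac.cri_alg  = CRYPTO_SHA1_HMAC;
    mac.cri_klen = (int)sizeof(hmac_key) * 8;
    mac.cri_key  = hmac_key;
    mac.cri_mlen = 0;             /* driver falls back to 12 bytes */

    /* a real consumer would now hand &enc to the OCF core, which in
     * turn calls octo_newsession(); that call is omitted here */
    printf("session requests %d-bit AES-CBC with HMAC-SHA1\n", enc.cri_klen);
    return 0;
}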
