author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2013-10-09 22:42:09 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2013-10-10 09:06:58 +0200
commit     bceabc95c1c85d793200446fa85f1ddc6313ea29 (patch)
tree       973c8bd8deca9fd69913f2895cc91e0e6114d46c /freebsd/sys/opencrypto
parent     Add FreeBSD sources as a submodule (diff)
download   rtems-libbsd-bceabc95c1c85d793200446fa85f1ddc6313ea29.tar.bz2
Move files to match FreeBSD layout
Diffstat (limited to 'freebsd/sys/opencrypto')
-rw-r--r--  freebsd/sys/opencrypto/cast.c         246
-rw-r--r--  freebsd/sys/opencrypto/cast.h          23
-rw-r--r--  freebsd/sys/opencrypto/castsb.h       545
-rw-r--r--  freebsd/sys/opencrypto/criov.c        200
-rw-r--r--  freebsd/sys/opencrypto/crypto.c      1580
-rw-r--r--  freebsd/sys/opencrypto/cryptodev.c   1178
-rw-r--r--  freebsd/sys/opencrypto/cryptodev.h    432
-rw-r--r--  freebsd/sys/opencrypto/cryptosoft.c  1156
-rw-r--r--  freebsd/sys/opencrypto/cryptosoft.h    67
-rw-r--r--  freebsd/sys/opencrypto/deflate.c      267
-rw-r--r--  freebsd/sys/opencrypto/deflate.h       60
-rw-r--r--  freebsd/sys/opencrypto/rmd160.c       369
-rw-r--r--  freebsd/sys/opencrypto/rmd160.h        41
-rw-r--r--  freebsd/sys/opencrypto/skipjack.c     262
-rw-r--r--  freebsd/sys/opencrypto/skipjack.h      19
-rw-r--r--  freebsd/sys/opencrypto/xform.c        815
-rw-r--r--  freebsd/sys/opencrypto/xform.h        104
17 files changed, 7364 insertions, 0 deletions
diff --git a/freebsd/sys/opencrypto/cast.c b/freebsd/sys/opencrypto/cast.c
new file mode 100644
index 00000000..fde6352c
--- /dev/null
+++ b/freebsd/sys/opencrypto/cast.c
@@ -0,0 +1,246 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cast.c,v 1.2 2000/06/06 06:49:47 deraadt Exp $ */
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/types.h>
+#include <freebsd/opencrypto/cast.h>
+#include <freebsd/opencrypto/castsb.h>
+
+/* Macros to access 8-bit bytes out of a 32-bit word */
+#define U_INT8_Ta(x) ( (u_int8_t) (x>>24) )
+#define U_INT8_Tb(x) ( (u_int8_t) ((x>>16)&255) )
+#define U_INT8_Tc(x) ( (u_int8_t) ((x>>8)&255) )
+#define U_INT8_Td(x) ( (u_int8_t) ((x)&255) )
+
+/* Circular left shift */
+#define ROL(x, n) ( ((x)<<(n)) | ((x)>>(32-(n))) )
+
+/* CAST-128 uses three different round functions */
+#define F1(l, r, i) \
+ t = ROL(key->xkey[i] + r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] ^ cast_sbox2[U_INT8_Tb(t)]) - \
+ cast_sbox3[U_INT8_Tc(t)]) + cast_sbox4[U_INT8_Td(t)];
+#define F2(l, r, i) \
+ t = ROL(key->xkey[i] ^ r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] - cast_sbox2[U_INT8_Tb(t)]) + \
+ cast_sbox3[U_INT8_Tc(t)]) ^ cast_sbox4[U_INT8_Td(t)];
+#define F3(l, r, i) \
+ t = ROL(key->xkey[i] - r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] + cast_sbox2[U_INT8_Tb(t)]) ^ \
+ cast_sbox3[U_INT8_Tc(t)]) - cast_sbox4[U_INT8_Td(t)];
+
+
+/***** Encryption Function *****/
+
+void cast_encrypt(cast_key* key, u_int8_t* inblock, u_int8_t* outblock)
+{
+u_int32_t t, l, r;
+
+ /* Get inblock into l,r */
+ l = ((u_int32_t)inblock[0] << 24) | ((u_int32_t)inblock[1] << 16) |
+ ((u_int32_t)inblock[2] << 8) | (u_int32_t)inblock[3];
+ r = ((u_int32_t)inblock[4] << 24) | ((u_int32_t)inblock[5] << 16) |
+ ((u_int32_t)inblock[6] << 8) | (u_int32_t)inblock[7];
+ /* Do the work */
+ F1(l, r, 0);
+ F2(r, l, 1);
+ F3(l, r, 2);
+ F1(r, l, 3);
+ F2(l, r, 4);
+ F3(r, l, 5);
+ F1(l, r, 6);
+ F2(r, l, 7);
+ F3(l, r, 8);
+ F1(r, l, 9);
+ F2(l, r, 10);
+ F3(r, l, 11);
+ /* Only do full 16 rounds if key length > 80 bits */
+ if (key->rounds > 12) {
+ F1(l, r, 12);
+ F2(r, l, 13);
+ F3(l, r, 14);
+ F1(r, l, 15);
+ }
+ /* Put l,r into outblock */
+ outblock[0] = U_INT8_Ta(r);
+ outblock[1] = U_INT8_Tb(r);
+ outblock[2] = U_INT8_Tc(r);
+ outblock[3] = U_INT8_Td(r);
+ outblock[4] = U_INT8_Ta(l);
+ outblock[5] = U_INT8_Tb(l);
+ outblock[6] = U_INT8_Tc(l);
+ outblock[7] = U_INT8_Td(l);
+ /* Wipe clean */
+ t = l = r = 0;
+}
+
+
+/***** Decryption Function *****/
+
+void cast_decrypt(cast_key* key, u_int8_t* inblock, u_int8_t* outblock)
+{
+u_int32_t t, l, r;
+
+ /* Get inblock into l,r */
+ r = ((u_int32_t)inblock[0] << 24) | ((u_int32_t)inblock[1] << 16) |
+ ((u_int32_t)inblock[2] << 8) | (u_int32_t)inblock[3];
+ l = ((u_int32_t)inblock[4] << 24) | ((u_int32_t)inblock[5] << 16) |
+ ((u_int32_t)inblock[6] << 8) | (u_int32_t)inblock[7];
+ /* Do the work */
+ /* Only do full 16 rounds if key length > 80 bits */
+ if (key->rounds > 12) {
+ F1(r, l, 15);
+ F3(l, r, 14);
+ F2(r, l, 13);
+ F1(l, r, 12);
+ }
+ F3(r, l, 11);
+ F2(l, r, 10);
+ F1(r, l, 9);
+ F3(l, r, 8);
+ F2(r, l, 7);
+ F1(l, r, 6);
+ F3(r, l, 5);
+ F2(l, r, 4);
+ F1(r, l, 3);
+ F3(l, r, 2);
+ F2(r, l, 1);
+ F1(l, r, 0);
+ /* Put l,r into outblock */
+ outblock[0] = U_INT8_Ta(l);
+ outblock[1] = U_INT8_Tb(l);
+ outblock[2] = U_INT8_Tc(l);
+ outblock[3] = U_INT8_Td(l);
+ outblock[4] = U_INT8_Ta(r);
+ outblock[5] = U_INT8_Tb(r);
+ outblock[6] = U_INT8_Tc(r);
+ outblock[7] = U_INT8_Td(r);
+ /* Wipe clean */
+ t = l = r = 0;
+}
+
+
+/***** Key Schedule *****/
+
+void cast_setkey(cast_key* key, u_int8_t* rawkey, int keybytes)
+{
+u_int32_t t[4] = {0, 0, 0, 0}, z[4] = {0, 0, 0, 0}, x[4];
+int i;
+
+ /* Set number of rounds to 12 or 16, depending on key length */
+ key->rounds = (keybytes <= 10 ? 12 : 16);
+
+ /* Copy key to workspace x */
+ for (i = 0; i < 4; i++) {
+ x[i] = 0;
+ if ((i*4+0) < keybytes) x[i] = (u_int32_t)rawkey[i*4+0] << 24;
+ if ((i*4+1) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+1] << 16;
+ if ((i*4+2) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+2] << 8;
+ if ((i*4+3) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+3];
+ }
+ /* Generate 32 subkeys, four at a time */
+ for (i = 0; i < 32; i+=4) {
+ switch (i & 4) {
+ case 0:
+ t[0] = z[0] = x[0] ^ cast_sbox5[U_INT8_Tb(x[3])] ^
+ cast_sbox6[U_INT8_Td(x[3])] ^ cast_sbox7[U_INT8_Ta(x[3])] ^
+ cast_sbox8[U_INT8_Tc(x[3])] ^ cast_sbox7[U_INT8_Ta(x[2])];
+ t[1] = z[1] = x[2] ^ cast_sbox5[U_INT8_Ta(z[0])] ^
+ cast_sbox6[U_INT8_Tc(z[0])] ^ cast_sbox7[U_INT8_Tb(z[0])] ^
+ cast_sbox8[U_INT8_Td(z[0])] ^ cast_sbox8[U_INT8_Tc(x[2])];
+ t[2] = z[2] = x[3] ^ cast_sbox5[U_INT8_Td(z[1])] ^
+ cast_sbox6[U_INT8_Tc(z[1])] ^ cast_sbox7[U_INT8_Tb(z[1])] ^
+ cast_sbox8[U_INT8_Ta(z[1])] ^ cast_sbox5[U_INT8_Tb(x[2])];
+ t[3] = z[3] = x[1] ^ cast_sbox5[U_INT8_Tc(z[2])] ^
+ cast_sbox6[U_INT8_Tb(z[2])] ^ cast_sbox7[U_INT8_Td(z[2])] ^
+ cast_sbox8[U_INT8_Ta(z[2])] ^ cast_sbox6[U_INT8_Td(x[2])];
+ break;
+ case 4:
+ t[0] = x[0] = z[2] ^ cast_sbox5[U_INT8_Tb(z[1])] ^
+ cast_sbox6[U_INT8_Td(z[1])] ^ cast_sbox7[U_INT8_Ta(z[1])] ^
+ cast_sbox8[U_INT8_Tc(z[1])] ^ cast_sbox7[U_INT8_Ta(z[0])];
+ t[1] = x[1] = z[0] ^ cast_sbox5[U_INT8_Ta(x[0])] ^
+ cast_sbox6[U_INT8_Tc(x[0])] ^ cast_sbox7[U_INT8_Tb(x[0])] ^
+ cast_sbox8[U_INT8_Td(x[0])] ^ cast_sbox8[U_INT8_Tc(z[0])];
+ t[2] = x[2] = z[1] ^ cast_sbox5[U_INT8_Td(x[1])] ^
+ cast_sbox6[U_INT8_Tc(x[1])] ^ cast_sbox7[U_INT8_Tb(x[1])] ^
+ cast_sbox8[U_INT8_Ta(x[1])] ^ cast_sbox5[U_INT8_Tb(z[0])];
+ t[3] = x[3] = z[3] ^ cast_sbox5[U_INT8_Tc(x[2])] ^
+ cast_sbox6[U_INT8_Tb(x[2])] ^ cast_sbox7[U_INT8_Td(x[2])] ^
+ cast_sbox8[U_INT8_Ta(x[2])] ^ cast_sbox6[U_INT8_Td(z[0])];
+ break;
+ }
+ switch (i & 12) {
+ case 0:
+ case 12:
+ key->xkey[i+0] = cast_sbox5[U_INT8_Ta(t[2])] ^ cast_sbox6[U_INT8_Tb(t[2])] ^
+ cast_sbox7[U_INT8_Td(t[1])] ^ cast_sbox8[U_INT8_Tc(t[1])];
+ key->xkey[i+1] = cast_sbox5[U_INT8_Tc(t[2])] ^ cast_sbox6[U_INT8_Td(t[2])] ^
+ cast_sbox7[U_INT8_Tb(t[1])] ^ cast_sbox8[U_INT8_Ta(t[1])];
+ key->xkey[i+2] = cast_sbox5[U_INT8_Ta(t[3])] ^ cast_sbox6[U_INT8_Tb(t[3])] ^
+ cast_sbox7[U_INT8_Td(t[0])] ^ cast_sbox8[U_INT8_Tc(t[0])];
+ key->xkey[i+3] = cast_sbox5[U_INT8_Tc(t[3])] ^ cast_sbox6[U_INT8_Td(t[3])] ^
+ cast_sbox7[U_INT8_Tb(t[0])] ^ cast_sbox8[U_INT8_Ta(t[0])];
+ break;
+ case 4:
+ case 8:
+ key->xkey[i+0] = cast_sbox5[U_INT8_Td(t[0])] ^ cast_sbox6[U_INT8_Tc(t[0])] ^
+ cast_sbox7[U_INT8_Ta(t[3])] ^ cast_sbox8[U_INT8_Tb(t[3])];
+ key->xkey[i+1] = cast_sbox5[U_INT8_Tb(t[0])] ^ cast_sbox6[U_INT8_Ta(t[0])] ^
+ cast_sbox7[U_INT8_Tc(t[3])] ^ cast_sbox8[U_INT8_Td(t[3])];
+ key->xkey[i+2] = cast_sbox5[U_INT8_Td(t[1])] ^ cast_sbox6[U_INT8_Tc(t[1])] ^
+ cast_sbox7[U_INT8_Ta(t[2])] ^ cast_sbox8[U_INT8_Tb(t[2])];
+ key->xkey[i+3] = cast_sbox5[U_INT8_Tb(t[1])] ^ cast_sbox6[U_INT8_Ta(t[1])] ^
+ cast_sbox7[U_INT8_Tc(t[2])] ^ cast_sbox8[U_INT8_Td(t[2])];
+ break;
+ }
+ switch (i & 12) {
+ case 0:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Tc(z[0])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Tc(z[1])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Tb(z[2])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Ta(z[3])];
+ break;
+ case 4:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Ta(x[2])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Tb(x[3])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Td(x[0])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Td(x[1])];
+ break;
+ case 8:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Tb(z[2])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Ta(z[3])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Tc(z[0])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Tc(z[1])];
+ break;
+ case 12:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Td(x[0])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Td(x[1])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Ta(x[2])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Tb(x[3])];
+ break;
+ }
+ if (i >= 16) {
+ key->xkey[i+0] &= 31;
+ key->xkey[i+1] &= 31;
+ key->xkey[i+2] &= 31;
+ key->xkey[i+3] &= 31;
+ }
+ }
+ /* Wipe clean */
+ for (i = 0; i < 4; i++) {
+ t[i] = x[i] = z[i] = 0;
+ }
+}
+
+/* Made in Canada */
+
diff --git a/freebsd/sys/opencrypto/cast.h b/freebsd/sys/opencrypto/cast.h
new file mode 100644
index 00000000..25d6c763
--- /dev/null
+++ b/freebsd/sys/opencrypto/cast.h
@@ -0,0 +1,23 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cast.h,v 1.2 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+#ifndef _CAST_HH_
+#define _CAST_HH_
+
+typedef struct {
+ u_int32_t xkey[32]; /* Key, after expansion */
+ int rounds; /* Number of rounds to use, 12 or 16 */
+} cast_key;
+
+void cast_setkey(cast_key * key, u_int8_t * rawkey, int keybytes);
+void cast_encrypt(cast_key * key, u_int8_t * inblock, u_int8_t * outblock);
+void cast_decrypt(cast_key * key, u_int8_t * inblock, u_int8_t * outblock);
+
+#endif /* ifndef _CAST_HH_ */
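
A minimal usage sketch for this interface (hypothetical caller; the key bytes and block contents are placeholders, error handling omitted):

	#include <freebsd/sys/types.h>
	#include <freebsd/opencrypto/cast.h>

	static void
	cast128_roundtrip(void)
	{
		cast_key ck;
		u_int8_t key[16] = { 0 };	/* 16 key bytes > 10, so 16 rounds are used */
		u_int8_t pt[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
		u_int8_t ct[8], out[8];

		cast_setkey(&ck, key, sizeof(key));
		cast_encrypt(&ck, pt, ct);	/* CAST-128 operates on 8-byte blocks */
		cast_decrypt(&ck, ct, out);	/* out now matches pt */
	}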
diff --git a/freebsd/sys/opencrypto/castsb.h b/freebsd/sys/opencrypto/castsb.h
new file mode 100644
index 00000000..ed13058c
--- /dev/null
+++ b/freebsd/sys/opencrypto/castsb.h
@@ -0,0 +1,545 @@
+/* $FreeBSD$ */
+/* $OpenBSD: castsb.h,v 1.1 2000/02/28 23:13:04 deraadt Exp $ */
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+static const u_int32_t cast_sbox1[256] = {
+ 0x30FB40D4, 0x9FA0FF0B, 0x6BECCD2F, 0x3F258C7A,
+ 0x1E213F2F, 0x9C004DD3, 0x6003E540, 0xCF9FC949,
+ 0xBFD4AF27, 0x88BBBDB5, 0xE2034090, 0x98D09675,
+ 0x6E63A0E0, 0x15C361D2, 0xC2E7661D, 0x22D4FF8E,
+ 0x28683B6F, 0xC07FD059, 0xFF2379C8, 0x775F50E2,
+ 0x43C340D3, 0xDF2F8656, 0x887CA41A, 0xA2D2BD2D,
+ 0xA1C9E0D6, 0x346C4819, 0x61B76D87, 0x22540F2F,
+ 0x2ABE32E1, 0xAA54166B, 0x22568E3A, 0xA2D341D0,
+ 0x66DB40C8, 0xA784392F, 0x004DFF2F, 0x2DB9D2DE,
+ 0x97943FAC, 0x4A97C1D8, 0x527644B7, 0xB5F437A7,
+ 0xB82CBAEF, 0xD751D159, 0x6FF7F0ED, 0x5A097A1F,
+ 0x827B68D0, 0x90ECF52E, 0x22B0C054, 0xBC8E5935,
+ 0x4B6D2F7F, 0x50BB64A2, 0xD2664910, 0xBEE5812D,
+ 0xB7332290, 0xE93B159F, 0xB48EE411, 0x4BFF345D,
+ 0xFD45C240, 0xAD31973F, 0xC4F6D02E, 0x55FC8165,
+ 0xD5B1CAAD, 0xA1AC2DAE, 0xA2D4B76D, 0xC19B0C50,
+ 0x882240F2, 0x0C6E4F38, 0xA4E4BFD7, 0x4F5BA272,
+ 0x564C1D2F, 0xC59C5319, 0xB949E354, 0xB04669FE,
+ 0xB1B6AB8A, 0xC71358DD, 0x6385C545, 0x110F935D,
+ 0x57538AD5, 0x6A390493, 0xE63D37E0, 0x2A54F6B3,
+ 0x3A787D5F, 0x6276A0B5, 0x19A6FCDF, 0x7A42206A,
+ 0x29F9D4D5, 0xF61B1891, 0xBB72275E, 0xAA508167,
+ 0x38901091, 0xC6B505EB, 0x84C7CB8C, 0x2AD75A0F,
+ 0x874A1427, 0xA2D1936B, 0x2AD286AF, 0xAA56D291,
+ 0xD7894360, 0x425C750D, 0x93B39E26, 0x187184C9,
+ 0x6C00B32D, 0x73E2BB14, 0xA0BEBC3C, 0x54623779,
+ 0x64459EAB, 0x3F328B82, 0x7718CF82, 0x59A2CEA6,
+ 0x04EE002E, 0x89FE78E6, 0x3FAB0950, 0x325FF6C2,
+ 0x81383F05, 0x6963C5C8, 0x76CB5AD6, 0xD49974C9,
+ 0xCA180DCF, 0x380782D5, 0xC7FA5CF6, 0x8AC31511,
+ 0x35E79E13, 0x47DA91D0, 0xF40F9086, 0xA7E2419E,
+ 0x31366241, 0x051EF495, 0xAA573B04, 0x4A805D8D,
+ 0x548300D0, 0x00322A3C, 0xBF64CDDF, 0xBA57A68E,
+ 0x75C6372B, 0x50AFD341, 0xA7C13275, 0x915A0BF5,
+ 0x6B54BFAB, 0x2B0B1426, 0xAB4CC9D7, 0x449CCD82,
+ 0xF7FBF265, 0xAB85C5F3, 0x1B55DB94, 0xAAD4E324,
+ 0xCFA4BD3F, 0x2DEAA3E2, 0x9E204D02, 0xC8BD25AC,
+ 0xEADF55B3, 0xD5BD9E98, 0xE31231B2, 0x2AD5AD6C,
+ 0x954329DE, 0xADBE4528, 0xD8710F69, 0xAA51C90F,
+ 0xAA786BF6, 0x22513F1E, 0xAA51A79B, 0x2AD344CC,
+ 0x7B5A41F0, 0xD37CFBAD, 0x1B069505, 0x41ECE491,
+ 0xB4C332E6, 0x032268D4, 0xC9600ACC, 0xCE387E6D,
+ 0xBF6BB16C, 0x6A70FB78, 0x0D03D9C9, 0xD4DF39DE,
+ 0xE01063DA, 0x4736F464, 0x5AD328D8, 0xB347CC96,
+ 0x75BB0FC3, 0x98511BFB, 0x4FFBCC35, 0xB58BCF6A,
+ 0xE11F0ABC, 0xBFC5FE4A, 0xA70AEC10, 0xAC39570A,
+ 0x3F04442F, 0x6188B153, 0xE0397A2E, 0x5727CB79,
+ 0x9CEB418F, 0x1CACD68D, 0x2AD37C96, 0x0175CB9D,
+ 0xC69DFF09, 0xC75B65F0, 0xD9DB40D8, 0xEC0E7779,
+ 0x4744EAD4, 0xB11C3274, 0xDD24CB9E, 0x7E1C54BD,
+ 0xF01144F9, 0xD2240EB1, 0x9675B3FD, 0xA3AC3755,
+ 0xD47C27AF, 0x51C85F4D, 0x56907596, 0xA5BB15E6,
+ 0x580304F0, 0xCA042CF1, 0x011A37EA, 0x8DBFAADB,
+ 0x35BA3E4A, 0x3526FFA0, 0xC37B4D09, 0xBC306ED9,
+ 0x98A52666, 0x5648F725, 0xFF5E569D, 0x0CED63D0,
+ 0x7C63B2CF, 0x700B45E1, 0xD5EA50F1, 0x85A92872,
+ 0xAF1FBDA7, 0xD4234870, 0xA7870BF3, 0x2D3B4D79,
+ 0x42E04198, 0x0CD0EDE7, 0x26470DB8, 0xF881814C,
+ 0x474D6AD7, 0x7C0C5E5C, 0xD1231959, 0x381B7298,
+ 0xF5D2F4DB, 0xAB838653, 0x6E2F1E23, 0x83719C9E,
+ 0xBD91E046, 0x9A56456E, 0xDC39200C, 0x20C8C571,
+ 0x962BDA1C, 0xE1E696FF, 0xB141AB08, 0x7CCA89B9,
+ 0x1A69E783, 0x02CC4843, 0xA2F7C579, 0x429EF47D,
+ 0x427B169C, 0x5AC9F049, 0xDD8F0F00, 0x5C8165BF
+};
+
+static const u_int32_t cast_sbox2[256] = {
+ 0x1F201094, 0xEF0BA75B, 0x69E3CF7E, 0x393F4380,
+ 0xFE61CF7A, 0xEEC5207A, 0x55889C94, 0x72FC0651,
+ 0xADA7EF79, 0x4E1D7235, 0xD55A63CE, 0xDE0436BA,
+ 0x99C430EF, 0x5F0C0794, 0x18DCDB7D, 0xA1D6EFF3,
+ 0xA0B52F7B, 0x59E83605, 0xEE15B094, 0xE9FFD909,
+ 0xDC440086, 0xEF944459, 0xBA83CCB3, 0xE0C3CDFB,
+ 0xD1DA4181, 0x3B092AB1, 0xF997F1C1, 0xA5E6CF7B,
+ 0x01420DDB, 0xE4E7EF5B, 0x25A1FF41, 0xE180F806,
+ 0x1FC41080, 0x179BEE7A, 0xD37AC6A9, 0xFE5830A4,
+ 0x98DE8B7F, 0x77E83F4E, 0x79929269, 0x24FA9F7B,
+ 0xE113C85B, 0xACC40083, 0xD7503525, 0xF7EA615F,
+ 0x62143154, 0x0D554B63, 0x5D681121, 0xC866C359,
+ 0x3D63CF73, 0xCEE234C0, 0xD4D87E87, 0x5C672B21,
+ 0x071F6181, 0x39F7627F, 0x361E3084, 0xE4EB573B,
+ 0x602F64A4, 0xD63ACD9C, 0x1BBC4635, 0x9E81032D,
+ 0x2701F50C, 0x99847AB4, 0xA0E3DF79, 0xBA6CF38C,
+ 0x10843094, 0x2537A95E, 0xF46F6FFE, 0xA1FF3B1F,
+ 0x208CFB6A, 0x8F458C74, 0xD9E0A227, 0x4EC73A34,
+ 0xFC884F69, 0x3E4DE8DF, 0xEF0E0088, 0x3559648D,
+ 0x8A45388C, 0x1D804366, 0x721D9BFD, 0xA58684BB,
+ 0xE8256333, 0x844E8212, 0x128D8098, 0xFED33FB4,
+ 0xCE280AE1, 0x27E19BA5, 0xD5A6C252, 0xE49754BD,
+ 0xC5D655DD, 0xEB667064, 0x77840B4D, 0xA1B6A801,
+ 0x84DB26A9, 0xE0B56714, 0x21F043B7, 0xE5D05860,
+ 0x54F03084, 0x066FF472, 0xA31AA153, 0xDADC4755,
+ 0xB5625DBF, 0x68561BE6, 0x83CA6B94, 0x2D6ED23B,
+ 0xECCF01DB, 0xA6D3D0BA, 0xB6803D5C, 0xAF77A709,
+ 0x33B4A34C, 0x397BC8D6, 0x5EE22B95, 0x5F0E5304,
+ 0x81ED6F61, 0x20E74364, 0xB45E1378, 0xDE18639B,
+ 0x881CA122, 0xB96726D1, 0x8049A7E8, 0x22B7DA7B,
+ 0x5E552D25, 0x5272D237, 0x79D2951C, 0xC60D894C,
+ 0x488CB402, 0x1BA4FE5B, 0xA4B09F6B, 0x1CA815CF,
+ 0xA20C3005, 0x8871DF63, 0xB9DE2FCB, 0x0CC6C9E9,
+ 0x0BEEFF53, 0xE3214517, 0xB4542835, 0x9F63293C,
+ 0xEE41E729, 0x6E1D2D7C, 0x50045286, 0x1E6685F3,
+ 0xF33401C6, 0x30A22C95, 0x31A70850, 0x60930F13,
+ 0x73F98417, 0xA1269859, 0xEC645C44, 0x52C877A9,
+ 0xCDFF33A6, 0xA02B1741, 0x7CBAD9A2, 0x2180036F,
+ 0x50D99C08, 0xCB3F4861, 0xC26BD765, 0x64A3F6AB,
+ 0x80342676, 0x25A75E7B, 0xE4E6D1FC, 0x20C710E6,
+ 0xCDF0B680, 0x17844D3B, 0x31EEF84D, 0x7E0824E4,
+ 0x2CCB49EB, 0x846A3BAE, 0x8FF77888, 0xEE5D60F6,
+ 0x7AF75673, 0x2FDD5CDB, 0xA11631C1, 0x30F66F43,
+ 0xB3FAEC54, 0x157FD7FA, 0xEF8579CC, 0xD152DE58,
+ 0xDB2FFD5E, 0x8F32CE19, 0x306AF97A, 0x02F03EF8,
+ 0x99319AD5, 0xC242FA0F, 0xA7E3EBB0, 0xC68E4906,
+ 0xB8DA230C, 0x80823028, 0xDCDEF3C8, 0xD35FB171,
+ 0x088A1BC8, 0xBEC0C560, 0x61A3C9E8, 0xBCA8F54D,
+ 0xC72FEFFA, 0x22822E99, 0x82C570B4, 0xD8D94E89,
+ 0x8B1C34BC, 0x301E16E6, 0x273BE979, 0xB0FFEAA6,
+ 0x61D9B8C6, 0x00B24869, 0xB7FFCE3F, 0x08DC283B,
+ 0x43DAF65A, 0xF7E19798, 0x7619B72F, 0x8F1C9BA4,
+ 0xDC8637A0, 0x16A7D3B1, 0x9FC393B7, 0xA7136EEB,
+ 0xC6BCC63E, 0x1A513742, 0xEF6828BC, 0x520365D6,
+ 0x2D6A77AB, 0x3527ED4B, 0x821FD216, 0x095C6E2E,
+ 0xDB92F2FB, 0x5EEA29CB, 0x145892F5, 0x91584F7F,
+ 0x5483697B, 0x2667A8CC, 0x85196048, 0x8C4BACEA,
+ 0x833860D4, 0x0D23E0F9, 0x6C387E8A, 0x0AE6D249,
+ 0xB284600C, 0xD835731D, 0xDCB1C647, 0xAC4C56EA,
+ 0x3EBD81B3, 0x230EABB0, 0x6438BC87, 0xF0B5B1FA,
+ 0x8F5EA2B3, 0xFC184642, 0x0A036B7A, 0x4FB089BD,
+ 0x649DA589, 0xA345415E, 0x5C038323, 0x3E5D3BB9,
+ 0x43D79572, 0x7E6DD07C, 0x06DFDF1E, 0x6C6CC4EF,
+ 0x7160A539, 0x73BFBE70, 0x83877605, 0x4523ECF1
+};
+
+static const u_int32_t cast_sbox3[256] = {
+ 0x8DEFC240, 0x25FA5D9F, 0xEB903DBF, 0xE810C907,
+ 0x47607FFF, 0x369FE44B, 0x8C1FC644, 0xAECECA90,
+ 0xBEB1F9BF, 0xEEFBCAEA, 0xE8CF1950, 0x51DF07AE,
+ 0x920E8806, 0xF0AD0548, 0xE13C8D83, 0x927010D5,
+ 0x11107D9F, 0x07647DB9, 0xB2E3E4D4, 0x3D4F285E,
+ 0xB9AFA820, 0xFADE82E0, 0xA067268B, 0x8272792E,
+ 0x553FB2C0, 0x489AE22B, 0xD4EF9794, 0x125E3FBC,
+ 0x21FFFCEE, 0x825B1BFD, 0x9255C5ED, 0x1257A240,
+ 0x4E1A8302, 0xBAE07FFF, 0x528246E7, 0x8E57140E,
+ 0x3373F7BF, 0x8C9F8188, 0xA6FC4EE8, 0xC982B5A5,
+ 0xA8C01DB7, 0x579FC264, 0x67094F31, 0xF2BD3F5F,
+ 0x40FFF7C1, 0x1FB78DFC, 0x8E6BD2C1, 0x437BE59B,
+ 0x99B03DBF, 0xB5DBC64B, 0x638DC0E6, 0x55819D99,
+ 0xA197C81C, 0x4A012D6E, 0xC5884A28, 0xCCC36F71,
+ 0xB843C213, 0x6C0743F1, 0x8309893C, 0x0FEDDD5F,
+ 0x2F7FE850, 0xD7C07F7E, 0x02507FBF, 0x5AFB9A04,
+ 0xA747D2D0, 0x1651192E, 0xAF70BF3E, 0x58C31380,
+ 0x5F98302E, 0x727CC3C4, 0x0A0FB402, 0x0F7FEF82,
+ 0x8C96FDAD, 0x5D2C2AAE, 0x8EE99A49, 0x50DA88B8,
+ 0x8427F4A0, 0x1EAC5790, 0x796FB449, 0x8252DC15,
+ 0xEFBD7D9B, 0xA672597D, 0xADA840D8, 0x45F54504,
+ 0xFA5D7403, 0xE83EC305, 0x4F91751A, 0x925669C2,
+ 0x23EFE941, 0xA903F12E, 0x60270DF2, 0x0276E4B6,
+ 0x94FD6574, 0x927985B2, 0x8276DBCB, 0x02778176,
+ 0xF8AF918D, 0x4E48F79E, 0x8F616DDF, 0xE29D840E,
+ 0x842F7D83, 0x340CE5C8, 0x96BBB682, 0x93B4B148,
+ 0xEF303CAB, 0x984FAF28, 0x779FAF9B, 0x92DC560D,
+ 0x224D1E20, 0x8437AA88, 0x7D29DC96, 0x2756D3DC,
+ 0x8B907CEE, 0xB51FD240, 0xE7C07CE3, 0xE566B4A1,
+ 0xC3E9615E, 0x3CF8209D, 0x6094D1E3, 0xCD9CA341,
+ 0x5C76460E, 0x00EA983B, 0xD4D67881, 0xFD47572C,
+ 0xF76CEDD9, 0xBDA8229C, 0x127DADAA, 0x438A074E,
+ 0x1F97C090, 0x081BDB8A, 0x93A07EBE, 0xB938CA15,
+ 0x97B03CFF, 0x3DC2C0F8, 0x8D1AB2EC, 0x64380E51,
+ 0x68CC7BFB, 0xD90F2788, 0x12490181, 0x5DE5FFD4,
+ 0xDD7EF86A, 0x76A2E214, 0xB9A40368, 0x925D958F,
+ 0x4B39FFFA, 0xBA39AEE9, 0xA4FFD30B, 0xFAF7933B,
+ 0x6D498623, 0x193CBCFA, 0x27627545, 0x825CF47A,
+ 0x61BD8BA0, 0xD11E42D1, 0xCEAD04F4, 0x127EA392,
+ 0x10428DB7, 0x8272A972, 0x9270C4A8, 0x127DE50B,
+ 0x285BA1C8, 0x3C62F44F, 0x35C0EAA5, 0xE805D231,
+ 0x428929FB, 0xB4FCDF82, 0x4FB66A53, 0x0E7DC15B,
+ 0x1F081FAB, 0x108618AE, 0xFCFD086D, 0xF9FF2889,
+ 0x694BCC11, 0x236A5CAE, 0x12DECA4D, 0x2C3F8CC5,
+ 0xD2D02DFE, 0xF8EF5896, 0xE4CF52DA, 0x95155B67,
+ 0x494A488C, 0xB9B6A80C, 0x5C8F82BC, 0x89D36B45,
+ 0x3A609437, 0xEC00C9A9, 0x44715253, 0x0A874B49,
+ 0xD773BC40, 0x7C34671C, 0x02717EF6, 0x4FEB5536,
+ 0xA2D02FFF, 0xD2BF60C4, 0xD43F03C0, 0x50B4EF6D,
+ 0x07478CD1, 0x006E1888, 0xA2E53F55, 0xB9E6D4BC,
+ 0xA2048016, 0x97573833, 0xD7207D67, 0xDE0F8F3D,
+ 0x72F87B33, 0xABCC4F33, 0x7688C55D, 0x7B00A6B0,
+ 0x947B0001, 0x570075D2, 0xF9BB88F8, 0x8942019E,
+ 0x4264A5FF, 0x856302E0, 0x72DBD92B, 0xEE971B69,
+ 0x6EA22FDE, 0x5F08AE2B, 0xAF7A616D, 0xE5C98767,
+ 0xCF1FEBD2, 0x61EFC8C2, 0xF1AC2571, 0xCC8239C2,
+ 0x67214CB8, 0xB1E583D1, 0xB7DC3E62, 0x7F10BDCE,
+ 0xF90A5C38, 0x0FF0443D, 0x606E6DC6, 0x60543A49,
+ 0x5727C148, 0x2BE98A1D, 0x8AB41738, 0x20E1BE24,
+ 0xAF96DA0F, 0x68458425, 0x99833BE5, 0x600D457D,
+ 0x282F9350, 0x8334B362, 0xD91D1120, 0x2B6D8DA0,
+ 0x642B1E31, 0x9C305A00, 0x52BCE688, 0x1B03588A,
+ 0xF7BAEFD5, 0x4142ED9C, 0xA4315C11, 0x83323EC5,
+ 0xDFEF4636, 0xA133C501, 0xE9D3531C, 0xEE353783
+};
+
+static const u_int32_t cast_sbox4[256] = {
+ 0x9DB30420, 0x1FB6E9DE, 0xA7BE7BEF, 0xD273A298,
+ 0x4A4F7BDB, 0x64AD8C57, 0x85510443, 0xFA020ED1,
+ 0x7E287AFF, 0xE60FB663, 0x095F35A1, 0x79EBF120,
+ 0xFD059D43, 0x6497B7B1, 0xF3641F63, 0x241E4ADF,
+ 0x28147F5F, 0x4FA2B8CD, 0xC9430040, 0x0CC32220,
+ 0xFDD30B30, 0xC0A5374F, 0x1D2D00D9, 0x24147B15,
+ 0xEE4D111A, 0x0FCA5167, 0x71FF904C, 0x2D195FFE,
+ 0x1A05645F, 0x0C13FEFE, 0x081B08CA, 0x05170121,
+ 0x80530100, 0xE83E5EFE, 0xAC9AF4F8, 0x7FE72701,
+ 0xD2B8EE5F, 0x06DF4261, 0xBB9E9B8A, 0x7293EA25,
+ 0xCE84FFDF, 0xF5718801, 0x3DD64B04, 0xA26F263B,
+ 0x7ED48400, 0x547EEBE6, 0x446D4CA0, 0x6CF3D6F5,
+ 0x2649ABDF, 0xAEA0C7F5, 0x36338CC1, 0x503F7E93,
+ 0xD3772061, 0x11B638E1, 0x72500E03, 0xF80EB2BB,
+ 0xABE0502E, 0xEC8D77DE, 0x57971E81, 0xE14F6746,
+ 0xC9335400, 0x6920318F, 0x081DBB99, 0xFFC304A5,
+ 0x4D351805, 0x7F3D5CE3, 0xA6C866C6, 0x5D5BCCA9,
+ 0xDAEC6FEA, 0x9F926F91, 0x9F46222F, 0x3991467D,
+ 0xA5BF6D8E, 0x1143C44F, 0x43958302, 0xD0214EEB,
+ 0x022083B8, 0x3FB6180C, 0x18F8931E, 0x281658E6,
+ 0x26486E3E, 0x8BD78A70, 0x7477E4C1, 0xB506E07C,
+ 0xF32D0A25, 0x79098B02, 0xE4EABB81, 0x28123B23,
+ 0x69DEAD38, 0x1574CA16, 0xDF871B62, 0x211C40B7,
+ 0xA51A9EF9, 0x0014377B, 0x041E8AC8, 0x09114003,
+ 0xBD59E4D2, 0xE3D156D5, 0x4FE876D5, 0x2F91A340,
+ 0x557BE8DE, 0x00EAE4A7, 0x0CE5C2EC, 0x4DB4BBA6,
+ 0xE756BDFF, 0xDD3369AC, 0xEC17B035, 0x06572327,
+ 0x99AFC8B0, 0x56C8C391, 0x6B65811C, 0x5E146119,
+ 0x6E85CB75, 0xBE07C002, 0xC2325577, 0x893FF4EC,
+ 0x5BBFC92D, 0xD0EC3B25, 0xB7801AB7, 0x8D6D3B24,
+ 0x20C763EF, 0xC366A5FC, 0x9C382880, 0x0ACE3205,
+ 0xAAC9548A, 0xECA1D7C7, 0x041AFA32, 0x1D16625A,
+ 0x6701902C, 0x9B757A54, 0x31D477F7, 0x9126B031,
+ 0x36CC6FDB, 0xC70B8B46, 0xD9E66A48, 0x56E55A79,
+ 0x026A4CEB, 0x52437EFF, 0x2F8F76B4, 0x0DF980A5,
+ 0x8674CDE3, 0xEDDA04EB, 0x17A9BE04, 0x2C18F4DF,
+ 0xB7747F9D, 0xAB2AF7B4, 0xEFC34D20, 0x2E096B7C,
+ 0x1741A254, 0xE5B6A035, 0x213D42F6, 0x2C1C7C26,
+ 0x61C2F50F, 0x6552DAF9, 0xD2C231F8, 0x25130F69,
+ 0xD8167FA2, 0x0418F2C8, 0x001A96A6, 0x0D1526AB,
+ 0x63315C21, 0x5E0A72EC, 0x49BAFEFD, 0x187908D9,
+ 0x8D0DBD86, 0x311170A7, 0x3E9B640C, 0xCC3E10D7,
+ 0xD5CAD3B6, 0x0CAEC388, 0xF73001E1, 0x6C728AFF,
+ 0x71EAE2A1, 0x1F9AF36E, 0xCFCBD12F, 0xC1DE8417,
+ 0xAC07BE6B, 0xCB44A1D8, 0x8B9B0F56, 0x013988C3,
+ 0xB1C52FCA, 0xB4BE31CD, 0xD8782806, 0x12A3A4E2,
+ 0x6F7DE532, 0x58FD7EB6, 0xD01EE900, 0x24ADFFC2,
+ 0xF4990FC5, 0x9711AAC5, 0x001D7B95, 0x82E5E7D2,
+ 0x109873F6, 0x00613096, 0xC32D9521, 0xADA121FF,
+ 0x29908415, 0x7FBB977F, 0xAF9EB3DB, 0x29C9ED2A,
+ 0x5CE2A465, 0xA730F32C, 0xD0AA3FE8, 0x8A5CC091,
+ 0xD49E2CE7, 0x0CE454A9, 0xD60ACD86, 0x015F1919,
+ 0x77079103, 0xDEA03AF6, 0x78A8565E, 0xDEE356DF,
+ 0x21F05CBE, 0x8B75E387, 0xB3C50651, 0xB8A5C3EF,
+ 0xD8EEB6D2, 0xE523BE77, 0xC2154529, 0x2F69EFDF,
+ 0xAFE67AFB, 0xF470C4B2, 0xF3E0EB5B, 0xD6CC9876,
+ 0x39E4460C, 0x1FDA8538, 0x1987832F, 0xCA007367,
+ 0xA99144F8, 0x296B299E, 0x492FC295, 0x9266BEAB,
+ 0xB5676E69, 0x9BD3DDDA, 0xDF7E052F, 0xDB25701C,
+ 0x1B5E51EE, 0xF65324E6, 0x6AFCE36C, 0x0316CC04,
+ 0x8644213E, 0xB7DC59D0, 0x7965291F, 0xCCD6FD43,
+ 0x41823979, 0x932BCDF6, 0xB657C34D, 0x4EDFD282,
+ 0x7AE5290C, 0x3CB9536B, 0x851E20FE, 0x9833557E,
+ 0x13ECF0B0, 0xD3FFB372, 0x3F85C5C1, 0x0AEF7ED2
+};
+
+static const u_int32_t cast_sbox5[256] = {
+ 0x7EC90C04, 0x2C6E74B9, 0x9B0E66DF, 0xA6337911,
+ 0xB86A7FFF, 0x1DD358F5, 0x44DD9D44, 0x1731167F,
+ 0x08FBF1FA, 0xE7F511CC, 0xD2051B00, 0x735ABA00,
+ 0x2AB722D8, 0x386381CB, 0xACF6243A, 0x69BEFD7A,
+ 0xE6A2E77F, 0xF0C720CD, 0xC4494816, 0xCCF5C180,
+ 0x38851640, 0x15B0A848, 0xE68B18CB, 0x4CAADEFF,
+ 0x5F480A01, 0x0412B2AA, 0x259814FC, 0x41D0EFE2,
+ 0x4E40B48D, 0x248EB6FB, 0x8DBA1CFE, 0x41A99B02,
+ 0x1A550A04, 0xBA8F65CB, 0x7251F4E7, 0x95A51725,
+ 0xC106ECD7, 0x97A5980A, 0xC539B9AA, 0x4D79FE6A,
+ 0xF2F3F763, 0x68AF8040, 0xED0C9E56, 0x11B4958B,
+ 0xE1EB5A88, 0x8709E6B0, 0xD7E07156, 0x4E29FEA7,
+ 0x6366E52D, 0x02D1C000, 0xC4AC8E05, 0x9377F571,
+ 0x0C05372A, 0x578535F2, 0x2261BE02, 0xD642A0C9,
+ 0xDF13A280, 0x74B55BD2, 0x682199C0, 0xD421E5EC,
+ 0x53FB3CE8, 0xC8ADEDB3, 0x28A87FC9, 0x3D959981,
+ 0x5C1FF900, 0xFE38D399, 0x0C4EFF0B, 0x062407EA,
+ 0xAA2F4FB1, 0x4FB96976, 0x90C79505, 0xB0A8A774,
+ 0xEF55A1FF, 0xE59CA2C2, 0xA6B62D27, 0xE66A4263,
+ 0xDF65001F, 0x0EC50966, 0xDFDD55BC, 0x29DE0655,
+ 0x911E739A, 0x17AF8975, 0x32C7911C, 0x89F89468,
+ 0x0D01E980, 0x524755F4, 0x03B63CC9, 0x0CC844B2,
+ 0xBCF3F0AA, 0x87AC36E9, 0xE53A7426, 0x01B3D82B,
+ 0x1A9E7449, 0x64EE2D7E, 0xCDDBB1DA, 0x01C94910,
+ 0xB868BF80, 0x0D26F3FD, 0x9342EDE7, 0x04A5C284,
+ 0x636737B6, 0x50F5B616, 0xF24766E3, 0x8ECA36C1,
+ 0x136E05DB, 0xFEF18391, 0xFB887A37, 0xD6E7F7D4,
+ 0xC7FB7DC9, 0x3063FCDF, 0xB6F589DE, 0xEC2941DA,
+ 0x26E46695, 0xB7566419, 0xF654EFC5, 0xD08D58B7,
+ 0x48925401, 0xC1BACB7F, 0xE5FF550F, 0xB6083049,
+ 0x5BB5D0E8, 0x87D72E5A, 0xAB6A6EE1, 0x223A66CE,
+ 0xC62BF3CD, 0x9E0885F9, 0x68CB3E47, 0x086C010F,
+ 0xA21DE820, 0xD18B69DE, 0xF3F65777, 0xFA02C3F6,
+ 0x407EDAC3, 0xCBB3D550, 0x1793084D, 0xB0D70EBA,
+ 0x0AB378D5, 0xD951FB0C, 0xDED7DA56, 0x4124BBE4,
+ 0x94CA0B56, 0x0F5755D1, 0xE0E1E56E, 0x6184B5BE,
+ 0x580A249F, 0x94F74BC0, 0xE327888E, 0x9F7B5561,
+ 0xC3DC0280, 0x05687715, 0x646C6BD7, 0x44904DB3,
+ 0x66B4F0A3, 0xC0F1648A, 0x697ED5AF, 0x49E92FF6,
+ 0x309E374F, 0x2CB6356A, 0x85808573, 0x4991F840,
+ 0x76F0AE02, 0x083BE84D, 0x28421C9A, 0x44489406,
+ 0x736E4CB8, 0xC1092910, 0x8BC95FC6, 0x7D869CF4,
+ 0x134F616F, 0x2E77118D, 0xB31B2BE1, 0xAA90B472,
+ 0x3CA5D717, 0x7D161BBA, 0x9CAD9010, 0xAF462BA2,
+ 0x9FE459D2, 0x45D34559, 0xD9F2DA13, 0xDBC65487,
+ 0xF3E4F94E, 0x176D486F, 0x097C13EA, 0x631DA5C7,
+ 0x445F7382, 0x175683F4, 0xCDC66A97, 0x70BE0288,
+ 0xB3CDCF72, 0x6E5DD2F3, 0x20936079, 0x459B80A5,
+ 0xBE60E2DB, 0xA9C23101, 0xEBA5315C, 0x224E42F2,
+ 0x1C5C1572, 0xF6721B2C, 0x1AD2FFF3, 0x8C25404E,
+ 0x324ED72F, 0x4067B7FD, 0x0523138E, 0x5CA3BC78,
+ 0xDC0FD66E, 0x75922283, 0x784D6B17, 0x58EBB16E,
+ 0x44094F85, 0x3F481D87, 0xFCFEAE7B, 0x77B5FF76,
+ 0x8C2302BF, 0xAAF47556, 0x5F46B02A, 0x2B092801,
+ 0x3D38F5F7, 0x0CA81F36, 0x52AF4A8A, 0x66D5E7C0,
+ 0xDF3B0874, 0x95055110, 0x1B5AD7A8, 0xF61ED5AD,
+ 0x6CF6E479, 0x20758184, 0xD0CEFA65, 0x88F7BE58,
+ 0x4A046826, 0x0FF6F8F3, 0xA09C7F70, 0x5346ABA0,
+ 0x5CE96C28, 0xE176EDA3, 0x6BAC307F, 0x376829D2,
+ 0x85360FA9, 0x17E3FE2A, 0x24B79767, 0xF5A96B20,
+ 0xD6CD2595, 0x68FF1EBF, 0x7555442C, 0xF19F06BE,
+ 0xF9E0659A, 0xEEB9491D, 0x34010718, 0xBB30CAB8,
+ 0xE822FE15, 0x88570983, 0x750E6249, 0xDA627E55,
+ 0x5E76FFA8, 0xB1534546, 0x6D47DE08, 0xEFE9E7D4
+};
+
+static const u_int32_t cast_sbox6[256] = {
+ 0xF6FA8F9D, 0x2CAC6CE1, 0x4CA34867, 0xE2337F7C,
+ 0x95DB08E7, 0x016843B4, 0xECED5CBC, 0x325553AC,
+ 0xBF9F0960, 0xDFA1E2ED, 0x83F0579D, 0x63ED86B9,
+ 0x1AB6A6B8, 0xDE5EBE39, 0xF38FF732, 0x8989B138,
+ 0x33F14961, 0xC01937BD, 0xF506C6DA, 0xE4625E7E,
+ 0xA308EA99, 0x4E23E33C, 0x79CBD7CC, 0x48A14367,
+ 0xA3149619, 0xFEC94BD5, 0xA114174A, 0xEAA01866,
+ 0xA084DB2D, 0x09A8486F, 0xA888614A, 0x2900AF98,
+ 0x01665991, 0xE1992863, 0xC8F30C60, 0x2E78EF3C,
+ 0xD0D51932, 0xCF0FEC14, 0xF7CA07D2, 0xD0A82072,
+ 0xFD41197E, 0x9305A6B0, 0xE86BE3DA, 0x74BED3CD,
+ 0x372DA53C, 0x4C7F4448, 0xDAB5D440, 0x6DBA0EC3,
+ 0x083919A7, 0x9FBAEED9, 0x49DBCFB0, 0x4E670C53,
+ 0x5C3D9C01, 0x64BDB941, 0x2C0E636A, 0xBA7DD9CD,
+ 0xEA6F7388, 0xE70BC762, 0x35F29ADB, 0x5C4CDD8D,
+ 0xF0D48D8C, 0xB88153E2, 0x08A19866, 0x1AE2EAC8,
+ 0x284CAF89, 0xAA928223, 0x9334BE53, 0x3B3A21BF,
+ 0x16434BE3, 0x9AEA3906, 0xEFE8C36E, 0xF890CDD9,
+ 0x80226DAE, 0xC340A4A3, 0xDF7E9C09, 0xA694A807,
+ 0x5B7C5ECC, 0x221DB3A6, 0x9A69A02F, 0x68818A54,
+ 0xCEB2296F, 0x53C0843A, 0xFE893655, 0x25BFE68A,
+ 0xB4628ABC, 0xCF222EBF, 0x25AC6F48, 0xA9A99387,
+ 0x53BDDB65, 0xE76FFBE7, 0xE967FD78, 0x0BA93563,
+ 0x8E342BC1, 0xE8A11BE9, 0x4980740D, 0xC8087DFC,
+ 0x8DE4BF99, 0xA11101A0, 0x7FD37975, 0xDA5A26C0,
+ 0xE81F994F, 0x9528CD89, 0xFD339FED, 0xB87834BF,
+ 0x5F04456D, 0x22258698, 0xC9C4C83B, 0x2DC156BE,
+ 0x4F628DAA, 0x57F55EC5, 0xE2220ABE, 0xD2916EBF,
+ 0x4EC75B95, 0x24F2C3C0, 0x42D15D99, 0xCD0D7FA0,
+ 0x7B6E27FF, 0xA8DC8AF0, 0x7345C106, 0xF41E232F,
+ 0x35162386, 0xE6EA8926, 0x3333B094, 0x157EC6F2,
+ 0x372B74AF, 0x692573E4, 0xE9A9D848, 0xF3160289,
+ 0x3A62EF1D, 0xA787E238, 0xF3A5F676, 0x74364853,
+ 0x20951063, 0x4576698D, 0xB6FAD407, 0x592AF950,
+ 0x36F73523, 0x4CFB6E87, 0x7DA4CEC0, 0x6C152DAA,
+ 0xCB0396A8, 0xC50DFE5D, 0xFCD707AB, 0x0921C42F,
+ 0x89DFF0BB, 0x5FE2BE78, 0x448F4F33, 0x754613C9,
+ 0x2B05D08D, 0x48B9D585, 0xDC049441, 0xC8098F9B,
+ 0x7DEDE786, 0xC39A3373, 0x42410005, 0x6A091751,
+ 0x0EF3C8A6, 0x890072D6, 0x28207682, 0xA9A9F7BE,
+ 0xBF32679D, 0xD45B5B75, 0xB353FD00, 0xCBB0E358,
+ 0x830F220A, 0x1F8FB214, 0xD372CF08, 0xCC3C4A13,
+ 0x8CF63166, 0x061C87BE, 0x88C98F88, 0x6062E397,
+ 0x47CF8E7A, 0xB6C85283, 0x3CC2ACFB, 0x3FC06976,
+ 0x4E8F0252, 0x64D8314D, 0xDA3870E3, 0x1E665459,
+ 0xC10908F0, 0x513021A5, 0x6C5B68B7, 0x822F8AA0,
+ 0x3007CD3E, 0x74719EEF, 0xDC872681, 0x073340D4,
+ 0x7E432FD9, 0x0C5EC241, 0x8809286C, 0xF592D891,
+ 0x08A930F6, 0x957EF305, 0xB7FBFFBD, 0xC266E96F,
+ 0x6FE4AC98, 0xB173ECC0, 0xBC60B42A, 0x953498DA,
+ 0xFBA1AE12, 0x2D4BD736, 0x0F25FAAB, 0xA4F3FCEB,
+ 0xE2969123, 0x257F0C3D, 0x9348AF49, 0x361400BC,
+ 0xE8816F4A, 0x3814F200, 0xA3F94043, 0x9C7A54C2,
+ 0xBC704F57, 0xDA41E7F9, 0xC25AD33A, 0x54F4A084,
+ 0xB17F5505, 0x59357CBE, 0xEDBD15C8, 0x7F97C5AB,
+ 0xBA5AC7B5, 0xB6F6DEAF, 0x3A479C3A, 0x5302DA25,
+ 0x653D7E6A, 0x54268D49, 0x51A477EA, 0x5017D55B,
+ 0xD7D25D88, 0x44136C76, 0x0404A8C8, 0xB8E5A121,
+ 0xB81A928A, 0x60ED5869, 0x97C55B96, 0xEAEC991B,
+ 0x29935913, 0x01FDB7F1, 0x088E8DFA, 0x9AB6F6F5,
+ 0x3B4CBF9F, 0x4A5DE3AB, 0xE6051D35, 0xA0E1D855,
+ 0xD36B4CF1, 0xF544EDEB, 0xB0E93524, 0xBEBB8FBD,
+ 0xA2D762CF, 0x49C92F54, 0x38B5F331, 0x7128A454,
+ 0x48392905, 0xA65B1DB8, 0x851C97BD, 0xD675CF2F
+};
+
+static const u_int32_t cast_sbox7[256] = {
+ 0x85E04019, 0x332BF567, 0x662DBFFF, 0xCFC65693,
+ 0x2A8D7F6F, 0xAB9BC912, 0xDE6008A1, 0x2028DA1F,
+ 0x0227BCE7, 0x4D642916, 0x18FAC300, 0x50F18B82,
+ 0x2CB2CB11, 0xB232E75C, 0x4B3695F2, 0xB28707DE,
+ 0xA05FBCF6, 0xCD4181E9, 0xE150210C, 0xE24EF1BD,
+ 0xB168C381, 0xFDE4E789, 0x5C79B0D8, 0x1E8BFD43,
+ 0x4D495001, 0x38BE4341, 0x913CEE1D, 0x92A79C3F,
+ 0x089766BE, 0xBAEEADF4, 0x1286BECF, 0xB6EACB19,
+ 0x2660C200, 0x7565BDE4, 0x64241F7A, 0x8248DCA9,
+ 0xC3B3AD66, 0x28136086, 0x0BD8DFA8, 0x356D1CF2,
+ 0x107789BE, 0xB3B2E9CE, 0x0502AA8F, 0x0BC0351E,
+ 0x166BF52A, 0xEB12FF82, 0xE3486911, 0xD34D7516,
+ 0x4E7B3AFF, 0x5F43671B, 0x9CF6E037, 0x4981AC83,
+ 0x334266CE, 0x8C9341B7, 0xD0D854C0, 0xCB3A6C88,
+ 0x47BC2829, 0x4725BA37, 0xA66AD22B, 0x7AD61F1E,
+ 0x0C5CBAFA, 0x4437F107, 0xB6E79962, 0x42D2D816,
+ 0x0A961288, 0xE1A5C06E, 0x13749E67, 0x72FC081A,
+ 0xB1D139F7, 0xF9583745, 0xCF19DF58, 0xBEC3F756,
+ 0xC06EBA30, 0x07211B24, 0x45C28829, 0xC95E317F,
+ 0xBC8EC511, 0x38BC46E9, 0xC6E6FA14, 0xBAE8584A,
+ 0xAD4EBC46, 0x468F508B, 0x7829435F, 0xF124183B,
+ 0x821DBA9F, 0xAFF60FF4, 0xEA2C4E6D, 0x16E39264,
+ 0x92544A8B, 0x009B4FC3, 0xABA68CED, 0x9AC96F78,
+ 0x06A5B79A, 0xB2856E6E, 0x1AEC3CA9, 0xBE838688,
+ 0x0E0804E9, 0x55F1BE56, 0xE7E5363B, 0xB3A1F25D,
+ 0xF7DEBB85, 0x61FE033C, 0x16746233, 0x3C034C28,
+ 0xDA6D0C74, 0x79AAC56C, 0x3CE4E1AD, 0x51F0C802,
+ 0x98F8F35A, 0x1626A49F, 0xEED82B29, 0x1D382FE3,
+ 0x0C4FB99A, 0xBB325778, 0x3EC6D97B, 0x6E77A6A9,
+ 0xCB658B5C, 0xD45230C7, 0x2BD1408B, 0x60C03EB7,
+ 0xB9068D78, 0xA33754F4, 0xF430C87D, 0xC8A71302,
+ 0xB96D8C32, 0xEBD4E7BE, 0xBE8B9D2D, 0x7979FB06,
+ 0xE7225308, 0x8B75CF77, 0x11EF8DA4, 0xE083C858,
+ 0x8D6B786F, 0x5A6317A6, 0xFA5CF7A0, 0x5DDA0033,
+ 0xF28EBFB0, 0xF5B9C310, 0xA0EAC280, 0x08B9767A,
+ 0xA3D9D2B0, 0x79D34217, 0x021A718D, 0x9AC6336A,
+ 0x2711FD60, 0x438050E3, 0x069908A8, 0x3D7FEDC4,
+ 0x826D2BEF, 0x4EEB8476, 0x488DCF25, 0x36C9D566,
+ 0x28E74E41, 0xC2610ACA, 0x3D49A9CF, 0xBAE3B9DF,
+ 0xB65F8DE6, 0x92AEAF64, 0x3AC7D5E6, 0x9EA80509,
+ 0xF22B017D, 0xA4173F70, 0xDD1E16C3, 0x15E0D7F9,
+ 0x50B1B887, 0x2B9F4FD5, 0x625ABA82, 0x6A017962,
+ 0x2EC01B9C, 0x15488AA9, 0xD716E740, 0x40055A2C,
+ 0x93D29A22, 0xE32DBF9A, 0x058745B9, 0x3453DC1E,
+ 0xD699296E, 0x496CFF6F, 0x1C9F4986, 0xDFE2ED07,
+ 0xB87242D1, 0x19DE7EAE, 0x053E561A, 0x15AD6F8C,
+ 0x66626C1C, 0x7154C24C, 0xEA082B2A, 0x93EB2939,
+ 0x17DCB0F0, 0x58D4F2AE, 0x9EA294FB, 0x52CF564C,
+ 0x9883FE66, 0x2EC40581, 0x763953C3, 0x01D6692E,
+ 0xD3A0C108, 0xA1E7160E, 0xE4F2DFA6, 0x693ED285,
+ 0x74904698, 0x4C2B0EDD, 0x4F757656, 0x5D393378,
+ 0xA132234F, 0x3D321C5D, 0xC3F5E194, 0x4B269301,
+ 0xC79F022F, 0x3C997E7E, 0x5E4F9504, 0x3FFAFBBD,
+ 0x76F7AD0E, 0x296693F4, 0x3D1FCE6F, 0xC61E45BE,
+ 0xD3B5AB34, 0xF72BF9B7, 0x1B0434C0, 0x4E72B567,
+ 0x5592A33D, 0xB5229301, 0xCFD2A87F, 0x60AEB767,
+ 0x1814386B, 0x30BCC33D, 0x38A0C07D, 0xFD1606F2,
+ 0xC363519B, 0x589DD390, 0x5479F8E6, 0x1CB8D647,
+ 0x97FD61A9, 0xEA7759F4, 0x2D57539D, 0x569A58CF,
+ 0xE84E63AD, 0x462E1B78, 0x6580F87E, 0xF3817914,
+ 0x91DA55F4, 0x40A230F3, 0xD1988F35, 0xB6E318D2,
+ 0x3FFA50BC, 0x3D40F021, 0xC3C0BDAE, 0x4958C24C,
+ 0x518F36B2, 0x84B1D370, 0x0FEDCE83, 0x878DDADA,
+ 0xF2A279C7, 0x94E01BE8, 0x90716F4B, 0x954B8AA3
+};
+
+static const u_int32_t cast_sbox8[256] = {
+ 0xE216300D, 0xBBDDFFFC, 0xA7EBDABD, 0x35648095,
+ 0x7789F8B7, 0xE6C1121B, 0x0E241600, 0x052CE8B5,
+ 0x11A9CFB0, 0xE5952F11, 0xECE7990A, 0x9386D174,
+ 0x2A42931C, 0x76E38111, 0xB12DEF3A, 0x37DDDDFC,
+ 0xDE9ADEB1, 0x0A0CC32C, 0xBE197029, 0x84A00940,
+ 0xBB243A0F, 0xB4D137CF, 0xB44E79F0, 0x049EEDFD,
+ 0x0B15A15D, 0x480D3168, 0x8BBBDE5A, 0x669DED42,
+ 0xC7ECE831, 0x3F8F95E7, 0x72DF191B, 0x7580330D,
+ 0x94074251, 0x5C7DCDFA, 0xABBE6D63, 0xAA402164,
+ 0xB301D40A, 0x02E7D1CA, 0x53571DAE, 0x7A3182A2,
+ 0x12A8DDEC, 0xFDAA335D, 0x176F43E8, 0x71FB46D4,
+ 0x38129022, 0xCE949AD4, 0xB84769AD, 0x965BD862,
+ 0x82F3D055, 0x66FB9767, 0x15B80B4E, 0x1D5B47A0,
+ 0x4CFDE06F, 0xC28EC4B8, 0x57E8726E, 0x647A78FC,
+ 0x99865D44, 0x608BD593, 0x6C200E03, 0x39DC5FF6,
+ 0x5D0B00A3, 0xAE63AFF2, 0x7E8BD632, 0x70108C0C,
+ 0xBBD35049, 0x2998DF04, 0x980CF42A, 0x9B6DF491,
+ 0x9E7EDD53, 0x06918548, 0x58CB7E07, 0x3B74EF2E,
+ 0x522FFFB1, 0xD24708CC, 0x1C7E27CD, 0xA4EB215B,
+ 0x3CF1D2E2, 0x19B47A38, 0x424F7618, 0x35856039,
+ 0x9D17DEE7, 0x27EB35E6, 0xC9AFF67B, 0x36BAF5B8,
+ 0x09C467CD, 0xC18910B1, 0xE11DBF7B, 0x06CD1AF8,
+ 0x7170C608, 0x2D5E3354, 0xD4DE495A, 0x64C6D006,
+ 0xBCC0C62C, 0x3DD00DB3, 0x708F8F34, 0x77D51B42,
+ 0x264F620F, 0x24B8D2BF, 0x15C1B79E, 0x46A52564,
+ 0xF8D7E54E, 0x3E378160, 0x7895CDA5, 0x859C15A5,
+ 0xE6459788, 0xC37BC75F, 0xDB07BA0C, 0x0676A3AB,
+ 0x7F229B1E, 0x31842E7B, 0x24259FD7, 0xF8BEF472,
+ 0x835FFCB8, 0x6DF4C1F2, 0x96F5B195, 0xFD0AF0FC,
+ 0xB0FE134C, 0xE2506D3D, 0x4F9B12EA, 0xF215F225,
+ 0xA223736F, 0x9FB4C428, 0x25D04979, 0x34C713F8,
+ 0xC4618187, 0xEA7A6E98, 0x7CD16EFC, 0x1436876C,
+ 0xF1544107, 0xBEDEEE14, 0x56E9AF27, 0xA04AA441,
+ 0x3CF7C899, 0x92ECBAE6, 0xDD67016D, 0x151682EB,
+ 0xA842EEDF, 0xFDBA60B4, 0xF1907B75, 0x20E3030F,
+ 0x24D8C29E, 0xE139673B, 0xEFA63FB8, 0x71873054,
+ 0xB6F2CF3B, 0x9F326442, 0xCB15A4CC, 0xB01A4504,
+ 0xF1E47D8D, 0x844A1BE5, 0xBAE7DFDC, 0x42CBDA70,
+ 0xCD7DAE0A, 0x57E85B7A, 0xD53F5AF6, 0x20CF4D8C,
+ 0xCEA4D428, 0x79D130A4, 0x3486EBFB, 0x33D3CDDC,
+ 0x77853B53, 0x37EFFCB5, 0xC5068778, 0xE580B3E6,
+ 0x4E68B8F4, 0xC5C8B37E, 0x0D809EA2, 0x398FEB7C,
+ 0x132A4F94, 0x43B7950E, 0x2FEE7D1C, 0x223613BD,
+ 0xDD06CAA2, 0x37DF932B, 0xC4248289, 0xACF3EBC3,
+ 0x5715F6B7, 0xEF3478DD, 0xF267616F, 0xC148CBE4,
+ 0x9052815E, 0x5E410FAB, 0xB48A2465, 0x2EDA7FA4,
+ 0xE87B40E4, 0xE98EA084, 0x5889E9E1, 0xEFD390FC,
+ 0xDD07D35B, 0xDB485694, 0x38D7E5B2, 0x57720101,
+ 0x730EDEBC, 0x5B643113, 0x94917E4F, 0x503C2FBA,
+ 0x646F1282, 0x7523D24A, 0xE0779695, 0xF9C17A8F,
+ 0x7A5B2121, 0xD187B896, 0x29263A4D, 0xBA510CDF,
+ 0x81F47C9F, 0xAD1163ED, 0xEA7B5965, 0x1A00726E,
+ 0x11403092, 0x00DA6D77, 0x4A0CDD61, 0xAD1F4603,
+ 0x605BDFB0, 0x9EEDC364, 0x22EBE6A8, 0xCEE7D28A,
+ 0xA0E736A0, 0x5564A6B9, 0x10853209, 0xC7EB8F37,
+ 0x2DE705CA, 0x8951570F, 0xDF09822B, 0xBD691A6C,
+ 0xAA12E4F2, 0x87451C0F, 0xE0F6A27A, 0x3ADA4819,
+ 0x4CF1764F, 0x0D771C2B, 0x67CDB156, 0x350D8384,
+ 0x5938FA0F, 0x42399EF3, 0x36997B07, 0x0E84093D,
+ 0x4AA93E61, 0x8360D87B, 0x1FA98B0C, 0x1149382C,
+ 0xE97625A5, 0x0614D1B7, 0x0E25244B, 0x0C768347,
+ 0x589E8D82, 0x0D2059D1, 0xA466BB1E, 0xF8DA0A82,
+ 0x04F19130, 0xBA6E4EC0, 0x99265164, 0x1EE7230D,
+ 0x50B2AD80, 0xEAEE6801, 0x8DB2A283, 0xEA8BF59E
+};
+
diff --git a/freebsd/sys/opencrypto/criov.c b/freebsd/sys/opencrypto/criov.c
new file mode 100644
index 00000000..1e389c1d
--- /dev/null
+++ b/freebsd/sys/opencrypto/criov.c
@@ -0,0 +1,200 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
+
+/*-
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/proc.h>
+#include <freebsd/sys/errno.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/mbuf.h>
+#include <freebsd/sys/uio.h>
+
+#include <freebsd/opencrypto/cryptodev.h>
+
+/*
+ * This macro is only for avoiding code duplication, as we need to skip a
+ * given number of bytes in the same way in the three functions below.
+ */
+#define CUIO_SKIP() do { \
+ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
+ KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
+ while (off > 0) { \
+ KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
+ if (off < iov->iov_len) \
+ break; \
+ off -= iov->iov_len; \
+ iol--; \
+ iov++; \
+ } \
+} while (0)
+
+void
+cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ bcopy(((caddr_t)iov->iov_base) + off, cp, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+void
+cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+/*
+ * Return a pointer to iov/offset of location in iovec list.
+ */
+struct iovec *
+cuio_getptr(struct uio *uio, int loc, int *off)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+
+ while (loc >= 0) {
+ /* Normal end of search */
+ if (loc < iov->iov_len) {
+ *off = loc;
+ return (iov);
+ }
+
+ loc -= iov->iov_len;
+ if (iol == 0) {
+ if (loc == 0) {
+ /* Point at the end of valid data */
+ *off = iov->iov_len;
+ return (iov);
+ } else
+ return (NULL);
+ } else {
+ iov++, iol--;
+ }
+ }
+
+ return (NULL);
+}
+
+/*
+ * Apply function f to the data in an iovec list starting "off" bytes from
+ * the beginning, continuing for "len" bytes.
+ */
+int
+cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int),
+ void *arg)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+ int rval;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
+ if (rval)
+ return (rval);
+ len -= count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+ return (0);
+}
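
A hedged sketch of how a caller might use cuio_apply(); the checksum callback and its context struct are made up for illustration:

	struct sum_ctx {
		u_int32_t sum;
	};

	static int
	sum_cb(void *arg, void *data, u_int len)
	{
		struct sum_ctx *ctx = arg;
		u_int8_t *p = data;
		u_int i;

		for (i = 0; i < len; i++)
			ctx->sum += p[i];
		return (0);		/* a non-zero return aborts cuio_apply() */
	}

	/*
	 * struct sum_ctx ctx = { 0 };
	 * cuio_apply(uio, 0, total_len, sum_cb, &ctx);
	 */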
+
+void
+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
+{
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ m_copyback((struct mbuf *)buf, off, size, in);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copyback((struct uio *)buf, off, size, in);
+ else
+ bcopy(in, buf + off, size);
+}
+
+void
+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+{
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ m_copydata((struct mbuf *)buf, off, size, out);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copydata((struct uio *)buf, off, size, out);
+ else
+ bcopy(buf + off, out, size);
+}
+
+int
+crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg)
+{
+ int error;
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ error = m_apply((struct mbuf *)buf, off, len, f, arg);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ error = cuio_apply((struct uio *)buf, off, len, f, arg);
+ else
+ error = (*f)(arg, buf + off, len);
+ return (error);
+}
diff --git a/freebsd/sys/opencrypto/crypto.c b/freebsd/sys/opencrypto/crypto.c
new file mode 100644
index 00000000..597bbc94
--- /dev/null
+++ b/freebsd/sys/opencrypto/crypto.c
@@ -0,0 +1,1580 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Cryptographic Subsystem.
+ *
+ * This code is derived from the Openbsd Cryptographic Framework (OCF)
+ * that has the copyright shown below. Very little of the original
+ * code remains.
+ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#define CRYPTO_TIMING /* enable timing support */
+
+#include <freebsd/local/opt_ddb.h>
+#include <freebsd/local/opt_kdtrace.h>
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/eventhandler.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/kthread.h>
+#include <freebsd/sys/lock.h>
+#include <freebsd/sys/module.h>
+#include <freebsd/sys/mutex.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/proc.h>
+#include <freebsd/sys/sdt.h>
+#include <freebsd/sys/sysctl.h>
+
+#include <freebsd/ddb/ddb.h>
+
+#include <freebsd/vm/uma.h>
+#include <freebsd/opencrypto/cryptodev.h>
+#include <freebsd/opencrypto/xform.h> /* XXX for M_XDATA */
+
+#include <freebsd/sys/kobj.h>
+#include <freebsd/sys/bus.h>
+#include <freebsd/local/cryptodev_if.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <freebsd/machine/pcb.h>
+#endif
+
+SDT_PROVIDER_DEFINE(opencrypto);
+
+/*
+ * Crypto drivers register themselves by allocating a slot in the
+ * crypto_drivers table with crypto_get_driverid() and then registering
+ * each algorithm they support with crypto_register() and crypto_kregister().
+ */
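
The registration flow described above, sketched as it might appear in a driver attach routine; the driver name is hypothetical and the crypto_register() call assumes the usual FreeBSD signature (driverid, algorithm, maxoplen, flags), which is not shown in this file:

	static int
	exampledrv_attach(device_t dev)
	{
		int32_t cid;

		/* Allocate a slot in the crypto_drivers table. */
		cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
		if (cid < 0)
			return (ENXIO);

		/* Advertise each supported algorithm. */
		crypto_register(cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
		return (0);
	}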
+static struct mtx crypto_drivers_mtx; /* lock on driver table */
+#define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
+#define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx)
+#define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED)
+
+/*
+ * Crypto device/driver capabilities structure.
+ *
+ * Synchronization:
+ * (d) - protected by CRYPTO_DRIVER_LOCK()
+ * (q) - protected by CRYPTO_Q_LOCK()
+ * Not tagged fields are read-only.
+ */
+struct cryptocap {
+ device_t cc_dev; /* (d) device/driver */
+ u_int32_t cc_sessions; /* (d) # of sessions */
+ u_int32_t cc_koperations; /* (d) # of asym operations */
+ /*
+ * Largest possible operator length (in bits) for each type of
+ * encryption algorithm. XXX not used
+ */
+ u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
+
+ int cc_flags; /* (d) flags */
+#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
+ int cc_qblocked; /* (q) symmetric q blocked */
+ int cc_kqblocked; /* (q) asymmetric q blocked */
+};
+static struct cryptocap *crypto_drivers = NULL;
+static int crypto_drivers_num = 0;
+
+/*
+ * There are two queues for crypto requests; one for symmetric (e.g.
+ * cipher) operations and one for asymmetric (e.g. MOD) operations.
+ * A single mutex is used to lock access to both queues. We could
+ * have one per-queue but having one simplifies handling of block/unblock
+ * operations.
+ */
+static int crp_sleep = 0;
+static TAILQ_HEAD(,cryptop) crp_q; /* request queues */
+static TAILQ_HEAD(,cryptkop) crp_kq;
+static struct mtx crypto_q_mtx;
+#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
+#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
+
+/*
+ * There are two queues for processing completed crypto requests; one
+ * for the symmetric and one for the asymmetric ops. We only need one
+ * but have two to avoid type futzing (cryptop vs. cryptkop). A single
+ * mutex is used to lock access to both queues. Note that this lock
+ * must be separate from the lock on request queues to insure driver
+ * callbacks don't generate lock order reversals.
+ */
+static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
+static TAILQ_HEAD(,cryptkop) crp_ret_kq;
+static struct mtx crypto_ret_q_mtx;
+#define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx)
+#define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx)
+#define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
+
+static uma_zone_t cryptop_zone;
+static uma_zone_t cryptodesc_zone;
+
+int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
+SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
+ &crypto_userasymcrypto, 0,
+ "Enable/disable user-mode access to asymmetric crypto support");
+int crypto_devallowsoft = 0; /* only use hardware crypto for asym */
+SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
+ &crypto_devallowsoft, 0,
+ "Enable/disable use of software asym crypto support");
+
+MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
+
+static void crypto_proc(void);
+static struct proc *cryptoproc;
+static void crypto_ret_proc(void);
+static struct proc *cryptoretproc;
+static void crypto_destroy(void);
+static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
+static int crypto_kinvoke(struct cryptkop *krp, int flags);
+
+static struct cryptostats cryptostats;
+SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
+ cryptostats, "Crypto system statistics");
+
+#ifdef CRYPTO_TIMING
+static int crypto_timing = 0;
+SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
+ &crypto_timing, 0, "Enable/disable crypto timing support");
+#endif
+
+static int
+crypto_init(void)
+{
+ int error;
+
+ mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
+ MTX_DEF|MTX_QUIET);
+
+ TAILQ_INIT(&crp_q);
+ TAILQ_INIT(&crp_kq);
+ mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
+
+ TAILQ_INIT(&crp_ret_q);
+ TAILQ_INIT(&crp_ret_kq);
+ mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
+
+ cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
+ 0, 0, 0, 0,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+ cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
+ 0, 0, 0, 0,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+ if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
+ printf("crypto_init: cannot setup crypto zones\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
+ crypto_drivers = malloc(crypto_drivers_num *
+ sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
+ if (crypto_drivers == NULL) {
+ printf("crypto_init: cannot setup crypto drivers\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ error = kproc_create((void (*)(void *)) crypto_proc, NULL,
+ &cryptoproc, 0, 0, "crypto");
+ if (error) {
+ printf("crypto_init: cannot start crypto thread; error %d",
+ error);
+ goto bad;
+ }
+
+ error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
+ &cryptoretproc, 0, 0, "crypto returns");
+ if (error) {
+ printf("crypto_init: cannot start cryptoret thread; error %d",
+ error);
+ goto bad;
+ }
+ return 0;
+bad:
+ crypto_destroy();
+ return error;
+}
+
+/*
+ * Signal a crypto thread to terminate. We use the driver
+ * table lock to synchronize the sleep/wakeups so that we
+ * are sure the threads have terminated before we release
+ * the data structures they use. See crypto_finis below
+ * for the other half of this song-and-dance.
+ */
+static void
+crypto_terminate(struct proc **pp, void *q)
+{
+ struct proc *p;
+
+ mtx_assert(&crypto_drivers_mtx, MA_OWNED);
+ p = *pp;
+ *pp = NULL;
+ if (p) {
+ wakeup_one(q);
+ PROC_LOCK(p); /* NB: ensure we don't miss wakeup */
+ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
+ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
+ PROC_UNLOCK(p);
+ CRYPTO_DRIVER_LOCK();
+ }
+}
+
+static void
+crypto_destroy(void)
+{
+ /*
+ * Terminate any crypto threads.
+ */
+ CRYPTO_DRIVER_LOCK();
+ crypto_terminate(&cryptoproc, &crp_q);
+ crypto_terminate(&cryptoretproc, &crp_ret_q);
+ CRYPTO_DRIVER_UNLOCK();
+
+ /* XXX flush queues??? */
+
+ /*
+ * Reclaim dynamically allocated resources.
+ */
+ if (crypto_drivers != NULL)
+ free(crypto_drivers, M_CRYPTO_DATA);
+
+ if (cryptodesc_zone != NULL)
+ uma_zdestroy(cryptodesc_zone);
+ if (cryptop_zone != NULL)
+ uma_zdestroy(cryptop_zone);
+ mtx_destroy(&crypto_q_mtx);
+ mtx_destroy(&crypto_ret_q_mtx);
+ mtx_destroy(&crypto_drivers_mtx);
+}
+
+static struct cryptocap *
+crypto_checkdriver(u_int32_t hid)
+{
+ if (crypto_drivers == NULL)
+ return NULL;
+ return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
+}
+
+/*
+ * Compare a driver's list of supported algorithms against another
+ * list; return non-zero if all algorithms are supported.
+ */
+static int
+driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
+{
+ const struct cryptoini *cr;
+
+ /* See if all the algorithms are supported. */
+ for (cr = cri; cr; cr = cr->cri_next)
+ if (cap->cc_alg[cr->cri_alg] == 0)
+ return 0;
+ return 1;
+}
+
+/*
+ * Select a driver for a new session that supports the specified
+ * algorithms and, optionally, is constrained according to the flags.
+ * The algorithm we use here is pretty stupid; just use the
+ * first driver that supports all the algorithms we need. If there
+ * are multiple drivers we choose the driver with the fewest active
+ * sessions. We prefer hardware-backed drivers to software ones.
+ *
+ * XXX We need more smarts here (in real life too, but that's
+ * XXX another story altogether).
+ */
+static struct cryptocap *
+crypto_select_driver(const struct cryptoini *cri, int flags)
+{
+ struct cryptocap *cap, *best;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify all the algorithms are supported. */
+ if (driver_suitable(cap, cri)) {
+ if (best == NULL ||
+ cap->cc_sessions < best->cc_sessions)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Create a new session. The crid argument specifies a crypto
+ * driver to use or constraints on a driver to select (hardware
+ * only, software only, either). Whatever driver is selected
+ * must be capable of the requested crypto algorithms.
+ */
+int
+crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid, lid;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ /*
+ * Use specified driver; verify it is capable.
+ */
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL && !driver_suitable(cap, cri))
+ cap = NULL;
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ cap = crypto_select_driver(cri, crid);
+ /*
+ * if NULL then can't do everything in one session.
+ * XXX Fix this. We need to inject a "virtual" session
+ * XXX layer right about here.
+ */
+ }
+ if (cap != NULL) {
+ /* Call the driver initialization routine. */
+ hid = cap - crypto_drivers;
+ lid = hid; /* Pass the driver ID. */
+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
+ if (err == 0) {
+ (*sid) = (cap->cc_flags & 0xff000000)
+ | (hid & 0x00ffffff);
+ (*sid) <<= 32;
+ (*sid) |= (lid & 0xffffffff);
+ cap->cc_sessions++;
+ }
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
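
A sketch of a caller setting up a combined cipher-plus-MAC session; the cryptoini field names come from cryptodev.h (cri_alg, cri_klen in bits, cri_key, cri_next), and the key buffers are placeholders:

	struct cryptoini crie, cria;
	u_int64_t sid;
	int error;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = CRYPTO_3DES_CBC;
	crie.cri_klen = 192;			/* key length in bits */
	crie.cri_key = (caddr_t)cipher_key;

	bzero(&cria, sizeof(cria));
	cria.cri_alg = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;
	cria.cri_key = (caddr_t)mac_key;

	crie.cri_next = &cria;			/* chain cipher and MAC into one session */
	error = crypto_newsession(&sid, &crie,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);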
+
+static void
+crypto_remove(struct cryptocap *cap)
+{
+
+ mtx_assert(&crypto_drivers_mtx, MA_OWNED);
+ if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
+ bzero(cap, sizeof(*cap));
+}
+
+/*
+ * Delete an existing session (or a reserved session on an unregistered
+ * driver).
+ */
+int
+crypto_freesession(u_int64_t sid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ if (crypto_drivers == NULL) {
+ err = EINVAL;
+ goto done;
+ }
+
+ /* Determine two IDs. */
+ hid = CRYPTO_SESID2HID(sid);
+
+ if (hid >= crypto_drivers_num) {
+ err = ENOENT;
+ goto done;
+ }
+ cap = &crypto_drivers[hid];
+
+ if (cap->cc_sessions)
+ cap->cc_sessions--;
+
+ /* Call the driver cleanup routine, if available. */
+ err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
+
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+
+done:
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Return an unused driver id. Used by drivers prior to registering
+ * support for the algorithms they handle.
+ */
+int32_t
+crypto_get_driverid(device_t dev, int flags)
+{
+ struct cryptocap *newdrv;
+ int i;
+
+ if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ printf("%s: no flags specified when registering driver\n",
+ device_get_nameunit(dev));
+ return -1;
+ }
+
+ CRYPTO_DRIVER_LOCK();
+
+ for (i = 0; i < crypto_drivers_num; i++) {
+ if (crypto_drivers[i].cc_dev == NULL &&
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
+ break;
+ }
+ }
+
+ /* Out of entries, allocate some more. */
+ if (i == crypto_drivers_num) {
+ /* Be careful about wrap-around. */
+ if (2 * crypto_drivers_num <= crypto_drivers_num) {
+ CRYPTO_DRIVER_UNLOCK();
+ printf("crypto: driver count wraparound!\n");
+ return -1;
+ }
+
+ newdrv = malloc(2 * crypto_drivers_num *
+ sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (newdrv == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ printf("crypto: no space to expand driver table!\n");
+ return -1;
+ }
+
+ bcopy(crypto_drivers, newdrv,
+ crypto_drivers_num * sizeof(struct cryptocap));
+
+ crypto_drivers_num *= 2;
+
+ free(crypto_drivers, M_CRYPTO_DATA);
+ crypto_drivers = newdrv;
+ }
+
+ /* NB: state is zero'd on free */
+ crypto_drivers[i].cc_sessions = 1; /* Mark */
+ crypto_drivers[i].cc_dev = dev;
+ crypto_drivers[i].cc_flags = flags;
+ if (bootverbose)
+ printf("crypto: assign %s driver id %u, flags %u\n",
+ device_get_nameunit(dev), i, flags);
+
+ CRYPTO_DRIVER_UNLOCK();
+
+ return i;
+}
+
+/*
+ * Lookup a driver by name. We match against the full device
+ * name and unit, and against just the name. The latter gives
+ * us simple wildcarding by device name. On success return the
+ * driver/hardware identifier; otherwise return -1.
+ */
+int
+crypto_find_driver(const char *match)
+{
+ int i, len = strlen(match);
+
+ CRYPTO_DRIVER_LOCK();
+ for (i = 0; i < crypto_drivers_num; i++) {
+ device_t dev = crypto_drivers[i].cc_dev;
+ if (dev == NULL ||
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+ continue;
+ if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
+ strncmp(match, device_get_name(dev), len) == 0)
+ break;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ return i < crypto_drivers_num ? i : -1;
+}
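+
+/*
+ * For example (hypothetical driver name, for illustration only):
+ * crypto_find_driver("aesni0") matches only that unit, while
+ * crypto_find_driver("aesni") matches the first registered unit of that
+ * driver, which is the simple wildcarding described above.
+ */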
+
+/*
+ * Return the device_t for the specified driver or NULL
+ * if the driver identifier is invalid.
+ */
+device_t
+crypto_find_device_byhid(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_dev : NULL;
+}
+
+/*
+ * Return the device/driver capabilities.
+ */
+int
+crypto_getcaps(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_flags : 0;
+}
+
+/*
+ * Register support for a key-related algorithm. This routine
+ * is called once for each algorithm supported by a driver.
+ */
+int
+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ if (bootverbose)
+ printf("crypto: %s registers key alg %u flags %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , kalg
+ , flags
+ );
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Register support for a non-key-related algorithm. This routine
+ * is called once for each such algorithm supported by a driver.
+ */
+int
+crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ /* NB: algorithms are in the range [1..max] */
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ cap->cc_max_op_len[alg] = maxoplen;
+ if (bootverbose)
+ printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , alg
+ , flags
+ , maxoplen
+ );
+ cap->cc_sessions = 0; /* Unmark */
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+static void
+driver_finis(struct cryptocap *cap)
+{
+ u_int32_t ses, kops;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ ses = cap->cc_sessions;
+ kops = cap->cc_koperations;
+ bzero(cap, sizeof(*cap));
+ if (ses != 0 || kops != 0) {
+ /*
+ * If there are pending sessions,
+ * just mark as invalid.
+ */
+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+ cap->cc_sessions = ses;
+ cap->cc_koperations = kops;
+ }
+}
+
+/*
+ * Unregister a crypto driver. If there are pending sessions using it,
+ * leave enough information around so that subsequent calls using those
+ * sessions will correctly detect the driver has been unregistered and
+ * reroute requests.
+ */
+int
+crypto_unregister(u_int32_t driverid, int alg)
+{
+ struct cryptocap *cap;
+ int i, err;
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
+ cap->cc_alg[alg] != 0) {
+ cap->cc_alg[alg] = 0;
+ cap->cc_max_op_len[alg] = 0;
+
+ /* Was this the last algorithm ? */
+ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
+ if (cap->cc_alg[i] != 0)
+ break;
+
+ if (i == CRYPTO_ALGORITHM_MAX + 1)
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Unregister all algorithms associated with a crypto driver.
+ * If there are pending sessions using it, leave enough information
+ * around so that subsequent calls using those sessions will
+ * correctly detect the driver has been unregistered and reroute
+ * requests.
+ */
+int
+crypto_unregister_all(u_int32_t driverid)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Clear blockage on a driver. The what parameter indicates whether
+ * the driver is now ready for cryptop's and/or cryptokop's.
+ */
+int
+crypto_unblock(u_int32_t driverid, int what)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_Q_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ if (what & CRYPTO_SYMQ)
+ cap->cc_qblocked = 0;
+ if (what & CRYPTO_ASYMQ)
+ cap->cc_kqblocked = 0;
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_Q_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Add a crypto request to a queue, to be processed by the kernel thread.
+ */
+int
+crypto_dispatch(struct cryptop *crp)
+{
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int result;
+
+ cryptostats.cs_ops++;
+
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ binuptime(&crp->crp_tstamp);
+#endif
+
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+
+ if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+ /*
+ * Caller marked the request to be processed
+ * immediately; dispatch it directly to the
+ * driver unless the driver is currently blocked.
+ */
+ cap = crypto_checkdriver(hid);
+		/* Driver cannot disappear when there is an active session. */
+ KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+ if (!cap->cc_qblocked) {
+ result = crypto_invoke(cap, crp, 0);
+ if (result != ERESTART)
+ return (result);
+ /*
+ * The driver ran out of resources, put the request on
+ * the queue.
+ */
+ }
+ }
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ CRYPTO_Q_UNLOCK();
+ return 0;
+}
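+
+/*
+ * Minimal dispatch sketch, modeled on cryptodev_op() in cryptodev.c
+ * (buffer setup, error handling and a second descriptor for a MAC are
+ * omitted; my_done is a hypothetical callback):
+ *
+ *	struct cryptop *crp = crypto_getreq(1);
+ *	crp->crp_sid = sid;			sid from crypto_newsession()
+ *	crp->crp_ilen = len;
+ *	crp->crp_buf = (caddr_t)buf;
+ *	crp->crp_flags = CRYPTO_F_CBIMM;
+ *	crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
+ *	crp->crp_desc->crd_len = len;
+ *	crp->crp_desc->crd_key = key;
+ *	crp->crp_desc->crd_klen = klen * 8;	key length in bits
+ *	crp->crp_callback = my_done;
+ *	error = crypto_dispatch(crp);
+ */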
+
+/*
+ * Add an asymmetric crypto request to a queue,
+ * to be processed by the kernel thread.
+ */
+int
+crypto_kdispatch(struct cryptkop *krp)
+{
+ int error;
+
+ cryptostats.cs_kops++;
+
+ error = crypto_kinvoke(krp, krp->krp_crid);
+ if (error == ERESTART) {
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ CRYPTO_Q_UNLOCK();
+ error = 0;
+ }
+ return error;
+}
+
+/*
+ * Verify a driver is suitable for the specified operation.
+ */
+static __inline int
+kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
+{
+ return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
+}
+
+/*
+ * Select a driver for an asym operation. The driver must
+ * support the necessary algorithm. The caller can constrain
+ * which device is selected with the flags parameter. The
+ * algorithm we use here is pretty stupid; just use the first
+ * driver that supports the algorithms we need. If there are
+ * multiple suitable drivers we choose the driver with the
+ * fewest active operations. We prefer hardware-backed
+ * drivers to software ones when either may be used.
+ */
+static struct cryptocap *
+crypto_select_kdriver(const struct cryptkop *krp, int flags)
+{
+ struct cryptocap *cap, *best, *blocked;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+ blocked = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify all the algorithms are supported. */
+ if (kdriver_suitable(cap, krp)) {
+ if (best == NULL ||
+ cap->cc_koperations < best->cc_koperations)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Dispatch an asymmetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp, int crid)
+{
+ struct cryptocap *cap = NULL;
+ int error;
+
+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+ KASSERT(krp->krp_callback != NULL,
+ ("%s: krp->crp_callback == NULL", __func__));
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL) {
+ /*
+ * Driver present, it must support the necessary
+ * algorithm and, if s/w drivers are excluded,
+ * it must be registered as hardware-backed.
+ */
+ if (!kdriver_suitable(cap, krp) ||
+ (!crypto_devallowsoft &&
+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+ cap = NULL;
+ }
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
+ crid &= ~CRYPTOCAP_F_SOFTWARE;
+ cap = crypto_select_kdriver(krp, crid);
+ }
+ if (cap != NULL && !cap->cc_kqblocked) {
+ krp->krp_hid = cap - crypto_drivers;
+ cap->cc_koperations++;
+ CRYPTO_DRIVER_UNLOCK();
+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+ CRYPTO_DRIVER_LOCK();
+ if (error == ERESTART) {
+ cap->cc_koperations--;
+ CRYPTO_DRIVER_UNLOCK();
+ return (error);
+ }
+ } else {
+ /*
+ * NB: cap is !NULL if device is blocked; in
+ * that case return ERESTART so the operation
+ * is resubmitted if possible.
+ */
+ error = (cap == NULL) ? ENODEV : ERESTART;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+
+ if (error) {
+ krp->krp_status = error;
+ crypto_kdone(krp);
+ }
+ return 0;
+}
+
+#ifdef CRYPTO_TIMING
+static void
+crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
+{
+ struct bintime now, delta;
+ struct timespec t;
+ uint64_t u;
+
+ binuptime(&now);
+ u = now.frac;
+ delta.frac = now.frac - bt->frac;
+ delta.sec = now.sec - bt->sec;
+ if (u < delta.frac)
+ delta.sec--;
+ bintime2timespec(&delta, &t);
+ timespecadd(&ts->acc, &t);
+ if (timespeccmp(&t, &ts->min, <))
+ ts->min = t;
+ if (timespeccmp(&t, &ts->max, >))
+ ts->max = t;
+ ts->count++;
+
+ *bt = now;
+}
+#endif
+
+/*
+ * Dispatch a crypto request to the appropriate crypto devices.
+ */
+static int
+crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
+{
+
+ KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
+ KASSERT(crp->crp_callback != NULL,
+ ("%s: crp->crp_callback == NULL", __func__));
+ KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
+#endif
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
+ struct cryptodesc *crd;
+ u_int64_t nid;
+
+ /*
+ * Driver has unregistered; migrate the session and return
+ * an error to the caller so they'll resubmit the op.
+ *
+ * XXX: What if there are more already queued requests for this
+ * session?
+ */
+ crypto_freesession(crp->crp_sid);
+
+ for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
+ crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
+
+ /* XXX propagate flags from initial session? */
+ if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
+ crp->crp_sid = nid;
+
+ crp->crp_etype = EAGAIN;
+ crypto_done(crp);
+ return 0;
+ } else {
+ /*
+ * Invoke the driver to process the request.
+ */
+ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
+ }
+}
+
+/*
+ * Release a set of crypto descriptors.
+ */
+void
+crypto_freereq(struct cryptop *crp)
+{
+ struct cryptodesc *crd;
+
+ if (crp == NULL)
+ return;
+
+#ifdef DIAGNOSTIC
+ {
+ struct cryptop *crp2;
+
+ CRYPTO_Q_LOCK();
+ TAILQ_FOREACH(crp2, &crp_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the crypto queue (%p).",
+ crp));
+ }
+ CRYPTO_Q_UNLOCK();
+ CRYPTO_RETQ_LOCK();
+ TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the return queue (%p).",
+ crp));
+ }
+ CRYPTO_RETQ_UNLOCK();
+ }
+#endif
+
+ while ((crd = crp->crp_desc) != NULL) {
+ crp->crp_desc = crd->crd_next;
+ uma_zfree(cryptodesc_zone, crd);
+ }
+ uma_zfree(cryptop_zone, crp);
+}
+
+/*
+ * Acquire a set of crypto descriptors.
+ */
+struct cryptop *
+crypto_getreq(int num)
+{
+ struct cryptodesc *crd;
+ struct cryptop *crp;
+
+ crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
+ if (crp != NULL) {
+ while (num--) {
+ crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
+ if (crd == NULL) {
+ crypto_freereq(crp);
+ return NULL;
+ }
+
+ crd->crd_next = crp->crp_desc;
+ crp->crp_desc = crd;
+ }
+ }
+ return crp;
+}
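+
+/*
+ * The argument is the number of descriptors chained onto crp_desc.  For
+ * example, cryptodev_op() in cryptodev.c asks for one descriptor per
+ * transform, crypto_getreq((txform != NULL) + (thash != NULL)), and then
+ * walks crp_desc/crd_next to fill in the hash and cipher halves.
+ */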
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_done(struct cryptop *crp)
+{
+ KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
+ ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
+ crp->crp_flags |= CRYPTO_F_DONE;
+ if (crp->crp_etype != 0)
+ cryptostats.cs_errs++;
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
+#endif
+ /*
+ * CBIMM means unconditionally do the callback immediately;
+ * CBIFSYNC means do the callback immediately only if the
+ * operation was done synchronously. Both are used to avoid
+ * doing extraneous context switches; the latter is mostly
+ * used with the software crypto driver.
+ */
+ if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
+ ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
+ (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
+ /*
+ * Do the callback directly. This is ok when the
+ * callback routine does very little (e.g. the
+ * /dev/crypto callback method just does a wakeup).
+ */
+#ifdef CRYPTO_TIMING
+ if (crypto_timing) {
+ /*
+ * NB: We must copy the timestamp before
+ * doing the callback as the cryptop is
+ * likely to be reclaimed.
+ */
+ struct bintime t = crp->crp_tstamp;
+ crypto_tstat(&cryptostats.cs_cb, &t);
+ crp->crp_callback(crp);
+ crypto_tstat(&cryptostats.cs_finis, &t);
+ } else
+#endif
+ crp->crp_callback(crp);
+ } else {
+ /*
+ * Normal case; queue the callback for the thread.
+ */
+ CRYPTO_RETQ_LOCK();
+ if (CRYPTO_RETQ_EMPTY())
+ wakeup_one(&crp_ret_q); /* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
+ CRYPTO_RETQ_UNLOCK();
+ }
+}
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_kdone(struct cryptkop *krp)
+{
+ struct cryptocap *cap;
+
+ if (krp->krp_status != 0)
+ cryptostats.cs_kerrs++;
+ CRYPTO_DRIVER_LOCK();
+ /* XXX: What if driver is loaded in the meantime? */
+ if (krp->krp_hid < crypto_drivers_num) {
+ cap = &crypto_drivers[krp->krp_hid];
+ cap->cc_koperations--;
+ KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ CRYPTO_RETQ_LOCK();
+ if (CRYPTO_RETQ_EMPTY())
+ wakeup_one(&crp_ret_q); /* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
+ CRYPTO_RETQ_UNLOCK();
+}
+
+int
+crypto_getfeat(int *featp)
+{
+ int hid, kalg, feat = 0;
+
+ CRYPTO_DRIVER_LOCK();
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+
+ if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+ !crypto_devallowsoft) {
+ continue;
+ }
+ for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
+ if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
+ feat |= 1 << kalg;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ *featp = feat;
+ return (0);
+}
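+
+/*
+ * A caller can then test the mask per key algorithm, e.g. (sketch):
+ *
+ *	int feat;
+ *	if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)))
+ *		modular exponentiation can be offloaded
+ */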
+
+/*
+ * Terminate a thread at module unload. The process that
+ * initiated this is waiting for us to signal that we're gone;
+ * wake it up and exit. We use the driver table lock to ensure
+ * we don't do the wakeup before they're waiting. There is no
+ * race here because the waiter sleeps on the proc lock for the
+ * thread so it gets notified at the right time because of an
+ * extra wakeup that's done in exit1().
+ */
+static void
+crypto_finis(void *chan)
+{
+ CRYPTO_DRIVER_LOCK();
+ wakeup_one(chan);
+ CRYPTO_DRIVER_UNLOCK();
+ kproc_exit(0);
+}
+
+/*
+ * Crypto thread, dispatches crypto requests.
+ */
+static void
+crypto_proc(void)
+{
+ struct cryptop *crp, *submit;
+ struct cryptkop *krp;
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int result, hint;
+
+#ifndef __rtems__
+#if defined(__i386__) || defined(__amd64__)
+ fpu_kern_thread(FPU_KERN_NORMAL);
+#endif
+#endif /* __rtems__ */
+
+ CRYPTO_Q_LOCK();
+ for (;;) {
+ /*
+ * Find the first element in the queue that can be
+ * processed and look-ahead to see if multiple ops
+ * are ready for the same driver.
+ */
+ submit = NULL;
+ hint = 0;
+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+ cap = crypto_checkdriver(hid);
+ /*
+			 * Driver cannot disappear while there is an active
+ * session.
+ */
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /* Op needs to be migrated, process it. */
+ if (submit == NULL)
+ submit = crp;
+ break;
+ }
+ if (!cap->cc_qblocked) {
+ if (submit != NULL) {
+ /*
+ * We stop on finding another op,
+					 * regardless of whether it's for the same
+ * driver or not. We could keep
+ * searching the queue but it might be
+ * better to just use a per-driver
+ * queue instead.
+ */
+ if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
+ hint = CRYPTO_HINT_MORE;
+ break;
+ } else {
+ submit = crp;
+ if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
+ break;
+				/* keep scanning; more may be queued */
+ }
+ }
+ }
+ if (submit != NULL) {
+ TAILQ_REMOVE(&crp_q, submit, crp_next);
+ hid = CRYPTO_SESID2HID(submit->crp_sid);
+ cap = crypto_checkdriver(hid);
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ result = crypto_invoke(cap, submit, hint);
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptop's and put
+ * the request back in the queue. It would
+				 * be best to put the request back where we got
+ * it but that's hard so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
+ TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
+ cryptostats.cs_blocks++;
+ }
+ }
+
+ /* As above, but for key ops */
+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+ cap = crypto_checkdriver(krp->krp_hid);
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /*
+ * Operation needs to be migrated, invalidate
+ * the assigned device so it will reselect a
+ * new one below. Propagate the original
+ * crid selection flags if supplied.
+ */
+ krp->krp_hid = krp->krp_crid &
+ (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
+ if (krp->krp_hid == 0)
+ krp->krp_hid =
+ CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+ break;
+ }
+ if (!cap->cc_kqblocked)
+ break;
+ }
+ if (krp != NULL) {
+ TAILQ_REMOVE(&crp_kq, krp, krp_next);
+ result = crypto_kinvoke(krp, krp->krp_hid);
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptkop's and put
+ * the request back in the queue. It would
+				 * be best to put the request back where we got
+ * it but that's hard so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+ TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
+ cryptostats.cs_kblocks++;
+ }
+ }
+
+ if (submit == NULL && krp == NULL) {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more ops to process.
+ * This happens either by submission or by a driver
+ * becoming unblocked and notifying us through
+ * crypto_unblock. Note that when we wakeup we
+ * start processing each queue again from the
+ * front. It's not clear that it's important to
+ * preserve this ordering since ops may finish
+ * out of order if dispatched to different devices
+ * and some become blocked while others do not.
+ */
+ crp_sleep = 1;
+ msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
+ crp_sleep = 0;
+ if (cryptoproc == NULL)
+ break;
+ cryptostats.cs_intrs++;
+ }
+ }
+ CRYPTO_Q_UNLOCK();
+
+ crypto_finis(&crp_q);
+}
+
+/*
+ * Crypto return thread; does callbacks for processed crypto requests.
+ * Callbacks are done here, rather than in the crypto drivers, because
+ * callbacks typically are expensive and would slow interrupt handling.
+ */
+static void
+crypto_ret_proc(void)
+{
+ struct cryptop *crpt;
+ struct cryptkop *krpt;
+
+ CRYPTO_RETQ_LOCK();
+ for (;;) {
+ /* Harvest return q's for completed ops */
+ crpt = TAILQ_FIRST(&crp_ret_q);
+ if (crpt != NULL)
+ TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
+
+ krpt = TAILQ_FIRST(&crp_ret_kq);
+ if (krpt != NULL)
+ TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
+
+ if (crpt != NULL || krpt != NULL) {
+ CRYPTO_RETQ_UNLOCK();
+ /*
+ * Run callbacks unlocked.
+ */
+ if (crpt != NULL) {
+#ifdef CRYPTO_TIMING
+ if (crypto_timing) {
+ /*
+ * NB: We must copy the timestamp before
+ * doing the callback as the cryptop is
+ * likely to be reclaimed.
+ */
+ struct bintime t = crpt->crp_tstamp;
+ crypto_tstat(&cryptostats.cs_cb, &t);
+ crpt->crp_callback(crpt);
+ crypto_tstat(&cryptostats.cs_finis, &t);
+ } else
+#endif
+ crpt->crp_callback(crpt);
+ }
+ if (krpt != NULL)
+ krpt->krp_callback(krpt);
+ CRYPTO_RETQ_LOCK();
+ } else {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more returns to process.
+ */
+ msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
+ "crypto_ret_wait", 0);
+ if (cryptoretproc == NULL)
+ break;
+ cryptostats.cs_rets++;
+ }
+ }
+ CRYPTO_RETQ_UNLOCK();
+
+ crypto_finis(&crp_ret_q);
+}
+
+#ifdef DDB
+static void
+db_show_drivers(void)
+{
+ int hid;
+
+ db_printf("%12s %4s %4s %8s %2s %2s\n"
+ , "Device"
+ , "Ses"
+ , "Kops"
+ , "Flags"
+ , "QB"
+ , "KB"
+ );
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+ if (cap->cc_dev == NULL)
+ continue;
+ db_printf("%-12s %4u %4u %08x %2u %2u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , cap->cc_sessions
+ , cap->cc_koperations
+ , cap->cc_flags
+ , cap->cc_qblocked
+ , cap->cc_kqblocked
+ );
+ }
+}
+
+DB_SHOW_COMMAND(crypto, db_show_crypto)
+{
+ struct cryptop *crp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
+ "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
+ "Desc", "Callback");
+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
+ db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
+ , crp->crp_ilen, crp->crp_olen
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_desc
+ , crp->crp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_q)) {
+ db_printf("\n%4s %4s %4s %8s\n",
+ "HID", "Etype", "Flags", "Callback");
+ TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
+ db_printf("%4u %4u %04x %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_callback
+ );
+ }
+ }
+}
+
+DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
+{
+ struct cryptkop *krp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
+ "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+ db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_iparams, krp->krp_oparams
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_q)) {
+ db_printf("%4s %5s %8s %4s %8s\n",
+ "Op", "Status", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
+ db_printf("%4u %5u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ }
+}
+#endif
+
+int crypto_modevent(module_t mod, int type, void *unused);
+
+/*
+ * Initialization code, both for static and dynamic loading.
+ * Note this is not invoked with the usual DECLARE_MODULE
+ * mechanism but instead is listed as a dependency by the
+ * cryptosoft driver. This guarantees proper ordering of
+ * calls on module load/unload.
+ */
+int
+crypto_modevent(module_t mod, int type, void *unused)
+{
+ int error = EINVAL;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = crypto_init();
+ if (error == 0 && bootverbose)
+ printf("crypto: <crypto core>\n");
+ break;
+ case MOD_UNLOAD:
+ /*XXX disallow if active sessions */
+ error = 0;
+ crypto_destroy();
+ return 0;
+ }
+ return error;
+}
+MODULE_VERSION(crypto, 1);
+MODULE_DEPEND(crypto, zlib, 1, 1, 1);
diff --git a/freebsd/sys/opencrypto/cryptodev.c b/freebsd/sys/opencrypto/cryptodev.c
new file mode 100644
index 00000000..fed4f5df
--- /dev/null
+++ b/freebsd/sys/opencrypto/cryptodev.c
@@ -0,0 +1,1178 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
+
+/*-
+ * Copyright (c) 2001 Theo de Raadt
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/local/opt_compat.h>
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/mbuf.h>
+#include <freebsd/sys/lock.h>
+#include <freebsd/sys/mutex.h>
+#include <freebsd/sys/sysctl.h>
+#include <freebsd/sys/file.h>
+#include <freebsd/sys/filedesc.h>
+#include <freebsd/sys/errno.h>
+#include <freebsd/sys/uio.h>
+#include <freebsd/sys/random.h>
+#include <freebsd/sys/conf.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/module.h>
+#include <freebsd/sys/fcntl.h>
+#include <freebsd/sys/bus.h>
+
+#include <freebsd/opencrypto/cryptodev.h>
+#include <freebsd/opencrypto/xform.h>
+
+#ifdef COMPAT_FREEBSD32
+#include <freebsd/sys/mount.h>
+#include <freebsd/compat/freebsd32/freebsd32.h>
+
+struct session_op32 {
+ u_int32_t cipher;
+ u_int32_t mac;
+ u_int32_t keylen;
+ u_int32_t key;
+ int mackeylen;
+ u_int32_t mackey;
+ u_int32_t ses;
+};
+
+struct session2_op32 {
+ u_int32_t cipher;
+ u_int32_t mac;
+ u_int32_t keylen;
+ u_int32_t key;
+ int mackeylen;
+ u_int32_t mackey;
+ u_int32_t ses;
+ int crid;
+ int pad[4];
+};
+
+struct crypt_op32 {
+ u_int32_t ses;
+ u_int16_t op;
+ u_int16_t flags;
+ u_int len;
+ u_int32_t src, dst;
+ u_int32_t mac;
+ u_int32_t iv;
+};
+
+struct crparam32 {
+ u_int32_t crp_p;
+ u_int crp_nbits;
+};
+
+struct crypt_kop32 {
+ u_int crk_op;
+ u_int crk_status;
+ u_short crk_iparams;
+ u_short crk_oparams;
+ u_int crk_crid;
+ struct crparam32 crk_param[CRK_MAXPARAM];
+};
+
+struct cryptotstat32 {
+ struct timespec32 acc;
+ struct timespec32 min;
+ struct timespec32 max;
+ u_int32_t count;
+};
+
+struct cryptostats32 {
+ u_int32_t cs_ops;
+ u_int32_t cs_errs;
+ u_int32_t cs_kops;
+ u_int32_t cs_kerrs;
+ u_int32_t cs_intrs;
+ u_int32_t cs_rets;
+ u_int32_t cs_blocks;
+ u_int32_t cs_kblocks;
+ struct cryptotstat32 cs_invoke;
+ struct cryptotstat32 cs_done;
+ struct cryptotstat32 cs_cb;
+ struct cryptotstat32 cs_finis;
+};
+
+#define CIOCGSESSION32 _IOWR('c', 101, struct session_op32)
+#define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32)
+#define CIOCKEY32 _IOWR('c', 104, struct crypt_kop32)
+#define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32)
+#define CIOCKEY232 _IOWR('c', 107, struct crypt_kop32)
+
+static void
+session_op_from_32(const struct session_op32 *from, struct session_op *to)
+{
+
+ CP(*from, *to, cipher);
+ CP(*from, *to, mac);
+ CP(*from, *to, keylen);
+ PTRIN_CP(*from, *to, key);
+ CP(*from, *to, mackeylen);
+ PTRIN_CP(*from, *to, mackey);
+ CP(*from, *to, ses);
+}
+
+static void
+session2_op_from_32(const struct session2_op32 *from, struct session2_op *to)
+{
+
+ session_op_from_32((const struct session_op32 *)from,
+ (struct session_op *)to);
+ CP(*from, *to, crid);
+}
+
+static void
+session_op_to_32(const struct session_op *from, struct session_op32 *to)
+{
+
+ CP(*from, *to, cipher);
+ CP(*from, *to, mac);
+ CP(*from, *to, keylen);
+ PTROUT_CP(*from, *to, key);
+ CP(*from, *to, mackeylen);
+ PTROUT_CP(*from, *to, mackey);
+ CP(*from, *to, ses);
+}
+
+static void
+session2_op_to_32(const struct session2_op *from, struct session2_op32 *to)
+{
+
+ session_op_to_32((const struct session_op *)from,
+ (struct session_op32 *)to);
+ CP(*from, *to, crid);
+}
+
+static void
+crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to)
+{
+
+ CP(*from, *to, ses);
+ CP(*from, *to, op);
+ CP(*from, *to, flags);
+ CP(*from, *to, len);
+ PTRIN_CP(*from, *to, src);
+ PTRIN_CP(*from, *to, dst);
+ PTRIN_CP(*from, *to, mac);
+ PTRIN_CP(*from, *to, iv);
+}
+
+static void
+crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to)
+{
+
+ CP(*from, *to, ses);
+ CP(*from, *to, op);
+ CP(*from, *to, flags);
+ CP(*from, *to, len);
+ PTROUT_CP(*from, *to, src);
+ PTROUT_CP(*from, *to, dst);
+ PTROUT_CP(*from, *to, mac);
+ PTROUT_CP(*from, *to, iv);
+}
+
+static void
+crparam_from_32(const struct crparam32 *from, struct crparam *to)
+{
+
+ PTRIN_CP(*from, *to, crp_p);
+ CP(*from, *to, crp_nbits);
+}
+
+static void
+crparam_to_32(const struct crparam *from, struct crparam32 *to)
+{
+
+ PTROUT_CP(*from, *to, crp_p);
+ CP(*from, *to, crp_nbits);
+}
+
+static void
+crypt_kop_from_32(const struct crypt_kop32 *from, struct crypt_kop *to)
+{
+ int i;
+
+ CP(*from, *to, crk_op);
+ CP(*from, *to, crk_status);
+ CP(*from, *to, crk_iparams);
+ CP(*from, *to, crk_oparams);
+ CP(*from, *to, crk_crid);
+ for (i = 0; i < CRK_MAXPARAM; i++)
+ crparam_from_32(&from->crk_param[i], &to->crk_param[i]);
+}
+
+static void
+crypt_kop_to_32(const struct crypt_kop *from, struct crypt_kop32 *to)
+{
+ int i;
+
+ CP(*from, *to, crk_op);
+ CP(*from, *to, crk_status);
+ CP(*from, *to, crk_iparams);
+ CP(*from, *to, crk_oparams);
+ CP(*from, *to, crk_crid);
+ for (i = 0; i < CRK_MAXPARAM; i++)
+ crparam_to_32(&from->crk_param[i], &to->crk_param[i]);
+}
+#endif
+
+struct csession {
+ TAILQ_ENTRY(csession) next;
+ u_int64_t sid;
+ u_int32_t ses;
+ struct mtx lock; /* for op submission */
+
+ u_int32_t cipher;
+ struct enc_xform *txform;
+ u_int32_t mac;
+ struct auth_hash *thash;
+
+ caddr_t key;
+ int keylen;
+ u_char tmp_iv[EALG_MAX_BLOCK_LEN];
+
+ caddr_t mackey;
+ int mackeylen;
+
+ struct iovec iovec;
+ struct uio uio;
+ int error;
+};
+
+struct fcrypt {
+ TAILQ_HEAD(csessionlist, csession) csessions;
+ int sesn;
+};
+
+static int cryptof_rw(struct file *fp, struct uio *uio,
+ struct ucred *cred, int flags, struct thread *);
+static int cryptof_truncate(struct file *, off_t, struct ucred *,
+ struct thread *);
+static int cryptof_ioctl(struct file *, u_long, void *,
+ struct ucred *, struct thread *);
+static int cryptof_poll(struct file *, int, struct ucred *, struct thread *);
+static int cryptof_kqfilter(struct file *, struct knote *);
+static int cryptof_stat(struct file *, struct stat *,
+ struct ucred *, struct thread *);
+static int cryptof_close(struct file *, struct thread *);
+
+static struct fileops cryptofops = {
+ .fo_read = cryptof_rw,
+ .fo_write = cryptof_rw,
+ .fo_truncate = cryptof_truncate,
+ .fo_ioctl = cryptof_ioctl,
+ .fo_poll = cryptof_poll,
+ .fo_kqfilter = cryptof_kqfilter,
+ .fo_stat = cryptof_stat,
+ .fo_close = cryptof_close
+};
+
+static struct csession *csefind(struct fcrypt *, u_int);
+static int csedelete(struct fcrypt *, struct csession *);
+static struct csession *cseadd(struct fcrypt *, struct csession *);
+static struct csession *csecreate(struct fcrypt *, u_int64_t, caddr_t,
+ u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *,
+ struct auth_hash *);
+static int csefree(struct csession *);
+
+static int cryptodev_op(struct csession *, struct crypt_op *,
+ struct ucred *, struct thread *td);
+static int cryptodev_key(struct crypt_kop *);
+static int cryptodev_find(struct crypt_find_op *);
+
+static int
+cryptof_rw(
+ struct file *fp,
+ struct uio *uio,
+ struct ucred *active_cred,
+ int flags,
+ struct thread *td)
+{
+
+ return (EIO);
+}
+
+static int
+cryptof_truncate(
+ struct file *fp,
+ off_t length,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+/*
+ * Check a crypto identifier to see if it requested
+ * a software device/driver. This can be done either
+ * by device name/class or through search constraints.
+ */
+static int
+checkforsoftware(int crid)
+{
+ if (crid & CRYPTOCAP_F_SOFTWARE)
+ return EINVAL; /* XXX */
+ if ((crid & CRYPTOCAP_F_HARDWARE) == 0 &&
+ (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0)
+ return EINVAL; /* XXX */
+ return 0;
+}
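+
+/*
+ * For example, a CIOCGSESSION2 caller may pass crid = CRYPTO_FLAG_HARDWARE
+ * to request any hardware driver, or a specific driver id obtained via
+ * CIOCFINDDEV; either way the request is rejected with EINVAL here if it
+ * would end up on a software-only driver.
+ */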
+
+/* ARGSUSED */
+static int
+cryptof_ioctl(
+ struct file *fp,
+ u_long cmd,
+ void *data,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+#define SES2(p) ((struct session2_op *)p)
+ struct cryptoini cria, crie;
+ struct fcrypt *fcr = fp->f_data;
+ struct csession *cse;
+ struct session_op *sop;
+ struct crypt_op *cop;
+ struct enc_xform *txform = NULL;
+ struct auth_hash *thash = NULL;
+ struct crypt_kop *kop;
+ u_int64_t sid;
+ u_int32_t ses;
+ int error = 0, crid;
+#ifdef COMPAT_FREEBSD32
+ struct session2_op sopc;
+ struct crypt_op copc;
+ struct crypt_kop kopc;
+#endif
+
+ switch (cmd) {
+ case CIOCGSESSION:
+ case CIOCGSESSION2:
+#ifdef COMPAT_FREEBSD32
+ case CIOCGSESSION32:
+ case CIOCGSESSION232:
+ if (cmd == CIOCGSESSION32) {
+ session_op_from_32(data, (struct session_op *)&sopc);
+ sop = (struct session_op *)&sopc;
+ } else if (cmd == CIOCGSESSION232) {
+ session2_op_from_32(data, &sopc);
+ sop = (struct session_op *)&sopc;
+ } else
+#endif
+ sop = (struct session_op *)data;
+ switch (sop->cipher) {
+ case 0:
+ break;
+ case CRYPTO_DES_CBC:
+ txform = &enc_xform_des;
+ break;
+ case CRYPTO_3DES_CBC:
+ txform = &enc_xform_3des;
+ break;
+ case CRYPTO_BLF_CBC:
+ txform = &enc_xform_blf;
+ break;
+ case CRYPTO_CAST_CBC:
+ txform = &enc_xform_cast5;
+ break;
+ case CRYPTO_SKIPJACK_CBC:
+ txform = &enc_xform_skipjack;
+ break;
+ case CRYPTO_AES_CBC:
+ txform = &enc_xform_rijndael128;
+ break;
+ case CRYPTO_AES_XTS:
+ txform = &enc_xform_aes_xts;
+ break;
+ case CRYPTO_NULL_CBC:
+ txform = &enc_xform_null;
+ break;
+ case CRYPTO_ARC4:
+ txform = &enc_xform_arc4;
+ break;
+ case CRYPTO_CAMELLIA_CBC:
+ txform = &enc_xform_camellia;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ switch (sop->mac) {
+ case 0:
+ break;
+ case CRYPTO_MD5_HMAC:
+ thash = &auth_hash_hmac_md5;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ thash = &auth_hash_hmac_sha1;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ thash = &auth_hash_hmac_sha2_256;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ thash = &auth_hash_hmac_sha2_384;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ thash = &auth_hash_hmac_sha2_512;
+ break;
+ case CRYPTO_RIPEMD160_HMAC:
+ thash = &auth_hash_hmac_ripemd_160;
+ break;
+#ifdef notdef
+ case CRYPTO_MD5:
+ thash = &auth_hash_md5;
+ break;
+ case CRYPTO_SHA1:
+ thash = &auth_hash_sha1;
+ break;
+#endif
+ case CRYPTO_NULL_HMAC:
+ thash = &auth_hash_null;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ bzero(&crie, sizeof(crie));
+ bzero(&cria, sizeof(cria));
+
+ if (txform) {
+ crie.cri_alg = txform->type;
+ crie.cri_klen = sop->keylen * 8;
+ if (sop->keylen > txform->maxkey ||
+ sop->keylen < txform->minkey) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ crie.cri_key = malloc(crie.cri_klen / 8,
+ M_XDATA, M_WAITOK);
+ if ((error = copyin(sop->key, crie.cri_key,
+ crie.cri_klen / 8)))
+ goto bail;
+ if (thash)
+ crie.cri_next = &cria;
+ }
+
+ if (thash) {
+ cria.cri_alg = thash->type;
+ cria.cri_klen = sop->mackeylen * 8;
+ if (sop->mackeylen != thash->keysize) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ if (cria.cri_klen) {
+ cria.cri_key = malloc(cria.cri_klen / 8,
+ M_XDATA, M_WAITOK);
+ if ((error = copyin(sop->mackey, cria.cri_key,
+ cria.cri_klen / 8)))
+ goto bail;
+ }
+ }
+
+		/* NB: CIOCGSESSION2 has the crid */
+ if (cmd == CIOCGSESSION2
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCGSESSION232
+#endif
+ ) {
+ crid = SES2(sop)->crid;
+ error = checkforsoftware(crid);
+ if (error)
+ goto bail;
+ } else
+ crid = CRYPTOCAP_F_HARDWARE;
+ error = crypto_newsession(&sid, (txform ? &crie : &cria), crid);
+ if (error)
+ goto bail;
+
+ cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
+ cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform,
+ thash);
+
+ if (cse == NULL) {
+ crypto_freesession(sid);
+ error = EINVAL;
+ goto bail;
+ }
+ sop->ses = cse->ses;
+ if (cmd == CIOCGSESSION2
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCGSESSION232
+#endif
+ ) {
+ /* return hardware/driver id */
+ SES2(sop)->crid = CRYPTO_SESID2HID(cse->sid);
+ }
+bail:
+ if (error) {
+ if (crie.cri_key)
+ free(crie.cri_key, M_XDATA);
+ if (cria.cri_key)
+ free(cria.cri_key, M_XDATA);
+ }
+#ifdef COMPAT_FREEBSD32
+ else {
+ if (cmd == CIOCGSESSION32)
+ session_op_to_32(sop, data);
+ else if (cmd == CIOCGSESSION232)
+ session2_op_to_32((struct session2_op *)sop,
+ data);
+ }
+#endif
+ break;
+ case CIOCFSESSION:
+ ses = *(u_int32_t *)data;
+ cse = csefind(fcr, ses);
+ if (cse == NULL)
+ return (EINVAL);
+ csedelete(fcr, cse);
+ error = csefree(cse);
+ break;
+ case CIOCCRYPT:
+#ifdef COMPAT_FREEBSD32
+ case CIOCCRYPT32:
+ if (cmd == CIOCCRYPT32) {
+ cop = &copc;
+ crypt_op_from_32(data, cop);
+ } else
+#endif
+ cop = (struct crypt_op *)data;
+ cse = csefind(fcr, cop->ses);
+ if (cse == NULL)
+ return (EINVAL);
+ error = cryptodev_op(cse, cop, active_cred, td);
+#ifdef COMPAT_FREEBSD32
+ if (error == 0 && cmd == CIOCCRYPT32)
+ crypt_op_to_32(cop, data);
+#endif
+ break;
+ case CIOCKEY:
+ case CIOCKEY2:
+#ifdef COMPAT_FREEBSD32
+ case CIOCKEY32:
+ case CIOCKEY232:
+#endif
+ if (!crypto_userasymcrypto)
+ return (EPERM); /* XXX compat? */
+#ifdef COMPAT_FREEBSD32
+ if (cmd == CIOCKEY32 || cmd == CIOCKEY232) {
+ kop = &kopc;
+ crypt_kop_from_32(data, kop);
+ } else
+#endif
+ kop = (struct crypt_kop *)data;
+ if (cmd == CIOCKEY
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCKEY32
+#endif
+ ) {
+ /* NB: crypto core enforces s/w driver use */
+ kop->crk_crid =
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+ }
+ mtx_lock(&Giant);
+ error = cryptodev_key(kop);
+ mtx_unlock(&Giant);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == CIOCKEY32 || cmd == CIOCKEY232)
+ crypt_kop_to_32(kop, data);
+#endif
+ break;
+ case CIOCASYMFEAT:
+ if (!crypto_userasymcrypto) {
+ /*
+ * NB: if user asym crypto operations are
+ * not permitted return "no algorithms"
+ * so well-behaved applications will just
+			 * fall back to doing them in software.
+ */
+ *(int *)data = 0;
+ } else
+ error = crypto_getfeat((int *)data);
+ break;
+ case CIOCFINDDEV:
+ error = cryptodev_find((struct crypt_find_op *)data);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+#undef SES2
+}
+
+static int cryptodev_cb(void *);
+
+
+static int
+cryptodev_op(
+ struct csession *cse,
+ struct crypt_op *cop,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+ struct cryptop *crp = NULL;
+ struct cryptodesc *crde = NULL, *crda = NULL;
+ int error;
+
+ if (cop->len > 256*1024-4)
+ return (E2BIG);
+
+ if (cse->txform) {
+ if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
+ return (EINVAL);
+ }
+
+ cse->uio.uio_iov = &cse->iovec;
+ cse->uio.uio_iovcnt = 1;
+ cse->uio.uio_offset = 0;
+ cse->uio.uio_resid = cop->len;
+ cse->uio.uio_segflg = UIO_SYSSPACE;
+ cse->uio.uio_rw = UIO_WRITE;
+ cse->uio.uio_td = td;
+ cse->uio.uio_iov[0].iov_len = cop->len;
+ if (cse->thash) {
+ cse->uio.uio_iov[0].iov_len += cse->thash->hashsize;
+ cse->uio.uio_resid += cse->thash->hashsize;
+ }
+ cse->uio.uio_iov[0].iov_base = malloc(cse->uio.uio_iov[0].iov_len,
+ M_XDATA, M_WAITOK);
+
+ crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL));
+ if (crp == NULL) {
+ error = ENOMEM;
+ goto bail;
+ }
+
+ if (cse->thash) {
+ crda = crp->crp_desc;
+ if (cse->txform)
+ crde = crda->crd_next;
+ } else {
+ if (cse->txform)
+ crde = crp->crp_desc;
+ else {
+ error = EINVAL;
+ goto bail;
+ }
+ }
+
+ if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
+ goto bail;
+
+ if (crda) {
+ crda->crd_skip = 0;
+ crda->crd_len = cop->len;
+ crda->crd_inject = cop->len;
+
+ crda->crd_alg = cse->mac;
+ crda->crd_key = cse->mackey;
+ crda->crd_klen = cse->mackeylen * 8;
+ }
+
+ if (crde) {
+ if (cop->op == COP_ENCRYPT)
+ crde->crd_flags |= CRD_F_ENCRYPT;
+ else
+ crde->crd_flags &= ~CRD_F_ENCRYPT;
+ crde->crd_len = cop->len;
+ crde->crd_inject = 0;
+
+ crde->crd_alg = cse->cipher;
+ crde->crd_key = cse->key;
+ crde->crd_klen = cse->keylen * 8;
+ }
+
+ crp->crp_ilen = cop->len;
+ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
+ | (cop->flags & COP_F_BATCH);
+ crp->crp_buf = (caddr_t)&cse->uio;
+ crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
+ crp->crp_sid = cse->sid;
+ crp->crp_opaque = (void *)cse;
+
+ if (cop->iv) {
+ if (crde == NULL) {
+ error = EINVAL;
+ goto bail;
+ }
+ if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ error = EINVAL;
+ goto bail;
+ }
+ if ((error = copyin(cop->iv, cse->tmp_iv, cse->txform->blocksize)))
+ goto bail;
+ bcopy(cse->tmp_iv, crde->crd_iv, cse->txform->blocksize);
+ crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+ crde->crd_skip = 0;
+ } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ crde->crd_skip = 0;
+ } else if (crde) {
+ crde->crd_flags |= CRD_F_IV_PRESENT;
+ crde->crd_skip = cse->txform->blocksize;
+ crde->crd_len -= cse->txform->blocksize;
+ }
+
+ if (cop->mac && crda == NULL) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Let the dispatch run unlocked, then, interlock against the
+ * callback before checking if the operation completed and going
+	 * to sleep.  This ensures drivers don't inherit our lock which
+ * results in a lock order reversal between crypto_dispatch forced
+ * entry and the crypto_done callback into us.
+ */
+ error = crypto_dispatch(crp);
+ mtx_lock(&cse->lock);
+ if (error == 0 && (crp->crp_flags & CRYPTO_F_DONE) == 0)
+ error = msleep(crp, &cse->lock, PWAIT, "crydev", 0);
+ mtx_unlock(&cse->lock);
+
+ if (error != 0)
+ goto bail;
+
+ if (crp->crp_etype != 0) {
+ error = crp->crp_etype;
+ goto bail;
+ }
+
+ if (cse->error) {
+ error = cse->error;
+ goto bail;
+ }
+
+ if (cop->dst &&
+ (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, cop->len)))
+ goto bail;
+
+ if (cop->mac &&
+ (error = copyout((caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
+ cop->mac, cse->thash->hashsize)))
+ goto bail;
+
+bail:
+ if (crp)
+ crypto_freereq(crp);
+ if (cse->uio.uio_iov[0].iov_base)
+ free(cse->uio.uio_iov[0].iov_base, M_XDATA);
+
+ return (error);
+}
+
+static int
+cryptodev_cb(void *op)
+{
+ struct cryptop *crp = (struct cryptop *) op;
+ struct csession *cse = (struct csession *)crp->crp_opaque;
+ int error;
+
+ error = crp->crp_etype;
+ if (error == EAGAIN)
+ error = crypto_dispatch(crp);
+ mtx_lock(&cse->lock);
+ if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
+ cse->error = error;
+ wakeup_one(crp);
+ }
+ mtx_unlock(&cse->lock);
+ return (0);
+}
+
+static int
+cryptodevkey_cb(void *op)
+{
+ struct cryptkop *krp = (struct cryptkop *) op;
+
+ wakeup_one(krp);
+ return (0);
+}
+
+static int
+cryptodev_key(struct crypt_kop *kop)
+{
+ struct cryptkop *krp = NULL;
+ int error = EINVAL;
+ int in, out, size, i;
+
+ if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
+ return (EFBIG);
+ }
+
+ in = kop->crk_iparams;
+ out = kop->crk_oparams;
+ switch (kop->crk_op) {
+ case CRK_MOD_EXP:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_MOD_EXP_CRT:
+ if (in == 6 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_DSA_SIGN:
+ if (in == 5 && out == 2)
+ break;
+ return (EINVAL);
+ case CRK_DSA_VERIFY:
+ if (in == 7 && out == 0)
+ break;
+ return (EINVAL);
+ case CRK_DH_COMPUTE_KEY:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ default:
+ return (EINVAL);
+ }
+
+ krp = (struct cryptkop *)malloc(sizeof *krp, M_XDATA, M_WAITOK|M_ZERO);
+ if (!krp)
+ return (ENOMEM);
+ krp->krp_op = kop->crk_op;
+ krp->krp_status = kop->crk_status;
+ krp->krp_iparams = kop->crk_iparams;
+ krp->krp_oparams = kop->crk_oparams;
+ krp->krp_crid = kop->crk_crid;
+ krp->krp_status = 0;
+ krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+
+ for (i = 0; i < CRK_MAXPARAM; i++) {
+ if (kop->crk_param[i].crp_nbits > 65536)
+ /* Limit is the same as in OpenBSD */
+ goto fail;
+ krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
+ }
+ for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ krp->krp_param[i].crp_p = malloc(size, M_XDATA, M_WAITOK);
+ if (i >= krp->krp_iparams)
+ continue;
+ error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size);
+ if (error)
+ goto fail;
+ }
+
+ error = crypto_kdispatch(krp);
+ if (error)
+ goto fail;
+ error = tsleep(krp, PSOCK, "crydev", 0);
+ if (error) {
+ /* XXX can this happen? if so, how do we recover? */
+ goto fail;
+ }
+
+ kop->crk_crid = krp->krp_crid; /* device that did the work */
+ if (krp->krp_status != 0) {
+ error = krp->krp_status;
+ goto fail;
+ }
+
+ for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size);
+ if (error)
+ goto fail;
+ }
+
+fail:
+ if (krp) {
+ kop->crk_status = krp->krp_status;
+ for (i = 0; i < CRK_MAXPARAM; i++) {
+ if (krp->krp_param[i].crp_p)
+ free(krp->krp_param[i].crp_p, M_XDATA);
+ }
+ free(krp, M_XDATA);
+ }
+ return (error);
+}
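+
+/*
+ * Userland sketch of a CRK_MOD_EXP request as validated above (the
+ * buffers and their bignum packing are hypothetical and omitted): three
+ * input parameters and one output parameter, each described in
+ * crk_param[] by a byte buffer and a bit length:
+ *
+ *	struct crypt_kop kop = { .crk_op = CRK_MOD_EXP,
+ *	    .crk_iparams = 3, .crk_oparams = 1 };
+ *	kop.crk_param[0].crp_p = base;  kop.crk_param[0].crp_nbits = ...;
+ *	...
+ *	ioctl(cfd, CIOCKEY, &kop);		cfd from CRIOGET
+ */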
+
+static int
+cryptodev_find(struct crypt_find_op *find)
+{
+ device_t dev;
+
+ if (find->crid != -1) {
+ dev = crypto_find_device_byhid(find->crid);
+ if (dev == NULL)
+ return (ENOENT);
+ strlcpy(find->name, device_get_nameunit(dev),
+ sizeof(find->name));
+ } else {
+ find->crid = crypto_find_driver(find->name);
+ if (find->crid == -1)
+ return (ENOENT);
+ }
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_poll(
+ struct file *fp,
+ int events,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_kqfilter(struct file *fp, struct knote *kn)
+{
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_stat(
+ struct file *fp,
+ struct stat *sb,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (EOPNOTSUPP);
+}
+
+/* ARGSUSED */
+static int
+cryptof_close(struct file *fp, struct thread *td)
+{
+ struct fcrypt *fcr = fp->f_data;
+ struct csession *cse;
+
+ while ((cse = TAILQ_FIRST(&fcr->csessions))) {
+ TAILQ_REMOVE(&fcr->csessions, cse, next);
+ (void)csefree(cse);
+ }
+ free(fcr, M_XDATA);
+ fp->f_data = NULL;
+ return 0;
+}
+
+static struct csession *
+csefind(struct fcrypt *fcr, u_int ses)
+{
+ struct csession *cse;
+
+ TAILQ_FOREACH(cse, &fcr->csessions, next)
+ if (cse->ses == ses)
+ return (cse);
+ return (NULL);
+}
+
+static int
+csedelete(struct fcrypt *fcr, struct csession *cse_del)
+{
+ struct csession *cse;
+
+ TAILQ_FOREACH(cse, &fcr->csessions, next) {
+ if (cse == cse_del) {
+ TAILQ_REMOVE(&fcr->csessions, cse, next);
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static struct csession *
+cseadd(struct fcrypt *fcr, struct csession *cse)
+{
+ TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
+ cse->ses = fcr->sesn++;
+ return (cse);
+}
+
+struct csession *
+csecreate(struct fcrypt *fcr, u_int64_t sid, caddr_t key, u_int64_t keylen,
+ caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
+ struct enc_xform *txform, struct auth_hash *thash)
+{
+ struct csession *cse;
+
+#ifdef INVARIANTS
+ /* NB: required when mtx_init is built with INVARIANTS */
+ cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO);
+#else
+ cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT);
+#endif
+ if (cse == NULL)
+ return NULL;
+ mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF);
+ cse->key = key;
+ cse->keylen = keylen/8;
+ cse->mackey = mackey;
+ cse->mackeylen = mackeylen/8;
+ cse->sid = sid;
+ cse->cipher = cipher;
+ cse->mac = mac;
+ cse->txform = txform;
+ cse->thash = thash;
+ cseadd(fcr, cse);
+ return (cse);
+}
+
+static int
+csefree(struct csession *cse)
+{
+ int error;
+
+ error = crypto_freesession(cse->sid);
+ mtx_destroy(&cse->lock);
+ if (cse->key)
+ free(cse->key, M_XDATA);
+ if (cse->mackey)
+ free(cse->mackey, M_XDATA);
+ free(cse, M_XDATA);
+ return (error);
+}
+
+static int
+cryptoopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ return (0);
+}
+
+static int
+cryptoread(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ return (EIO);
+}
+
+static int
+cryptowrite(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ return (EIO);
+}
+
+static int
+cryptoioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
+{
+ struct file *f;
+ struct fcrypt *fcr;
+ int fd, error;
+
+ switch (cmd) {
+ case CRIOGET:
+ fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK);
+ TAILQ_INIT(&fcr->csessions);
+ fcr->sesn = 0;
+
+ error = falloc(td, &f, &fd);
+
+ if (error) {
+ free(fcr, M_XDATA);
+ return (error);
+ }
+ /* falloc automatically provides an extra reference to 'f'. */
+ finit(f, FREAD | FWRITE, DTYPE_CRYPTO, fcr, &cryptofops);
+ *(u_int32_t *)data = fd;
+ fdrop(f, td);
+ break;
+ case CRIOFINDDEV:
+ error = cryptodev_find((struct crypt_find_op *)data);
+ break;
+ case CRIOASYMFEAT:
+ error = crypto_getfeat((int *)data);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
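+
+/*
+ * End-to-end userland sketch (illustrative only; error checking omitted,
+ * and key, iv and buf are hypothetical buffers of suitable size, with buf
+ * a multiple of the AES block length).  A process clones a private fd
+ * with CRIOGET, creates a session and submits data through it:
+ *
+ *	int fd = open("/dev/crypto", O_RDWR);
+ *	u_int32_t cfd;
+ *	ioctl(fd, CRIOGET, &cfd);
+ *
+ *	struct session_op sop = { .cipher = CRYPTO_AES_CBC,
+ *	    .keylen = 16, .key = key };
+ *	ioctl(cfd, CIOCGSESSION, &sop);
+ *
+ *	struct crypt_op cop = { .ses = sop.ses, .op = COP_ENCRYPT,
+ *	    .len = sizeof(buf), .src = buf, .dst = buf, .iv = iv };
+ *	ioctl(cfd, CIOCCRYPT, &cop);
+ *
+ *	ioctl(cfd, CIOCFSESSION, &sop.ses);
+ */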
+
+static struct cdevsw crypto_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_NEEDGIANT,
+ .d_open = cryptoopen,
+ .d_read = cryptoread,
+ .d_write = cryptowrite,
+ .d_ioctl = cryptoioctl,
+ .d_name = "crypto",
+};
+static struct cdev *crypto_dev;
+
+/*
+ * Initialization code, both for static and dynamic loading.
+ */
+static int
+cryptodev_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ if (bootverbose)
+ printf("crypto: <crypto device>\n");
+ crypto_dev = make_dev(&crypto_cdevsw, 0,
+ UID_ROOT, GID_WHEEL, 0666,
+ "crypto");
+ return 0;
+ case MOD_UNLOAD:
+ /*XXX disallow if active sessions */
+ destroy_dev(crypto_dev);
+ return 0;
+ }
+ return EINVAL;
+}
+
+static moduledata_t cryptodev_mod = {
+ "cryptodev",
+ cryptodev_modevent,
+ 0
+};
+MODULE_VERSION(cryptodev, 1);
+DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_DEPEND(cryptodev, crypto, 1, 1, 1);
+MODULE_DEPEND(cryptodev, zlib, 1, 1, 1);
diff --git a/freebsd/sys/opencrypto/cryptodev.h b/freebsd/sys/opencrypto/cryptodev.h
new file mode 100644
index 00000000..2ca71802
--- /dev/null
+++ b/freebsd/sys/opencrypto/cryptodev.h
@@ -0,0 +1,432 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef _CRYPTO_CRYPTO_HH_
+#define _CRYPTO_CRYPTO_HH_
+
+#include <freebsd/sys/ioccom.h>
+
+/* Some initial values */
+#define CRYPTO_DRIVERS_INITIAL 4
+#define CRYPTO_SW_SESSIONS 32
+
+/* Hash values */
+#define NULL_HASH_LEN 16
+#define MD5_HASH_LEN 16
+#define SHA1_HASH_LEN 20
+#define RIPEMD160_HASH_LEN 20
+#define SHA2_256_HASH_LEN 32
+#define SHA2_384_HASH_LEN 48
+#define SHA2_512_HASH_LEN 64
+#define MD5_KPDK_HASH_LEN 16
+#define SHA1_KPDK_HASH_LEN 20
+/* Maximum hash algorithm result length */
+#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
+
+/* HMAC values */
+#define NULL_HMAC_BLOCK_LEN 64
+#define MD5_HMAC_BLOCK_LEN 64
+#define SHA1_HMAC_BLOCK_LEN 64
+#define RIPEMD160_HMAC_BLOCK_LEN 64
+#define SHA2_256_HMAC_BLOCK_LEN 64
+#define SHA2_384_HMAC_BLOCK_LEN 128
+#define SHA2_512_HMAC_BLOCK_LEN 128
+/* Maximum HMAC block length */
+#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
+#define HMAC_IPAD_VAL 0x36
+#define HMAC_OPAD_VAL 0x5C
+
+/* Encryption algorithm block sizes */
+#define NULL_BLOCK_LEN 4
+#define DES_BLOCK_LEN 8
+#define DES3_BLOCK_LEN 8
+#define BLOWFISH_BLOCK_LEN 8
+#define SKIPJACK_BLOCK_LEN 8
+#define CAST128_BLOCK_LEN 8
+#define RIJNDAEL128_BLOCK_LEN 16
+#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN 16
+#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
+
+#define CRYPTO_ALGORITHM_MIN 1
+#define CRYPTO_DES_CBC 1
+#define CRYPTO_3DES_CBC 2
+#define CRYPTO_BLF_CBC 3
+#define CRYPTO_CAST_CBC 4
+#define CRYPTO_SKIPJACK_CBC 5
+#define CRYPTO_MD5_HMAC 6
+#define CRYPTO_SHA1_HMAC 7
+#define CRYPTO_RIPEMD160_HMAC 8
+#define CRYPTO_MD5_KPDK 9
+#define CRYPTO_SHA1_KPDK 10
+#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
+#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
+#define CRYPTO_ARC4 12
+#define CRYPTO_MD5 13
+#define CRYPTO_SHA1 14
+#define CRYPTO_NULL_HMAC 15
+#define CRYPTO_NULL_CBC 16
+#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
+#define CRYPTO_SHA2_256_HMAC 18
+#define CRYPTO_SHA2_384_HMAC 19
+#define CRYPTO_SHA2_512_HMAC 20
+#define CRYPTO_CAMELLIA_CBC 21
+#define CRYPTO_AES_XTS 22
+#define CRYPTO_ALGORITHM_MAX 22 /* Keep updated - see below */
+
+/* Algorithm flags */
+#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
+#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
+#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
+
+/*
+ * Crypto driver/device flags.  They can be set in the crid
+ * parameter when creating a session or submitting a key
+ * op to affect the device/driver assigned.  If neither
+ * of these is specified then the crid is assumed to hold
+ * the driver id of an existing (and suitable) device that
+ * must be used to satisfy the request.
+ */
+#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
+#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
+
+/* NB: deprecated */
+struct session_op {
+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+};
+
+struct session2_op {
+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+ int crid; /* driver id + flags (rw) */
+ int pad[4]; /* for future expansion */
+};
+
+struct crypt_op {
+ u_int32_t ses;
+ u_int16_t op; /* i.e. COP_ENCRYPT */
+#define COP_ENCRYPT 1
+#define COP_DECRYPT 2
+ u_int16_t flags;
+#define COP_F_BATCH 0x0008 /* Batch op if possible */
+ u_int len;
+ caddr_t src, dst; /* become iov[] inside kernel */
+ caddr_t mac; /* must be big enough for chosen MAC */
+ caddr_t iv;
+};
+
+/*
+ * Parameters for looking up a crypto driver/device by
+ * device name or by id. The latter are returned for
+ * created sessions (crid) and completed key operations.
+ */
+struct crypt_find_op {
+ int crid; /* driver id + flags */
+ char name[32]; /* device/driver name */
+};
+
+/* bignum parameter, in packed bytes, ... */
+struct crparam {
+ caddr_t crp_p;
+ u_int crp_nbits;
+};
+
+#define CRK_MAXPARAM 8
+
+struct crypt_kop {
+ u_int crk_op; /* ie. CRK_MOD_EXP or other */
+ u_int crk_status; /* return status */
+ u_short crk_iparams; /* # of input parameters */
+ u_short crk_oparams; /* # of output parameters */
+ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
+ struct crparam crk_param[CRK_MAXPARAM];
+};
+#define CRK_ALGORITM_MIN 0
+#define CRK_MOD_EXP 0
+#define CRK_MOD_EXP_CRT 1
+#define CRK_DSA_SIGN 2
+#define CRK_DSA_VERIFY 3
+#define CRK_DH_COMPUTE_KEY 4
+#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
+
+#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
+
+/*
+ * Issued against the open /dev/crypto descriptor to obtain a cloned
+ * descriptor.  Please use F_SETFD against the cloned descriptor.
+ */
+#define CRIOGET _IOWR('c', 100, u_int32_t)
+#define CRIOASYMFEAT CIOCASYMFEAT
+#define CRIOFINDDEV CIOCFINDDEV
+
+/* the following are done against the cloned descriptor */
+#define CIOCGSESSION _IOWR('c', 101, struct session_op)
+#define CIOCFSESSION _IOW('c', 102, u_int32_t)
+#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
+#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
+#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
+#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
+#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
+#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
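
A minimal userland sketch of the flow implied by the ioctls above (illustrative
only, not part of this header): open /dev/crypto, clone a descriptor with
CRIOGET, create a session, run one symmetric operation, and free the session.
The include list, the key/IV/buffer handling, and all error checking are
assumptions left to the caller.

	int fd, cfd;
	struct session_op sop;
	struct crypt_op cop;

	fd = open("/dev/crypto", O_RDWR);
	ioctl(fd, CRIOGET, &cfd);		/* cloned descriptor */
	fcntl(cfd, F_SETFD, 1);			/* close-on-exec, as requested above */

	memset(&sop, 0, sizeof(sop));
	sop.cipher = CRYPTO_AES_CBC;
	sop.keylen = 16;			/* AES-128 */
	sop.key = key;				/* caller-supplied key buffer */
	ioctl(cfd, CIOCGSESSION, &sop);		/* session id returned in sop.ses */

	memset(&cop, 0, sizeof(cop));
	cop.ses = sop.ses;
	cop.op = COP_ENCRYPT;
	cop.len = len;				/* multiple of AES_BLOCK_LEN */
	cop.src = src;
	cop.dst = dst;
	cop.iv = iv;				/* AES_BLOCK_LEN bytes */
	ioctl(cfd, CIOCCRYPT, &cop);

	ioctl(cfd, CIOCFSESSION, &sop.ses);
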
+
+struct cryptotstat {
+ struct timespec acc; /* total accumulated time */
+ struct timespec min; /* min time */
+ struct timespec max; /* max time */
+ u_int32_t count; /* number of observations */
+};
+
+struct cryptostats {
+ u_int32_t cs_ops; /* symmetric crypto ops submitted */
+ u_int32_t cs_errs; /* symmetric crypto ops that failed */
+	u_int32_t	cs_kops;	/* asymmetric/key ops submitted */
+	u_int32_t	cs_kerrs;	/* asymmetric/key ops that failed */
+ u_int32_t cs_intrs; /* crypto swi thread activations */
+ u_int32_t cs_rets; /* crypto return thread activations */
+ u_int32_t cs_blocks; /* symmetric op driver block */
+	u_int32_t	cs_kblocks;	/* asymmetric op driver block */
+ /*
+ * When CRYPTO_TIMING is defined at compile time and the
+ * sysctl debug.crypto is set to 1, the crypto system will
+ * accumulate statistics about how long it takes to process
+ * crypto requests at various points during processing.
+ */
+	struct cryptotstat cs_invoke;	/* crypto_dispatch -> crypto_invoke */
+ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
+ struct cryptotstat cs_cb; /* crypto_done -> callback */
+ struct cryptotstat cs_finis; /* callback -> callback return */
+};
+
+#ifdef _KERNEL
+/* Standard initialization structure beginning */
+struct cryptoini {
+ int cri_alg; /* Algorithm to use */
+ int cri_klen; /* Key length, in bits */
+ int cri_mlen; /* Number of bytes we want from the
+ entire hash. 0 means all. */
+ caddr_t cri_key; /* key to use */
+ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
+ struct cryptoini *cri_next;
+};
+
+/* Describe boundaries of a single crypto operation */
+struct cryptodesc {
+ int crd_skip; /* How many bytes to ignore from start */
+ int crd_len; /* How many bytes to process */
+ int crd_inject; /* Where to inject results, if applicable */
+ int crd_flags;
+
+#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
+#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
+ place, so don't copy. */
+#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
+#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
+#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
+#define CRD_F_COMP 0x0f /* Set when doing compression */
+
+ struct cryptoini CRD_INI; /* Initialization/context data */
+#define crd_iv CRD_INI.cri_iv
+#define crd_key CRD_INI.cri_key
+#define crd_alg CRD_INI.cri_alg
+#define crd_klen CRD_INI.cri_klen
+
+ struct cryptodesc *crd_next;
+};
+
+/* Structure describing complete operation */
+struct cryptop {
+ TAILQ_ENTRY(cryptop) crp_next;
+
+ u_int64_t crp_sid; /* Session ID */
+ int crp_ilen; /* Input data total length */
+ int crp_olen; /* Result total length */
+
+ int crp_etype; /*
+ * Error type (zero means no error).
+ * All error codes except EAGAIN
+ * indicate possible data corruption (as in,
+ * the data have been touched). On all
+ * errors, the crp_sid may have changed
+ * (reset to a new one), so the caller
+ * should always check and use the new
+ * value on future requests.
+ */
+ int crp_flags;
+
+#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
+#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
+#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
+#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
+#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
+#define CRYPTO_F_DONE 0x0020 /* Operation completed */
+#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
+
+ caddr_t crp_buf; /* Data to be processed */
+ caddr_t crp_opaque; /* Opaque pointer, passed along */
+ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
+
+ int (*crp_callback)(struct cryptop *); /* Callback function */
+
+ struct bintime crp_tstamp; /* performance time stamp */
+};
+
+#define CRYPTO_BUF_CONTIG 0x0
+#define CRYPTO_BUF_IOV 0x1
+#define CRYPTO_BUF_MBUF 0x2
+
+#define CRYPTO_OP_DECRYPT 0x0
+#define CRYPTO_OP_ENCRYPT 0x1
+
+/*
+ * Hints passed to process methods.
+ */
+#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
+
+struct cryptkop {
+ TAILQ_ENTRY(cryptkop) krp_next;
+
+ u_int krp_op; /* ie. CRK_MOD_EXP or other */
+ u_int krp_status; /* return status */
+ u_short krp_iparams; /* # of input parameters */
+ u_short krp_oparams; /* # of output parameters */
+ u_int krp_crid; /* desired device, etc. */
+ u_int32_t krp_hid;
+ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
+ int (*krp_callback)(struct cryptkop *);
+};
+
+/*
+ * Session ids are 64 bits. The lower 32 bits contain a "local id" which
+ * is a driver-private session identifier. The upper 32 bits contain a
+ * "hardware id" used by the core crypto code to identify the driver and
+ * a copy of the driver's capabilities that can be used by client code to
+ * optimize operation.
+ */
+#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
+#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
+#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
+
+MALLOC_DECLARE(M_CRYPTO_DATA);
+
+extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
+extern int crypto_freesession(u_int64_t sid);
+#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
+#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
+#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
+extern int32_t crypto_get_driverid(device_t dev, int flags);
+extern int crypto_find_driver(const char *);
+extern device_t crypto_find_device_byhid(int hid);
+extern int crypto_getcaps(int hid);
+extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags);
+extern int crypto_kregister(u_int32_t, int, u_int32_t);
+extern int crypto_unregister(u_int32_t driverid, int alg);
+extern int crypto_unregister_all(u_int32_t driverid);
+extern int crypto_dispatch(struct cryptop *crp);
+extern int crypto_kdispatch(struct cryptkop *);
+#define CRYPTO_SYMQ 0x1
+#define CRYPTO_ASYMQ 0x2
+extern int crypto_unblock(u_int32_t, int);
+extern void crypto_done(struct cryptop *crp);
+extern void crypto_kdone(struct cryptkop *);
+extern int crypto_getfeat(int *);
+
+extern void crypto_freereq(struct cryptop *crp);
+extern struct cryptop *crypto_getreq(int num);
+
+extern int crypto_usercrypto; /* userland may do crypto requests */
+extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
+extern int crypto_devallowsoft; /* only use hardware crypto */
+
+/*
+ * Crypto-related utility routines used mainly by drivers.
+ *
+ * XXX these don't really belong here; but for now they're
+ * kept apart from the rest of the system.
+ */
+struct uio;
+extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
+extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
+extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
+extern int cuio_apply(struct uio *uio, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg);
+
+extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
+ caddr_t in);
+extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
+ caddr_t out);
+extern int crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg);
+#endif /* _KERNEL */
+#endif /* _CRYPTO_CRYPTO_HH_ */
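
For the kernel-side interface declared above, a rough consumer sketch follows
(an illustration under assumptions, not code from this tree): create a session,
fill in the cryptop/cryptodesc pair returned by crypto_getreq(), and hand it to
crypto_dispatch(); completion is reported through the callback with the status
in crp_etype.  The capability flags passed to crypto_newsession(), the AES-CBC
parameters, and the function names are illustrative choices.

	static int
	example_cb(struct cryptop *crp)
	{
		if (crp->crp_etype != 0)
			printf("crypto request failed: %d\n", crp->crp_etype);
		crypto_freereq(crp);
		return (0);
	}

	static int
	example_encrypt(caddr_t buf, int len, caddr_t key, int keybits, u_int8_t *iv)
	{
		struct cryptoini cri;
		struct cryptop *crp;
		struct cryptodesc *crd;
		u_int64_t sid;
		int error;

		bzero(&cri, sizeof(cri));
		cri.cri_alg = CRYPTO_AES_CBC;
		cri.cri_klen = keybits;
		cri.cri_key = key;
		/* Accept any suitable driver, hardware or software. */
		error = crypto_newsession(&sid, &cri,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
		if (error != 0)
			return (error);

		crp = crypto_getreq(1);			/* one descriptor */
		if (crp == NULL)
			return (ENOMEM);
		crp->crp_sid = sid;
		crp->crp_ilen = len;
		crp->crp_flags = 0;			/* contiguous buffer */
		crp->crp_buf = buf;
		crp->crp_callback = example_cb;

		crd = crp->crp_desc;
		crd->crd_skip = 0;
		crd->crd_len = len;			/* multiple of AES_BLOCK_LEN */
		crd->crd_inject = 0;
		crd->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		bcopy(iv, crd->crd_iv, AES_BLOCK_LEN);
		crd->crd_alg = CRYPTO_AES_CBC;
		crd->crd_klen = keybits;
		crd->crd_key = key;

		return (crypto_dispatch(crp));
	}
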
diff --git a/freebsd/sys/opencrypto/cryptosoft.c b/freebsd/sys/opencrypto/cryptosoft.c
new file mode 100644
index 00000000..69b2d656
--- /dev/null
+++ b/freebsd/sys/opencrypto/cryptosoft.c
@@ -0,0 +1,1156 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/mbuf.h>
+#include <freebsd/sys/module.h>
+#include <freebsd/sys/sysctl.h>
+#include <freebsd/sys/errno.h>
+#include <freebsd/sys/random.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/uio.h>
+
+#include <freebsd/crypto/blowfish/blowfish.h>
+#include <freebsd/crypto/sha1.h>
+#include <freebsd/opencrypto/rmd160.h>
+#include <freebsd/opencrypto/cast.h>
+#include <freebsd/opencrypto/skipjack.h>
+#include <freebsd/sys/md5.h>
+
+#include <freebsd/opencrypto/cryptodev.h>
+#include <freebsd/opencrypto/cryptosoft.h>
+#include <freebsd/opencrypto/xform.h>
+
+#include <freebsd/sys/kobj.h>
+#include <freebsd/sys/bus.h>
+#include <freebsd/local/cryptodev_if.h>
+
+static int32_t swcr_id;
+static struct swcr_data **swcr_sessions = NULL;
+static u_int32_t swcr_sesnum;
+
+u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
+u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
+
+static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_freesession(device_t dev, u_int64_t tid);
+
+/*
+ * Apply a symmetric encryption/decryption algorithm.
+ */
+static int
+swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
+ int flags)
+{
+ unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
+ unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
+ struct enc_xform *exf;
+ int i, k, j, blks;
+
+ exf = sw->sw_exf;
+ blks = exf->blocksize;
+
+ /* Check for non-padded data */
+ if (crd->crd_len % blks)
+ return EINVAL;
+
+ /* Initialize the IV */
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* IV explicitly provided ? */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(crd->crd_iv, iv, blks);
+ else
+ arc4rand(iv, blks, 0);
+
+ /* Do we need to write the IV */
+ if (!(crd->crd_flags & CRD_F_IV_PRESENT))
+ crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
+
+ } else { /* Decryption */
+ /* IV explicitly provided ? */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(crd->crd_iv, iv, blks);
+ else {
+ /* Get IV off buf */
+ crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
+ }
+ }
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int error;
+
+ if (sw->sw_kschedule)
+ exf->zerokey(&(sw->sw_kschedule));
+ error = exf->setkey(&sw->sw_kschedule,
+ crd->crd_key, crd->crd_klen / 8);
+ if (error)
+ return (error);
+ }
+
+ ivp = iv;
+
+ /*
+ * xforms that provide a reinit method perform all IV
+ * handling themselves.
+ */
+ if (exf->reinit)
+ exf->reinit(sw->sw_kschedule, iv);
+
+ if (flags & CRYPTO_F_IMBUF) {
+ struct mbuf *m = (struct mbuf *) buf;
+
+ /* Find beginning of data */
+ m = m_getptr(m, crd->crd_skip, &k);
+ if (m == NULL)
+ return EINVAL;
+
+ i = crd->crd_len;
+
+ while (i > 0) {
+ /*
+ * If there's insufficient data at the end of
+ * an mbuf, we have to do some copying.
+ */
+ if (m->m_len < k + blks && m->m_len != k) {
+ m_copydata(m, k, blks, blk);
+
+ /* Actual encryption/decryption */
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ blk);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ blk);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, blk);
+
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ bcopy(blk, iv, blks);
+ ivp = iv;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ if (ivp == iv)
+ bcopy(blk, piv, blks);
+ else
+ bcopy(blk, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, blk);
+
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ /* Copy back decrypted block */
+ m_copyback(m, k, blks, blk);
+
+ /* Advance pointer */
+ m = m_getptr(m, k + blks, &k);
+ if (m == NULL)
+ return EINVAL;
+
+ i -= blks;
+
+ /* Could be done... */
+ if (i == 0)
+ break;
+ }
+
+ /* Skip possibly empty mbufs */
+ if (k == m->m_len) {
+ for (m = m->m_next; m && m->m_len == 0;
+ m = m->m_next)
+ ;
+ k = 0;
+ }
+
+ /* Sanity check */
+ if (m == NULL)
+ return EINVAL;
+
+ /*
+ * Warning: idat may point to garbage here, but
+ * we only use it in the while() loop, only if
+ * there are indeed enough data.
+ */
+ idat = mtod(m, unsigned char *) + k;
+
+ while (m->m_len >= k + blks && i > 0) {
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ idat);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ idat);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, idat);
+ ivp = idat;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block to be used
+ * in next block's processing.
+ */
+ if (ivp == iv)
+ bcopy(idat, piv, blks);
+ else
+ bcopy(idat, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, idat);
+
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ idat += blks;
+ k += blks;
+ i -= blks;
+ }
+ }
+
+ return 0; /* Done with mbuf encryption/decryption */
+ } else if (flags & CRYPTO_F_IOV) {
+ struct uio *uio = (struct uio *) buf;
+ struct iovec *iov;
+
+ /* Find beginning of data */
+ iov = cuio_getptr(uio, crd->crd_skip, &k);
+ if (iov == NULL)
+ return EINVAL;
+
+ i = crd->crd_len;
+
+ while (i > 0) {
+ /*
+ * If there's insufficient data at the end of
+ * an iovec, we have to do some copying.
+ */
+ if (iov->iov_len < k + blks && iov->iov_len != k) {
+ cuio_copydata(uio, k, blks, blk);
+
+ /* Actual encryption/decryption */
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ blk);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ blk);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, blk);
+
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ bcopy(blk, iv, blks);
+ ivp = iv;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ if (ivp == iv)
+ bcopy(blk, piv, blks);
+ else
+ bcopy(blk, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, blk);
+
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ /* Copy back decrypted block */
+ cuio_copyback(uio, k, blks, blk);
+
+ /* Advance pointer */
+ iov = cuio_getptr(uio, k + blks, &k);
+ if (iov == NULL)
+ return EINVAL;
+
+ i -= blks;
+
+ /* Could be done... */
+ if (i == 0)
+ break;
+ }
+
+ /*
+ * Warning: idat may point to garbage here, but
+ * we only use it in the while() loop, only if
+ * there are indeed enough data.
+ */
+ idat = (char *)iov->iov_base + k;
+
+ while (iov->iov_len >= k + blks && i > 0) {
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ idat);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ idat);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, idat);
+ ivp = idat;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block to be used
+ * in next block's processing.
+ */
+ if (ivp == iv)
+ bcopy(idat, piv, blks);
+ else
+ bcopy(idat, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, idat);
+
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ idat += blks;
+ k += blks;
+ i -= blks;
+ }
+ if (k == iov->iov_len) {
+ iov++;
+ k = 0;
+ }
+ }
+
+ return 0; /* Done with iovec encryption/decryption */
+ } else { /* contiguous buffer */
+ if (exf->reinit) {
+ for (i = crd->crd_skip;
+ i < crd->crd_skip + crd->crd_len; i += blks) {
+ if (crd->crd_flags & CRD_F_ENCRYPT)
+ exf->encrypt(sw->sw_kschedule, buf + i);
+ else
+ exf->decrypt(sw->sw_kschedule, buf + i);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ for (i = crd->crd_skip;
+ i < crd->crd_skip + crd->crd_len; i += blks) {
+ /* XOR with the IV/previous block, as appropriate. */
+ if (i == crd->crd_skip)
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= ivp[k];
+ else
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= buf[i + k - blks];
+ exf->encrypt(sw->sw_kschedule, buf + i);
+ }
+ } else { /* Decrypt */
+ /*
+ * Start at the end, so we don't need to keep the encrypted
+ * block as the IV for the next block.
+ */
+ for (i = crd->crd_skip + crd->crd_len - blks;
+ i >= crd->crd_skip; i -= blks) {
+ exf->decrypt(sw->sw_kschedule, buf + i);
+
+ /* XOR with the IV/previous block, as appropriate */
+ if (i == crd->crd_skip)
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= ivp[k];
+ else
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= buf[i + k - blks];
+ }
+ }
+
+ return 0; /* Done with contiguous buffer encryption/decryption */
+ }
+
+ /* Unreachable */
+ return EINVAL;
+}
+
+static void
+swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
+ int klen)
+{
+ int k;
+
+ klen /= 8;
+
+ switch (axf->type) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ for (k = 0; k < klen; k++)
+ key[k] ^= HMAC_IPAD_VAL;
+
+ axf->Init(sw->sw_ictx);
+ axf->Update(sw->sw_ictx, key, klen);
+ axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
+
+ for (k = 0; k < klen; k++)
+ key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+ axf->Init(sw->sw_octx);
+ axf->Update(sw->sw_octx, key, klen);
+ axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
+
+ for (k = 0; k < klen; k++)
+ key[k] ^= HMAC_OPAD_VAL;
+ break;
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ {
+ /*
+ * We need a buffer that can hold an md5 and a sha1 result
+ * just to throw it away.
+ * What we do here is the initial part of:
+ * ALGO( key, keyfill, .. )
+ * adding the key to sw_ictx and abusing Final() to get the
+ * "keyfill" padding.
+ * In addition we abuse the sw_octx to save the key to have
+ * it to be able to append it at the end in swcr_authcompute().
+ */
+ u_char buf[SHA1_RESULTLEN];
+
+ sw->sw_klen = klen;
+ bcopy(key, sw->sw_octx, klen);
+ axf->Init(sw->sw_ictx);
+ axf->Update(sw->sw_ictx, key, klen);
+ axf->Final(buf, sw->sw_ictx);
+ break;
+ }
+ default:
+ printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
+ "doesn't use keys.\n", __func__, axf->type);
+ }
+}
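
For reference, the precomputation above is the standard HMAC construction,

	HMAC(K, m) = H((K XOR opad) || H((K XOR ipad) || m))

where sw_ictx caches the hash state after absorbing the padded (K XOR ipad)
block and sw_octx the state after (K XOR opad), so swcr_authcompute() only has
to hash the message and then the inner digest.
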
+
+/*
+ * Compute keyed-hash authenticator.
+ */
+static int
+swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
+ int flags)
+{
+ unsigned char aalg[HASH_MAX_LEN];
+ struct auth_hash *axf;
+ union authctx ctx;
+ int err;
+
+ if (sw->sw_ictx == 0)
+ return EINVAL;
+
+ axf = sw->sw_axf;
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
+ swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
+
+ bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
+
+ err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
+ (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ if (err)
+ return err;
+
+ switch (sw->sw_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ if (sw->sw_octx == NULL)
+ return EINVAL;
+
+ axf->Final(aalg, &ctx);
+ bcopy(sw->sw_octx, &ctx, axf->ctxsize);
+ axf->Update(&ctx, aalg, axf->hashsize);
+ axf->Final(aalg, &ctx);
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ /* If we have no key saved, return error. */
+ if (sw->sw_octx == NULL)
+ return EINVAL;
+
+ /*
+ * Add the trailing copy of the key (see comment in
+ * swcr_authprepare()) after the data:
+ * ALGO( .., key, algofill )
+ * and let Final() do the proper, natural "algofill"
+ * padding.
+ */
+ axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
+ axf->Final(aalg, &ctx);
+ break;
+
+ case CRYPTO_NULL_HMAC:
+ axf->Final(aalg, &ctx);
+ break;
+ }
+
+ /* Inject the authentication data */
+ crypto_copyback(flags, buf, crd->crd_inject,
+ sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
+ return 0;
+}
+
+/*
+ * Apply a compression/decompression algorithm
+ */
+static int
+swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
+ caddr_t buf, int flags)
+{
+ u_int8_t *data, *out;
+ struct comp_algo *cxf;
+ int adj;
+ u_int32_t result;
+
+ cxf = sw->sw_cxf;
+
+	/*
+	 * We must process the whole buffer of data in one pass, so if
+	 * the data is not all contiguous in the mbuf we copy it into a
+	 * temporary buffer first.
+	 */
+
+ data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
+ if (data == NULL)
+ return (EINVAL);
+ crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
+
+ if (crd->crd_flags & CRD_F_COMP)
+ result = cxf->compress(data, crd->crd_len, &out);
+ else
+ result = cxf->decompress(data, crd->crd_len, &out);
+
+ free(data, M_CRYPTO_DATA);
+ if (result == 0)
+ return EINVAL;
+
+	/*
+	 * Copy back the (de)compressed data.  m_copyback extends the
+	 * mbuf as necessary.
+	 */
+ sw->sw_size = result;
+ /* Check the compressed size when doing compression */
+ if (crd->crd_flags & CRD_F_COMP) {
+ if (result >= crd->crd_len) {
+ /* Compression was useless, we lost time */
+ free(out, M_CRYPTO_DATA);
+ return 0;
+ }
+ }
+
+ crypto_copyback(flags, buf, crd->crd_skip, result, out);
+ if (result < crd->crd_len) {
+ adj = result - crd->crd_len;
+ if (flags & CRYPTO_F_IMBUF) {
+ adj = result - crd->crd_len;
+ m_adj((struct mbuf *)buf, adj);
+ } else if (flags & CRYPTO_F_IOV) {
+ struct uio *uio = (struct uio *)buf;
+ int ind;
+
+ adj = crd->crd_len - result;
+ ind = uio->uio_iovcnt - 1;
+
+ while (adj > 0 && ind >= 0) {
+ if (adj < uio->uio_iov[ind].iov_len) {
+ uio->uio_iov[ind].iov_len -= adj;
+ break;
+ }
+
+ adj -= uio->uio_iov[ind].iov_len;
+ uio->uio_iov[ind].iov_len = 0;
+ ind--;
+ uio->uio_iovcnt--;
+ }
+ }
+ }
+ free(out, M_CRYPTO_DATA);
+ return 0;
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct swcr_data **swd;
+ struct auth_hash *axf;
+ struct enc_xform *txf;
+ struct comp_algo *cxf;
+ u_int32_t i;
+ int error;
+
+ if (sid == NULL || cri == NULL)
+ return EINVAL;
+
+ if (swcr_sessions) {
+ for (i = 1; i < swcr_sesnum; i++)
+ if (swcr_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (swcr_sessions == NULL || i == swcr_sesnum) {
+ if (swcr_sessions == NULL) {
+ i = 1; /* We leave swcr_sessions[0] empty */
+ swcr_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ swcr_sesnum *= 2;
+
+ swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (swd == NULL) {
+ /* Reset session number */
+ if (swcr_sesnum == CRYPTO_SW_SESSIONS)
+ swcr_sesnum = 0;
+ else
+ swcr_sesnum /= 2;
+ return ENOBUFS;
+ }
+
+ /* Copy existing sessions */
+ if (swcr_sessions != NULL) {
+ bcopy(swcr_sessions, swd,
+ (swcr_sesnum / 2) * sizeof(struct swcr_data *));
+ free(swcr_sessions, M_CRYPTO_DATA);
+ }
+
+ swcr_sessions = swd;
+ }
+
+ swd = &swcr_sessions[i];
+ *sid = i;
+
+ while (cri) {
+ *swd = malloc(sizeof(struct swcr_data),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*swd == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ switch (cri->cri_alg) {
+ case CRYPTO_DES_CBC:
+ txf = &enc_xform_des;
+ goto enccommon;
+ case CRYPTO_3DES_CBC:
+ txf = &enc_xform_3des;
+ goto enccommon;
+ case CRYPTO_BLF_CBC:
+ txf = &enc_xform_blf;
+ goto enccommon;
+ case CRYPTO_CAST_CBC:
+ txf = &enc_xform_cast5;
+ goto enccommon;
+ case CRYPTO_SKIPJACK_CBC:
+ txf = &enc_xform_skipjack;
+ goto enccommon;
+ case CRYPTO_RIJNDAEL128_CBC:
+ txf = &enc_xform_rijndael128;
+ goto enccommon;
+ case CRYPTO_AES_XTS:
+ txf = &enc_xform_aes_xts;
+ goto enccommon;
+ case CRYPTO_CAMELLIA_CBC:
+ txf = &enc_xform_camellia;
+ goto enccommon;
+ case CRYPTO_NULL_CBC:
+ txf = &enc_xform_null;
+ goto enccommon;
+ enccommon:
+ if (cri->cri_key != NULL) {
+ error = txf->setkey(&((*swd)->sw_kschedule),
+ cri->cri_key, cri->cri_klen / 8);
+ if (error) {
+ swcr_freesession(dev, i);
+ return error;
+ }
+ }
+ (*swd)->sw_exf = txf;
+ break;
+
+ case CRYPTO_MD5_HMAC:
+ axf = &auth_hash_hmac_md5;
+ goto authcommon;
+ case CRYPTO_SHA1_HMAC:
+ axf = &auth_hash_hmac_sha1;
+ goto authcommon;
+ case CRYPTO_SHA2_256_HMAC:
+ axf = &auth_hash_hmac_sha2_256;
+ goto authcommon;
+ case CRYPTO_SHA2_384_HMAC:
+ axf = &auth_hash_hmac_sha2_384;
+ goto authcommon;
+ case CRYPTO_SHA2_512_HMAC:
+ axf = &auth_hash_hmac_sha2_512;
+ goto authcommon;
+ case CRYPTO_NULL_HMAC:
+ axf = &auth_hash_null;
+ goto authcommon;
+ case CRYPTO_RIPEMD160_HMAC:
+ axf = &auth_hash_hmac_ripemd_160;
+ authcommon:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_octx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ if (cri->cri_key != NULL) {
+ swcr_authprepare(axf, *swd, cri->cri_key,
+ cri->cri_klen);
+ }
+
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ axf = &auth_hash_key_md5;
+ goto auth2common;
+
+ case CRYPTO_SHA1_KPDK:
+ axf = &auth_hash_key_sha1;
+ auth2common:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ (*swd)->sw_octx = malloc(cri->cri_klen / 8,
+ M_CRYPTO_DATA, M_NOWAIT);
+ if ((*swd)->sw_octx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ /* Store the key so we can "append" it to the payload */
+ if (cri->cri_key != NULL) {
+ swcr_authprepare(axf, *swd, cri->cri_key,
+ cri->cri_klen);
+ }
+
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+#ifdef notdef
+ case CRYPTO_MD5:
+ axf = &auth_hash_md5;
+ goto auth3common;
+
+ case CRYPTO_SHA1:
+ axf = &auth_hash_sha1;
+ auth3common:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ axf->Init((*swd)->sw_ictx);
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+#endif
+ case CRYPTO_DEFLATE_COMP:
+ cxf = &comp_algo_deflate;
+ (*swd)->sw_cxf = cxf;
+ break;
+ default:
+ swcr_freesession(dev, i);
+ return EINVAL;
+ }
+
+ (*swd)->sw_alg = cri->cri_alg;
+ cri = cri->cri_next;
+ swd = &((*swd)->sw_next);
+ }
+ return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+swcr_freesession(device_t dev, u_int64_t tid)
+{
+ struct swcr_data *swd;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+ struct comp_algo *cxf;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ if (sid > swcr_sesnum || swcr_sessions == NULL ||
+ swcr_sessions[sid] == NULL)
+ return EINVAL;
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return 0;
+
+ while ((swd = swcr_sessions[sid]) != NULL) {
+ swcr_sessions[sid] = swd->sw_next;
+
+ switch (swd->sw_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_BLF_CBC:
+ case CRYPTO_CAST_CBC:
+ case CRYPTO_SKIPJACK_CBC:
+ case CRYPTO_RIJNDAEL128_CBC:
+ case CRYPTO_AES_XTS:
+ case CRYPTO_CAMELLIA_CBC:
+ case CRYPTO_NULL_CBC:
+ txf = swd->sw_exf;
+
+ if (swd->sw_kschedule)
+ txf->zerokey(&(swd->sw_kschedule));
+ break;
+
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_NULL_HMAC:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx) {
+ bzero(swd->sw_ictx, axf->ctxsize);
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swd->sw_octx) {
+ bzero(swd->sw_octx, axf->ctxsize);
+ free(swd->sw_octx, M_CRYPTO_DATA);
+ }
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx) {
+ bzero(swd->sw_ictx, axf->ctxsize);
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swd->sw_octx) {
+ bzero(swd->sw_octx, swd->sw_klen);
+ free(swd->sw_octx, M_CRYPTO_DATA);
+ }
+ break;
+
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx)
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ break;
+
+ case CRYPTO_DEFLATE_COMP:
+ cxf = swd->sw_cxf;
+ break;
+ }
+
+ free(swd, M_CRYPTO_DATA);
+ }
+ return 0;
+}
+
+/*
+ * Process a software request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct cryptodesc *crd;
+ struct swcr_data *sw;
+ u_int32_t lid;
+
+ /* Sanity check */
+ if (crp == NULL)
+ return EINVAL;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ goto done;
+ }
+
+ /* Go through crypto descriptors, processing as we go */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ /*
+ * Find the crypto context.
+ *
+ * XXX Note that the logic here prevents us from having
+ * XXX the same algorithm multiple times in a session
+ * XXX (or rather, we can but it won't give us the right
+ * XXX results). To do that, we'd need some way of differentiating
+ * XXX between the various instances of an algorithm (so we can
+ * XXX locate the correct crypto context).
+ */
+ for (sw = swcr_sessions[lid];
+ sw && sw->sw_alg != crd->crd_alg;
+ sw = sw->sw_next)
+ ;
+
+ /* No such context ? */
+ if (sw == NULL) {
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+ switch (sw->sw_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_BLF_CBC:
+ case CRYPTO_CAST_CBC:
+ case CRYPTO_SKIPJACK_CBC:
+ case CRYPTO_RIJNDAEL128_CBC:
+ case CRYPTO_AES_XTS:
+ case CRYPTO_CAMELLIA_CBC:
+ if ((crp->crp_etype = swcr_encdec(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ break;
+ case CRYPTO_NULL_CBC:
+ crp->crp_etype = 0;
+ break;
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ if ((crp->crp_etype = swcr_authcompute(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ break;
+
+ case CRYPTO_DEFLATE_COMP:
+ if ((crp->crp_etype = swcr_compdec(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ else
+ crp->crp_olen = (int)sw->sw_size;
+ break;
+
+ default:
+ /* Unknown/unsupported algorithm */
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ crypto_done(crp);
+ return 0;
+}
+
+static void
+swcr_identify(driver_t *drv, device_t parent)
+{
+ /* NB: order 10 is so we get attached after h/w devices */
+ if (device_find_child(parent, "cryptosoft", -1) == NULL &&
+ BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
+ panic("cryptosoft: could not attach");
+}
+
+static int
+swcr_probe(device_t dev)
+{
+ device_set_desc(dev, "software crypto");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+swcr_attach(device_t dev)
+{
+ memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
+ memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
+
+ swcr_id = crypto_get_driverid(dev,
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
+ if (swcr_id < 0) {
+ device_printf(dev, "cannot initialize!");
+ return ENOMEM;
+ }
+#define REGISTER(alg) \
+ crypto_register(swcr_id, alg, 0,0)
+ REGISTER(CRYPTO_DES_CBC);
+ REGISTER(CRYPTO_3DES_CBC);
+ REGISTER(CRYPTO_BLF_CBC);
+ REGISTER(CRYPTO_CAST_CBC);
+ REGISTER(CRYPTO_SKIPJACK_CBC);
+ REGISTER(CRYPTO_NULL_CBC);
+ REGISTER(CRYPTO_MD5_HMAC);
+ REGISTER(CRYPTO_SHA1_HMAC);
+ REGISTER(CRYPTO_SHA2_256_HMAC);
+ REGISTER(CRYPTO_SHA2_384_HMAC);
+ REGISTER(CRYPTO_SHA2_512_HMAC);
+ REGISTER(CRYPTO_RIPEMD160_HMAC);
+ REGISTER(CRYPTO_NULL_HMAC);
+ REGISTER(CRYPTO_MD5_KPDK);
+ REGISTER(CRYPTO_SHA1_KPDK);
+ REGISTER(CRYPTO_MD5);
+ REGISTER(CRYPTO_SHA1);
+ REGISTER(CRYPTO_RIJNDAEL128_CBC);
+ REGISTER(CRYPTO_AES_XTS);
+ REGISTER(CRYPTO_CAMELLIA_CBC);
+ REGISTER(CRYPTO_DEFLATE_COMP);
+#undef REGISTER
+
+ return 0;
+}
+
+static int
+swcr_detach(device_t dev)
+{
+ crypto_unregister_all(swcr_id);
+ if (swcr_sessions != NULL)
+ free(swcr_sessions, M_CRYPTO_DATA);
+ return 0;
+}
+
+static device_method_t swcr_methods[] = {
+ DEVMETHOD(device_identify, swcr_identify),
+ DEVMETHOD(device_probe, swcr_probe),
+ DEVMETHOD(device_attach, swcr_attach),
+ DEVMETHOD(device_detach, swcr_detach),
+
+ DEVMETHOD(cryptodev_newsession, swcr_newsession),
+ DEVMETHOD(cryptodev_freesession,swcr_freesession),
+ DEVMETHOD(cryptodev_process, swcr_process),
+
+ {0, 0},
+};
+
+static driver_t swcr_driver = {
+ "cryptosoft",
+ swcr_methods,
+ 0, /* NB: no softc */
+};
+static devclass_t swcr_devclass;
+
+/*
+ * NB: We explicitly reference the crypto module so we
+ * get the necessary ordering when built as a loadable
+ * module. This is required because we bundle the crypto
+ * module code together with the cryptosoft driver (otherwise
+ * normal module dependencies would handle things).
+ */
+extern int crypto_modevent(struct module *, int, void *);
+/* XXX where to attach */
+DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
+MODULE_VERSION(cryptosoft, 1);
+MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
diff --git a/freebsd/sys/opencrypto/cryptosoft.h b/freebsd/sys/opencrypto/cryptosoft.h
new file mode 100644
index 00000000..363fdbba
--- /dev/null
+++ b/freebsd/sys/opencrypto/cryptosoft.h
@@ -0,0 +1,67 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cryptosoft.h,v 1.10 2002/04/22 23:10:09 deraadt Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _CRYPTO_CRYPTOSOFT_HH_
+#define _CRYPTO_CRYPTOSOFT_HH_
+
+/* Software session entry */
+struct swcr_data {
+ int sw_alg; /* Algorithm */
+ union {
+ struct {
+ u_int8_t *SW_ictx;
+ u_int8_t *SW_octx;
+ u_int16_t SW_klen;
+ u_int16_t SW_mlen;
+ struct auth_hash *SW_axf;
+ } SWCR_AUTH;
+ struct {
+ u_int8_t *SW_kschedule;
+ struct enc_xform *SW_exf;
+ } SWCR_ENC;
+ struct {
+ u_int32_t SW_size;
+ struct comp_algo *SW_cxf;
+ } SWCR_COMP;
+ } SWCR_UN;
+
+#define sw_ictx SWCR_UN.SWCR_AUTH.SW_ictx
+#define sw_octx SWCR_UN.SWCR_AUTH.SW_octx
+#define sw_klen SWCR_UN.SWCR_AUTH.SW_klen
+#define sw_mlen SWCR_UN.SWCR_AUTH.SW_mlen
+#define sw_axf SWCR_UN.SWCR_AUTH.SW_axf
+#define sw_kschedule SWCR_UN.SWCR_ENC.SW_kschedule
+#define sw_exf SWCR_UN.SWCR_ENC.SW_exf
+#define sw_size SWCR_UN.SWCR_COMP.SW_size
+#define sw_cxf SWCR_UN.SWCR_COMP.SW_cxf
+
+ struct swcr_data *sw_next;
+};
+
+#ifdef _KERNEL
+extern u_int8_t hmac_ipad_buffer[];
+extern u_int8_t hmac_opad_buffer[];
+#endif /* _KERNEL */
+
+#endif /* _CRYPTO_CRYPTO_HH_ */
diff --git a/freebsd/sys/opencrypto/deflate.c b/freebsd/sys/opencrypto/deflate.c
new file mode 100644
index 00000000..ddf8239e
--- /dev/null
+++ b/freebsd/sys/opencrypto/deflate.c
@@ -0,0 +1,267 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: deflate.c,v 1.3 2001/08/20 02:45:22 hugh Exp $ */
+
+/*-
+ * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains a wrapper around the deflate algo compression
+ * functions using the zlib library (see net/zlib.{c,h})
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/local/opt_kdtrace.h>
+
+#include <freebsd/sys/types.h>
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/sdt.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/net/zlib.h>
+
+#include <freebsd/opencrypto/cryptodev.h>
+#include <freebsd/opencrypto/deflate.h>
+
+SDT_PROVIDER_DECLARE(opencrypto);
+SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, entry,
+ "int", "u_int32_t");
+SDT_PROBE_DEFINE5(opencrypto, deflate, deflate_global, bad,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE5(opencrypto, deflate, deflate_global, iter,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, return,
+ "int", "u_int32_t");
+
+int window_inflate = -1 * MAX_WBITS;
+int window_deflate = -12;
+
+/*
+ * This function takes a block of data and (de)compresses it using the
+ * deflate algorithm.
+ */
+
+u_int32_t
+deflate_global(data, size, decomp, out)
+ u_int8_t *data;
+ u_int32_t size;
+ int decomp;
+ u_int8_t **out;
+{
+ /* decomp indicates whether we compress (0) or decompress (1) */
+
+ z_stream zbuf;
+ u_int8_t *output;
+ u_int32_t count, result;
+ int error, i;
+ struct deflate_buf *bufh, *bufp;
+
+ SDT_PROBE2(opencrypto, deflate, deflate_global, entry, decomp, size);
+
+ bufh = bufp = NULL;
+ if (!decomp) {
+ i = 1;
+ } else {
+ /*
+ * Choose a buffer with 4x the size of the input buffer
+ * for the size of the output buffer in the case of
+ * decompression. If it's not sufficient, it will need to be
+ * updated while the decompression is going on.
+ */
+ i = 4;
+ }
+ /*
+ * Make sure we do have enough output space. Repeated calls to
+ * deflate need at least 6 bytes of output buffer space to avoid
+ * repeated markers. We will always provide at least 16 bytes.
+ */
+ while ((size * i) < 16)
+ i++;
+
+ bufh = bufp = malloc(sizeof(*bufp) + (size_t)(size * i),
+ M_CRYPTO_DATA, M_NOWAIT);
+ if (bufp == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, 0, __LINE__);
+ goto bad2;
+ }
+ bufp->next = NULL;
+ bufp->size = size * i;
+
+ bzero(&zbuf, sizeof(z_stream));
+ zbuf.zalloc = z_alloc;
+ zbuf.zfree = z_free;
+ zbuf.opaque = Z_NULL;
+ zbuf.next_in = data; /* Data that is going to be processed. */
+ zbuf.avail_in = size; /* Total length of data to be processed. */
+ zbuf.next_out = bufp->data;
+ zbuf.avail_out = bufp->size;
+
+ error = decomp ? inflateInit2(&zbuf, window_inflate) :
+ deflateInit2(&zbuf, Z_DEFAULT_COMPRESSION, Z_METHOD,
+ window_deflate, Z_MEMLEVEL, Z_DEFAULT_STRATEGY);
+ if (error != Z_OK) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__);
+ goto bad;
+ }
+
+ for (;;) {
+ error = decomp ? inflate(&zbuf, Z_SYNC_FLUSH) :
+ deflate(&zbuf, Z_FINISH);
+ if (error != Z_OK && error != Z_STREAM_END) {
+ /*
+ * Unfortunately we are limited to 5 arguments,
+ * thus use two probes.
+ */
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ goto bad;
+ }
+ SDT_PROBE5(opencrypto, deflate, deflate_global, iter,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, iter,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ if (decomp && zbuf.avail_in == 0 && error == Z_STREAM_END) {
+ /* Done. */
+ break;
+ } else if (!decomp && error == Z_STREAM_END) {
+ /* Done. */
+ break;
+ } else if (zbuf.avail_out == 0) {
+ struct deflate_buf *p;
+
+ /* We need more output space for another iteration. */
+ p = malloc(sizeof(*p) + (size_t)(size * i),
+ M_CRYPTO_DATA, M_NOWAIT);
+ if (p == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global,
+ bad, decomp, 0, __LINE__);
+ goto bad;
+ }
+ p->next = NULL;
+ p->size = size * i;
+ bufp->next = p;
+ bufp = p;
+ zbuf.next_out = bufp->data;
+ zbuf.avail_out = bufp->size;
+ } else {
+			/* Unexpected result. */
+ /*
+ * Unfortunately we are limited to 5 arguments,
+ * thus, again, use two probes.
+ */
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ goto bad;
+ }
+ }
+
+ result = count = zbuf.total_out;
+
+ *out = malloc(result, M_CRYPTO_DATA, M_NOWAIT);
+ if (*out == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, 0, __LINE__);
+ goto bad;
+ }
+ if (decomp)
+ inflateEnd(&zbuf);
+ else
+ deflateEnd(&zbuf);
+ output = *out;
+ for (bufp = bufh; bufp != NULL; ) {
+ if (count > bufp->size) {
+ struct deflate_buf *p;
+
+ bcopy(bufp->data, *out, bufp->size);
+ *out += bufp->size;
+ count -= bufp->size;
+ p = bufp;
+ bufp = bufp->next;
+ free(p, M_CRYPTO_DATA);
+ } else {
+ /* It should be the last buffer. */
+ bcopy(bufp->data, *out, count);
+ *out += count;
+ free(bufp, M_CRYPTO_DATA);
+ bufp = NULL;
+ count = 0;
+ }
+ }
+ *out = output;
+ SDT_PROBE2(opencrypto, deflate, deflate_global, return, decomp, result);
+ return result;
+
+bad:
+ if (decomp)
+ inflateEnd(&zbuf);
+ else
+ deflateEnd(&zbuf);
+ for (bufp = bufh; bufp != NULL; ) {
+ struct deflate_buf *p;
+
+ p = bufp;
+ bufp = bufp->next;
+ free(p, M_CRYPTO_DATA);
+ }
+bad2:
+ *out = NULL;
+ return 0;
+}
+
+void *
+z_alloc(nil, type, size)
+ void *nil;
+ u_int type, size;
+{
+ void *ptr;
+
+ ptr = malloc(type *size, M_CRYPTO_DATA, M_NOWAIT);
+ return ptr;
+}
+
+void
+z_free(nil, ptr)
+ void *nil, *ptr;
+{
+ free(ptr, M_CRYPTO_DATA);
+}
diff --git a/freebsd/sys/opencrypto/deflate.h b/freebsd/sys/opencrypto/deflate.h
new file mode 100644
index 00000000..732620b5
--- /dev/null
+++ b/freebsd/sys/opencrypto/deflate.h
@@ -0,0 +1,60 @@
+/* $FreeBSD$ */
+/* $OpenBSD: deflate.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Definition for the wrapper around the deflate compression
+ * algorithm used in /sys/crypto
+ */
+
+#ifndef _CRYPTO_DEFLATE_HH_
+#define _CRYPTO_DEFLATE_HH_
+
+#include <freebsd/net/zlib.h>
+
+#define Z_METHOD 8
+#define Z_MEMLEVEL 8
+#define MINCOMP 2 /* won't be used, but must be defined */
+#define ZBUF 10
+
+u_int32_t deflate_global(u_int8_t *, u_int32_t, int, u_int8_t **);
+void *z_alloc(void *, u_int, u_int);
+void z_free(void *, void *);
+
+/*
+ * We are going to use a combined allocation to hold the metadata
+ * from the struct immediately followed by the real application data.
+ */
+struct deflate_buf {
+ struct deflate_buf *next;
+ uint32_t size;
+ uint8_t data[];
+};
+
+#endif /* _CRYPTO_DEFLATE_HH_ */
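
A small usage sketch for the wrapper declared above (illustrative only; kernel
context and caller-owned buffers are assumed): deflate_global() allocates the
output from M_CRYPTO_DATA and returns its length, or 0 on failure with *out
left NULL, so the caller owns and frees the result.

	u_int8_t *out;
	u_int32_t clen;

	clen = deflate_global(data, len, 0 /* 0 = compress, 1 = decompress */, &out);
	if (clen != 0) {
		/* ... consume out[0 .. clen-1] ... */
		free(out, M_CRYPTO_DATA);
	}
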
diff --git a/freebsd/sys/opencrypto/rmd160.c b/freebsd/sys/opencrypto/rmd160.c
new file mode 100644
index 00000000..99f987ee
--- /dev/null
+++ b/freebsd/sys/opencrypto/rmd160.c
@@ -0,0 +1,369 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: rmd160.c,v 1.3 2001/09/26 21:40:13 markus Exp $ */
+/*-
+ * Copyright (c) 2001 Markus Friedl. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Preneel, Bosselaers, Dobbertin, "The Cryptographic Hash Function RIPEMD-160",
+ * RSA Laboratories, CryptoBytes, Volume 3, Number 2, Autumn 1997,
+ * ftp://ftp.rsasecurity.com/pub/cryptobytes/crypto3n2.pdf
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/endian.h>
+#include <freebsd/opencrypto/rmd160.h>
+
+#define PUT_64BIT_LE(cp, value) do { \
+ (cp)[7] = (value) >> 56; \
+ (cp)[6] = (value) >> 48; \
+ (cp)[5] = (value) >> 40; \
+ (cp)[4] = (value) >> 32; \
+ (cp)[3] = (value) >> 24; \
+ (cp)[2] = (value) >> 16; \
+ (cp)[1] = (value) >> 8; \
+ (cp)[0] = (value); } while (0)
+
+#define PUT_32BIT_LE(cp, value) do { \
+ (cp)[3] = (value) >> 24; \
+ (cp)[2] = (value) >> 16; \
+ (cp)[1] = (value) >> 8; \
+ (cp)[0] = (value); } while (0)
+
+#define H0 0x67452301U
+#define H1 0xEFCDAB89U
+#define H2 0x98BADCFEU
+#define H3 0x10325476U
+#define H4 0xC3D2E1F0U
+
+#define K0 0x00000000U
+#define K1 0x5A827999U
+#define K2 0x6ED9EBA1U
+#define K3 0x8F1BBCDCU
+#define K4 0xA953FD4EU
+
+#define KK0 0x50A28BE6U
+#define KK1 0x5C4DD124U
+#define KK2 0x6D703EF3U
+#define KK3 0x7A6D76E9U
+#define KK4 0x00000000U
+
+/* rotate x left n bits. */
+#define ROL(n, x) (((x) << (n)) | ((x) >> (32-(n))))
+
+#define F0(x, y, z) ((x) ^ (y) ^ (z))
+#define F1(x, y, z) (((x) & (y)) | ((~x) & (z)))
+#define F2(x, y, z) (((x) | (~y)) ^ (z))
+#define F3(x, y, z) (((x) & (z)) | ((y) & (~z)))
+#define F4(x, y, z) ((x) ^ ((y) | (~z)))
+
+#define R(a, b, c, d, e, Fj, Kj, sj, rj) \
+ do { \
+ a = ROL(sj, a + Fj(b,c,d) + X(rj) + Kj) + e; \
+ c = ROL(10, c); \
+ } while(0)
+
+#define X(i) x[i]
+
+static u_char PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+void
+RMD160Init(RMD160_CTX *ctx)
+{
+ ctx->count = 0;
+ ctx->state[0] = H0;
+ ctx->state[1] = H1;
+ ctx->state[2] = H2;
+ ctx->state[3] = H3;
+ ctx->state[4] = H4;
+}
+
+void
+RMD160Update(RMD160_CTX *ctx, const u_char *input, u_int32_t len)
+{
+ u_int32_t have, off, need;
+
+ have = (ctx->count/8) % 64;
+ need = 64 - have;
+ ctx->count += 8 * len;
+ off = 0;
+
+ if (len >= need) {
+ if (have) {
+ memcpy(ctx->buffer + have, input, need);
+ RMD160Transform(ctx->state, ctx->buffer);
+ off = need;
+ have = 0;
+ }
+ /* now the buffer is empty */
+ while (off + 64 <= len) {
+ RMD160Transform(ctx->state, input+off);
+ off += 64;
+ }
+ }
+ if (off < len)
+ memcpy(ctx->buffer + have, input+off, len-off);
+}
+
+void
+RMD160Final(u_char digest[20], RMD160_CTX *ctx)
+{
+ int i;
+ u_char size[8];
+ u_int32_t padlen;
+
+ PUT_64BIT_LE(size, ctx->count);
+
+ /*
+ * pad to 64 byte blocks, at least one byte from PADDING plus 8 bytes
+ * for the size
+ */
+ padlen = 64 - ((ctx->count/8) % 64);
+ if (padlen < 1 + 8)
+ padlen += 64;
+ RMD160Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */
+ RMD160Update(ctx, size, 8);
+
+ if (digest != NULL)
+ for (i = 0; i < 5; i++)
+ PUT_32BIT_LE(digest + i*4, ctx->state[i]);
+
+ memset(ctx, 0, sizeof (*ctx));
+}
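
A quick worked check of the padding arithmetic above: after hashing 60 bytes, (ctx->count/8) % 64 == 60, so padlen starts at 4; since 4 < 1 + 8 it becomes 68, and the two RMD160Update() calls then absorb 60 bytes of PADDING plus the 8-byte length, giving 60 + 60 + 8 = 128 bytes, i.e. exactly two 64-byte blocks.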
+
+void
+RMD160Transform(u_int32_t state[5], const u_char block[64])
+{
+ u_int32_t a, b, c, d, e, aa, bb, cc, dd, ee, t, x[16];
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ memcpy(x, block, 64);
+#else
+ int i;
+
+ for (i = 0; i < 16; i++)
+ x[i] = bswap32(*(const u_int32_t*)(block+i*4));
+#endif
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* Round 1 */
+ R(a, b, c, d, e, F0, K0, 11, 0);
+ R(e, a, b, c, d, F0, K0, 14, 1);
+ R(d, e, a, b, c, F0, K0, 15, 2);
+ R(c, d, e, a, b, F0, K0, 12, 3);
+ R(b, c, d, e, a, F0, K0, 5, 4);
+ R(a, b, c, d, e, F0, K0, 8, 5);
+ R(e, a, b, c, d, F0, K0, 7, 6);
+ R(d, e, a, b, c, F0, K0, 9, 7);
+ R(c, d, e, a, b, F0, K0, 11, 8);
+ R(b, c, d, e, a, F0, K0, 13, 9);
+ R(a, b, c, d, e, F0, K0, 14, 10);
+ R(e, a, b, c, d, F0, K0, 15, 11);
+ R(d, e, a, b, c, F0, K0, 6, 12);
+ R(c, d, e, a, b, F0, K0, 7, 13);
+ R(b, c, d, e, a, F0, K0, 9, 14);
+ R(a, b, c, d, e, F0, K0, 8, 15); /* #15 */
+ /* Round 2 */
+ R(e, a, b, c, d, F1, K1, 7, 7);
+ R(d, e, a, b, c, F1, K1, 6, 4);
+ R(c, d, e, a, b, F1, K1, 8, 13);
+ R(b, c, d, e, a, F1, K1, 13, 1);
+ R(a, b, c, d, e, F1, K1, 11, 10);
+ R(e, a, b, c, d, F1, K1, 9, 6);
+ R(d, e, a, b, c, F1, K1, 7, 15);
+ R(c, d, e, a, b, F1, K1, 15, 3);
+ R(b, c, d, e, a, F1, K1, 7, 12);
+ R(a, b, c, d, e, F1, K1, 12, 0);
+ R(e, a, b, c, d, F1, K1, 15, 9);
+ R(d, e, a, b, c, F1, K1, 9, 5);
+ R(c, d, e, a, b, F1, K1, 11, 2);
+ R(b, c, d, e, a, F1, K1, 7, 14);
+ R(a, b, c, d, e, F1, K1, 13, 11);
+ R(e, a, b, c, d, F1, K1, 12, 8); /* #31 */
+ /* Round 3 */
+ R(d, e, a, b, c, F2, K2, 11, 3);
+ R(c, d, e, a, b, F2, K2, 13, 10);
+ R(b, c, d, e, a, F2, K2, 6, 14);
+ R(a, b, c, d, e, F2, K2, 7, 4);
+ R(e, a, b, c, d, F2, K2, 14, 9);
+ R(d, e, a, b, c, F2, K2, 9, 15);
+ R(c, d, e, a, b, F2, K2, 13, 8);
+ R(b, c, d, e, a, F2, K2, 15, 1);
+ R(a, b, c, d, e, F2, K2, 14, 2);
+ R(e, a, b, c, d, F2, K2, 8, 7);
+ R(d, e, a, b, c, F2, K2, 13, 0);
+ R(c, d, e, a, b, F2, K2, 6, 6);
+ R(b, c, d, e, a, F2, K2, 5, 13);
+ R(a, b, c, d, e, F2, K2, 12, 11);
+ R(e, a, b, c, d, F2, K2, 7, 5);
+ R(d, e, a, b, c, F2, K2, 5, 12); /* #47 */
+ /* Round 4 */
+ R(c, d, e, a, b, F3, K3, 11, 1);
+ R(b, c, d, e, a, F3, K3, 12, 9);
+ R(a, b, c, d, e, F3, K3, 14, 11);
+ R(e, a, b, c, d, F3, K3, 15, 10);
+ R(d, e, a, b, c, F3, K3, 14, 0);
+ R(c, d, e, a, b, F3, K3, 15, 8);
+ R(b, c, d, e, a, F3, K3, 9, 12);
+ R(a, b, c, d, e, F3, K3, 8, 4);
+ R(e, a, b, c, d, F3, K3, 9, 13);
+ R(d, e, a, b, c, F3, K3, 14, 3);
+ R(c, d, e, a, b, F3, K3, 5, 7);
+ R(b, c, d, e, a, F3, K3, 6, 15);
+ R(a, b, c, d, e, F3, K3, 8, 14);
+ R(e, a, b, c, d, F3, K3, 6, 5);
+ R(d, e, a, b, c, F3, K3, 5, 6);
+ R(c, d, e, a, b, F3, K3, 12, 2); /* #63 */
+ /* Round 5 */
+ R(b, c, d, e, a, F4, K4, 9, 4);
+ R(a, b, c, d, e, F4, K4, 15, 0);
+ R(e, a, b, c, d, F4, K4, 5, 5);
+ R(d, e, a, b, c, F4, K4, 11, 9);
+ R(c, d, e, a, b, F4, K4, 6, 7);
+ R(b, c, d, e, a, F4, K4, 8, 12);
+ R(a, b, c, d, e, F4, K4, 13, 2);
+ R(e, a, b, c, d, F4, K4, 12, 10);
+ R(d, e, a, b, c, F4, K4, 5, 14);
+ R(c, d, e, a, b, F4, K4, 12, 1);
+ R(b, c, d, e, a, F4, K4, 13, 3);
+ R(a, b, c, d, e, F4, K4, 14, 8);
+ R(e, a, b, c, d, F4, K4, 11, 11);
+ R(d, e, a, b, c, F4, K4, 8, 6);
+ R(c, d, e, a, b, F4, K4, 5, 15);
+ R(b, c, d, e, a, F4, K4, 6, 13); /* #79 */
+
+ aa = a ; bb = b; cc = c; dd = d; ee = e;
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* Parallel round 1 */
+ R(a, b, c, d, e, F4, KK0, 8, 5);
+ R(e, a, b, c, d, F4, KK0, 9, 14);
+ R(d, e, a, b, c, F4, KK0, 9, 7);
+ R(c, d, e, a, b, F4, KK0, 11, 0);
+ R(b, c, d, e, a, F4, KK0, 13, 9);
+ R(a, b, c, d, e, F4, KK0, 15, 2);
+ R(e, a, b, c, d, F4, KK0, 15, 11);
+ R(d, e, a, b, c, F4, KK0, 5, 4);
+ R(c, d, e, a, b, F4, KK0, 7, 13);
+ R(b, c, d, e, a, F4, KK0, 7, 6);
+ R(a, b, c, d, e, F4, KK0, 8, 15);
+ R(e, a, b, c, d, F4, KK0, 11, 8);
+ R(d, e, a, b, c, F4, KK0, 14, 1);
+ R(c, d, e, a, b, F4, KK0, 14, 10);
+ R(b, c, d, e, a, F4, KK0, 12, 3);
+ R(a, b, c, d, e, F4, KK0, 6, 12); /* #15 */
+ /* Parallel round 2 */
+ R(e, a, b, c, d, F3, KK1, 9, 6);
+ R(d, e, a, b, c, F3, KK1, 13, 11);
+ R(c, d, e, a, b, F3, KK1, 15, 3);
+ R(b, c, d, e, a, F3, KK1, 7, 7);
+ R(a, b, c, d, e, F3, KK1, 12, 0);
+ R(e, a, b, c, d, F3, KK1, 8, 13);
+ R(d, e, a, b, c, F3, KK1, 9, 5);
+ R(c, d, e, a, b, F3, KK1, 11, 10);
+ R(b, c, d, e, a, F3, KK1, 7, 14);
+ R(a, b, c, d, e, F3, KK1, 7, 15);
+ R(e, a, b, c, d, F3, KK1, 12, 8);
+ R(d, e, a, b, c, F3, KK1, 7, 12);
+ R(c, d, e, a, b, F3, KK1, 6, 4);
+ R(b, c, d, e, a, F3, KK1, 15, 9);
+ R(a, b, c, d, e, F3, KK1, 13, 1);
+ R(e, a, b, c, d, F3, KK1, 11, 2); /* #31 */
+ /* Parallel round 3 */
+ R(d, e, a, b, c, F2, KK2, 9, 15);
+ R(c, d, e, a, b, F2, KK2, 7, 5);
+ R(b, c, d, e, a, F2, KK2, 15, 1);
+ R(a, b, c, d, e, F2, KK2, 11, 3);
+ R(e, a, b, c, d, F2, KK2, 8, 7);
+ R(d, e, a, b, c, F2, KK2, 6, 14);
+ R(c, d, e, a, b, F2, KK2, 6, 6);
+ R(b, c, d, e, a, F2, KK2, 14, 9);
+ R(a, b, c, d, e, F2, KK2, 12, 11);
+ R(e, a, b, c, d, F2, KK2, 13, 8);
+ R(d, e, a, b, c, F2, KK2, 5, 12);
+ R(c, d, e, a, b, F2, KK2, 14, 2);
+ R(b, c, d, e, a, F2, KK2, 13, 10);
+ R(a, b, c, d, e, F2, KK2, 13, 0);
+ R(e, a, b, c, d, F2, KK2, 7, 4);
+ R(d, e, a, b, c, F2, KK2, 5, 13); /* #47 */
+ /* Parallel round 4 */
+ R(c, d, e, a, b, F1, KK3, 15, 8);
+ R(b, c, d, e, a, F1, KK3, 5, 6);
+ R(a, b, c, d, e, F1, KK3, 8, 4);
+ R(e, a, b, c, d, F1, KK3, 11, 1);
+ R(d, e, a, b, c, F1, KK3, 14, 3);
+ R(c, d, e, a, b, F1, KK3, 14, 11);
+ R(b, c, d, e, a, F1, KK3, 6, 15);
+ R(a, b, c, d, e, F1, KK3, 14, 0);
+ R(e, a, b, c, d, F1, KK3, 6, 5);
+ R(d, e, a, b, c, F1, KK3, 9, 12);
+ R(c, d, e, a, b, F1, KK3, 12, 2);
+ R(b, c, d, e, a, F1, KK3, 9, 13);
+ R(a, b, c, d, e, F1, KK3, 12, 9);
+ R(e, a, b, c, d, F1, KK3, 5, 7);
+ R(d, e, a, b, c, F1, KK3, 15, 10);
+ R(c, d, e, a, b, F1, KK3, 8, 14); /* #63 */
+ /* Parallel round 5 */
+ R(b, c, d, e, a, F0, KK4, 8, 12);
+ R(a, b, c, d, e, F0, KK4, 5, 15);
+ R(e, a, b, c, d, F0, KK4, 12, 10);
+ R(d, e, a, b, c, F0, KK4, 9, 4);
+ R(c, d, e, a, b, F0, KK4, 12, 1);
+ R(b, c, d, e, a, F0, KK4, 5, 5);
+ R(a, b, c, d, e, F0, KK4, 14, 8);
+ R(e, a, b, c, d, F0, KK4, 6, 7);
+ R(d, e, a, b, c, F0, KK4, 8, 6);
+ R(c, d, e, a, b, F0, KK4, 13, 2);
+ R(b, c, d, e, a, F0, KK4, 6, 13);
+ R(a, b, c, d, e, F0, KK4, 5, 14);
+ R(e, a, b, c, d, F0, KK4, 15, 0);
+ R(d, e, a, b, c, F0, KK4, 13, 3);
+ R(c, d, e, a, b, F0, KK4, 11, 9);
+ R(b, c, d, e, a, F0, KK4, 11, 11); /* #79 */
+
+ t = state[1] + cc + d;
+ state[1] = state[2] + dd + e;
+ state[2] = state[3] + ee + a;
+ state[3] = state[4] + aa + b;
+ state[4] = state[0] + bb + c;
+ state[0] = t;
+}
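
For orientation, the sketch below shows how the RMD160Init/RMD160Update/RMD160Final interface (declared in rmd160.h, the next file in this changeset) is typically driven. It is a minimal, hypothetical example; the helper name rmd160_digest_buf is invented for illustration.

/*
 * Hypothetical usage sketch (illustration only): hash a contiguous
 * buffer with the RMD160 API declared in rmd160.h.
 */
#include <freebsd/opencrypto/rmd160.h>

static void
rmd160_digest_buf(const u_char *buf, u_int32_t len, u_char digest[20])
{
	RMD160_CTX ctx;

	RMD160Init(&ctx);		/* load chaining values H0..H4 */
	RMD160Update(&ctx, buf, len);	/* absorb data, 64 bytes per transform */
	RMD160Final(digest, &ctx);	/* pad, append bit count, emit 20 bytes */
}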
diff --git a/freebsd/sys/opencrypto/rmd160.h b/freebsd/sys/opencrypto/rmd160.h
new file mode 100644
index 00000000..60dce642
--- /dev/null
+++ b/freebsd/sys/opencrypto/rmd160.h
@@ -0,0 +1,41 @@
+/* $FreeBSD$ */
+/* $OpenBSD: rmd160.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+/*-
+ * Copyright (c) 2001 Markus Friedl. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RMD160_H
+#define _RMD160_H
+
+/* RMD160 context. */
+typedef struct RMD160Context {
+ u_int32_t state[5]; /* state */
+ u_int64_t count; /* number of bits, modulo 2^64 */
+ u_char buffer[64]; /* input buffer */
+} RMD160_CTX;
+
+void RMD160Init(RMD160_CTX *);
+void RMD160Transform(u_int32_t [5], const u_char [64]);
+void RMD160Update(RMD160_CTX *, const u_char *, u_int32_t);
+void RMD160Final(u_char [20], RMD160_CTX *);
+
+#endif /* _RMD160_H */
diff --git a/freebsd/sys/opencrypto/skipjack.c b/freebsd/sys/opencrypto/skipjack.c
new file mode 100644
index 00000000..ae06dc1b
--- /dev/null
+++ b/freebsd/sys/opencrypto/skipjack.c
@@ -0,0 +1,262 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: skipjack.c,v 1.3 2001/05/05 00:31:34 angelos Exp $ */
+/*-
+ * Further optimized test implementation of SKIPJACK algorithm
+ * Mark Tillotson <markt@chaos.org.uk>, 25 June 98
+ * Optimizations suit RISC (lots of registers) machine best.
+ *
+ * based on unoptimized implementation of
+ * Panu Rissanen <bande@lut.fi> 960624
+ *
+ * SKIPJACK and KEA Algorithm Specifications
+ * Version 2.0
+ * 29 May 1998
+*/
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/param.h>
+
+#include <freebsd/opencrypto/skipjack.h>
+
+static const u_int8_t ftable[0x100] =
+{
+ 0xa3, 0xd7, 0x09, 0x83, 0xf8, 0x48, 0xf6, 0xf4,
+ 0xb3, 0x21, 0x15, 0x78, 0x99, 0xb1, 0xaf, 0xf9,
+ 0xe7, 0x2d, 0x4d, 0x8a, 0xce, 0x4c, 0xca, 0x2e,
+ 0x52, 0x95, 0xd9, 0x1e, 0x4e, 0x38, 0x44, 0x28,
+ 0x0a, 0xdf, 0x02, 0xa0, 0x17, 0xf1, 0x60, 0x68,
+ 0x12, 0xb7, 0x7a, 0xc3, 0xe9, 0xfa, 0x3d, 0x53,
+ 0x96, 0x84, 0x6b, 0xba, 0xf2, 0x63, 0x9a, 0x19,
+ 0x7c, 0xae, 0xe5, 0xf5, 0xf7, 0x16, 0x6a, 0xa2,
+ 0x39, 0xb6, 0x7b, 0x0f, 0xc1, 0x93, 0x81, 0x1b,
+ 0xee, 0xb4, 0x1a, 0xea, 0xd0, 0x91, 0x2f, 0xb8,
+ 0x55, 0xb9, 0xda, 0x85, 0x3f, 0x41, 0xbf, 0xe0,
+ 0x5a, 0x58, 0x80, 0x5f, 0x66, 0x0b, 0xd8, 0x90,
+ 0x35, 0xd5, 0xc0, 0xa7, 0x33, 0x06, 0x65, 0x69,
+ 0x45, 0x00, 0x94, 0x56, 0x6d, 0x98, 0x9b, 0x76,
+ 0x97, 0xfc, 0xb2, 0xc2, 0xb0, 0xfe, 0xdb, 0x20,
+ 0xe1, 0xeb, 0xd6, 0xe4, 0xdd, 0x47, 0x4a, 0x1d,
+ 0x42, 0xed, 0x9e, 0x6e, 0x49, 0x3c, 0xcd, 0x43,
+ 0x27, 0xd2, 0x07, 0xd4, 0xde, 0xc7, 0x67, 0x18,
+ 0x89, 0xcb, 0x30, 0x1f, 0x8d, 0xc6, 0x8f, 0xaa,
+ 0xc8, 0x74, 0xdc, 0xc9, 0x5d, 0x5c, 0x31, 0xa4,
+ 0x70, 0x88, 0x61, 0x2c, 0x9f, 0x0d, 0x2b, 0x87,
+ 0x50, 0x82, 0x54, 0x64, 0x26, 0x7d, 0x03, 0x40,
+ 0x34, 0x4b, 0x1c, 0x73, 0xd1, 0xc4, 0xfd, 0x3b,
+ 0xcc, 0xfb, 0x7f, 0xab, 0xe6, 0x3e, 0x5b, 0xa5,
+ 0xad, 0x04, 0x23, 0x9c, 0x14, 0x51, 0x22, 0xf0,
+ 0x29, 0x79, 0x71, 0x7e, 0xff, 0x8c, 0x0e, 0xe2,
+ 0x0c, 0xef, 0xbc, 0x72, 0x75, 0x6f, 0x37, 0xa1,
+ 0xec, 0xd3, 0x8e, 0x62, 0x8b, 0x86, 0x10, 0xe8,
+ 0x08, 0x77, 0x11, 0xbe, 0x92, 0x4f, 0x24, 0xc5,
+ 0x32, 0x36, 0x9d, 0xcf, 0xf3, 0xa6, 0xbb, 0xac,
+ 0x5e, 0x6c, 0xa9, 0x13, 0x57, 0x25, 0xb5, 0xe3,
+ 0xbd, 0xa8, 0x3a, 0x01, 0x05, 0x59, 0x2a, 0x46
+};
+
+/*
+ * For each key byte generate a table to represent the function
+ * ftable [in ^ keybyte]
+ *
+ * These tables are used to save an XOR in each stage of the G-function;
+ * the tables are hopefully pointed to by register-allocated variables
+ * k0, k1..k9.
+ */
+
+void
+subkey_table_gen (u_int8_t *key, u_int8_t **key_tables)
+{
+ int i, k;
+
+ for (k = 0; k < 10; k++) {
+ u_int8_t key_byte = key [k];
+ u_int8_t * table = key_tables[k];
+ for (i = 0; i < 0x100; i++)
+ table [i] = ftable [i ^ key_byte];
+ }
+}
+
+
+#define g(k0, k1, k2, k3, ih, il, oh, ol) \
+{ \
+ oh = k##k0 [il] ^ ih; \
+ ol = k##k1 [oh] ^ il; \
+ oh = k##k2 [ol] ^ oh; \
+ ol = k##k3 [oh] ^ ol; \
+}
+
+#define g0(ih, il, oh, ol) g(0, 1, 2, 3, ih, il, oh, ol)
+#define g4(ih, il, oh, ol) g(4, 5, 6, 7, ih, il, oh, ol)
+#define g8(ih, il, oh, ol) g(8, 9, 0, 1, ih, il, oh, ol)
+#define g2(ih, il, oh, ol) g(2, 3, 4, 5, ih, il, oh, ol)
+#define g6(ih, il, oh, ol) g(6, 7, 8, 9, ih, il, oh, ol)
+
+
+#define g_inv(k0, k1, k2, k3, ih, il, oh, ol) \
+{ \
+ ol = k##k3 [ih] ^ il; \
+ oh = k##k2 [ol] ^ ih; \
+ ol = k##k1 [oh] ^ ol; \
+ oh = k##k0 [ol] ^ oh; \
+}
+
+
+#define g0_inv(ih, il, oh, ol) g_inv(0, 1, 2, 3, ih, il, oh, ol)
+#define g4_inv(ih, il, oh, ol) g_inv(4, 5, 6, 7, ih, il, oh, ol)
+#define g8_inv(ih, il, oh, ol) g_inv(8, 9, 0, 1, ih, il, oh, ol)
+#define g2_inv(ih, il, oh, ol) g_inv(2, 3, 4, 5, ih, il, oh, ol)
+#define g6_inv(ih, il, oh, ol) g_inv(6, 7, 8, 9, ih, il, oh, ol)
+
+/* optimized version of Skipjack algorithm
+ *
+ * the appropriate g-function is inlined for each round
+ *
+ * the data movement is minimized by rotating the names of the
+ * variables w1..w4, not their contents (saves 3 moves per round)
+ *
+ * the loops are completely unrolled (needed to staticize choice of g)
+ *
+ * compiles to about 470 instructions on a SPARC (gcc -O),
+ * which is about 58 instructions per byte, 14 per round.
+ * gcc seems to leave in some unnecessary and-with-0xFF operations,
+ * but only in the latter part of the functions.  Perhaps it
+ * runs out of resources to properly optimize a long inlined function?
+ * In theory it should take about 11 instructions per round, not 14.
+ */
+
+void
+skipjack_forwards(u_int8_t *plain, u_int8_t *cipher, u_int8_t **key_tables)
+{
+ u_int8_t wh1 = plain[0]; u_int8_t wl1 = plain[1];
+ u_int8_t wh2 = plain[2]; u_int8_t wl2 = plain[3];
+ u_int8_t wh3 = plain[4]; u_int8_t wl3 = plain[5];
+ u_int8_t wh4 = plain[6]; u_int8_t wl4 = plain[7];
+
+ u_int8_t * k0 = key_tables [0];
+ u_int8_t * k1 = key_tables [1];
+ u_int8_t * k2 = key_tables [2];
+ u_int8_t * k3 = key_tables [3];
+ u_int8_t * k4 = key_tables [4];
+ u_int8_t * k5 = key_tables [5];
+ u_int8_t * k6 = key_tables [6];
+ u_int8_t * k7 = key_tables [7];
+ u_int8_t * k8 = key_tables [8];
+ u_int8_t * k9 = key_tables [9];
+
+ /* first 8 rounds */
+ g0 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 1; wh4 ^= wh1;
+ g4 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 2; wh3 ^= wh4;
+ g8 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 3; wh2 ^= wh3;
+ g2 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 4; wh1 ^= wh2;
+ g6 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 5; wh4 ^= wh1;
+ g0 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 6; wh3 ^= wh4;
+ g4 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 7; wh2 ^= wh3;
+ g8 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 8; wh1 ^= wh2;
+
+ /* second 8 rounds */
+ wh2 ^= wh1; wl2 ^= wl1 ^ 9 ; g2 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 10; g6 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 11; g0 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 12; g4 (wh2,wl2, wh2,wl2);
+ wh2 ^= wh1; wl2 ^= wl1 ^ 13; g8 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 14; g2 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 15; g6 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 16; g0 (wh2,wl2, wh2,wl2);
+
+ /* third 8 rounds */
+ g4 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 17; wh4 ^= wh1;
+ g8 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 18; wh3 ^= wh4;
+ g2 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 19; wh2 ^= wh3;
+ g6 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 20; wh1 ^= wh2;
+ g0 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 21; wh4 ^= wh1;
+ g4 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 22; wh3 ^= wh4;
+ g8 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 23; wh2 ^= wh3;
+ g2 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 24; wh1 ^= wh2;
+
+ /* last 8 rounds */
+ wh2 ^= wh1; wl2 ^= wl1 ^ 25; g6 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 26; g0 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 27; g4 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 28; g8 (wh2,wl2, wh2,wl2);
+ wh2 ^= wh1; wl2 ^= wl1 ^ 29; g2 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 30; g6 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 31; g0 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 32; g4 (wh2,wl2, wh2,wl2);
+
+ /* pack into byte vector */
+ cipher [0] = wh1; cipher [1] = wl1;
+ cipher [2] = wh2; cipher [3] = wl2;
+ cipher [4] = wh3; cipher [5] = wl3;
+ cipher [6] = wh4; cipher [7] = wl4;
+}
+
+
+void
+skipjack_backwards (u_int8_t *cipher, u_int8_t *plain, u_int8_t **key_tables)
+{
+ /* setup 4 16-bit portions */
+ u_int8_t wh1 = cipher[0]; u_int8_t wl1 = cipher[1];
+ u_int8_t wh2 = cipher[2]; u_int8_t wl2 = cipher[3];
+ u_int8_t wh3 = cipher[4]; u_int8_t wl3 = cipher[5];
+ u_int8_t wh4 = cipher[6]; u_int8_t wl4 = cipher[7];
+
+ u_int8_t * k0 = key_tables [0];
+ u_int8_t * k1 = key_tables [1];
+ u_int8_t * k2 = key_tables [2];
+ u_int8_t * k3 = key_tables [3];
+ u_int8_t * k4 = key_tables [4];
+ u_int8_t * k5 = key_tables [5];
+ u_int8_t * k6 = key_tables [6];
+ u_int8_t * k7 = key_tables [7];
+ u_int8_t * k8 = key_tables [8];
+ u_int8_t * k9 = key_tables [9];
+
+ /* first 8 rounds */
+ g4_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 32; wh3 ^= wh2;
+ g0_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 31; wh4 ^= wh3;
+ g6_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 30; wh1 ^= wh4;
+ g2_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 29; wh2 ^= wh1;
+ g8_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 28; wh3 ^= wh2;
+ g4_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 27; wh4 ^= wh3;
+ g0_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 26; wh1 ^= wh4;
+ g6_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 25; wh2 ^= wh1;
+
+ /* second 8 rounds */
+ wh1 ^= wh2; wl1 ^= wl2 ^ 24; g2_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 23; g8_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 22; g4_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 21; g0_inv (wh1,wl1, wh1,wl1);
+ wh1 ^= wh2; wl1 ^= wl2 ^ 20; g6_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 19; g2_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 18; g8_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 17; g4_inv (wh1,wl1, wh1,wl1);
+
+ /* third 8 rounds */
+ g0_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 16; wh3 ^= wh2;
+ g6_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 15; wh4 ^= wh3;
+ g2_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 14; wh1 ^= wh4;
+ g8_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 13; wh2 ^= wh1;
+ g4_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 12; wh3 ^= wh2;
+ g0_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 11; wh4 ^= wh3;
+ g6_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 10; wh1 ^= wh4;
+ g2_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 9; wh2 ^= wh1;
+
+ /* last 8 rounds */
+ wh1 ^= wh2; wl1 ^= wl2 ^ 8; g8_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 7; g4_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 6; g0_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 5; g6_inv (wh1,wl1, wh1,wl1);
+ wh1 ^= wh2; wl1 ^= wl2 ^ 4; g2_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 3; g8_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 2; g4_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 1; g0_inv (wh1,wl1, wh1,wl1);
+
+ /* pack into byte vector */
+ plain [0] = wh1; plain [1] = wl1;
+ plain [2] = wh2; plain [3] = wl2;
+ plain [4] = wh3; plain [5] = wl3;
+ plain [6] = wh4; plain [7] = wl4;
+}
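
The sketch below illustrates, hypothetically, how the ten 256-byte subkey tables are laid out and used for a single encrypt/decrypt round trip. The static storage and helper name are assumptions for brevity; the in-kernel consumer (skipjack_setkey() in xform.c, later in this changeset) allocates the same layout with malloc() instead.

/*
 * Hypothetical round-trip sketch (illustration only): build the ten
 * 256-byte subkey tables for an 80-bit (10-byte) key and run one
 * 8-byte block through encrypt and back through decrypt.
 */
static u_int8_t sj_tables[10][0x100];
static u_int8_t *sj_key_tables[10];

static void
skipjack_example(u_int8_t key[10], u_int8_t block[8])
{
	u_int8_t tmp[8];
	int i;

	for (i = 0; i < 10; i++)
		sj_key_tables[i] = sj_tables[i];
	subkey_table_gen(key, sj_key_tables);

	skipjack_forwards(block, tmp, sj_key_tables);	/* encrypt */
	skipjack_backwards(tmp, block, sj_key_tables);	/* decrypt back */
}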
diff --git a/freebsd/sys/opencrypto/skipjack.h b/freebsd/sys/opencrypto/skipjack.h
new file mode 100644
index 00000000..3e88418c
--- /dev/null
+++ b/freebsd/sys/opencrypto/skipjack.h
@@ -0,0 +1,19 @@
+/* $FreeBSD$ */
+/* $OpenBSD: skipjack.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * Further optimized test implementation of SKIPJACK algorithm
+ * Mark Tillotson <markt@chaos.org.uk>, 25 June 98
+ * Optimizations suit RISC (lots of registers) machine best.
+ *
+ * based on unoptimized implementation of
+ * Panu Rissanen <bande@lut.fi> 960624
+ *
+ * SKIPJACK and KEA Algorithm Specifications
+ * Version 2.0
+ * 29 May 1998
+*/
+
+extern void skipjack_forwards(u_int8_t *plain, u_int8_t *cipher, u_int8_t **key);
+extern void skipjack_backwards(u_int8_t *cipher, u_int8_t *plain, u_int8_t **key);
+extern void subkey_table_gen(u_int8_t *key, u_int8_t **key_tables);
diff --git a/freebsd/sys/opencrypto/xform.c b/freebsd/sys/opencrypto/xform.c
new file mode 100644
index 00000000..e54a6740
--- /dev/null
+++ b/freebsd/sys/opencrypto/xform.c
@@ -0,0 +1,815 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
+ * in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ *
+ * Copyright (C) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/sysctl.h>
+#include <freebsd/sys/errno.h>
+#include <freebsd/sys/time.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/machine/cpu.h>
+
+#include <freebsd/crypto/blowfish/blowfish.h>
+#include <freebsd/crypto/des/des.h>
+#include <freebsd/crypto/rijndael/rijndael.h>
+#include <freebsd/crypto/camellia/camellia.h>
+#include <freebsd/crypto/sha1.h>
+
+#include <freebsd/opencrypto/cast.h>
+#include <freebsd/opencrypto/deflate.h>
+#include <freebsd/opencrypto/rmd160.h>
+#include <freebsd/opencrypto/skipjack.h>
+
+#include <freebsd/sys/md5.h>
+
+#include <freebsd/opencrypto/cryptodev.h>
+#include <freebsd/opencrypto/xform.h>
+
+static int null_setkey(u_int8_t **, u_int8_t *, int);
+static int des1_setkey(u_int8_t **, u_int8_t *, int);
+static int des3_setkey(u_int8_t **, u_int8_t *, int);
+static int blf_setkey(u_int8_t **, u_int8_t *, int);
+static int cast5_setkey(u_int8_t **, u_int8_t *, int);
+static int skipjack_setkey(u_int8_t **, u_int8_t *, int);
+static int rijndael128_setkey(u_int8_t **, u_int8_t *, int);
+static int aes_xts_setkey(u_int8_t **, u_int8_t *, int);
+static int cml_setkey(u_int8_t **, u_int8_t *, int);
+
+static void null_encrypt(caddr_t, u_int8_t *);
+static void des1_encrypt(caddr_t, u_int8_t *);
+static void des3_encrypt(caddr_t, u_int8_t *);
+static void blf_encrypt(caddr_t, u_int8_t *);
+static void cast5_encrypt(caddr_t, u_int8_t *);
+static void skipjack_encrypt(caddr_t, u_int8_t *);
+static void rijndael128_encrypt(caddr_t, u_int8_t *);
+static void aes_xts_encrypt(caddr_t, u_int8_t *);
+static void cml_encrypt(caddr_t, u_int8_t *);
+
+static void null_decrypt(caddr_t, u_int8_t *);
+static void des1_decrypt(caddr_t, u_int8_t *);
+static void des3_decrypt(caddr_t, u_int8_t *);
+static void blf_decrypt(caddr_t, u_int8_t *);
+static void cast5_decrypt(caddr_t, u_int8_t *);
+static void skipjack_decrypt(caddr_t, u_int8_t *);
+static void rijndael128_decrypt(caddr_t, u_int8_t *);
+static void aes_xts_decrypt(caddr_t, u_int8_t *);
+static void cml_decrypt(caddr_t, u_int8_t *);
+
+static void null_zerokey(u_int8_t **);
+static void des1_zerokey(u_int8_t **);
+static void des3_zerokey(u_int8_t **);
+static void blf_zerokey(u_int8_t **);
+static void cast5_zerokey(u_int8_t **);
+static void skipjack_zerokey(u_int8_t **);
+static void rijndael128_zerokey(u_int8_t **);
+static void aes_xts_zerokey(u_int8_t **);
+static void cml_zerokey(u_int8_t **);
+
+static void aes_xts_reinit(caddr_t, u_int8_t *);
+
+static void null_init(void *);
+static int null_update(void *, u_int8_t *, u_int16_t);
+static void null_final(u_int8_t *, void *);
+static int MD5Update_int(void *, u_int8_t *, u_int16_t);
+static void SHA1Init_int(void *);
+static int SHA1Update_int(void *, u_int8_t *, u_int16_t);
+static void SHA1Final_int(u_int8_t *, void *);
+static int RMD160Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA256Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA384Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA512Update_int(void *, u_int8_t *, u_int16_t);
+
+static u_int32_t deflate_compress(u_int8_t *, u_int32_t, u_int8_t **);
+static u_int32_t deflate_decompress(u_int8_t *, u_int32_t, u_int8_t **);
+
+MALLOC_DEFINE(M_XDATA, "xform", "xform data buffers");
+
+/* Encryption instances */
+struct enc_xform enc_xform_null = {
+ CRYPTO_NULL_CBC, "NULL",
+ /* NB: blocksize of 4 is to generate a properly aligned ESP header */
+ NULL_BLOCK_LEN, 0, 256, /* 2048 bits, max key */
+ null_encrypt,
+ null_decrypt,
+ null_setkey,
+ null_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_des = {
+ CRYPTO_DES_CBC, "DES",
+ DES_BLOCK_LEN, 8, 8,
+ des1_encrypt,
+ des1_decrypt,
+ des1_setkey,
+ des1_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_3des = {
+ CRYPTO_3DES_CBC, "3DES",
+ DES3_BLOCK_LEN, 24, 24,
+ des3_encrypt,
+ des3_decrypt,
+ des3_setkey,
+ des3_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_blf = {
+ CRYPTO_BLF_CBC, "Blowfish",
+ BLOWFISH_BLOCK_LEN, 5, 56 /* 448 bits, max key */,
+ blf_encrypt,
+ blf_decrypt,
+ blf_setkey,
+ blf_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_cast5 = {
+ CRYPTO_CAST_CBC, "CAST-128",
+ CAST128_BLOCK_LEN, 5, 16,
+ cast5_encrypt,
+ cast5_decrypt,
+ cast5_setkey,
+ cast5_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_skipjack = {
+ CRYPTO_SKIPJACK_CBC, "Skipjack",
+ SKIPJACK_BLOCK_LEN, 10, 10,
+ skipjack_encrypt,
+ skipjack_decrypt,
+ skipjack_setkey,
+ skipjack_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_rijndael128 = {
+ CRYPTO_RIJNDAEL128_CBC, "Rijndael-128/AES",
+ RIJNDAEL128_BLOCK_LEN, 8, 32,
+ rijndael128_encrypt,
+ rijndael128_decrypt,
+ rijndael128_setkey,
+ rijndael128_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_aes_xts = {
+ CRYPTO_AES_XTS, "AES-XTS",
+ RIJNDAEL128_BLOCK_LEN, 32, 64,
+ aes_xts_encrypt,
+ aes_xts_decrypt,
+ aes_xts_setkey,
+ aes_xts_zerokey,
+ aes_xts_reinit
+};
+
+struct enc_xform enc_xform_arc4 = {
+ CRYPTO_ARC4, "ARC4",
+ 1, 1, 32,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+struct enc_xform enc_xform_camellia = {
+ CRYPTO_CAMELLIA_CBC, "Camellia",
+ CAMELLIA_BLOCK_LEN, 8, 32,
+ cml_encrypt,
+ cml_decrypt,
+ cml_setkey,
+ cml_zerokey,
+ NULL
+};
+
+/* Authentication instances */
+struct auth_hash auth_hash_null = {
+ CRYPTO_NULL_HMAC, "NULL-HMAC",
+ 0, NULL_HASH_LEN, NULL_HMAC_BLOCK_LEN, sizeof(int), /* NB: context isn't used */
+ null_init, null_update, null_final
+};
+
+struct auth_hash auth_hash_hmac_md5 = {
+ CRYPTO_MD5_HMAC, "HMAC-MD5",
+ 16, MD5_HASH_LEN, MD5_HMAC_BLOCK_LEN, sizeof(MD5_CTX),
+ (void (*) (void *)) MD5Init, MD5Update_int,
+ (void (*) (u_int8_t *, void *)) MD5Final
+};
+
+struct auth_hash auth_hash_hmac_sha1 = {
+ CRYPTO_SHA1_HMAC, "HMAC-SHA1",
+ 20, SHA1_HASH_LEN, SHA1_HMAC_BLOCK_LEN, sizeof(SHA1_CTX),
+ SHA1Init_int, SHA1Update_int, SHA1Final_int
+};
+
+struct auth_hash auth_hash_hmac_ripemd_160 = {
+ CRYPTO_RIPEMD160_HMAC, "HMAC-RIPEMD-160",
+ 20, RIPEMD160_HASH_LEN, RIPEMD160_HMAC_BLOCK_LEN, sizeof(RMD160_CTX),
+ (void (*)(void *)) RMD160Init, RMD160Update_int,
+ (void (*)(u_int8_t *, void *)) RMD160Final
+};
+
+struct auth_hash auth_hash_key_md5 = {
+ CRYPTO_MD5_KPDK, "Keyed MD5",
+ 0, MD5_KPDK_HASH_LEN, 0, sizeof(MD5_CTX),
+ (void (*)(void *)) MD5Init, MD5Update_int,
+ (void (*)(u_int8_t *, void *)) MD5Final
+};
+
+struct auth_hash auth_hash_key_sha1 = {
+ CRYPTO_SHA1_KPDK, "Keyed SHA1",
+ 0, SHA1_KPDK_HASH_LEN, 0, sizeof(SHA1_CTX),
+ SHA1Init_int, SHA1Update_int, SHA1Final_int
+};
+
+struct auth_hash auth_hash_hmac_sha2_256 = {
+ CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256",
+ 32, SHA2_256_HASH_LEN, SHA2_256_HMAC_BLOCK_LEN, sizeof(SHA256_CTX),
+ (void (*)(void *)) SHA256_Init, SHA256Update_int,
+ (void (*)(u_int8_t *, void *)) SHA256_Final
+};
+
+struct auth_hash auth_hash_hmac_sha2_384 = {
+ CRYPTO_SHA2_384_HMAC, "HMAC-SHA2-384",
+ 48, SHA2_384_HASH_LEN, SHA2_384_HMAC_BLOCK_LEN, sizeof(SHA384_CTX),
+ (void (*)(void *)) SHA384_Init, SHA384Update_int,
+ (void (*)(u_int8_t *, void *)) SHA384_Final
+};
+
+struct auth_hash auth_hash_hmac_sha2_512 = {
+ CRYPTO_SHA2_512_HMAC, "HMAC-SHA2-512",
+ 64, SHA2_512_HASH_LEN, SHA2_512_HMAC_BLOCK_LEN, sizeof(SHA512_CTX),
+ (void (*)(void *)) SHA512_Init, SHA512Update_int,
+ (void (*)(u_int8_t *, void *)) SHA512_Final
+};
+
+/* Compression instance */
+struct comp_algo comp_algo_deflate = {
+ CRYPTO_DEFLATE_COMP, "Deflate",
+ 90, deflate_compress,
+ deflate_decompress
+};
+
+/*
+ * Encryption wrapper routines.
+ */
+static void
+null_encrypt(caddr_t key, u_int8_t *blk)
+{
+}
+static void
+null_decrypt(caddr_t key, u_int8_t *blk)
+{
+}
+static int
+null_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ *sched = NULL;
+ return 0;
+}
+static void
+null_zerokey(u_int8_t **sched)
+{
+ *sched = NULL;
+}
+
+static void
+des1_encrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb_encrypt(cb, cb, p[0], DES_ENCRYPT);
+}
+
+static void
+des1_decrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb_encrypt(cb, cb, p[0], DES_DECRYPT);
+}
+
+static int
+des1_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ des_key_schedule *p;
+ int err;
+
+ p = malloc(sizeof (des_key_schedule),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (p != NULL) {
+ des_set_key((des_cblock *) key, p[0]);
+ err = 0;
+ } else
+ err = ENOMEM;
+ *sched = (u_int8_t *) p;
+ return err;
+}
+
+static void
+des1_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof (des_key_schedule));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+des3_encrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_ENCRYPT);
+}
+
+static void
+des3_decrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_DECRYPT);
+}
+
+static int
+des3_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ des_key_schedule *p;
+ int err;
+
+ p = malloc(3*sizeof (des_key_schedule),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (p != NULL) {
+ des_set_key((des_cblock *)(key + 0), p[0]);
+ des_set_key((des_cblock *)(key + 8), p[1]);
+ des_set_key((des_cblock *)(key + 16), p[2]);
+ err = 0;
+ } else
+ err = ENOMEM;
+ *sched = (u_int8_t *) p;
+ return err;
+}
+
+static void
+des3_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, 3*sizeof (des_key_schedule));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+blf_encrypt(caddr_t key, u_int8_t *blk)
+{
+ BF_LONG t[2];
+
+ memcpy(t, blk, sizeof (t));
+ t[0] = ntohl(t[0]);
+ t[1] = ntohl(t[1]);
+ /* NB: BF_encrypt expects the block in host order! */
+ BF_encrypt(t, (BF_KEY *) key);
+ t[0] = htonl(t[0]);
+ t[1] = htonl(t[1]);
+ memcpy(blk, t, sizeof (t));
+}
+
+static void
+blf_decrypt(caddr_t key, u_int8_t *blk)
+{
+ BF_LONG t[2];
+
+ memcpy(t, blk, sizeof (t));
+ t[0] = ntohl(t[0]);
+ t[1] = ntohl(t[1]);
+ /* NB: BF_decrypt expects the block in host order! */
+ BF_decrypt(t, (BF_KEY *) key);
+ t[0] = htonl(t[0]);
+ t[1] = htonl(t[1]);
+ memcpy(blk, t, sizeof (t));
+}
+
+static int
+blf_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ *sched = malloc(sizeof(BF_KEY),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ BF_set_key((BF_KEY *) *sched, len, key);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+blf_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(BF_KEY));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+cast5_encrypt(caddr_t key, u_int8_t *blk)
+{
+ cast_encrypt((cast_key *) key, blk, blk);
+}
+
+static void
+cast5_decrypt(caddr_t key, u_int8_t *blk)
+{
+ cast_decrypt((cast_key *) key, blk, blk);
+}
+
+static int
+cast5_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ *sched = malloc(sizeof(cast_key), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ cast_setkey((cast_key *)*sched, key, len);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+cast5_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(cast_key));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+skipjack_encrypt(caddr_t key, u_int8_t *blk)
+{
+ skipjack_forwards(blk, blk, (u_int8_t **) key);
+}
+
+static void
+skipjack_decrypt(caddr_t key, u_int8_t *blk)
+{
+ skipjack_backwards(blk, blk, (u_int8_t **) key);
+}
+
+static int
+skipjack_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ /* NB: allocate all the memory that's needed at once */
+ *sched = malloc(10 * (sizeof(u_int8_t *) + 0x100),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ u_int8_t** key_tables = (u_int8_t**) *sched;
+ u_int8_t* table = (u_int8_t*) &key_tables[10];
+ int k;
+
+ for (k = 0; k < 10; k++) {
+ key_tables[k] = table;
+ table += 0x100;
+ }
+ subkey_table_gen(key, (u_int8_t **) *sched);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+skipjack_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, 10 * (sizeof(u_int8_t *) + 0x100));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+rijndael128_encrypt(caddr_t key, u_int8_t *blk)
+{
+ rijndael_encrypt((rijndael_ctx *) key, (u_char *) blk, (u_char *) blk);
+}
+
+static void
+rijndael128_decrypt(caddr_t key, u_int8_t *blk)
+{
+ rijndael_decrypt(((rijndael_ctx *) key), (u_char *) blk,
+ (u_char *) blk);
+}
+
+static int
+rijndael128_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ if (len != 16 && len != 24 && len != 32)
+ return (EINVAL);
+ *sched = malloc(sizeof(rijndael_ctx), M_CRYPTO_DATA,
+ M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ rijndael_set_key((rijndael_ctx *) *sched, (u_char *) key,
+ len * 8);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+rijndael128_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(rijndael_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+#define AES_XTS_BLOCKSIZE 16
+#define AES_XTS_IVSIZE 8
+#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */
+
+struct aes_xts_ctx {
+ rijndael_ctx key1;
+ rijndael_ctx key2;
+ u_int8_t tweak[AES_XTS_BLOCKSIZE];
+};
+
+static void
+aes_xts_reinit(caddr_t key, u_int8_t *iv)
+{
+ struct aes_xts_ctx *ctx = (struct aes_xts_ctx *)key;
+ u_int64_t blocknum;
+ u_int i;
+
+ /*
+ * Prepare tweak as E_k2(IV). IV is specified as LE representation
+ * of a 64-bit block number which we allow to be passed in directly.
+ */
+ bcopy(iv, &blocknum, AES_XTS_IVSIZE);
+ for (i = 0; i < AES_XTS_IVSIZE; i++) {
+ ctx->tweak[i] = blocknum & 0xff;
+ blocknum >>= 8;
+ }
+ /* Last 64 bits of IV are always zero */
+ bzero(ctx->tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE);
+
+ rijndael_encrypt(&ctx->key2, ctx->tweak, ctx->tweak);
+}
+
+static void
+aes_xts_crypt(struct aes_xts_ctx *ctx, u_int8_t *data, u_int do_encrypt)
+{
+ u_int8_t block[AES_XTS_BLOCKSIZE];
+ u_int i, carry_in, carry_out;
+
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
+ block[i] = data[i] ^ ctx->tweak[i];
+
+ if (do_encrypt)
+ rijndael_encrypt(&ctx->key1, block, data);
+ else
+ rijndael_decrypt(&ctx->key1, block, data);
+
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
+ data[i] ^= ctx->tweak[i];
+
+ /* Exponentiate tweak */
+ carry_in = 0;
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++) {
+ carry_out = ctx->tweak[i] & 0x80;
+ ctx->tweak[i] = (ctx->tweak[i] << 1) | (carry_in ? 1 : 0);
+ carry_in = carry_out;
+ }
+ if (carry_in)
+ ctx->tweak[0] ^= AES_XTS_ALPHA;
+ bzero(block, sizeof(block));
+}
+
+static void
+aes_xts_encrypt(caddr_t key, u_int8_t *data)
+{
+ aes_xts_crypt((struct aes_xts_ctx *)key, data, 1);
+}
+
+static void
+aes_xts_decrypt(caddr_t key, u_int8_t *data)
+{
+ aes_xts_crypt((struct aes_xts_ctx *)key, data, 0);
+}
+
+static int
+aes_xts_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ struct aes_xts_ctx *ctx;
+
+ if (len != 32 && len != 64)
+ return EINVAL;
+
+ *sched = malloc(sizeof(struct aes_xts_ctx), M_CRYPTO_DATA,
+ M_NOWAIT | M_ZERO);
+ if (*sched == NULL)
+ return ENOMEM;
+ ctx = (struct aes_xts_ctx *)*sched;
+
+ rijndael_set_key(&ctx->key1, key, len * 4);
+ rijndael_set_key(&ctx->key2, key + (len / 2), len * 4);
+
+ return 0;
+}
+
+static void
+aes_xts_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(struct aes_xts_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
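
A hypothetical driver sketch for the AES-XTS callbacks above: key the transform, derive the tweak from a 64-bit sector number, and encrypt one 512-byte sector block by block. The sector size and helper name are assumptions, and the code presumes it would live in the same compilation unit as the callbacks; real consumers go through the opencrypto session machinery rather than calling the xform directly.

/*
 * Hypothetical sector-encryption sketch (illustration only).
 */
static int
aes_xts_encrypt_sector(u_int8_t *key, int keylen, u_int64_t sector,
    u_int8_t *data)
{
	u_int8_t *sched;
	u_int8_t iv[AES_XTS_IVSIZE];
	u_int i;
	int error;

	error = aes_xts_setkey(&sched, key, keylen);	/* keylen is 32 or 64 */
	if (error != 0)
		return (error);

	/* The IV is the 64-bit sector number in little-endian byte order. */
	for (i = 0; i < AES_XTS_IVSIZE; i++)
		iv[i] = (sector >> (8 * i)) & 0xff;
	aes_xts_reinit((caddr_t)sched, iv);		/* tweak = E_k2(IV) */

	for (i = 0; i < 512; i += AES_XTS_BLOCKSIZE)
		aes_xts_encrypt((caddr_t)sched, data + i);

	aes_xts_zerokey(&sched);
	return (0);
}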
+
+static void
+cml_encrypt(caddr_t key, u_int8_t *blk)
+{
+ camellia_encrypt((camellia_ctx *) key, (u_char *) blk, (u_char *) blk);
+}
+
+static void
+cml_decrypt(caddr_t key, u_int8_t *blk)
+{
+ camellia_decrypt(((camellia_ctx *) key), (u_char *) blk,
+ (u_char *) blk);
+}
+
+static int
+cml_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ if (len != 16 && len != 24 && len != 32)
+ return (EINVAL);
+ *sched = malloc(sizeof(camellia_ctx), M_CRYPTO_DATA,
+ M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ camellia_set_key((camellia_ctx *) *sched, (u_char *) key,
+ len * 8);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+cml_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(camellia_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+/*
+ * And now for auth.
+ */
+
+static void
+null_init(void *ctx)
+{
+}
+
+static int
+null_update(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ return 0;
+}
+
+static void
+null_final(u_int8_t *buf, void *ctx)
+{
+ if (buf != (u_int8_t *) 0)
+ bzero(buf, 12);
+}
+
+static int
+RMD160Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ RMD160Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+MD5Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ MD5Update(ctx, buf, len);
+ return 0;
+}
+
+static void
+SHA1Init_int(void *ctx)
+{
+ SHA1Init(ctx);
+}
+
+static int
+SHA1Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA1Update(ctx, buf, len);
+ return 0;
+}
+
+static void
+SHA1Final_int(u_int8_t *blk, void *ctx)
+{
+ SHA1Final(blk, ctx);
+}
+
+static int
+SHA256Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA256_Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+SHA384Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA384_Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+SHA512Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA512_Update(ctx, buf, len);
+ return 0;
+}
+
+/*
+ * And compression
+ */
+
+static u_int32_t
+deflate_compress(u_int8_t *data, u_int32_t size, u_int8_t **out)
+{
+ return deflate_global(data, size, 0, out);
+}
+
+static u_int32_t
+deflate_decompress(u_int8_t *data, u_int32_t size, u_int8_t **out)
+{
+ return deflate_global(data, size, 1, out);
+}
diff --git a/freebsd/sys/opencrypto/xform.h b/freebsd/sys/opencrypto/xform.h
new file mode 100644
index 00000000..7c70ca90
--- /dev/null
+++ b/freebsd/sys/opencrypto/xform.h
@@ -0,0 +1,104 @@
+/* $FreeBSD$ */
+/* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _CRYPTO_XFORM_HH_
+#define _CRYPTO_XFORM_HH_
+
+#include <freebsd/sys/md5.h>
+#include <freebsd/crypto/sha1.h>
+#include <freebsd/crypto/sha2/sha2.h>
+#include <freebsd/opencrypto/rmd160.h>
+
+/* Declarations */
+struct auth_hash {
+ int type;
+ char *name;
+ u_int16_t keysize;
+ u_int16_t hashsize;
+ u_int16_t blocksize;
+ u_int16_t ctxsize;
+ void (*Init) (void *);
+ int (*Update) (void *, u_int8_t *, u_int16_t);
+ void (*Final) (u_int8_t *, void *);
+};
+
+#define AH_ALEN_MAX 20 /* max authenticator hash length */
+
+struct enc_xform {
+ int type;
+ char *name;
+ u_int16_t blocksize;
+ u_int16_t minkey, maxkey;
+ void (*encrypt) (caddr_t, u_int8_t *);
+ void (*decrypt) (caddr_t, u_int8_t *);
+ int (*setkey) (u_int8_t **, u_int8_t *, int len);
+ void (*zerokey) (u_int8_t **);
+ void (*reinit) (caddr_t, u_int8_t *);
+};
+
+struct comp_algo {
+ int type;
+ char *name;
+ size_t minlen;
+ u_int32_t (*compress) (u_int8_t *, u_int32_t, u_int8_t **);
+ u_int32_t (*decompress) (u_int8_t *, u_int32_t, u_int8_t **);
+};
+
+union authctx {
+ MD5_CTX md5ctx;
+ SHA1_CTX sha1ctx;
+ RMD160_CTX rmd160ctx;
+ SHA256_CTX sha256ctx;
+ SHA384_CTX sha384ctx;
+ SHA512_CTX sha512ctx;
+};
+
+extern struct enc_xform enc_xform_null;
+extern struct enc_xform enc_xform_des;
+extern struct enc_xform enc_xform_3des;
+extern struct enc_xform enc_xform_blf;
+extern struct enc_xform enc_xform_cast5;
+extern struct enc_xform enc_xform_skipjack;
+extern struct enc_xform enc_xform_rijndael128;
+extern struct enc_xform enc_xform_aes_xts;
+extern struct enc_xform enc_xform_arc4;
+extern struct enc_xform enc_xform_camellia;
+
+extern struct auth_hash auth_hash_null;
+extern struct auth_hash auth_hash_key_md5;
+extern struct auth_hash auth_hash_key_sha1;
+extern struct auth_hash auth_hash_hmac_md5;
+extern struct auth_hash auth_hash_hmac_sha1;
+extern struct auth_hash auth_hash_hmac_ripemd_160;
+extern struct auth_hash auth_hash_hmac_sha2_256;
+extern struct auth_hash auth_hash_hmac_sha2_384;
+extern struct auth_hash auth_hash_hmac_sha2_512;
+
+extern struct comp_algo comp_algo_deflate;
+
+#ifdef _KERNEL
+#include <freebsd/sys/malloc.h>
+MALLOC_DECLARE(M_XDATA);
+#endif
+#endif /* _CRYPTO_XFORM_HH_ */
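
As a closing illustration, the sketch below shows how an auth_hash descriptor and union authctx are typically driven through the Init/Update/Final function pointers. The helper is invented for this example; note that HMAC keying (ipad/opad processing) is performed by the caller, so invoking the pointers directly as shown yields only the raw, unkeyed digest.

/*
 * Hypothetical sketch (illustration only): compute an unkeyed digest by
 * driving an auth_hash descriptor through its function pointers.
 */
#include <freebsd/opencrypto/xform.h>

static void
xform_digest(struct auth_hash *axf, u_int8_t *data, u_int16_t len,
    u_int8_t *digest)
{
	union authctx ctx;

	axf->Init(&ctx);
	axf->Update(&ctx, data, len);
	axf->Final(digest, &ctx);	/* digest must hold axf->hashsize bytes */
}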