net/mlx5: Refactor accel IPSec code

The current code has one layer that executes FPGA commands, and
the Ethernet part uses this code directly. Since downstream patches
introduce support for IPSec in mlx5_ib, we need to provide some
abstractions. This patch refactors the accel code into one layer
that creates a software IPSec transformation and another one which
creates the actual hardware context.
The internal command implementation is now hidden in the FPGA
core layer. The code also adds the ability to share FPGA hardware
contexts. If two contexts are the same, only a reference count
is taken.

Signed-off-by: Aviad Yehezkel <aviadye@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
Aviad Yehezkel 2018-01-18 13:05:48 +02:00 committed by Saeed Mahameed
parent af9fe19d66
commit d6c4f0298c
7 changed files with 666 additions and 223 deletions

View File

@ -37,27 +37,6 @@
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fpga/ipsec.h" #include "fpga/ipsec.h"
/* Execute an IPSec SADB command on the FPGA.
 *
 * May be called from atomic context.  Returns an opaque command context
 * (to be handed to mlx5_accel_ipsec_sa_cmd_wait()) or an ERR_PTR.
 */
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
				   struct mlx5_accel_ipsec_sa *cmd)
{
	int cmd_size;

	/* No IPSec-capable device -> command cannot be executed */
	if (!MLX5_IPSEC_DEV(mdev))
		return ERR_PTR(-EOPNOTSUPP);

	if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_V2_CMD)
		cmd_size = sizeof(*cmd);
	else
		/* v1 devices only understand the leading v1 portion */
		cmd_size = sizeof(cmd->ipsec_sa_v1);

	return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd, cmd_size);
}
/* Wait (killable) for a previously issued SADB command to complete.
 * Returns the command result, or -EINTR if killed.
 */
int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
{
	return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{ {
return mlx5_fpga_ipsec_device_caps(mdev); return mlx5_fpga_ipsec_device_caps(mdev);
@ -75,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
return mlx5_fpga_ipsec_counters_read(mdev, counters, count); return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
} }
/* Create a hardware IPSec context (SA) for an accel ESP transform.
 *
 * @mdev:    mlx5 device
 * @xfrm:    transform created via mlx5_accel_esp_create_xfrm()
 * @saddr:   source address (an IPv4 address is carried in the last dword)
 * @daddr:   destination address
 * @spi:     SPI in network byte order
 * @is_ipv6: address family selector
 *
 * Returns an opaque context for mlx5_accel_esp_free_hw_context(), or an
 * ERR_PTR on failure.
 */
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
				       struct mlx5_accel_esp_xfrm *xfrm,
				       const __be32 saddr[4],
				       const __be32 daddr[4],
				       const __be32 spi, bool is_ipv6)
{
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
					     spi, is_ipv6);
}
/* Release a hardware context obtained from
 * mlx5_accel_esp_create_hw_context().
 */
void mlx5_accel_esp_free_hw_context(void *context)
{
	mlx5_fpga_ipsec_delete_sa_ctx(context);
}
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{ {
return mlx5_fpga_ipsec_init(mdev); return mlx5_fpga_ipsec_init(mdev);
@ -84,3 +78,25 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{ {
mlx5_fpga_ipsec_cleanup(mdev); mlx5_fpga_ipsec_cleanup(mdev);
} }
/* Create a software ESP transform object.
 *
 * Delegates to the FPGA layer and stamps the owning device on success.
 * Returns the xfrm, or an ERR_PTR on failure.  No hardware state is
 * created here; see mlx5_accel_esp_create_hw_context().
 */
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
			   const struct mlx5_accel_esp_xfrm_attrs *attrs,
			   u32 flags)
{
	struct mlx5_accel_esp_xfrm *xfrm;

	xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
	if (IS_ERR(xfrm))
		return xfrm;

	xfrm->mdev = mdev;
	return xfrm;
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
/* Destroy a software ESP transform created by
 * mlx5_accel_esp_create_xfrm().  The caller must have freed any
 * hardware context bound to it first.
 */
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	mlx5_fpga_esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);

View File

@ -39,89 +39,20 @@
#ifdef CONFIG_MLX5_ACCEL #ifdef CONFIG_MLX5_ACCEL
#define MLX5_IPSEC_SADB_IP_AH BIT(7)
#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
#define MLX5_IPSEC_SADB_IPV6 BIT(2)
enum {
MLX5_IPSEC_CMD_ADD_SA = 0,
MLX5_IPSEC_CMD_DEL_SA = 1,
MLX5_IPSEC_CMD_ADD_SA_V2 = 2,
MLX5_IPSEC_CMD_DEL_SA_V2 = 3,
MLX5_IPSEC_CMD_MOD_SA_V2 = 4,
MLX5_IPSEC_CMD_SET_CAP = 5,
};
enum mlx5_accel_ipsec_enc_mode {
MLX5_IPSEC_SADB_MODE_NONE = 0,
MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
};
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE) MLX5_ACCEL_IPSEC_CAP_DEVICE)
struct mlx5_accel_ipsec_sa_v1 {
__be32 cmd;
u8 key_enc[32];
u8 key_auth[32];
__be32 sip[4];
__be32 dip[4];
union {
struct {
__be32 reserved;
u8 salt_iv[8];
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi;
__be32 sw_sa_handle;
__be16 tfclen;
u8 enc_mode;
u8 reserved1[2];
u8 flags;
u8 reserved2[2];
};
struct mlx5_accel_ipsec_sa {
struct mlx5_accel_ipsec_sa_v1 ipsec_sa_v1;
__be16 udp_sp;
__be16 udp_dp;
u8 reserved1[4];
__be32 esn;
__be16 vid; /* only 12 bits, rest is reserved */
__be16 reserved2;
} __packed;
/**
* mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
* @mdev: mlx5 device
* @cmd: command to execute
* May be called from atomic context. Returns context pointer, or error
* Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
* context, to cleanup the context pointer
*/
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd);
/**
* mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
* @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
* Sleeps (killable) until command execution is complete.
* Returns the command result, or -EINTR if killed
*/
int mlx5_accel_ipsec_sa_cmd_wait(void *context);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count); unsigned int count);
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6);
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
@ -129,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
#define MLX5_IPSEC_DEV(mdev) false #define MLX5_IPSEC_DEV(mdev) false
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(void *context)
{
}
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{ {
return 0; return 0;

View File

@ -47,7 +47,8 @@ struct mlx5e_ipsec_sa_entry {
unsigned int handle; /* Handle in SADB_RX */ unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x; struct xfrm_state *x;
struct mlx5e_ipsec *ipsec; struct mlx5e_ipsec *ipsec;
void *context; struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
}; };
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
@ -105,74 +106,51 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
ida_simple_remove(&ipsec->halloc, sa_entry->handle); ida_simple_remove(&ipsec->halloc, sa_entry->handle);
} }
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x) static void
{ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4; struct mlx5_accel_esp_xfrm_attrs *attrs)
switch (key_len) {
case 16:
return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
case 32:
return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
default:
netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
key_len, x->aead->alg_name);
return -1;
}
}
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_ipsec_sa *hw_sa)
{ {
struct xfrm_state *x = sa_entry->x; struct xfrm_state *x = sa_entry->x;
struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
struct aead_geniv_ctx *geniv_ctx; struct aead_geniv_ctx *geniv_ctx;
unsigned int crypto_data_len;
struct crypto_aead *aead; struct crypto_aead *aead;
unsigned int key_len; unsigned int crypto_data_len, key_len;
int ivsize; int ivsize;
memset(hw_sa, 0, sizeof(*hw_sa)); memset(attrs, 0, sizeof(*attrs));
/* key */
crypto_data_len = (x->aead->alg_key_len + 7) / 8; crypto_data_len = (x->aead->alg_key_len + 7) / 8;
key_len = crypto_data_len - 4; /* 4 bytes salt at end */ key_len = crypto_data_len - 4; /* 4 bytes salt at end */
memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
aes_gcm->key_len = key_len * 8;
/* salt and seq_iv */
aead = x->data; aead = x->data;
geniv_ctx = crypto_aead_ctx(aead); geniv_ctx = crypto_aead_ctx(aead);
ivsize = crypto_aead_ivsize(aead); ivsize = crypto_aead_ivsize(aead);
memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
sizeof(aes_gcm->salt));
memcpy(&hw_sa->ipsec_sa_v1.key_enc, x->aead->alg_key, key_len); /* iv len */
/* Duplicate 128 bit key twice according to HW layout */ aes_gcm->icv_len = x->aead->alg_icv_len;
if (key_len == 16)
memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], x->aead->alg_key, key_len);
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, geniv_ctx->salt, ivsize);
hw_sa->ipsec_sa_v1.gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
hw_sa->ipsec_sa_v1.cmd = htonl(op); /* rx handle */
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN; attrs->sa_handle = sa_entry->handle;
if (x->props.family == AF_INET) {
hw_sa->ipsec_sa_v1.sip[3] = x->props.saddr.a4; /* algo type */
hw_sa->ipsec_sa_v1.dip[3] = x->id.daddr.a4; attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
} else {
memcpy(hw_sa->ipsec_sa_v1.sip, x->props.saddr.a6, /* action */
sizeof(hw_sa->ipsec_sa_v1.sip)); attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
memcpy(hw_sa->ipsec_sa_v1.dip, x->id.daddr.a6, MLX5_ACCEL_ESP_ACTION_ENCRYPT :
sizeof(hw_sa->ipsec_sa_v1.dip)); MLX5_ACCEL_ESP_ACTION_DECRYPT;
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IPV6; /* flags */
} attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
hw_sa->ipsec_sa_v1.spi = x->id.spi; MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(sa_entry->handle); MLX5_ACCEL_ESP_FLAGS_TUNNEL;
switch (x->id.proto) {
case IPPROTO_ESP:
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_ESP;
break;
case IPPROTO_AH:
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_AH;
break;
default:
break;
}
hw_sa->ipsec_sa_v1.enc_mode = mlx5e_ipsec_enc_mode(x);
if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_DIR_SX;
} }
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@ -254,9 +232,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{ {
struct mlx5e_ipsec_sa_entry *sa_entry = NULL; struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.dev; struct net_device *netdev = x->xso.dev;
struct mlx5_accel_ipsec_sa hw_sa; struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv; struct mlx5e_priv *priv;
void *context; __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
bool is_ipv6 = false;
int err; int err;
priv = netdev_priv(netdev); priv = netdev_priv(netdev);
@ -285,20 +264,41 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
} }
} }
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa); /* create xfrm */
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa); mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
if (IS_ERR(context)) { sa_entry->xfrm =
err = PTR_ERR(context); mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sadb_rx; goto err_sadb_rx;
} }
err = mlx5_accel_ipsec_sa_cmd_wait(context); /* create hw context */
if (err) if (x->props.family == AF_INET) {
goto err_sadb_rx; saddr[3] = x->props.saddr.a4;
daddr[3] = x->id.daddr.a4;
} else {
memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
is_ipv6 = true;
}
spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
saddr, daddr, spi,
is_ipv6);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
x->xso.offload_handle = (unsigned long)sa_entry; x->xso.offload_handle = (unsigned long)sa_entry;
goto out; goto out;
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx: err_sadb_rx:
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
mlx5e_ipsec_sadb_rx_del(sa_entry); mlx5e_ipsec_sadb_rx_del(sa_entry);
@ -313,8 +313,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
static void mlx5e_xfrm_del_state(struct xfrm_state *x) static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{ {
struct mlx5e_ipsec_sa_entry *sa_entry; struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5_accel_ipsec_sa hw_sa;
void *context;
if (!x->xso.offload_handle) if (!x->xso.offload_handle)
return; return;
@ -324,19 +322,11 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry); mlx5e_ipsec_sadb_rx_del(sa_entry);
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
if (IS_ERR(context))
return;
sa_entry->context = context;
} }
static void mlx5e_xfrm_free_state(struct xfrm_state *x) static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{ {
struct mlx5e_ipsec_sa_entry *sa_entry; struct mlx5e_ipsec_sa_entry *sa_entry;
int res;
if (!x->xso.offload_handle) if (!x->xso.offload_handle)
return; return;
@ -344,11 +334,9 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
WARN_ON(sa_entry->x != x); WARN_ON(sa_entry->x != x);
res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context); if (sa_entry->hw_context) {
sa_entry->context = NULL; mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
if (res) { mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
/* Leftover object will leak */
return;
} }
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) if (x->xso.flags & XFRM_OFFLOAD_INBOUND)

View File

@ -31,6 +31,7 @@
* *
*/ */
#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include "mlx5_core.h" #include "mlx5_core.h"
@ -47,7 +48,7 @@ enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_COMPLETE, MLX5_FPGA_IPSEC_CMD_COMPLETE,
}; };
struct mlx5_ipsec_command_context { struct mlx5_fpga_ipsec_cmd_context {
struct mlx5_fpga_dma_buf buf; struct mlx5_fpga_dma_buf buf;
enum mlx5_fpga_ipsec_cmd_status status; enum mlx5_fpga_ipsec_cmd_status status;
struct mlx5_ifc_fpga_ipsec_cmd_resp resp; struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
@ -58,11 +59,43 @@ struct mlx5_ipsec_command_context {
u8 command[0]; u8 command[0];
}; };
struct mlx5_fpga_esp_xfrm;

/* Hardware SA context — one per unique hardware SA image.  Hashed by
 * its hw_sa content (see rhash_sa below) so identical SAs are shared
 * by reference instead of duplicated in the FPGA.
 */
struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head		hash;
	struct mlx5_ifc_fpga_ipsec_sa	hw_sa;
	struct mlx5_core_dev		*dev;
	struct mlx5_fpga_esp_xfrm	*fpga_xfrm;
};

/* Software ESP transform wrapping the generic accel xfrm.  Several
 * rules (num_rules) may share the same bound sa_ctx; lock protects
 * the binding and the refcount.
 */
struct mlx5_fpga_esp_xfrm {
	unsigned int num_rules;
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mutex lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm accel_xfrm;
};

/* The whole hw_sa image is used verbatim as the hash key.
 * NOTE(review): hw_sa.ipsec_sa_v1.cmd is written by
 * mlx5_fpga_ipsec_update_hw_sa() while the object is already inserted
 * in the hash, which transiently mutates the key — confirm whether the
 * cmd field should be excluded from the key.
 */
static const struct rhashtable_params rhash_sa = {
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
struct mlx5_fpga_ipsec { struct mlx5_fpga_ipsec {
struct list_head pending_cmds; struct list_head pending_cmds;
spinlock_t pending_cmds_lock; /* Protects pending_cmds */ spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
struct mlx5_fpga_conn *conn; struct mlx5_fpga_conn *conn;
/* Map hardware SA --> SA context
* (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
* We will use this hash to avoid SAs duplication in fpga which
* aren't allowed
*/
struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
struct mutex sa_hash_lock;
}; };
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@ -86,10 +119,10 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf, struct mlx5_fpga_dma_buf *buf,
u8 status) u8 status)
{ {
struct mlx5_ipsec_command_context *context; struct mlx5_fpga_ipsec_cmd_context *context;
if (status) { if (status) {
context = container_of(buf, struct mlx5_ipsec_command_context, context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
buf); buf);
mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
status); status);
@ -117,7 +150,7 @@ int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{ {
struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
struct mlx5_ipsec_command_context *context; struct mlx5_fpga_ipsec_cmd_context *context;
enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
struct mlx5_fpga_device *fdev = cb_arg; struct mlx5_fpga_device *fdev = cb_arg;
unsigned long flags; unsigned long flags;
@ -133,7 +166,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
struct mlx5_ipsec_command_context, struct mlx5_fpga_ipsec_cmd_context,
list); list);
if (context) if (context)
list_del(&context->list); list_del(&context->list);
@ -160,7 +193,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
const void *cmd, int cmd_size) const void *cmd, int cmd_size)
{ {
struct mlx5_ipsec_command_context *context; struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_fpga_device *fdev = mdev->fpga; struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags; unsigned long flags;
int res; int res;
@ -203,7 +236,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
static int mlx5_fpga_ipsec_cmd_wait(void *ctx) static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{ {
struct mlx5_ipsec_command_context *context = ctx; struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout = unsigned long timeout =
msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
int res; int res;
@ -222,33 +255,49 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
return res; return res;
} }
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
struct mlx5_accel_ipsec_sa *cmd, int cmd_size)
{ {
return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, cmd_size); if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
return true;
return false;
} }
int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
int opcode)
{ {
struct mlx5_ipsec_command_context *context = ctx; struct mlx5_core_dev *dev = fdev->mdev;
struct mlx5_accel_ipsec_sa *sa; struct mlx5_ifc_fpga_ipsec_sa *sa;
int res; struct mlx5_fpga_ipsec_cmd_context *cmd_context;
size_t sa_cmd_size;
int err;
res = mlx5_fpga_ipsec_cmd_wait(ctx); hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
if (res) if (is_v2_sadb_supported(fdev->ipsec))
sa_cmd_size = sizeof(*hw_sa);
else
sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
if (IS_ERR(cmd_context))
return PTR_ERR(cmd_context);
err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
if (err)
goto out; goto out;
sa = (struct mlx5_accel_ipsec_sa *)&context->command; sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
if (sa->ipsec_sa_v1.sw_sa_handle != context->resp.sw_sa_handle) { if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
mlx5_fpga_err(context->dev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
ntohl(sa->ipsec_sa_v1.sw_sa_handle), ntohl(sa->ipsec_sa_v1.sw_sa_handle),
ntohl(context->resp.sw_sa_handle)); ntohl(cmd_context->resp.sw_sa_handle));
res = -EIO; err = -EIO;
} }
out: out:
kfree(context); kfree(cmd_context);
return res; return err;
} }
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
@ -278,9 +327,6 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, v2_command))
ret |= MLX5_ACCEL_IPSEC_CAP_V2_CMD;
return ret; return ret;
} }
@ -345,11 +391,11 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{ {
struct mlx5_ipsec_command_context *context; struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
int err; int err;
cmd.cmd = htonl(MLX5_IPSEC_CMD_SET_CAP); cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags); cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
if (IS_ERR(context)) { if (IS_ERR(context)) {
@ -383,6 +429,200 @@ static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
return mlx5_fpga_ipsec_set_caps(mdev, flags); return mlx5_fpga_ipsec_set_caps(mdev, flags);
} }
/* Translate generic accel xfrm attributes into the flow-independent
 * part of the FPGA SA layout (key material, IVs, handle, direction).
 */
static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key (key_len is in bits) */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Duplicate 128 bit key twice according to HW layout */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode — validation only admits 128/256-bit AES-GCM keys,
	 * so the switch is exhaustive in practice
	 */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
			MLX5_FPGA_IPSEC_SA_SPI_EN |
			MLX5_FPGA_IPSEC_SA_IP_ESP;

	/* NOTE(review): bitwise test on an enum action value — assumes
	 * ENCRYPT is a dedicated bit; confirm against the enum definition.
	 */
	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}
/* Build the complete hardware SA image: xfrm-derived fields plus the
 * flow-specific addresses and SPI.
 */
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs (callers place an IPv4 address in the last dword) */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI (already in network byte order) */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
/* Create (or share) an FPGA SA context for @accel_xfrm.
 *
 * If the xfrm already has a bound SA context, the new request must
 * describe the exact same hardware SA (same IPs and SPI); the existing
 * context is then shared by bumping num_rules.  Otherwise the candidate
 * SA is inserted into the device-wide hash — which guarantees no
 * duplicate SAs reach the FPGA — and programmed into hardware.
 *
 * Returns the SA context, or an ERR_PTR on failure.
 */
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This is unbounded fpga_xfrm, try to add to hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bound different accel_xfrm to already existing sa_ctx.
		 * This is because we can't support multiple keymats for
		 * same IPs and SPI
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bound accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	/* clear the opcode so the stored image is command-independent */
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);
exists:
	mutex_unlock(&fpga_xfrm->lock);
	/* shared or failed: the candidate allocation is not needed */
	kfree(sa_ctx);
	return context;
}
/* Delete the SA from the FPGA and remove it from the dedup hash. */
static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		/* NOTE(review): on a hardware delete failure the context is
		 * left in the hash — presumably to keep the image visible
		 * while the device may still hold the SA; confirm intent.
		 */
		WARN_ON(err);
		return;
	}

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}
/* Drop one rule's reference on the shared SA context.  When the last
 * rule goes away, the SA is removed from hardware and its memory is
 * released.
 *
 * Fix: the original cleared fpga_xfrm->sa_ctx without freeing it,
 * leaking the kzalloc()ed mlx5_fpga_ipsec_sa_ctx allocated in
 * mlx5_fpga_ipsec_create_sa_ctx().
 */
void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		kfree(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{ {
struct mlx5_fpga_conn_attr init_attr = {0}; struct mlx5_fpga_conn_attr init_attr = {0};
@ -421,15 +661,23 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
} }
fdev->ipsec->conn = conn; fdev->ipsec->conn = conn;
err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
if (err)
goto err_destroy_conn;
mutex_init(&fdev->ipsec->sa_hash_lock);
err = mlx5_fpga_ipsec_enable_supported_caps(mdev); err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
if (err) { if (err) {
mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
err); err);
goto err_destroy_conn; goto err_destroy_hash;
} }
return 0; return 0;
err_destroy_hash:
rhashtable_destroy(&fdev->ipsec->sa_hash);
err_destroy_conn: err_destroy_conn:
mlx5_fpga_sbu_conn_destroy(conn); mlx5_fpga_sbu_conn_destroy(conn);
@ -446,7 +694,92 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev)) if (!mlx5_fpga_is_ipsec_device(mdev))
return; return;
rhashtable_destroy(&fdev->ipsec->sa_hash);
mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
kfree(fdev->ipsec); kfree(fdev->ipsec);
fdev->ipsec = NULL; fdev->ipsec = NULL;
} }
/* Validate that the requested ESP transform can be offloaded to the
 * FPGA IPSec engine.  Returns 0 when supported, -EOPNOTSUPP otherwise.
 */
static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	/* ESN needs the v2 SADB command format.
	 * Fix: the error message here was copy-pasted from the key-length
	 * check above and described the wrong condition.
	 */
	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN without v2 command support\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Allocate and initialize a software ESP transform.
 *
 * Callers must request metadata (MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)
 * and the attributes must pass offload validation.  Returns the
 * embedded generic accel xfrm, or an ERR_PTR.  No hardware state is
 * created here; that happens in mlx5_fpga_ipsec_create_sa_ctx().
 */
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}
/* Free a software ESP transform.  The caller must have released all
 * SA contexts bound to it beforehand.
 */
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);

	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

View File

@ -38,32 +38,29 @@
#ifdef CONFIG_MLX5_FPGA #ifdef CONFIG_MLX5_FPGA
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd, int cmd_size);
int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev); unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count); unsigned int counters_count);
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
#else #else
static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd,
int cmd_size)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
{
return -EOPNOTSUPP;
}
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{ {
return 0; return 0;
@ -81,6 +78,20 @@ static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
return 0; return 0;
} }
static inline void *
mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return NULL;
}
static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
}
static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{ {
return 0; return 0;
@ -90,6 +101,18 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{ {
} }
static inline struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
}
#endif /* CONFIG_MLX5_FPGA */ #endif /* CONFIG_MLX5_FPGA */
#endif /* __MLX5_FPGA_SADB_H__ */ #endif /* __MLX5_FPGA_SADB_H__ */

View File

@ -36,23 +36,102 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
enum mlx5_accel_ipsec_caps { enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
};
/* Bit flags describing an ESP offload context.
 * Tunnel mode is the default (no bit set); the remaining bits are OR-ed in.
 */
enum mlx5_accel_esp_flags {
MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, /* transport mode instead of tunnel */
MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, /* extended sequence numbers in use */
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, /* ESN window overlap state — TODO confirm exact semantics */
};
/* Direction of the ESP offload: RX (decrypt) or TX (encrypt). */
enum mlx5_accel_esp_action {
MLX5_ACCEL_ESP_ACTION_DECRYPT,
MLX5_ACCEL_ESP_ACTION_ENCRYPT,
};
/* Selector for the keymat union in struct mlx5_accel_esp_xfrm_attrs. */
enum mlx5_accel_esp_keymats {
MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
MLX5_ACCEL_ESP_KEYMAT_AES_GCM, /* keymat.aes_gcm is valid */
};
/* Anti-replay protection scheme; selects the replay union member. */
enum mlx5_accel_esp_replay {
MLX5_ACCEL_ESP_REPLAY_NONE,
MLX5_ACCEL_ESP_REPLAY_BMP, /* presumably a bitmap replay window (replay.bmp.size) — confirm */
};
/* AES-GCM key material for an ESP offload context. */
struct aes_gcm_keymat {
u64 seq_iv; /* initial IV value when iv_algo is sequence-based */
enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;

u32 salt;
u32 icv_len; /* integrity check value length — presumably in bits, confirm */

u32 key_len; /* presumably in bits (128/256) — confirm against validate_xfrm_attrs */
u32 aes_key[256 / 32]; /* sized for the largest (256-bit) key */
};
/* Software description of an ESP transformation, used to build the
 * device/FPGA hardware context. Union members are selected by the
 * adjacent *_type field.
 */
struct mlx5_accel_esp_xfrm_attrs {
enum mlx5_accel_esp_action action; /* encrypt (TX) or decrypt (RX) */
u32 esn; /* extended sequence number high bits */
u32 spi; /* ESP security parameter index */
u32 seq; /* sequence number */
u32 tfc_pad; /* traffic flow confidentiality padding */
u32 flags; /* enum mlx5_accel_esp_flags */
u32 sa_handle; /* software SA handle */
enum mlx5_accel_esp_replay replay_type; /* selects replay union member */
union {
struct {
u32 size; /* replay window size — units not shown here, confirm */

} bmp;
} replay;
enum mlx5_accel_esp_keymats keymat_type; /* selects keymat union member */
union {
struct aes_gcm_keymat aes_gcm;
} keymat;
};
/* An ESP transformation object bound to a device. Implementations embed
 * this in a larger private struct (e.g. struct mlx5_fpga_esp_xfrm).
 */
struct mlx5_accel_esp_xfrm {
struct mlx5_core_dev *mdev; /* owning device */
struct mlx5_accel_esp_xfrm_attrs attrs; /* transformation attributes */
};
enum {
/* Caller requires per-packet metadata headers; xfrm creation fails
 * with -EINVAL if this flag is absent (the FPGA path mandates it).
 */
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
};
enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
MLX5_ACCEL_IPSEC_CAP_V2_CMD = 1 << 6,
}; };
#ifdef CONFIG_MLX5_ACCEL #ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
#else #else
static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
static inline void
mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
#endif #endif
#endif #endif

View File

@ -448,6 +448,15 @@ struct mlx5_ifc_fpga_ipsec_cmd_resp {
u8 reserved[24]; u8 reserved[24];
} __packed; } __packed;
/* Opcodes for commands sent to the FPGA IPSec implementation.
 * V2 variants correspond to the extended (v2_command) capability.
 */
enum mlx5_ifc_fpga_ipsec_cmd_opcode {
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
};
enum mlx5_ifc_fpga_ipsec_cap { enum mlx5_ifc_fpga_ipsec_cap {
MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
}; };
@ -458,4 +467,54 @@ struct mlx5_ifc_fpga_ipsec_cmd_cap {
u8 reserved[24]; u8 reserved[24];
} __packed; } __packed;
/* Flag bits carried in the flags byte of the FPGA SA record.
 * Bits 0-1 are not defined here — presumably reserved, confirm with the
 * device interface spec.
 */
enum mlx5_ifc_fpga_ipsec_sa_flags {
MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), /* addresses are IPv6 */
MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), /* direction bit — confirm which direction "SX" denotes */
MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), /* match on SPI */
MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), /* SA entry is valid */
MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), /* ESP protocol */
MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), /* AH protocol */
};
/* Encryption mode encoded in the enc_mode byte of the FPGA SA record.
 * Note the gap at value 2 — it is defined by the device interface, not here.
 */
enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
};
/* Version-1 FPGA SA record, as sent to the device. Multi-byte fields
 * are big-endian (__be*). The anonymous union holds per-cipher salt/IV
 * layout (GCM vs CBC).
 */
struct mlx5_ifc_fpga_ipsec_sa_v1 {
__be32 cmd; /* command opcode (enum mlx5_ifc_fpga_ipsec_cmd_opcode) — confirm */
u8 key_enc[32]; /* encryption key */
u8 key_auth[32]; /* authentication key */
__be32 sip[4]; /* source IP (IPv4 uses part of this — confirm placement) */
__be32 dip[4]; /* destination IP */
union {
struct {
__be32 reserved;
u8 salt_iv[8]; /* GCM salted IV */
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi; /* security parameter index */
__be32 sw_sa_handle; /* software handle echoed back by the device */
__be16 tfclen; /* TFC padding length */
u8 enc_mode; /* enum mlx5_ifc_fpga_ipsec_sa_enc_mode */
u8 reserved1[2];
u8 flags; /* enum mlx5_ifc_fpga_ipsec_sa_flags */
u8 reserved2[2];
};
/* Version-2 FPGA SA record: the v1 record followed by v2-only fields
 * (UDP encapsulation ports, ESN, VLAN id). __packed because this is a
 * device wire format.
 */
struct mlx5_ifc_fpga_ipsec_sa {
struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; /* v1 prefix, layout-compatible */
__be16 udp_sp; /* UDP source port */
__be16 udp_dp; /* UDP destination port */
u8 reserved1[4];
__be32 esn; /* extended sequence number */
__be16 vid; /* only 12 bits, rest is reserved */
__be16 reserved2;
} __packed;
#endif /* MLX5_IFC_FPGA_H */ #endif /* MLX5_IFC_FPGA_H */