tls: convert to generic sk_msg interface
Convert kTLS over to make use of the sk_msg interface for plaintext and encrypted scattergather data, so it reuses all the sk_msg helpers and data structures; in a second step this enables gluing it to BPF. This also allows removing quite a few open-coded helpers which are covered by the sk_msg API.

Recent changes in kTLS, 80ece6a03a ("tls: Remove redundant vars from tls record structure") and 4e6d47206c ("tls: Add support for inplace records encryption"), changed the data path handling a bit; while we've kept the latter optimization intact, we had to undo the former change to better fit the sk_msg model, hence sg_aead_in and sg_aead_out have been brought back and are linked into the sk_msg sgs. Now each kTLS record contains a msg_plaintext and a msg_encrypted sk_msg.

In the original code, zerocopy_from_iter() was used on the TX but also the RX path. For the strparser skb-based RX path, we've left the zerocopy_from_iter() in decrypt_internal() mostly untouched, meaning it has been moved into tls_setup_from_iter() with the charging logic removed (as it is not used from RX). Given the RX path is not based on sk_msg objects, we haven't pursued setting up a dummy sk_msg to call into sk_msg_zerocopy_from_iter(), but it could be an option to pursue in a later step.

Joint work with John.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 604326b41a
commit d829e9c411
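
For orientation before the diff: each TLS record now owns two sk_msgs, while a two-entry scatterlist per direction splices the AAD buffer in front of the sk_msg pages. A condensed C sketch of what get_rec() and tls_push_record() set up below (kernel context assumed; the wrapper function name is ours, not part of the patch):

	/* Entry 0 of sg_aead_in/out holds the AAD, entry 1 is only a
	 * chain link into the sk_msg's scatterlist ring.
	 */
	static void tls_rec_wire_aead_sketch(struct tls_rec *rec)
	{
		struct sk_msg *msg_pl = &rec->msg_plaintext;
		struct sk_msg *msg_en = &rec->msg_encrypted;
		u32 i;

		sg_init_table(rec->sg_aead_in, 2);
		sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
			   sizeof(rec->aad_space));
		sg_unmark_end(&rec->sg_aead_in[1]);

		/* At push time, chain entry 1 into the payload pages so the
		 * AEAD walks the AAD first, then the record data. With
		 * inplace_crypto the input chains to the encrypted sgs,
		 * keeping src == dst.
		 */
		i = msg_pl->sg.start;
		sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
			 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
	}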
include/linux/skmsg.h
@@ -102,6 +102,8 @@ struct sk_psock {
 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		  int elem_first_coalesce);
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+		 u32 off, u32 len);
 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
 int sk_msg_free(struct sock *sk, struct sk_msg *msg);
 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);

include/net/sock.h
@@ -2214,10 +2214,6 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-		int sg_start, int *sg_curr, unsigned int *sg_size,
-		int first_coalesce);
-
 /*
  * Default write policy as shown to user space via poll/select/SIGIO
  */

include/net/tls.h
@@ -39,6 +39,8 @@
 #include <linux/crypto.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
+#include <linux/skmsg.h>
+
 #include <net/tcp.h>
 #include <net/strparser.h>
 #include <crypto/aead.h>

@@ -103,15 +105,13 @@ struct tls_rec {
 	int tx_flags;
 	int inplace_crypto;
 
-	/* AAD | sg_plaintext_data | sg_tag */
-	struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS + 1];
-	/* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
-	struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS + 1];
+	struct sk_msg msg_plaintext;
+	struct sk_msg msg_encrypted;
 
-	unsigned int sg_plaintext_size;
-	unsigned int sg_encrypted_size;
-	int sg_plaintext_num_elem;
-	int sg_encrypted_num_elem;
+	/* AAD | msg_plaintext.sg.data | sg_tag */
+	struct scatterlist sg_aead_in[2];
+	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
+	struct scatterlist sg_aead_out[2];
 
 	char aad_space[TLS_AAD_SPACE_SIZE];
 	struct aead_request aead_req;

@@ -223,8 +223,8 @@ struct tls_context {
 	unsigned long flags;
 	bool in_tcp_sendpages;
+	bool pending_open_record_frags;
 
-	u16 pending_open_record_frags;
 	int (*push_pending_record)(struct sock *sk, int flags);
 
 	void (*sk_write_space)(struct sock *sk);

net/core/skmsg.c
@@ -73,6 +73,45 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
 }
 EXPORT_SYMBOL_GPL(sk_msg_alloc);
 
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+		 u32 off, u32 len)
+{
+	int i = src->sg.start;
+	struct scatterlist *sge = sk_msg_elem(src, i);
+	u32 sge_len, sge_off;
+
+	if (sk_msg_full(dst))
+		return -ENOSPC;
+
+	while (off) {
+		if (sge->length > off)
+			break;
+		off -= sge->length;
+		sk_msg_iter_var_next(i);
+		if (i == src->sg.end && off)
+			return -ENOSPC;
+		sge = sk_msg_elem(src, i);
+	}
+
+	while (len) {
+		sge_len = sge->length - off;
+		sge_off = sge->offset + off;
+		if (sge_len > len)
+			sge_len = len;
+		off = 0;
+		len -= sge_len;
+		sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+		sk_mem_charge(sk, sge_len);
+		sk_msg_iter_var_next(i);
+		if (i == src->sg.end && len)
+			return -ENOSPC;
+		sge = sk_msg_elem(src, i);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sk_msg_clone);
+
 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 {
 	int i = msg->sg.start;

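sk_msg_clone() walks src's ring from sg.start, skips off bytes, then appends page references covering len bytes to dst via sk_msg_page_add(), charging the socket as it goes; no payload bytes are copied, and -ENOSPC is returned if dst is already full or src is exhausted first. A hedged sketch of the intended calling pattern (kernel context; the wrapper name is ours), mirroring tls_clone_plaintext_msg() later in this diff:

	/* Share msg_en's payload bytes [skip, skip + len) into msg_pl so
	 * both sk_msgs reference the same pages (page refcounts are taken
	 * inside sk_msg_clone() via sk_msg_page_add()).
	 */
	static int tls_share_range_sketch(struct sock *sk,
					  struct tls_context *tls_ctx,
					  struct tls_rec *rec, int required)
	{
		struct sk_msg *msg_pl = &rec->msg_plaintext;
		struct sk_msg *msg_en = &rec->msg_encrypted;
		u32 len, skip;

		/* Bytes still missing from the plaintext message ... */
		len = required - msg_pl->sg.size;
		/* ... already sit in msg_en past the TLS header area, at
		 * the same data offset in both messages.
		 */
		skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
		return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
	}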
net/core/sock.c
@@ -2238,67 +2238,6 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 }
 EXPORT_SYMBOL(sk_page_frag_refill);
 
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-		int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
-		int first_coalesce)
-{
-	int sg_curr = *sg_curr_index, use = 0, rc = 0;
-	unsigned int size = *sg_curr_size;
-	struct page_frag *pfrag;
-	struct scatterlist *sge;
-
-	len -= size;
-	pfrag = sk_page_frag(sk);
-
-	while (len > 0) {
-		unsigned int orig_offset;
-
-		if (!sk_page_frag_refill(sk, pfrag)) {
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		use = min_t(int, len, pfrag->size - pfrag->offset);
-
-		if (!sk_wmem_schedule(sk, use)) {
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		sk_mem_charge(sk, use);
-		size += use;
-		orig_offset = pfrag->offset;
-		pfrag->offset += use;
-
-		sge = sg + sg_curr - 1;
-		if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
-		    sge->offset + sge->length == orig_offset) {
-			sge->length += use;
-		} else {
-			sge = sg + sg_curr;
-			sg_unmark_end(sge);
-			sg_set_page(sge, pfrag->page, use, orig_offset);
-			get_page(pfrag->page);
-			sg_curr++;
-
-			if (sg_curr == MAX_SKB_FRAGS)
-				sg_curr = 0;
-
-			if (sg_curr == sg_start) {
-				rc = -ENOSPC;
-				break;
-			}
-		}
-
-		len -= use;
-	}
-out:
-	*sg_curr_size = size;
-	*sg_curr_index = sg_curr;
-	return rc;
-}
-EXPORT_SYMBOL(sk_alloc_sg);
-
 static void __lock_sock(struct sock *sk)
 	__releases(&sk->sk_lock.slock)
 	__acquires(&sk->sk_lock.slock)

net/tls/Kconfig
@@ -8,6 +8,7 @@ config TLS
 	select CRYPTO_AES
 	select CRYPTO_GCM
 	select STREAM_PARSER
+	select NET_SOCK_MSG
 	default n
 	---help---
 	Enable kernel support for TLS protocol. This allows symmetric

net/tls/tls_device.c
@@ -421,7 +421,7 @@ static int tls_push_data(struct sock *sk,
 	tls_push_record_flags = flags;
 	if (more) {
-		tls_ctx->pending_open_record_frags = record->num_frags;
+		tls_ctx->pending_open_record_frags = !!record->num_frags;
 		break;
 	}
 
net/tls/tls_sw.c
@@ -213,153 +213,49 @@ static int tls_do_decryption(struct sock *sk,
 	return ret;
 }
 
-static void trim_sg(struct sock *sk, struct scatterlist *sg,
-		    int *sg_num_elem, unsigned int *sg_size, int target_size)
-{
-	int i = *sg_num_elem - 1;
-	int trim = *sg_size - target_size;
-
-	if (trim <= 0) {
-		WARN_ON(trim < 0);
-		return;
-	}
-
-	*sg_size = target_size;
-	while (trim >= sg[i].length) {
-		trim -= sg[i].length;
-		sk_mem_uncharge(sk, sg[i].length);
-		put_page(sg_page(&sg[i]));
-		i--;
-
-		if (i < 0)
-			goto out;
-	}
-
-	sg[i].length -= trim;
-	sk_mem_uncharge(sk, trim);
-
-out:
-	*sg_num_elem = i + 1;
-}
-
-static void trim_both_sgl(struct sock *sk, int target_size)
+static void tls_trim_both_msgs(struct sock *sk, int target_size)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
 
-	trim_sg(sk, &rec->sg_plaintext_data[1],
-		&rec->sg_plaintext_num_elem,
-		&rec->sg_plaintext_size,
-		target_size);
-
+	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
 	if (target_size > 0)
 		target_size += tls_ctx->tx.overhead_size;
-
-	trim_sg(sk, &rec->sg_encrypted_data[1],
-		&rec->sg_encrypted_num_elem,
-		&rec->sg_encrypted_size,
-		target_size);
+	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
 }
 
-static int alloc_encrypted_sg(struct sock *sk, int len)
+static int tls_alloc_encrypted_msg(struct sock *sk, int len)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
-	int rc = 0;
-
-	rc = sk_alloc_sg(sk, len,
-			 &rec->sg_encrypted_data[1], 0,
-			 &rec->sg_encrypted_num_elem,
-			 &rec->sg_encrypted_size, 0);
-
-	if (rc == -ENOSPC)
-		rec->sg_encrypted_num_elem =
-			ARRAY_SIZE(rec->sg_encrypted_data) - 1;
+	struct sk_msg *msg_en = &rec->msg_encrypted;
 
-	return rc;
+	return sk_msg_alloc(sk, msg_en, len, 0);
 }
 
-static int move_to_plaintext_sg(struct sock *sk, int required_size)
+static int tls_clone_plaintext_msg(struct sock *sk, int required)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
-	struct scatterlist *plain_sg = &rec->sg_plaintext_data[1];
-	struct scatterlist *enc_sg = &rec->sg_encrypted_data[1];
-	int enc_sg_idx = 0;
+	struct sk_msg *msg_pl = &rec->msg_plaintext;
+	struct sk_msg *msg_en = &rec->msg_encrypted;
 	int skip, len;
 
-	if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
-		return -ENOSPC;
-
-	/* We add page references worth len bytes from enc_sg at the
-	 * end of plain_sg. It is guaranteed that sg_encrypted_data
+	/* We add page references worth len bytes from encrypted sg
+	 * at the end of plaintext sg. It is guaranteed that msg_en
 	 * has enough required room (ensured by caller).
 	 */
-	len = required_size - rec->sg_plaintext_size;
+	len = required - msg_pl->sg.size;
 
-	/* Skip initial bytes in sg_encrypted_data to be able
-	 * to use same offset of both plain and encrypted data.
+	/* Skip initial bytes in msg_en's data to be able to use
+	 * same offset of both plain and encrypted data.
 	 */
-	skip = tls_ctx->tx.prepend_size + rec->sg_plaintext_size;
+	skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
 
-	while (enc_sg_idx < rec->sg_encrypted_num_elem) {
-		if (enc_sg[enc_sg_idx].length > skip)
-			break;
-
-		skip -= enc_sg[enc_sg_idx].length;
-		enc_sg_idx++;
-	}
-
-	/* unmark the end of plain_sg*/
-	sg_unmark_end(plain_sg + rec->sg_plaintext_num_elem - 1);
-
-	while (len) {
-		struct page *page = sg_page(&enc_sg[enc_sg_idx]);
-		int bytes = enc_sg[enc_sg_idx].length - skip;
-		int offset = enc_sg[enc_sg_idx].offset + skip;
-
-		if (bytes > len)
-			bytes = len;
-		else
-			enc_sg_idx++;
-
-		/* Skipping is required only one time */
-		skip = 0;
-
-		/* Increment page reference */
-		get_page(page);
-
-		sg_set_page(&plain_sg[rec->sg_plaintext_num_elem], page,
-			    bytes, offset);
-
-		sk_mem_charge(sk, bytes);
-
-		len -= bytes;
-		rec->sg_plaintext_size += bytes;
-
-		rec->sg_plaintext_num_elem++;
-
-		if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
-			return -ENOSPC;
-	}
-
-	return 0;
-}
-
-static void free_sg(struct sock *sk, struct scatterlist *sg,
-		    int *sg_num_elem, unsigned int *sg_size)
-{
-	int i, n = *sg_num_elem;
-
-	for (i = 0; i < n; ++i) {
-		sk_mem_uncharge(sk, sg[i].length);
-		put_page(sg_page(&sg[i]));
-	}
-	*sg_num_elem = 0;
-	*sg_size = 0;
+	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
 }
 
 static void tls_free_open_rec(struct sock *sk)

@@ -372,14 +268,8 @@ static void tls_free_open_rec(struct sock *sk)
 	if (!rec)
 		return;
 
-	free_sg(sk, &rec->sg_encrypted_data[1],
-		&rec->sg_encrypted_num_elem,
-		&rec->sg_encrypted_size);
-
-	free_sg(sk, &rec->sg_plaintext_data[1],
-		&rec->sg_plaintext_num_elem,
-		&rec->sg_plaintext_size);
-
+	sk_msg_free(sk, &rec->msg_encrypted);
+	sk_msg_free(sk, &rec->msg_plaintext);
 	kfree(rec);
 }

@@ -388,6 +278,7 @@ int tls_tx_records(struct sock *sk, int flags)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec, *tmp;
+	struct sk_msg *msg_en;
 	int tx_flags, rc = 0;
 
 	if (tls_is_partially_sent_record(tls_ctx)) {

@@ -407,9 +298,7 @@ int tls_tx_records(struct sock *sk, int flags)
 		 * Remove the head of tx_list
 		 */
 		list_del(&rec->list);
-		free_sg(sk, &rec->sg_plaintext_data[1],
-			&rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
-
+		sk_msg_free(sk, &rec->msg_plaintext);
 		kfree(rec);
 	}

@@ -421,17 +310,15 @@ int tls_tx_records(struct sock *sk, int flags)
 		else
 			tx_flags = flags;
 
+		msg_en = &rec->msg_encrypted;
 		rc = tls_push_sg(sk, tls_ctx,
-				 &rec->sg_encrypted_data[1],
+				 &msg_en->sg.data[msg_en->sg.curr],
 				 0, tx_flags);
 		if (rc)
 			goto tx_err;
 
 		list_del(&rec->list);
-		free_sg(sk, &rec->sg_plaintext_data[1],
-			&rec->sg_plaintext_num_elem,
-			&rec->sg_plaintext_size);
-
+		sk_msg_free(sk, &rec->msg_plaintext);
 		kfree(rec);
 	} else {
 		break;

@@ -451,15 +338,18 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	struct sock *sk = req->data;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+	struct scatterlist *sge;
+	struct sk_msg *msg_en;
 	struct tls_rec *rec;
 	bool ready = false;
 	int pending;
 
 	rec = container_of(aead_req, struct tls_rec, aead_req);
+	msg_en = &rec->msg_encrypted;
 
-	rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
-	rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
+	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+	sge->offset -= tls_ctx->tx.prepend_size;
+	sge->length += tls_ctx->tx.prepend_size;
 
 	/* Check if error is previously set on socket */
 	if (err || sk->sk_err) {

|
|||
|
||||
/* Schedule the transmission */
|
||||
if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
|
||||
schedule_delayed_work(&ctx->tx_work.work, 2);
|
||||
schedule_delayed_work(&ctx->tx_work.work, 1);
|
||||
}
|
||||
|
||||
static int tls_do_encryption(struct sock *sk,
|
||||
struct tls_context *tls_ctx,
|
||||
struct tls_sw_context_tx *ctx,
|
||||
struct aead_request *aead_req,
|
||||
size_t data_len)
|
||||
size_t data_len, u32 start)
|
||||
{
|
||||
struct tls_rec *rec = ctx->open_rec;
|
||||
struct scatterlist *plain_sg = rec->sg_plaintext_data;
|
||||
struct scatterlist *enc_sg = rec->sg_encrypted_data;
|
||||
struct sk_msg *msg_en = &rec->msg_encrypted;
|
||||
struct scatterlist *sge = sk_msg_elem(msg_en, start);
|
||||
int rc;
|
||||
|
||||
/* Skip the first index as it contains AAD data */
|
||||
rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size;
|
||||
rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size;
|
||||
sge->offset += tls_ctx->tx.prepend_size;
|
||||
sge->length -= tls_ctx->tx.prepend_size;
|
||||
|
||||
/* If it is inplace crypto, then pass same SG list as both src, dst */
|
||||
if (rec->inplace_crypto)
|
||||
plain_sg = enc_sg;
|
||||
msg_en->sg.curr = start;
|
||||
|
||||
aead_request_set_tfm(aead_req, ctx->aead_send);
|
||||
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
|
||||
aead_request_set_crypt(aead_req, plain_sg, enc_sg,
|
||||
aead_request_set_crypt(aead_req, rec->sg_aead_in,
|
||||
rec->sg_aead_out,
|
||||
data_len, tls_ctx->tx.iv);
|
||||
|
||||
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
|
@@ -534,8 +422,8 @@ static int tls_do_encryption(struct sock *sk,
 	rc = crypto_aead_encrypt(aead_req);
 	if (!rc || rc != -EINPROGRESS) {
 		atomic_dec(&ctx->encrypt_pending);
-		rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
-		rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
+		sge->offset -= tls_ctx->tx.prepend_size;
+		sge->length += tls_ctx->tx.prepend_size;
 	}
 
 	if (!rc) {

@@ -557,35 +445,50 @@ static int tls_push_record(struct sock *sk, int flags,
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
+	struct sk_msg *msg_pl, *msg_en;
 	struct aead_request *req;
 	int rc;
+	u32 i;
 
 	if (!rec)
 		return 0;
 
+	msg_pl = &rec->msg_plaintext;
+	msg_en = &rec->msg_encrypted;
+
 	rec->tx_flags = flags;
 	req = &rec->aead_req;
 
-	sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem);
-	sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem);
+	i = msg_pl->sg.end;
+	sk_msg_iter_var_prev(i);
+	sg_mark_end(sk_msg_elem(msg_pl, i));
 
-	tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
+	i = msg_pl->sg.start;
+	sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
+		 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
+
+	i = msg_en->sg.end;
+	sk_msg_iter_var_prev(i);
+	sg_mark_end(sk_msg_elem(msg_en, i));
+
+	i = msg_en->sg.start;
+	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
+
+	tls_make_aad(rec->aad_space, msg_pl->sg.size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);
 
 	tls_fill_prepend(tls_ctx,
-			 page_address(sg_page(&rec->sg_encrypted_data[1])) +
-			 rec->sg_encrypted_data[1].offset,
-			 rec->sg_plaintext_size, record_type);
+			 page_address(sg_page(&msg_en->sg.data[i])) +
+			 msg_en->sg.data[i].offset, msg_pl->sg.size,
+			 record_type);
 
-	tls_ctx->pending_open_record_frags = 0;
+	tls_ctx->pending_open_record_frags = false;
 
-	rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
-	if (rc == -EINPROGRESS)
-		return -EINPROGRESS;
+	rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
 	if (rc < 0) {
-		tls_err_abort(sk, EBADMSG);
+		if (rc != -EINPROGRESS)
+			tls_err_abort(sk, EBADMSG);
 		return rc;
 	}

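The net effect at push time: the AEAD no longer walks flat MAX_SKB_FRAGS arrays but the two chained lists, and the in-place optimization from 4e6d47206c reduces to choosing which sk_msg the input chains to. Condensed from tls_push_record()/tls_do_encryption() above, with comments added (a sketch of the resulting request setup, not a complete function):

	/* src: aad_space -> plaintext pages (or the encrypted pages when
	 * rec->inplace_crypto is set, so src == dst);
	 * dst: aad_space -> encrypted pages; data_len excludes the AAD.
	 */
	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, rec->sg_aead_in, rec->sg_aead_out,
			       data_len, tls_ctx->tx.iv);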
@@ -597,104 +500,11 @@ static int tls_sw_push_pending_record(struct sock *sk, int flags)
 	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
 }
 
-static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
-			      int length, int *pages_used,
-			      unsigned int *size_used,
-			      struct scatterlist *to, int to_max_pages,
-			      bool charge)
-{
-	struct page *pages[MAX_SKB_FRAGS];
-
-	size_t offset;
-	ssize_t copied, use;
-	int i = 0;
-	unsigned int size = *size_used;
-	int num_elem = *pages_used;
-	int rc = 0;
-	int maxpages;
-
-	while (length > 0) {
-		i = 0;
-		maxpages = to_max_pages - num_elem;
-		if (maxpages == 0) {
-			rc = -EFAULT;
-			goto out;
-		}
-		copied = iov_iter_get_pages(from, pages,
-					    length,
-					    maxpages, &offset);
-		if (copied <= 0) {
-			rc = -EFAULT;
-			goto out;
-		}
-
-		iov_iter_advance(from, copied);
-
-		length -= copied;
-		size += copied;
-		while (copied) {
-			use = min_t(int, copied, PAGE_SIZE - offset);
-
-			sg_set_page(&to[num_elem],
-				    pages[i], use, offset);
-			sg_unmark_end(&to[num_elem]);
-			if (charge)
-				sk_mem_charge(sk, use);
-
-			offset = 0;
-			copied -= use;
-
-			++i;
-			++num_elem;
-		}
-	}
-
-	/* Mark the end in the last sg entry if newly added */
-	if (num_elem > *pages_used)
-		sg_mark_end(&to[num_elem - 1]);
-out:
-	if (rc)
-		iov_iter_revert(from, size - *size_used);
-	*size_used = size;
-	*pages_used = num_elem;
-
-	return rc;
-}
-
-static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
-			     int bytes)
-{
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-	struct tls_rec *rec = ctx->open_rec;
-	struct scatterlist *sg = &rec->sg_plaintext_data[1];
-	int copy, i, rc = 0;
-
-	for (i = tls_ctx->pending_open_record_frags;
-	     i < rec->sg_plaintext_num_elem; ++i) {
-		copy = sg[i].length;
-		if (copy_from_iter(
-				page_address(sg_page(&sg[i])) + sg[i].offset,
-				copy, from) != copy) {
-			rc = -EFAULT;
-			goto out;
-		}
-		bytes -= copy;
-
-		++tls_ctx->pending_open_record_frags;
-
-		if (!bytes)
-			break;
-	}
-
-out:
-	return rc;
-}
-
 static struct tls_rec *get_rec(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+	struct sk_msg *msg_pl, *msg_en;
 	struct tls_rec *rec;
 	int mem_size;

@@ -708,15 +518,21 @@ static struct tls_rec *get_rec(struct sock *sk)
 	if (!rec)
 		return NULL;
 
-	sg_init_table(&rec->sg_plaintext_data[0],
-		      ARRAY_SIZE(rec->sg_plaintext_data));
-	sg_init_table(&rec->sg_encrypted_data[0],
-		      ARRAY_SIZE(rec->sg_encrypted_data));
+	msg_pl = &rec->msg_plaintext;
+	msg_en = &rec->msg_encrypted;
 
-	sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
+	sk_msg_init(msg_pl);
+	sk_msg_init(msg_en);
+
+	sg_init_table(rec->sg_aead_in, 2);
+	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
		   sizeof(rec->aad_space));
-	sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
+	sg_unmark_end(&rec->sg_aead_in[1]);
+
+	sg_init_table(rec->sg_aead_out, 2);
+	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
		   sizeof(rec->aad_space));
+	sg_unmark_end(&rec->sg_aead_out[1]);
 
 	ctx->open_rec = rec;
 	rec->inplace_crypto = 1;

@@ -735,6 +551,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
 	bool eor = !(msg->msg_flags & MSG_MORE);
 	size_t try_to_copy, copied = 0;
+	struct sk_msg *msg_pl, *msg_en;
 	struct tls_rec *rec;
 	int required_size;
 	int num_async = 0;

@@ -778,23 +595,26 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
			goto send_end;
		}
 
-		orig_size = rec->sg_plaintext_size;
+		msg_pl = &rec->msg_plaintext;
+		msg_en = &rec->msg_encrypted;
+
+		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
-		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
+		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}
 
-		required_size = rec->sg_plaintext_size + try_to_copy +
+		required_size = msg_pl->sg.size + try_to_copy +
				tls_ctx->tx.overhead_size;
 
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
-		ret = alloc_encrypted_sg(sk, required_size);
+		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

@@ -803,17 +623,13 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
-			try_to_copy -= required_size - rec->sg_encrypted_size;
+			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}
 
		if (!is_kvec && (full_record || eor) && !async_capable) {
-			ret = zerocopy_from_iter(sk, &msg->msg_iter,
-				try_to_copy, &rec->sg_plaintext_num_elem,
-				&rec->sg_plaintext_size,
-				&rec->sg_plaintext_data[1],
-				ARRAY_SIZE(rec->sg_plaintext_data) - 1,
-				true);
+			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
+							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

@@ -831,15 +647,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
			continue;
 
fallback_to_reg_send:
-			trim_sg(sk, &rec->sg_plaintext_data[1],
-				&rec->sg_plaintext_num_elem,
-				&rec->sg_plaintext_size,
-				orig_size);
+			sk_msg_trim(sk, msg_pl, orig_size);
		}
 
-		required_size = rec->sg_plaintext_size + try_to_copy;
+		required_size = msg_pl->sg.size + try_to_copy;
 
-		ret = move_to_plaintext_sg(sk, required_size);
+		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

@@ -848,20 +661,21 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
-			try_to_copy -= required_size - rec->sg_plaintext_size;
+			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
-
-			trim_sg(sk, &rec->sg_encrypted_data[1],
-				&rec->sg_encrypted_num_elem,
-				&rec->sg_encrypted_size,
-				rec->sg_plaintext_size +
-				tls_ctx->tx.overhead_size);
+			sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+				    tls_ctx->tx.overhead_size);
		}
 
-		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
-		if (ret)
+		ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
+					       try_to_copy);
+		if (ret < 0)
			goto trim_sgl;
 
+		/* Open records defined only if successfully copied, otherwise
+		 * we would trim the sg but not reset the open record frags.
+		 */
+		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = tls_push_record(sk, msg->msg_flags, record_type);

@@ -881,11 +695,11 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
-			trim_both_sgl(sk, orig_size);
+			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}
 
-		if (rec->sg_encrypted_size < required_size)
+		if (msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

@@ -929,7 +743,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
 	size_t orig_size = size;
-	struct scatterlist *sg;
+	struct sk_msg *msg_pl;
 	struct tls_rec *rec;
 	int num_async = 0;
 	bool full_record;

@@ -970,20 +784,23 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
			goto sendpage_end;
		}
 
+		msg_pl = &rec->msg_plaintext;
+
		full_record = false;
-		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
+		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
-		required_size = rec->sg_plaintext_size + copy +
-				tls_ctx->tx.overhead_size;
+
+		required_size = msg_pl->sg.size + copy +
+				tls_ctx->tx.overhead_size;
 
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
-		ret = alloc_encrypted_sg(sk, required_size);
+		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

@@ -992,26 +809,18 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
-			copy -= required_size - rec->sg_plaintext_size;
+			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}
 
		get_page(page);
-		sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem;
-		sg_set_page(sg, page, copy, offset);
-		sg_unmark_end(sg);
-
-		rec->sg_plaintext_num_elem++;
-
+		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);
+
		offset += copy;
		size -= copy;
-		rec->sg_plaintext_size += copy;
-		tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;
-
-		if (full_record || eor ||
-		    rec->sg_plaintext_num_elem ==
-		    ARRAY_SIZE(rec->sg_plaintext_data) - 1) {
+		tls_ctx->pending_open_record_frags = true;
+
+		if (full_record || eor || sk_msg_full(msg_pl)) {
			rec->inplace_crypto = 0;
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {

@@ -1027,7 +836,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
-			trim_both_sgl(sk, rec->sg_plaintext_size);
+			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}
 
@@ -1092,6 +901,64 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
 	return skb;
 }
 
+static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
+			       int length, int *pages_used,
+			       unsigned int *size_used,
+			       struct scatterlist *to,
+			       int to_max_pages)
+{
+	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
+	struct page *pages[MAX_SKB_FRAGS];
+	unsigned int size = *size_used;
+	ssize_t copied, use;
+	size_t offset;
+
+	while (length > 0) {
+		i = 0;
+		maxpages = to_max_pages - num_elem;
+		if (maxpages == 0) {
+			rc = -EFAULT;
+			goto out;
+		}
+		copied = iov_iter_get_pages(from, pages,
+					    length,
+					    maxpages, &offset);
+		if (copied <= 0) {
+			rc = -EFAULT;
+			goto out;
+		}
+
+		iov_iter_advance(from, copied);
+
+		length -= copied;
+		size += copied;
+		while (copied) {
+			use = min_t(int, copied, PAGE_SIZE - offset);
+
+			sg_set_page(&to[num_elem],
+				    pages[i], use, offset);
+			sg_unmark_end(&to[num_elem]);
+			/* We do not uncharge memory from this API */
+
+			offset = 0;
+			copied -= use;
+
+			i++;
+			num_elem++;
+		}
+	}
+	/* Mark the end in the last sg entry if newly added */
+	if (num_elem > *pages_used)
+		sg_mark_end(&to[num_elem - 1]);
+out:
+	if (rc)
+		iov_iter_revert(from, size - *size_used);
+	*size_used = size;
+	*pages_used = num_elem;
+
+	return rc;
+}
+
 /* This function decrypts the input skb into either out_iov or in out_sg
  * or in skb buffers itself. The input parameter 'zc' indicates if
  * zero-copy mode needs to be tried or not. With zero-copy mode, either

@@ -1189,9 +1056,9 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
		sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
 
		*chunk = 0;
-		err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
-					 chunk, &sgout[1],
-					 (n_sgout - 1), false);
+		err = tls_setup_from_iter(sk, out_iov, data_len,
+					  &pages, chunk, &sgout[1],
+					  (n_sgout - 1));
		if (err < 0)
			goto fallback_to_reg_recv;
	} else if (out_sg) {

@@ -1619,25 +1486,15 @@ void tls_sw_free_resources_tx(struct sock *sk)
 
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
-
-		free_sg(sk, &rec->sg_plaintext_data[1],
-			&rec->sg_plaintext_num_elem,
-			&rec->sg_plaintext_size);
-
		list_del(&rec->list);
+		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}
 
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
-		free_sg(sk, &rec->sg_encrypted_data[1],
-			&rec->sg_encrypted_num_elem,
-			&rec->sg_encrypted_size);
-
-		free_sg(sk, &rec->sg_plaintext_data[1],
-			&rec->sg_plaintext_num_elem,
-			&rec->sg_plaintext_size);
-
		list_del(&rec->list);
+		sk_msg_free(sk, &rec->msg_encrypted);
+		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}