net: filter: x86: internal BPF JIT
Maps all internal BPF instructions into x86_64 instructions.
This patch replaces the original BPF x64 JIT with the internal BPF x64 JIT.
sysctl net.core.bpf_jit_enable is reused as the on/off switch.

Performance:

1. old BPF JIT and internal BPF JIT generate equivalent x86_64 code.
   No performance difference is observed for filters that were JIT-able before.

Example assembler code for BPF filter "tcpdump port 22"

original BPF -> old JIT:                original BPF -> internal BPF -> new JIT:
   0: push   %rbp                          0: push   %rbp
   1: mov    %rsp,%rbp                     1: mov    %rsp,%rbp
   4: sub    $0x60,%rsp                    4: sub    $0x228,%rsp
   8: mov    %rbx,-0x8(%rbp)               b: mov    %rbx,-0x228(%rbp) // prologue
                                          12: mov    %r13,-0x220(%rbp)
                                          19: mov    %r14,-0x218(%rbp)
                                          20: mov    %r15,-0x210(%rbp)
                                          27: xor    %eax,%eax          // clear A
   c: xor    %ebx,%ebx                    29: xor    %r13,%r13          // clear X
   e: mov    0x68(%rdi),%r9d              2c: mov    0x68(%rdi),%r9d
  12: sub    0x6c(%rdi),%r9d              30: sub    0x6c(%rdi),%r9d
  16: mov    0xd8(%rdi),%r8               34: mov    0xd8(%rdi),%r10
                                          3b: mov    %rdi,%rbx
  1d: mov    $0xc,%esi                    3e: mov    $0xc,%esi
  22: callq  0xffffffffe1021e15           43: callq  0xffffffffe102bd75
  27: cmp    $0x86dd,%eax                 48: cmp    $0x86dd,%rax
  2c: jne    0x0000000000000069           4f: jne    0x000000000000009a
  2e: mov    $0x14,%esi                   51: mov    $0x14,%esi
  33: callq  0xffffffffe1021e31           56: callq  0xffffffffe102bd91
  38: cmp    $0x84,%eax                   5b: cmp    $0x84,%rax
  3d: je     0x0000000000000049           62: je     0x0000000000000074
  3f: cmp    $0x6,%eax                    64: cmp    $0x6,%rax
  42: je     0x0000000000000049           68: je     0x0000000000000074
  44: cmp    $0x11,%eax                   6a: cmp    $0x11,%rax
  47: jne    0x00000000000000c6           6e: jne    0x0000000000000117
  49: mov    $0x36,%esi                   74: mov    $0x36,%esi
  4e: callq  0xffffffffe1021e15           79: callq  0xffffffffe102bd75
  53: cmp    $0x16,%eax                   7e: cmp    $0x16,%rax
  56: je     0x00000000000000bf           82: je     0x0000000000000110
  58: mov    $0x38,%esi                   88: mov    $0x38,%esi
  5d: callq  0xffffffffe1021e15           8d: callq  0xffffffffe102bd75
  62: cmp    $0x16,%eax                   92: cmp    $0x16,%rax
  65: je     0x00000000000000bf           96: je     0x0000000000000110
  67: jmp    0x00000000000000c6           98: jmp    0x0000000000000117
  69: cmp    $0x800,%eax                  9a: cmp    $0x800,%rax
  6e: jne    0x00000000000000c6           a1: jne    0x0000000000000117
  70: mov    $0x17,%esi                   a3: mov    $0x17,%esi
  75: callq  0xffffffffe1021e31           a8: callq  0xffffffffe102bd91
  7a: cmp    $0x84,%eax                   ad: cmp    $0x84,%rax
  7f: je     0x000000000000008b           b4: je     0x00000000000000c2
  81: cmp    $0x6,%eax                    b6: cmp    $0x6,%rax
  84: je     0x000000000000008b           ba: je     0x00000000000000c2
  86: cmp    $0x11,%eax                   bc: cmp    $0x11,%rax
  89: jne    0x00000000000000c6           c0: jne    0x0000000000000117
  8b: mov    $0x14,%esi                   c2: mov    $0x14,%esi
  90: callq  0xffffffffe1021e15           c7: callq  0xffffffffe102bd75
  95: test   $0x1fff,%ax                  cc: test   $0x1fff,%rax
  99: jne    0x00000000000000c6           d3: jne    0x0000000000000117
                                          d5: mov    %rax,%r14
  9b: mov    $0xe,%esi                    d8: mov    $0xe,%esi
  a0: callq  0xffffffffe1021e44           dd: callq  0xffffffffe102bd91 // MSH
                                          e2: and    $0xf,%eax
                                          e5: shl    $0x2,%eax
                                          e8: mov    %rax,%r13
                                          eb: mov    %r14,%rax
                                          ee: mov    %r13,%rsi
  a5: lea    0xe(%rbx),%esi               f1: add    $0xe,%esi
  a8: callq  0xffffffffe1021e0d           f4: callq  0xffffffffe102bd6d
  ad: cmp    $0x16,%eax                   f9: cmp    $0x16,%rax
  b0: je     0x00000000000000bf           fd: je     0x0000000000000110
                                          ff: mov    %r13,%rsi
  b2: lea    0x10(%rbx),%esi             102: add    $0x10,%esi
  b5: callq  0xffffffffe1021e0d          105: callq  0xffffffffe102bd6d
  ba: cmp    $0x16,%eax                  10a: cmp    $0x16,%rax
  bd: jne    0x00000000000000c6          10e: jne    0x0000000000000117
  bf: mov    $0xffff,%eax                110: mov    $0xffff,%eax
  c4: jmp    0x00000000000000c8          115: jmp    0x000000000000011c
  c6: xor    %eax,%eax                   117: mov    $0x0,%eax
  c8: mov    -0x8(%rbp),%rbx             11c: mov    -0x228(%rbp),%rbx // epilogue
  cc: leaveq                             123: mov    -0x220(%rbp),%r13
  cd: retq                               12a: mov    -0x218(%rbp),%r14
                                         131: mov    -0x210(%rbp),%r15
                                         138: leaveq
                                         139: retq

On fully cached SKBs both JITed functions take 12 nsec to execute.
BPF interpreter executes the program in 30 nsec.

The difference in generated assembler is due to the following:

Old BPF implements the LDX_MSH instruction via the sk_load_byte_msh()
helper function inside bpf_jit.S.
New JIT removes the helper and does it explicitly, so ldx_msh cost
is the same for both JITs, but the generated code looks longer.

New JIT has 4 registers to save, so prologue/epilogue are larger,
but the cost is within noise on x64.

Old JIT checks whether the first insn clears A and, if not, emits
'xor %eax,%eax'. New JIT clears %rax unconditionally.

2. old BPF JIT doesn't support the ANC_NLATTR, ANC_PAY_OFFSET, ANC_RANDOM
   extensions. New JIT supports all BPF extensions.
   Performance of such filters improves 2-4 times depending on the filter.
   The longer the filter, the higher the performance gain.
   Synthetic benchmarks with many ancillary loads see 20x speedup,
   which seems to be the maximum gain from JIT.

Notes:

. net.core.bpf_jit_enable=2 + tools/net/bpf_jit_disasm is still functional
  and can be used to see generated assembler

. there are two jit_compile() functions and the code flow for classic filters is:
  sk_attach_filter() - load classic BPF
  bpf_jit_compile() - try to JIT from classic BPF
  sk_convert_filter() - convert classic to internal
  bpf_int_jit_compile() - JIT from internal BPF

  seccomp and tracing filters will just call bpf_int_jit_compile()

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
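As an aside to the LDX_MSH discussion above, here is a minimal C sketch (not
kernel code; the function name and bounds handling are illustrative
assumptions) of what classic BPF's "ldxb 4*([k]&0xf)" computes -- the value
that both the old sk_load_byte_msh() helper and the new inline code place in
the X register:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustration of LDX_MSH semantics: take the byte at packet offset k,
     * mask its low nibble and multiply by 4. For IPv4 this turns the IHL
     * field into the header length in bytes. A real filter aborts and
     * returns 0 on an out-of-range load; here we only flag that case.
     */
    static uint32_t ldx_msh(const uint8_t *pkt, size_t len, uint32_t k,
                            int *out_of_range)
    {
        if (k >= len) {
            *out_of_range = 1;
            return 0;
        }
        *out_of_range = 0;
        return 4u * (pkt[k] & 0x0f);
    }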
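For context on the sk_attach_filter() entry point named in the notes, a small
userspace example (not part of this patch; assumes CAP_NET_RAW and a trivial
accept-all filter) showing how a classic BPF program is handed to the kernel,
where it then flows through the classic JIT or the classic-to-internal
conversion described above:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/filter.h>
    #include <linux/if_ether.h>

    int main(void)
    {
        /* Trivial classic BPF program: accept every packet (return 0xffff). */
        struct sock_filter insns[] = {
            BPF_STMT(BPF_RET | BPF_K, 0xffff),
        };
        struct sock_fprog prog = {
            .len    = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };

        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* SO_ATTACH_FILTER ends up in sk_attach_filter() inside the kernel. */
        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
            perror("setsockopt(SO_ATTACH_FILTER)");
            close(fd);
            return 1;
        }
        printf("filter attached; it is JIT-ed when net.core.bpf_jit_enable=1\n");
        close(fd);
        return 0;
    }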
parent f3c2af7ba1
commit 622582786c
arch/x86/net/bpf_jit.S
@@ -12,13 +12,16 @@
 
 /*
  * Calling convention :
- * rdi : skb pointer
+ * rbx : skb pointer (callee saved)
  * esi : offset of byte(s) to fetch in skb (can be scratched)
- * r8  : copy of skb->data
+ * r10 : copy of skb->data
  * r9d : hlen = skb->len - skb->data_len
  */
-#define SKBDATA %r8
+#define SKBDATA %r10
 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
+#define MAX_BPF_STACK (512 /* from filter.h */ + \
+        32 /* space for rbx,r13,r14,r15 */ + \
+        8 /* space for skb_copy_bits */)
 
 sk_load_word:
         .globl sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
         movzbl (SKBDATA,%rsi),%eax
         ret
 
-/**
- * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
- *
- * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
- * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value
- */
-sk_load_byte_msh:
-        .globl sk_load_byte_msh
-        test %esi,%esi
-        js bpf_slow_path_byte_msh_neg
-
-sk_load_byte_msh_positive_offset:
-        .globl sk_load_byte_msh_positive_offset
-        cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
-        jle bpf_slow_path_byte_msh
-        movzbl (SKBDATA,%rsi),%ebx
-        and $15,%bl
-        shl $2,%bl
-        ret
-
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN) \
-        push %rdi;    /* save skb */ \
+        mov %rbx, %rdi; /* arg1 == skb */ \
         push %r9; \
         push SKBDATA; \
         /* rsi already has offset */ \
         mov $LEN,%ecx; /* len */ \
-        lea -12(%rbp),%rdx; \
+        lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
         call skb_copy_bits; \
         test %eax,%eax; \
         pop SKBDATA; \
-        pop %r9; \
-        pop %rdi
+        pop %r9;
 
 
 bpf_slow_path_word:
         bpf_slow_path_common(4)
         js bpf_error
-        mov -12(%rbp),%eax
+        mov - MAX_BPF_STACK + 32(%rbp),%eax
         bswap %eax
         ret
 
 bpf_slow_path_half:
         bpf_slow_path_common(2)
         js bpf_error
-        mov -12(%rbp),%ax
+        mov - MAX_BPF_STACK + 32(%rbp),%ax
         rol $8,%ax
         movzwl %ax,%eax
         ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
 bpf_slow_path_byte:
         bpf_slow_path_common(1)
         js bpf_error
-        movzbl -12(%rbp),%eax
-        ret
-
-bpf_slow_path_byte_msh:
-        xchg %eax,%ebx /* dont lose A , X is about to be scratched */
-        bpf_slow_path_common(1)
-        js bpf_error
-        movzbl -12(%rbp),%eax
-        and $15,%al
-        shl $2,%al
-        xchg %eax,%ebx
+        movzbl - MAX_BPF_STACK + 32(%rbp),%eax
         ret
 
 #define sk_negative_common(SIZE) \
-        push %rdi; /* save skb */ \
+        mov %rbx, %rdi; /* arg1 == skb */ \
         push %r9; \
         push SKBDATA; \
         /* rsi already has offset */ \
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
         test %rax,%rax; \
         pop SKBDATA; \
         pop %r9; \
-        pop %rdi; \
         jz bpf_error
 
-
 bpf_slow_path_word_neg:
         cmp SKF_MAX_NEG_OFF, %esi /* test range */
         jl bpf_error /* offset lower -> error */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
         movzbl (%rax), %eax
         ret
 
-bpf_slow_path_byte_msh_neg:
-        cmp SKF_MAX_NEG_OFF, %esi
-        jl bpf_error
-sk_load_byte_msh_negative_offset:
-        .globl sk_load_byte_msh_negative_offset
-        xchg %eax,%ebx /* dont lose A , X is about to be scratched */
-        sk_negative_common(1)
-        movzbl (%rax),%eax
-        and $15,%al
-        shl $2,%al
-        xchg %eax,%ebx
-        ret
-
 bpf_error:
 # force a return 0 from jit handler
-        xor %eax,%eax
-        mov -8(%rbp),%rbx
+        xor %eax,%eax
+        mov - MAX_BPF_STACK(%rbp),%rbx
+        mov - MAX_BPF_STACK + 8(%rbp),%r13
+        mov - MAX_BPF_STACK + 16(%rbp),%r14
+        mov - MAX_BPF_STACK + 24(%rbp),%r15
         leaveq
         ret
arch/x86/net/bpf_jit_comp.c: file diff suppressed because it is too large
include/linux/filter.h
@@ -207,6 +207,9 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
+u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+void bpf_int_jit_compile(struct sk_filter *fp);
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
net/core/filter.c
@@ -1524,6 +1524,10 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
         return ERR_PTR(err);
 }
 
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
                                              struct sock *sk)
 {
@@ -1544,9 +1548,12 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
         /* JIT compiler couldn't process this filter, so do the
          * internal BPF translation for the optimized interpreter.
          */
-        if (!fp->jited)
+        if (!fp->jited) {
                 fp = __sk_migrate_filter(fp, sk);
 
+                /* Probe if internal BPF can be jit-ed */
+                bpf_int_jit_compile(fp);
+        }
         return fp;
 }