use linux_uapi::sock_filter;
use std::collections::HashMap;

use crate::EbpfError;
use crate::EbpfError::*;
use crate::api::{
    BPF_A, BPF_ABS, BPF_ADD, BPF_ALU, BPF_ALU64, BPF_AND, BPF_B, BPF_DIV, BPF_EXIT, BPF_H, BPF_IMM,
    BPF_IND, BPF_JA, BPF_JEQ, BPF_JGE, BPF_JGT, BPF_JLE, BPF_JLT, BPF_JMP, BPF_JMP32, BPF_JNE,
    BPF_JSET, BPF_K, BPF_LD, BPF_LDX, BPF_LEN, BPF_LSH, BPF_MEM, BPF_MISC, BPF_MOV, BPF_MSH,
    BPF_MUL, BPF_NEG, BPF_OR, BPF_RET, BPF_RSH, BPF_ST, BPF_STX, BPF_SUB, BPF_TAX, BPF_TXA, BPF_W,
    BPF_X, BPF_XOR, EbpfInstruction,
};
use crate::program::{
    BpfProgramContext, EbpfProgram, ProgramArgument, StaticHelperSet, link_program,
};
use crate::verifier::{
    CallingContext, NullVerifierLogger, Type, VerifiedEbpfProgram, verify_program,
};

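/// Size in bytes of a single cBPF word.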
const CBPF_WORD_SIZE: u32 = 4;

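/// Number of 32-bit scratch memory slots (M[0]..M[15]) available to a cBPF program.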
const CBPF_SCRATCH_SIZE: u32 = 16;

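/// Specifies how `BPF_LEN` loads are resolved when a cBPF program is converted to eBPF.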
pub enum CbpfLenInstruction {
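    /// The packet length is a compile-time constant.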
    Static { len: i32 },
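    /// The packet length is loaded from a 32-bit field at `offset` within the program context.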
    ContextField { offset: i16 },
}

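/// Configuration used when converting a cBPF program to eBPF.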
pub struct CbpfConfig {
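    /// How `BPF_LEN` loads are resolved.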
    pub len: CbpfLenInstruction,
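    /// Whether the `BPF_MSH` addressing mode is allowed.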
    pub allow_msh: bool,
}

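/// Returns the instruction class bits of a cBPF instruction code.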
pub fn bpf_class(filter: &sock_filter) -> u8 {
    (filter.code & 0x07) as u8
}

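/// Returns the operand size bits (`BPF_W`, `BPF_H` or `BPF_B`) of a load or store instruction.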
fn bpf_size(filter: &sock_filter) -> u8 {
    (filter.code & 0x18) as u8
}

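/// Returns the addressing mode bits of a load or store instruction.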
pub fn bpf_addressing_mode(filter: &sock_filter) -> u8 {
    (filter.code & 0xe0) as u8
}

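/// Returns the operation bits of an ALU or jump instruction.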
fn bpf_op(filter: &sock_filter) -> u8 {
    (filter.code & 0xf0) as u8
}

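/// Returns the source operand bit (`BPF_K` or `BPF_X`) of an ALU or jump instruction.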
fn bpf_src(filter: &sock_filter) -> u8 {
    (filter.code & 0x08) as u8
}

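/// Returns the return value source bits (`BPF_K`, `BPF_X` or `BPF_A`) of a `BPF_RET` instruction.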
fn bpf_rval(filter: &sock_filter) -> u8 {
    (filter.code & 0x18) as u8
}

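/// Maps a cBPF scratch memory slot index to a byte offset relative to the eBPF stack pointer,
/// or returns an error if the index is out of range.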
fn cbpf_scratch_offset(addr: u32) -> Result<i16, EbpfError> {
    if addr < CBPF_SCRATCH_SIZE {
        Ok((-(CBPF_SCRATCH_SIZE as i16) + addr as i16) * CBPF_WORD_SIZE as i16)
    } else {
        Err(EbpfError::InvalidCbpfScratchOffset(addr))
    }
}

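/// Translates a cBPF program into an equivalent sequence of eBPF instructions.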
fn cbpf_to_ebpf(
    bpf_code: &[sock_filter],
    config: &CbpfConfig,
) -> Result<Vec<EbpfInstruction>, EbpfError> {
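    // R0 holds the cBPF accumulator (A) and the program's return value.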
    const REG_A: u8 = 0;

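    // R1 holds the context/packet pointer when the program is entered.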
    const REG_ARG1: u8 = 1;

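    // R6 keeps a copy of the context pointer; legacy packet loads (BPF_ABS/BPF_IND) read it.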
    const REG_CONTEXT: u8 = 6;

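    // R7 is a scratch register used to preserve A while emulating BPF_MSH.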
    const REG_TMP: u8 = 7;

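    // R9 holds the cBPF index register (X).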
    const REG_X: u8 = 9;

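    // R10 is the eBPF frame pointer; cBPF scratch memory is kept on the stack below it.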
    const REG_STACK: u8 = 10;

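    // Maps a cBPF instruction index to the indices of eBPF jump instructions whose offsets
    // must be patched once that cBPF instruction has been translated.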
    let mut to_be_patched: HashMap<usize, Vec<usize>> = HashMap::new();

    let mut ebpf_code: Vec<EbpfInstruction> = vec![];
    ebpf_code.reserve(bpf_code.len() * 2 + 2);

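    // Prologue: keep a copy of the context pointer in REG_CONTEXT and zero the accumulator.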
    ebpf_code.push(EbpfInstruction::new(BPF_ALU64 | BPF_MOV | BPF_X, REG_CONTEXT, REG_ARG1, 0, 0));
    ebpf_code.push(EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, REG_A, 0, 0, 0));

    for (i, bpf_instruction) in bpf_code.iter().enumerate() {
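        // Back-patch any forward jumps that target the current cBPF instruction.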
        if let Some((_, entries)) = to_be_patched.remove_entry(&i) {
            for index in entries {
                let offset = (ebpf_code.len() - index - 1) as i16;
                ebpf_code[index].set_offset(offset);
            }
        }

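        // Records that the eBPF instruction at `ebpf_source` jumps to the code generated for
        // the cBPF instruction at `i + 1 + cbpf_offset`; the offset is patched in later.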
        let mut prep_patch = |cbpf_offset: usize, ebpf_source: usize| -> Result<(), EbpfError> {
            let cbpf_target = i + 1 + cbpf_offset;
            if cbpf_target >= bpf_code.len() {
                return Err(EbpfError::InvalidCbpfJumpOffset(cbpf_offset as u32));
            }
            to_be_patched.entry(cbpf_target).or_insert_with(Vec::new).push(ebpf_source);
            Ok(())
        };

        match bpf_class(bpf_instruction) {
            BPF_ALU => match bpf_op(bpf_instruction) {
                BPF_ADD | BPF_SUB | BPF_MUL | BPF_DIV | BPF_AND | BPF_OR | BPF_XOR | BPF_LSH
                | BPF_RSH => {
                    let e_instr = if bpf_src(bpf_instruction) == BPF_K {
                        EbpfInstruction::new(
                            bpf_instruction.code as u8,
                            REG_A,
                            0,
                            0,
                            bpf_instruction.k as i32,
                        )
                    } else {
                        EbpfInstruction::new(bpf_instruction.code as u8, REG_A, REG_X, 0, 0)
                    };
                    ebpf_code.push(e_instr);
                }
                BPF_NEG => {
                    ebpf_code.push(EbpfInstruction::new(BPF_ALU | BPF_NEG, REG_A, REG_A, 0, 0));
                }
                _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
            },
            class @ (BPF_LD | BPF_LDX) => {
                let dst_reg = if class == BPF_LDX { REG_X } else { REG_A };

                let mode = bpf_addressing_mode(bpf_instruction);
                let size = bpf_size(bpf_instruction);

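                // Only a limited set of (size, mode, class) combinations is valid for cBPF loads.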
                match (size, mode, class) {
                    (BPF_H | BPF_B | BPF_W, BPF_ABS | BPF_IND, BPF_LD) => (),
                    (BPF_W, BPF_LEN | BPF_IMM | BPF_MEM, BPF_LD | BPF_LDX) => (),
                    (BPF_B, BPF_MSH, BPF_LDX) if config.allow_msh => (),
                    _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
                };

                let k = bpf_instruction.k;

                match mode {
                    BPF_ABS => {
                        ebpf_code.push(EbpfInstruction::new(
                            BPF_LD | BPF_ABS | size,
                            REG_A,
                            0,
                            0,
                            k as i32,
                        ));
                    }
                    BPF_IND => {
                        ebpf_code.push(EbpfInstruction::new(
                            BPF_LD | BPF_IND | size,
                            REG_A,
                            REG_X,
                            0,
                            k as i32,
                        ));
                    }
                    BPF_IMM => {
                        let imm = k as i32;
                        ebpf_code.push(EbpfInstruction::new(
                            BPF_ALU | BPF_MOV | BPF_K,
                            dst_reg,
                            0,
                            0,
                            imm,
                        ));
                    }
                    BPF_MEM => {
                        let offset = cbpf_scratch_offset(k)?;
                        ebpf_code.push(EbpfInstruction::new(
                            BPF_LDX | BPF_MEM,
                            dst_reg,
                            REG_STACK,
                            offset,
                            0,
                        ));
                    }
                    BPF_LEN => {
                        ebpf_code.push(match config.len {
                            CbpfLenInstruction::Static { len } => {
                                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, REG_A, 0, 0, len)
                            }
                            CbpfLenInstruction::ContextField { offset } => EbpfInstruction::new(
                                BPF_LDX | BPF_MEM | BPF_W,
                                REG_A,
                                REG_CONTEXT,
                                offset,
                                0,
                            ),
                        });
                    }
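                    // BPF_MSH: X = 4 * (P[k] & 0xf). A is saved in REG_TMP and restored after.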
                    BPF_MSH => {
                        ebpf_code.extend_from_slice(&[
                            EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_X, REG_TMP, REG_A, 0, 0),
                            EbpfInstruction::new(BPF_LD | BPF_ABS | BPF_B, REG_A, 0, 0, k as i32),
                            EbpfInstruction::new(BPF_ALU | BPF_AND | BPF_K, REG_A, 0, 0, 0x0f),
                            EbpfInstruction::new(BPF_ALU | BPF_MUL | BPF_K, REG_A, 0, 0, 4),
                            EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_X, REG_X, REG_A, 0, 0),
                            EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_X, REG_A, REG_TMP, 0, 0),
                        ]);
                    }
                    _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
                }
            }
            BPF_JMP => {
                match bpf_op(bpf_instruction) {
                    BPF_JA => {
                        ebpf_code.push(EbpfInstruction::new(BPF_JMP | BPF_JA, 0, 0, -1, 0));
                        prep_patch(bpf_instruction.k as usize, ebpf_code.len() - 1)?;
                    }
                    op @ (BPF_JGT | BPF_JGE | BPF_JEQ | BPF_JSET) => {
                        let src = bpf_src(bpf_instruction);
                        let sock_filter { k, jt, jf, .. } = *bpf_instruction;
                        let (src_reg, imm) = if src == BPF_K { (0, k as i32) } else { (REG_X, 0) };

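                        // When the true branch falls through (jt == 0), invert the condition and
                        // emit a single jump to the false target. BPF_JSET has no inverse, so it
                        // always uses the general case below.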
                        if jt == 0 && op != BPF_JSET {
                            let op = match op {
                                BPF_JGT => BPF_JLE,
                                BPF_JGE => BPF_JLT,
                                BPF_JEQ => BPF_JNE,
                                _ => panic!("Unexpected operation: {op:?}"),
                            };

                            ebpf_code.push(EbpfInstruction::new(
                                BPF_JMP32 | op | src,
                                REG_A,
                                src_reg,
                                -1,
                                imm,
                            ));
                            prep_patch(jf as usize, ebpf_code.len() - 1)?;
                        } else {
                            ebpf_code.push(EbpfInstruction::new(
                                BPF_JMP32 | op | src,
                                REG_A,
                                src_reg,
                                -1,
                                imm,
                            ));
                            prep_patch(jt as usize, ebpf_code.len() - 1)?;

                            if jf > 0 {
                                ebpf_code.push(EbpfInstruction::new(BPF_JMP | BPF_JA, 0, 0, -1, 0));
                                prep_patch(jf as usize, ebpf_code.len() - 1)?;
                            }
                        }
                    }
                    _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
                }
            }
            BPF_MISC => match bpf_op(bpf_instruction) {
                BPF_TAX => {
                    ebpf_code.push(EbpfInstruction::new(
                        BPF_ALU | BPF_MOV | BPF_X,
                        REG_X,
                        REG_A,
                        0,
                        0,
                    ));
                }
                BPF_TXA => {
                    ebpf_code.push(EbpfInstruction::new(
                        BPF_ALU | BPF_MOV | BPF_X,
                        REG_A,
                        REG_X,
                        0,
                        0,
                    ));
                }
                _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
            },

            class @ (BPF_ST | BPF_STX) => {
                if bpf_addressing_mode(bpf_instruction) != 0 || bpf_size(bpf_instruction) != 0 {
                    return Err(InvalidCbpfInstruction(bpf_instruction.code));
                }

                let src_reg = if class == BPF_STX { REG_X } else { REG_A };
                let offset = cbpf_scratch_offset(bpf_instruction.k)?;
                ebpf_code.push(EbpfInstruction::new(
                    BPF_STX | BPF_MEM | BPF_W,
                    REG_STACK,
                    src_reg,
                    offset,
                    0,
                ));
            }
            BPF_RET => {
                match bpf_rval(bpf_instruction) {
                    BPF_K => {
                        let imm = bpf_instruction.k as i32;
                        ebpf_code.push(EbpfInstruction::new(
                            BPF_ALU | BPF_MOV | BPF_IMM,
                            REG_A,
                            0,
                            0,
                            imm,
                        ));
                    }
                    BPF_A => (),
                    _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
                };

                ebpf_code.push(EbpfInstruction::new(BPF_JMP | BPF_EXIT, 0, 0, 0, 0));
            }
            _ => return Err(InvalidCbpfInstruction(bpf_instruction.code)),
        }
    }

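    // `prep_patch` only accepts in-range targets, so every entry must have been patched by now.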
    assert!(to_be_patched.is_empty());

    Ok(ebpf_code)
}

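/// Converts a cBPF program to eBPF and verifies it against the given packet type.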
pub fn convert_and_verify_cbpf(
    bpf_code: &[sock_filter],
    packet_type: Type,
    config: &CbpfConfig,
) -> Result<VerifiedEbpfProgram, EbpfError> {
    let context = CallingContext {
        maps: vec![],
        helpers: HashMap::new(),
        args: vec![packet_type.clone()],
        packet_type: Some(packet_type),
    };
    let ebpf_code = cbpf_to_ebpf(bpf_code, config)?;
    verify_program(ebpf_code, context, &mut NullVerifierLogger)
}

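/// Converts a cBPF program to eBPF, verifies it, and links it into an executable `EbpfProgram`.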
pub fn convert_and_link_cbpf<C: BpfProgramContext + StaticHelperSet>(
    bpf_code: &[sock_filter],
) -> Result<EbpfProgram<C>, EbpfError> {
    let verified = convert_and_verify_cbpf(
        bpf_code,
        <C as BpfProgramContext>::Packet::get_type().clone(),
        C::CBPF_CONFIG,
    )?;
    link_program(&verified, vec![])
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{MemoryId, NoMap, empty_static_helper_set};
    use linux_uapi::{
        AUDIT_ARCH_AARCH64, AUDIT_ARCH_X86_64, SECCOMP_RET_ALLOW, SECCOMP_RET_TRAP, seccomp_data,
        sock_filter,
    };
    use std::mem::offset_of;
    use std::sync::LazyLock;
    use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

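    // Test configuration: a fixed packet length of `size_of::<seccomp_data>()`, BPF_MSH allowed.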
    pub const TEST_CBPF_CONFIG: CbpfConfig = CbpfConfig {
        len: CbpfLenInstruction::Static { len: size_of::<seccomp_data>() as i32 },
        allow_msh: true,
    };

    #[test]
    fn test_cbpf_to_ebpf() {
        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_JMP | BPF_JA) as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: (BPF_RET | BPF_A) as u16, jt: 0, jf: 0, k: 0 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Ok(vec![
                EbpfInstruction::new(BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0),
                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_JMP | BPF_JA, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_JMP | BPF_EXIT, 0, 0, 0, 0),
            ]),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_JMP | BPF_JA) as u16, jt: 0, jf: 0, k: 1 },
                    sock_filter { code: (BPF_RET | BPF_A) as u16, jt: 0, jf: 0, k: 0 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Err(EbpfError::InvalidCbpfJumpOffset(1)),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![sock_filter { code: (BPF_JMP | BPF_JA) as u16, jt: 0, jf: 0, k: 0xffffffff }],
                &TEST_CBPF_CONFIG
            ),
            Err(EbpfError::InvalidCbpfJumpOffset(0xffffffff)),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_JMP | BPF_JNE) as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: (BPF_RET | BPF_A) as u16, jt: 0, jf: 0, k: 0 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Err(EbpfError::InvalidCbpfInstruction((BPF_JMP | BPF_JNE) as u16)),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_JMP | BPF_JEQ) as u16, jt: 1, jf: 0, k: 0 },
                    sock_filter { code: (BPF_RET | BPF_A) as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: (BPF_RET | BPF_A) as u16, jt: 0, jf: 0, k: 0 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Ok(vec![
                EbpfInstruction::new(BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0),
                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_JMP32 | BPF_JEQ, 0, 0, 1, 0),
                EbpfInstruction::new(BPF_JMP | BPF_EXIT, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_JMP | BPF_EXIT, 0, 0, 0, 0),
            ]),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_JMP | BPF_JA) as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: (BPF_RET | BPF_K) as u16, jt: 0, jf: 0, k: 1 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Ok(vec![
                EbpfInstruction::new(BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0),
                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_JMP | BPF_JA, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_IMM, 0, 0, 0, 1),
                EbpfInstruction::new(BPF_JMP | BPF_EXIT, 0, 0, 0, 0),
            ]),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![
                    sock_filter { code: (BPF_LD | BPF_MEM) as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: (BPF_LDX | BPF_MEM) as u16, jt: 0, jf: 0, k: 15 },
                    sock_filter { code: BPF_ST as u16, jt: 0, jf: 0, k: 0 },
                    sock_filter { code: BPF_STX as u16, jt: 0, jf: 0, k: 15 },
                ],
                &TEST_CBPF_CONFIG
            ),
            Ok(vec![
                EbpfInstruction::new(BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0),
                EbpfInstruction::new(BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0),
                EbpfInstruction::new(BPF_LDX | BPF_MEM | BPF_W, 0, 10, -64, 0),
                EbpfInstruction::new(BPF_LDX | BPF_MEM | BPF_W, 9, 10, -4, 0),
                EbpfInstruction::new(BPF_STX | BPF_MEM | BPF_W, 10, 0, -64, 0),
                EbpfInstruction::new(BPF_STX | BPF_MEM | BPF_W, 10, 9, -4, 0),
            ]),
        );

        assert_eq!(
            cbpf_to_ebpf(
                &vec![sock_filter { code: (BPF_LD | BPF_MEM) as u16, jt: 0, jf: 0, k: 17 }],
                &TEST_CBPF_CONFIG
            ),
            Err(EbpfError::InvalidCbpfScratchOffset(17)),
        );
    }

    const BPF_ALU_ADD_K: u16 = (BPF_ALU | BPF_ADD | BPF_K) as u16;
    const BPF_ALU_SUB_K: u16 = (BPF_ALU | BPF_SUB | BPF_K) as u16;
    const BPF_ALU_MUL_K: u16 = (BPF_ALU | BPF_MUL | BPF_K) as u16;
    const BPF_ALU_DIV_K: u16 = (BPF_ALU | BPF_DIV | BPF_K) as u16;
    const BPF_ALU_AND_K: u16 = (BPF_ALU | BPF_AND | BPF_K) as u16;
    const BPF_ALU_OR_K: u16 = (BPF_ALU | BPF_OR | BPF_K) as u16;
    const BPF_ALU_XOR_K: u16 = (BPF_ALU | BPF_XOR | BPF_K) as u16;
    const BPF_ALU_LSH_K: u16 = (BPF_ALU | BPF_LSH | BPF_K) as u16;
    const BPF_ALU_RSH_K: u16 = (BPF_ALU | BPF_RSH | BPF_K) as u16;

    const BPF_ALU_OR_X: u16 = (BPF_ALU | BPF_OR | BPF_X) as u16;

    const BPF_LD_W_ABS: u16 = (BPF_LD | BPF_ABS | BPF_W) as u16;
    const BPF_LD_W_MEM: u16 = (BPF_LD | BPF_MEM | BPF_W) as u16;
    const BPF_JEQ_K: u16 = (BPF_JMP | BPF_JEQ | BPF_K) as u16;
    const BPF_JSET_K: u16 = (BPF_JMP | BPF_JSET | BPF_K) as u16;
    const BPF_RET_K: u16 = (BPF_RET | BPF_K) as u16;
    const BPF_RET_A: u16 = (BPF_RET | BPF_A) as u16;
    const BPF_ST_REG: u16 = BPF_ST as u16;
    const BPF_MISC_TAX: u16 = (BPF_MISC | BPF_TAX) as u16;

    struct TestProgramContext {}

    impl BpfProgramContext for TestProgramContext {
        type RunContext<'a> = ();
        type Packet<'a> = &'a seccomp_data;
        type Map = NoMap;
        const CBPF_CONFIG: &'static CbpfConfig = &TEST_CBPF_CONFIG;
    }

    empty_static_helper_set!(TestProgramContext);

    static SECCOMP_DATA_TYPE: LazyLock<Type> = LazyLock::new(|| Type::PtrToMemory {
        id: MemoryId::new(),
        offset: 0.into(),
        buffer_size: 0,
    });

    impl ProgramArgument for &'_ seccomp_data {
        fn get_type() -> &'static Type {
            &*SECCOMP_DATA_TYPE
        }
    }

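    // Runs `prg` against `data` and asserts that the filter returns `result`.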
    fn with_prg_assert_result(
        prg: &EbpfProgram<TestProgramContext>,
        mut data: seccomp_data,
        result: u32,
        msg: &str,
    ) {
        let return_value = prg.run(&mut (), &mut data);
        assert_eq!(return_value, result as u64, "{}: filter return value is {}", msg, return_value);
    }

    #[test]
    fn test_filter_with_dw_load() {
        let test_prg = [
            // Load seccomp_data.arch and return 1 if it is not x86-64.
            sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 4 },
            sock_filter { code: BPF_JEQ_K, jt: 1, jf: 0, k: AUDIT_ARCH_X86_64 },
            sock_filter { code: BPF_RET_K, jt: 0, jf: 0, k: 1 },
            // Load seccomp_data.nr: allow syscall 41, trap on syscall 115.
            sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_JEQ_K, jt: 0, jf: 1, k: 41 },
            sock_filter { code: BPF_RET_K, jt: 0, jf: 0, k: SECCOMP_RET_ALLOW },
            sock_filter { code: BPF_JEQ_K, jt: 0, jf: 1, k: 115 },
            sock_filter { code: BPF_RET_K, jt: 0, jf: 0, k: SECCOMP_RET_TRAP },
            // Store both 32-bit halves of args[0] into scratch slots M[0] and M[1].
            sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 16 },
            sock_filter { code: BPF_ST_REG, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 20 },
            sock_filter { code: BPF_ST_REG, jt: 0, jf: 0, k: 1 },
            // Trap if M[0] has any bit set or M[1] has any bit outside the low two set.
            sock_filter { code: BPF_LD_W_MEM, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_JSET_K, jt: 2, jf: 0, k: 4294967295 },
            sock_filter { code: BPF_LD_W_MEM, jt: 0, jf: 0, k: 1 },
            sock_filter { code: BPF_JSET_K, jt: 0, jf: 1, k: 4294967292 },
            sock_filter { code: BPF_RET_K, jt: 0, jf: 0, k: SECCOMP_RET_TRAP },
            sock_filter { code: BPF_RET_K, jt: 0, jf: 0, k: SECCOMP_RET_ALLOW },
        ];

        let prg =
            convert_and_link_cbpf::<TestProgramContext>(&test_prg).expect("Error parsing program");

        with_prg_assert_result(
            &prg,
            seccomp_data { arch: AUDIT_ARCH_AARCH64, ..Default::default() },
            1,
            "Did not reject incorrect arch",
        );

        with_prg_assert_result(
            &prg,
            seccomp_data { arch: AUDIT_ARCH_X86_64, nr: 41, ..Default::default() },
            SECCOMP_RET_ALLOW,
            "Did not pass simple RET_ALLOW",
        );

        with_prg_assert_result(
            &prg,
            seccomp_data {
                arch: AUDIT_ARCH_X86_64,
                nr: 100,
                args: [0xFF00000000, 0, 0, 0, 0, 0],
                ..Default::default()
            },
            SECCOMP_RET_TRAP,
            "Did not treat load of first 32 bits correctly",
        );

        with_prg_assert_result(
            &prg,
            seccomp_data {
                arch: AUDIT_ARCH_X86_64,
                nr: 100,
                args: [0x4, 0, 0, 0, 0, 0],
                ..Default::default()
            },
            SECCOMP_RET_TRAP,
            "Did not correctly reject load of second 32 bits",
        );

        with_prg_assert_result(
            &prg,
            seccomp_data {
                arch: AUDIT_ARCH_X86_64,
                nr: 100,
                args: [0x0, 0, 0, 0, 0, 0],
                ..Default::default()
            },
            SECCOMP_RET_ALLOW,
            "Did not correctly accept load of second 32 bits",
        );
    }

    #[test]
    fn test_alu_insns() {
        {
            let test_prg = [
                sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 0 },
                sock_filter { code: BPF_ALU_ADD_K, jt: 0, jf: 0, k: 3 },
                sock_filter { code: BPF_ALU_SUB_K, jt: 0, jf: 0, k: 2 },
                sock_filter { code: BPF_MISC_TAX, jt: 0, jf: 0, k: 0 },
                sock_filter { code: BPF_ALU_MUL_K, jt: 0, jf: 0, k: 8 },
                sock_filter { code: BPF_ALU_DIV_K, jt: 0, jf: 0, k: 2 },
                sock_filter { code: BPF_ALU_AND_K, jt: 0, jf: 0, k: 15 },
                sock_filter { code: BPF_ALU_OR_K, jt: 0, jf: 0, k: 16 },
                sock_filter { code: BPF_ALU_XOR_K, jt: 0, jf: 0, k: 7 },
                sock_filter { code: BPF_ALU_LSH_K, jt: 0, jf: 0, k: 2 },
                sock_filter { code: BPF_ALU_OR_X, jt: 0, jf: 0, k: 1 },
                sock_filter { code: BPF_ALU_RSH_K, jt: 0, jf: 0, k: 1 },
                sock_filter { code: BPF_RET_A, jt: 0, jf: 0, k: 0 },
            ];

            let prg = convert_and_link_cbpf::<TestProgramContext>(&test_prg)
                .expect("Error parsing program");

            with_prg_assert_result(
                &prg,
                seccomp_data { nr: 1, ..Default::default() },
                63,
                "BPF math does not work",
            );

            with_prg_assert_result(
                &prg,
                seccomp_data { nr: 11, ..Default::default() },
                46,
                "BPF math does not work",
            );
        }

        {
            let test_prg = [
                sock_filter { code: BPF_LD_W_ABS, jt: 0, jf: 0, k: 0 },
                sock_filter { code: BPF_ALU_SUB_K, jt: 0, jf: 0, k: 2 },
                sock_filter { code: BPF_RET_A, jt: 0, jf: 0, k: 0 },
            ];

            let prg = convert_and_link_cbpf::<TestProgramContext>(&test_prg)
                .expect("Error parsing program");

            with_prg_assert_result(
                &prg,
                seccomp_data { nr: -1, ..Default::default() },
                u32::MAX - 2,
                "BPF math does not work",
            );
        }
    }

    #[test]
    fn test_ld_msh() {
        let test_prg = [
            sock_filter { code: (BPF_LDX | BPF_MSH | BPF_B) as u16, jt: 0, jf: 0, k: 0 },
            sock_filter { code: (BPF_MISC | BPF_TXA) as u16, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_RET_A, jt: 0, jf: 0, k: 0 },
        ];

        let prg =
            convert_and_link_cbpf::<TestProgramContext>(&test_prg).expect("Error parsing program");

        for i in [0x00, 0x01, 0x07, 0x15, 0xff].iter() {
            with_prg_assert_result(
                &prg,
                seccomp_data { nr: *i, ..Default::default() },
                4 * (*i & 0xf) as u32,
                "BPF math does not work",
            )
        }
    }

    #[test]
    fn test_static_packet_len() {
        let test_prg = [
            sock_filter { code: (BPF_LD | BPF_LEN | BPF_W) as u16, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_RET_A, jt: 0, jf: 0, k: 0 },
        ];

        let prg =
            convert_and_link_cbpf::<TestProgramContext>(&test_prg).expect("Error parsing program");

        let data = seccomp_data::default();
        assert_eq!(prg.run(&mut (), &data), size_of::<seccomp_data>() as u64);
    }

    #[repr(C)]
    #[derive(Debug, Default, IntoBytes, Immutable, KnownLayout, FromBytes)]
    struct VariableLengthPacket {
        foo: u32,
        len: i32,
        bar: u64,
    }

    static VARIABLE_LENGTH_PACKET_TYPE: LazyLock<Type> = LazyLock::new(|| Type::PtrToMemory {
        id: MemoryId::new(),
        offset: 0.into(),
        buffer_size: size_of::<VariableLengthPacket>() as u64,
    });

    impl ProgramArgument for &'_ VariableLengthPacket {
        fn get_type() -> &'static Type {
            &*VARIABLE_LENGTH_PACKET_TYPE
        }
    }

    pub const VARIABLE_LENGTH_CBPF_CONFIG: CbpfConfig = CbpfConfig {
        len: CbpfLenInstruction::ContextField {
            offset: offset_of!(VariableLengthPacket, len) as i16,
        },
        allow_msh: true,
    };

    struct VariableLengthPacketContext {}

    impl BpfProgramContext for VariableLengthPacketContext {
        type RunContext<'a> = ();
        type Packet<'a> = &'a VariableLengthPacket;
        type Map = NoMap;
        const CBPF_CONFIG: &'static CbpfConfig = &VARIABLE_LENGTH_CBPF_CONFIG;
    }

    empty_static_helper_set!(VariableLengthPacketContext);

    #[test]
    fn test_variable_packet_len() {
        let test_prg = [
            sock_filter { code: (BPF_LD | BPF_LEN | BPF_W) as u16, jt: 0, jf: 0, k: 0 },
            sock_filter { code: BPF_RET_A, jt: 0, jf: 0, k: 0 },
        ];

        let prg = convert_and_link_cbpf::<VariableLengthPacketContext>(&test_prg)
            .expect("Error parsing program");
        let data = VariableLengthPacket { len: 42, ..VariableLengthPacket::default() };
        assert_eq!(prg.run(&mut (), &data), data.len as u64);
    }
}