rust/nfs: handle GAPs

In normal records the parser will try to continue parsing
after the GAP.

GAP 'data' is passed to the file API as '0's. A new call is used
so that the file API knows it is dealing with a GAP. Such
files are flagged as truncated at the end of the file and no
checksums are calculated.
pull/2787/head
Victor Julien 9 years ago
parent a116c16019
commit 58af39131f
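
In overview: the stream engine reports a gap as a length without data, the parser fabricates that many zero bytes so file offsets stay correct, and the file API is told explicitly that the bytes are synthetic. A minimal standalone sketch of that convention (names simplified; only the zero-fill and the separate gap call come from this patch):

    // Sketch of the gap convention: zero-fill plus an explicit gap call.
    fn on_stream_gap(gap_size: u32) {
        let gap = vec![0u8; gap_size as usize]; // GAP 'data' as '0's
        append(&gap, true);
    }

    fn append(data: &[u8], is_gap: bool) {
        if is_gap {
            // real code calls FileAppendGAP: file flagged, checksums disabled
            println!("FileAppendGAP: {} synthetic bytes", data.len());
        } else {
            println!("FileAppendData: {} bytes", data.len());
        }
    }

    fn main() {
        append(b"regular record data", false);
        on_stream_gap(512); // a 512-byte hole in the TCP stream
    }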

@@ -80,6 +80,10 @@ pub type SCFileAppendDataById = extern "C" fn (
     file_container: &FileContainer,
     track_id: u32,
     data: *const u8, data_len: u32) -> i32;
+pub type SCFileAppendGAPById = extern "C" fn (
+    file_container: &FileContainer,
+    track_id: u32,
+    data: *const u8, data_len: u32) -> i32;
 // void FilePrune(FileContainer *ffc)
 pub type SCFilePrune = extern "C" fn (
     file_container: &FileContainer);
@@ -109,6 +113,7 @@ pub struct SuricataContext {
     pub FileOpenFile: SCFileOpenFileWithId,
     pub FileCloseFile: SCFileCloseFileById,
     pub FileAppendData: SCFileAppendDataById,
+    pub FileAppendGAP: SCFileAppendGAPById,
     pub FileContainerRecycle: SCFileContainerRecycle,
     pub FilePrune: SCFilePrune,
     pub FileSetTx: SCFileSetTx,
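
The new SCFileAppendGAPById type has the same shape as SCFileAppendDataById. As a hypothetical illustration, any extern "C" function with that exact signature can fill the slot, e.g. a no-op stub for exercising the Rust side without the C engine (FileContainer is modeled as an opaque C struct so the snippet stands alone):

    // Hypothetical no-op stub matching the SCFileAppendGAPById ABI; the
    // real target is FileAppendGAPById in util-file.c.
    #[repr(C)]
    pub struct FileContainer { _private: [u8; 0] }

    pub type SCFileAppendGAPById = extern "C" fn(
        file_container: &FileContainer,
        track_id: u32,
        data: *const u8, data_len: u32) -> i32;

    extern "C" fn nop_append_gap(_fc: &FileContainer, _track_id: u32,
                                 _data: *const u8, _data_len: u32) -> i32 {
        0 // pretend the append succeeded
    }

    fn main() {
        let cb: SCFileAppendGAPById = nop_append_gap;
        let fc = FileContainer { _private: [] };
        assert_eq!(cb(&fc, 1, std::ptr::null(), 0), 0);
    }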

@@ -63,7 +63,7 @@ impl FileContainer {
         }
     }

-    pub fn file_append(&mut self, track_id: &u32, data: &[u8]) -> i32 {
+    pub fn file_append(&mut self, track_id: &u32, data: &[u8], is_gap: bool) -> i32 {
         SCLogDebug!("FILECONTAINER: append {}", data.len());
         if data.len() == 0 {
             return 0
@@ -71,8 +71,20 @@ impl FileContainer {
         match unsafe {SC} {
             None => panic!("BUG no suricata_config"),
             Some(c) => {
-                let res = (c.FileAppendData)(&self, *track_id,
-                        data.as_ptr(), data.len() as u32);
+                let res = match is_gap {
+                    false => {
+                        SCLogDebug!("appending file data");
+                        let r = (c.FileAppendData)(&self, *track_id,
+                                data.as_ptr(), data.len() as u32);
+                        r
+                    },
+                    true => {
+                        SCLogDebug!("appending GAP");
+                        let r = (c.FileAppendGAP)(&self, *track_id,
+                                data.as_ptr(), data.len() as u32);
+                        r
+                    },
+                };
                 if res != 0 {
                     panic!("c.fn_fileappenddata failed");
                 }
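
file_append() keeps a single entry point and selects the C callback by the new flag. A standalone model of that dispatch (the callback bodies here are stand-ins for the C functions):

    // Standalone model of the dispatch in FileContainer::file_append().
    type AppendFn = fn(track_id: u32, data: &[u8]) -> i32;

    fn append_data(_id: u32, data: &[u8]) -> i32 { println!("data: {} bytes", data.len()); 0 }
    fn append_gap(_id: u32, data: &[u8]) -> i32 { println!("gap:  {} bytes", data.len()); 0 }

    fn file_append(track_id: u32, data: &[u8], is_gap: bool) -> i32 {
        if data.len() == 0 { return 0; }
        let f: AppendFn = if is_gap { append_gap } else { append_data };
        let res = f(track_id, data);
        if res != 0 { panic!("append failed"); } // same hard failure as the patch
        res
    }

    fn main() {
        file_append(1, b"real record bytes", false);
        file_append(1, &[0u8; 16], true); // a 16-byte gap, passed as zeros
    }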

@@ -15,7 +15,19 @@
  * 02110-1301, USA.
  */

-// written by Victor Julien
+/**
+ * \file
+ * \author Victor Julien <victor@inliniac.net>
+ *
+ * Tracks chunk based file transfers. Chunks may be transferred out
+ * of order, but cannot be transferred in parallel. So only one
+ * chunk at a time.
+ *
+ * GAP handling. If a data gap is encountered, the file is truncated
+ * and new data is no longer pushed down to the lower level APIs.
+ * The tracker does continue to follow the file.
+ */

 extern crate libc;
 use log::*;
 use core::*;
@@ -23,10 +35,26 @@ use std::collections::HashMap;
 use std::collections::hash_map::Entry::{Occupied, Vacant};

 use filecontainer::*;

+#[derive(Debug)]
+pub struct FileChunk {
+    contains_gap: bool,
+    chunk: Vec<u8>,
+}
+
+impl FileChunk {
+    pub fn new(size: u32) -> FileChunk {
+        FileChunk {
+            contains_gap: false,
+            chunk: Vec::with_capacity(size as usize),
+        }
+    }
+}
+
 #[derive(Debug)]
 pub struct FileTransferTracker {
     file_size: u64,
     tracked: u64,
+    cur_ooo: u64,   // how many bytes do we have queued from ooo chunks
     track_id: u32,
     chunk_left: u32,
@@ -36,8 +64,9 @@ pub struct FileTransferTracker {
     pub file_open: bool,
     chunk_is_last: bool,
     chunk_is_ooo: bool,
+    file_is_truncated: bool,

-    chunks: HashMap<u64, Vec<u8>>,
+    chunks: HashMap<u64, FileChunk>,
     cur_ooo_chunk_offset: u64,
 }
@@ -46,6 +75,7 @@ impl FileTransferTracker {
         FileTransferTracker {
             file_size:0,
             tracked:0,
+            cur_ooo:0,
             track_id:0,
             chunk_left:0,
             tx_id:0,
@@ -53,6 +83,7 @@ impl FileTransferTracker {
             file_open:false,
             chunk_is_last:false,
             chunk_is_ooo:false,
+            file_is_truncated:false,
             cur_ooo_chunk_offset:0,
             chunks:HashMap::new(),
         }
@@ -70,12 +101,25 @@ impl FileTransferTracker {
     }

     pub fn close(&mut self, files: &mut FileContainer, flags: u16) {
-        files.file_close(&self.track_id, flags);
+        if !self.file_is_truncated {
+            files.file_close(&self.track_id, flags);
+        }
         self.file_open = false;
         self.tracked = 0;
         files.files_prune();
     }

+    pub fn trunc (&mut self, files: &mut FileContainer, flags: u16) {
+        if self.file_is_truncated {
+            return;
+        }
+        let myflags = flags | 1; // TODO util-file.c::FILE_TRUNCATED
+        files.file_close(&self.track_id, myflags);
+        SCLogDebug!("truncated file");
+        files.files_prune();
+        self.file_is_truncated = true;
+    }
+
     pub fn create(&mut self, name: &[u8], file_size: u64) {
         if self.file_open == true { panic!("close existing file first"); }
@@ -92,8 +136,15 @@ impl FileTransferTracker {
         SCLogDebug!("NEW CHUNK: chunk_size {} fill_bytes {}", chunk_size, fill_bytes);

+        // for now assume that is_last means it's really the last chunk,
+        // so no out of order chunks coming after. This means that if
+        // the last chunk is out of order, we've missed chunks before.
         if chunk_offset != self.tracked {
             SCLogDebug!("NEW CHUNK IS OOO: expected {}, got {}", self.tracked, chunk_offset);
+            if is_last {
+                SCLogDebug!("last chunk is out of order, this means we missed data before");
+                self.trunc(files, flags);
+            }
             self.chunk_is_ooo = true;
             self.cur_ooo_chunk_offset = chunk_offset;
         }
@@ -108,14 +159,21 @@ impl FileTransferTracker {
             self.open(config, files, flags, name);
         }

-        let res = self.update(files, flags, data);
+        let res = self.update(files, flags, data, 0);
         SCLogDebug!("NEW CHUNK: update res {:?}", res);
         res
     }

+    /// update the file tracker
+    /// If gap_size > 0, 'data' should not be used.
     /// return how much we consumed of data
-    pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8]) -> u32 {
+    pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8], gap_size: u32) -> u32 {
         let mut consumed = 0 as usize;
+        let is_gap = gap_size > 0;
+        if is_gap {
+            SCLogDebug!("is_gap {} size {} ooo? {}", is_gap, gap_size, self.chunk_is_ooo);
+        }
+
         if self.chunk_left + self.fill_bytes as u32 == 0 {
             //SCLogDebug!("UPDATE: nothing to do");
             return 0
@@ -140,7 +198,7 @@ impl FileTransferTracker {
                 let d = &data[0..self.chunk_left as usize];

                 if self.chunk_is_ooo == false {
-                    let res = files.file_append(&self.track_id, d);
+                    let res = files.file_append(&self.track_id, d, is_gap);
                     if res != 0 { panic!("append failed"); }

                     self.tracked += self.chunk_left as u64;
@@ -149,11 +207,13 @@ impl FileTransferTracker {
                             d.len(), self.cur_ooo_chunk_offset, self.tracked);
                     let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
                         Vacant(entry) => {
-                            entry.insert(Vec::with_capacity(self.chunk_left as usize))
+                            entry.insert(FileChunk::new(self.chunk_left))
                         },
                         Occupied(entry) => entry.into_mut(),
                     };
-                    c.extend(d);
+                    self.cur_ooo += d.len() as u64;
+                    c.contains_gap |= is_gap;
+                    c.chunk.extend(d);
                 }

                 consumed += self.chunk_left as usize;
@@ -169,7 +229,6 @@ impl FileTransferTracker {
                     SCLogDebug!("CHUNK(post) fill bytes now still {}", self.fill_bytes);
                 }
                 self.chunk_left = 0;
-                //return consumed as u32
             } else {
                 self.chunk_left = 0;
@@ -177,13 +236,14 @@ impl FileTransferTracker {
                 loop {
                     let offset = self.tracked;
                     match self.chunks.remove(&self.tracked) {
-                        Some(a) => {
-                            let res = files.file_append(&self.track_id, &a);
-                            if res != 0 { panic!("append failed"); }
-                            self.tracked += a.len() as u64;
-                            SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, a.len());
+                        Some(c) => {
+                            let res = files.file_append(&self.track_id, &c.chunk, c.contains_gap);
+                            if res != 0 { panic!("append failed: files.file_append() returned {}", res); }
+                            self.tracked += c.chunk.len() as u64;
+                            self.cur_ooo -= c.chunk.len() as u64;
+                            SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, c.chunk.len());
                         },
                         _ => {
                             SCLogDebug!("NO STORED CHUNK found at offset {}", self.tracked);
@@ -208,15 +268,17 @@ impl FileTransferTracker {
         } else {
             if self.chunk_is_ooo == false {
-                let res = files.file_append(&self.track_id, data);
+                let res = files.file_append(&self.track_id, data, is_gap);
                 if res != 0 { panic!("append failed"); }
                 self.tracked += data.len() as u64;
             } else {
                 let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
-                    Vacant(entry) => entry.insert(Vec::with_capacity(32768)),
+                    Vacant(entry) => entry.insert(FileChunk::new(32768)),
                     Occupied(entry) => entry.into_mut(),
                 };
-                c.extend(data);
+                c.chunk.extend(data);
+                c.contains_gap |= is_gap;
+                self.cur_ooo += data.len() as u64;
             }
             self.chunk_left -= data.len() as u32;
@@ -226,4 +288,8 @@ impl FileTransferTracker {
         files.files_prune();
         consumed as u32
     }
+
+    pub fn get_queued_size(&self) -> u64 {
+        self.cur_ooo
+    }
 }
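
To make the tracker's new bookkeeping concrete: out-of-order chunks are parked in the HashMap keyed by file offset, cur_ooo counts the parked bytes (exposed via get_queued_size()), and once the in-order point catches up, the loop drains contiguous chunks and forwards each chunk's gap flag. A standalone model of that drain:

    // Standalone model of the in-order drain over queued OOO chunks.
    use std::collections::HashMap;

    struct FileChunk { contains_gap: bool, chunk: Vec<u8> }

    fn drain(chunks: &mut HashMap<u64, FileChunk>, tracked: &mut u64, cur_ooo: &mut u64) {
        // mirrors the patch's loop { match self.chunks.remove(&self.tracked) ... }
        while let Some(c) = chunks.remove(tracked) {
            // in the patch: files.file_append(&self.track_id, &c.chunk, c.contains_gap)
            println!("flush {} bytes at offset {} (gap: {})",
                     c.chunk.len(), *tracked, c.contains_gap);
            *tracked += c.chunk.len() as u64;
            *cur_ooo -= c.chunk.len() as u64;
        }
    }

    fn main() {
        let mut chunks = HashMap::new();
        let (mut tracked, mut cur_ooo) = (0u64, 0u64);
        chunks.insert(4, FileChunk { contains_gap: false, chunk: vec![5, 6, 7, 8] });
        cur_ooo += 4; // 4 bytes parked ahead of the in-order point
        chunks.insert(0, FileChunk { contains_gap: true, chunk: vec![0; 4] });
        cur_ooo += 4; // a 4-byte gap chunk lands at the in-order point
        drain(&mut chunks, &mut tracked, &mut cur_ooo);
        assert_eq!((tracked, cur_ooo), (8, 0)); // everything flushed, queue empty
    }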

@@ -283,6 +283,9 @@ pub struct NFS3State {
     ts_chunk_left: u32,
     tc_chunk_left: u32,

+    ts_ssn_gap: bool,
+    tc_ssn_gap: bool,
+
     /// tx counter for assigning incrementing id's to tx's
     tx_id: u64,
@@ -308,6 +311,8 @@ impl NFS3State {
             tc_chunk_xid:0,
             ts_chunk_left:0,
             tc_chunk_left:0,
+            ts_ssn_gap:false,
+            tc_ssn_gap:false,
             tx_id:0,
             de_state_count:0,
             //ts_txs_updated:false,
@@ -701,7 +706,14 @@ impl NFS3State {
         let xidmap;
         match self.requestmap.remove(&r.hdr.xid) {
             Some(p) => { xidmap = p; },
-            _ => { SCLogDebug!("REPLY: xid {} NOT FOUND", r.hdr.xid); return 0; },
+            _ => {
+                SCLogDebug!("REPLY: xid {} NOT FOUND. GAPS? TS:{} TC:{}",
+                        r.hdr.xid, self.ts_ssn_gap, self.tc_ssn_gap);
+                // TODO we might be able to infer from the size + data
+                // that this is a READ reply and pass the data to the file API anyway?
+                return 0;
+            },
         }

         if xidmap.procedure == NFSPROC3_LOOKUP {
@@ -809,7 +821,7 @@ impl NFS3State {
     // update in progress chunks for file transfers
     // return how much data we consumed
-    fn filetracker_update(&mut self, direction: u8, data: &[u8]) -> u32 {
+    fn filetracker_update(&mut self, direction: u8, data: &[u8], gap_size: u32) -> u32 {
         let mut chunk_left = if direction == STREAM_TOSERVER {
             self.ts_chunk_left
         } else {
@@ -877,6 +889,7 @@ impl NFS3State {
             self.tc_chunk_left = chunk_left;
         }

+        let ssn_gap = self.ts_ssn_gap | self.tc_ssn_gap;
         // get the tx and update it
         let consumed = match self.get_file_tx_by_handle(&file_handle, direction) {
             Some((tx, files, flags)) => {
@@ -884,7 +897,15 @@ impl NFS3State {
                     Some(NFS3TransactionTypeData::FILE(ref mut x)) => x,
                     _ => { panic!("BUG") },
                 };
-                let cs = tdf.file_tracker.update(files, flags, data);
+                if ssn_gap {
+                    let queued_data = tdf.file_tracker.get_queued_size();
+                    if queued_data > 2000000 { // TODO should probably be configurable
+                        SCLogDebug!("QUEUED size {} while we've seen GAPs. Truncating file.", queued_data);
+                        tdf.file_tracker.trunc(files, flags);
+                    }
+                }
+
+                let cs = tdf.file_tracker.update(files, flags, data, gap_size);
                 cs
             },
             None => { 0 },
@@ -993,6 +1014,32 @@ impl NFS3State {
         xidmap.procedure
     }

+    pub fn parse_tcp_data_ts_gap<'b>(&mut self, gap_size: u32) -> u32 {
+        if self.tcp_buffer_ts.len() > 0 {
+            self.tcp_buffer_ts.clear();
+        }
+        let gap = vec![0; gap_size as usize];
+        let consumed = self.filetracker_update(STREAM_TOSERVER, &gap, gap_size);
+        if consumed > gap_size {
+            panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+        }
+        self.ts_ssn_gap = true;
+        return 0
+    }
+
+    pub fn parse_tcp_data_tc_gap<'b>(&mut self, gap_size: u32) -> u32 {
+        if self.tcp_buffer_tc.len() > 0 {
+            self.tcp_buffer_tc.clear();
+        }
+        let gap = vec![0; gap_size as usize];
+        let consumed = self.filetracker_update(STREAM_TOCLIENT, &gap, gap_size);
+        if consumed > gap_size {
+            panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+        }
+        self.tc_ssn_gap = true;
+        return 0
+    }
+
     /// Parsing function, handling TCP chunks fragmentation
     pub fn parse_tcp_data_ts<'b>(&mut self, i: &'b[u8]) -> u32 {
         let mut v : Vec<u8>;
@@ -1006,7 +1053,7 @@ impl NFS3State {
                 v = self.tcp_buffer_ts.split_off(0);
                 // sanity check vector length to avoid memory exhaustion
                 if self.tcp_buffer_ts.len() + i.len() > 1000000 {
-                    SCLogNotice!("parse_tcp_data_ts: TS buffer exploded {} {}",
+                    SCLogDebug!("parse_tcp_data_ts: TS buffer exploded {} {}",
                             self.tcp_buffer_ts.len(), i.len());
                     return 1;
                 };
@@ -1017,17 +1064,15 @@ impl NFS3State {
         //SCLogDebug!("tcp_buffer ({})",tcp_buffer.len());
         let mut cur_i = tcp_buffer;
         if cur_i.len() > 1000000 {
-            SCLogNotice!("BUG buffer exploded: {}", cur_i.len());
+            SCLogDebug!("BUG buffer exploded: {}", cur_i.len());
         }

         // take care of in progress file chunk transfers
         // and skip buffer beyond it
-        let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i);
+        let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i, 0);
         if consumed > 0 {
             if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
             cur_i = &cur_i[consumed as usize..];
         }
-
-
         while cur_i.len() > 0 { // min record size
             match parse_rpc_request_partial(cur_i) {
                 IResult::Done(_, ref rpc_phdr) => {
@@ -1122,6 +1167,7 @@ impl NFS3State {
                     SCLogDebug!("TC buffer exploded");
                     return 1;
                 };
+
                 v.extend_from_slice(i);
                 v.as_slice()
             },
@@ -1130,17 +1176,16 @@ impl NFS3State {
         let mut cur_i = tcp_buffer;
         if cur_i.len() > 100000 {
-            SCLogNotice!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
+            SCLogDebug!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
         }

         // take care of in progress file chunk transfers
         // and skip buffer beyond it
-        let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i);
+        let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i, 0);
         if consumed > 0 {
             if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
             cur_i = &cur_i[consumed as usize..];
         }
-
         while cur_i.len() > 0 {
             match parse_rpc_packet_header(cur_i) {
                 IResult::Done(_, ref rpc_hdr) => {
@@ -1260,6 +1305,14 @@ pub extern "C" fn rs_nfs3_parse_request(_flow: *mut Flow,
 {
     let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
     SCLogDebug!("parsing {} bytes of request data", input_len);
+
+    if buf.as_ptr().is_null() && input_len > 0 {
+        if state.parse_tcp_data_ts_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
     if state.parse_tcp_data_ts(buf) == 0 {
         1
     } else {
@@ -1278,6 +1331,14 @@ pub extern "C" fn rs_nfs3_parse_response(_flow: *mut Flow,
 {
     SCLogDebug!("parsing {} bytes of response data", input_len);
     let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
+
+    if buf.as_ptr().is_null() && input_len > 0 {
+        if state.parse_tcp_data_tc_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
     if state.parse_tcp_data_tc(buf) == 0 {
         1
     } else {
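
The session-wide gap flags set above feed a safety valve in filetracker_update(): after a GAP, READ replies may never match a request, so parked out-of-order data could grow without bound. A standalone model of the cutoff check (2000000 bytes in the patch, marked as TODO-configurable):

    // Standalone model of the queued-data cutoff once a session GAP is seen.
    struct Tracker { queued: u64, truncated: bool }

    impl Tracker {
        fn maybe_trunc(&mut self, ssn_gap: bool) {
            const CUTOFF: u64 = 2_000_000; // patch value; TODO configurable
            if ssn_gap && self.queued > CUTOFF && !self.truncated {
                self.truncated = true; // patch calls file_tracker.trunc(files, flags)
                println!("queued {} bytes with GAPs seen: truncating", self.queued);
            }
        }
    }

    fn main() {
        let mut t = Tracker { queued: 3_000_000, truncated: false };
        t.maybe_trunc(false); // no session gap: keep queuing
        assert!(!t.truncated);
        t.maybe_trunc(true);  // gap seen and over the cutoff: truncate
        assert!(t.truncated);
    }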

@@ -346,6 +346,10 @@ void RegisterNFS3Parsers(void)
         //    NFS3StateGetEventInfo);
         //AppLayerParserRegisterGetEventsFunc(IPPROTO_TCP, ALPROTO_NFS3,
         //    NFS3GetEvents);
+
+        /* This parser accepts gaps. */
+        AppLayerParserRegisterOptionFlags(IPPROTO_TCP, ALPROTO_NFS3,
+                APP_LAYER_PARSER_OPT_ACCEPT_GAPS);
     }
     else {
         SCLogDebug("NFS3 protocol parsing disabled.");

@@ -124,6 +124,7 @@ static void FileWriteJsonRecord(JsonFileLogThread *aft, const Packet *p, const F
     if (ff->magic)
         json_object_set_new(fjs, "magic", json_string((char *)ff->magic));
 #endif
+    json_object_set_new(fjs, "gaps", json_boolean((ff->flags & FILE_HAS_GAPS)));
     switch (ff->state) {
         case FILE_STATE_CLOSED:
             json_object_set_new(fjs, "state", json_string("CLOSED"));
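
With the FILE_HAS_GAPS bit surfaced in the file log, a record for a transfer that crossed a gap might look like this (field values illustrative; only the "gaps" key is added by this patch):

    {"filename": "example.bin", "gaps": true, "state": "TRUNCATED"}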

@@ -33,6 +33,8 @@ typedef struct SuricataContext_ {
             const uint8_t *data, uint32_t data_len, uint16_t flags);
     int (*FileAppendDataById)(FileContainer *, uint32_t track_id,
             const uint8_t *data, uint32_t data_len);
+    int (*FileAppendGAPById)(FileContainer *, uint32_t track_id,
+            const uint8_t *data, uint32_t data_len);
     void (*FileContainerRecycle)(FileContainer *ffc);
     void (*FilePrune)(FileContainer *ffc);
     void (*FileSetTx)(FileContainer *, uint64_t);

@@ -2795,6 +2795,7 @@ int main(int argc, char **argv)
     context.FileOpenFileWithId = FileOpenFileWithId;
     context.FileCloseFileById = FileCloseFileById;
     context.FileAppendDataById = FileAppendDataById;
+    context.FileAppendGAPById = FileAppendGAPById;
     context.FileContainerRecycle = FileContainerRecycle;
     context.FilePrune = FilePrune;
     context.FileSetTx = FileContainerSetTx;

@@ -689,6 +689,41 @@ int FileAppendDataById(FileContainer *ffc, uint32_t track_id,
     SCReturnInt(-1);
 }

+/**
+ * \brief Store/handle a chunk of file data in the File structure.
+ *        The file with 'track_id' in the FileContainer will be used.
+ *
+ * \param ffc FileContainer used to append to
+ * \param track_id id to look up the file
+ * \param data data chunk
+ * \param data_len data chunk len
+ *
+ * \retval 0 ok
+ * \retval -1 error
+ * \retval -2 no store for this file
+ */
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+        const uint8_t *data, uint32_t data_len)
+{
+    SCEnter();
+
+    if (ffc == NULL || ffc->tail == NULL || data == NULL || data_len == 0) {
+        SCReturnInt(-1);
+    }
+    File *ff = ffc->head;
+    for ( ; ff != NULL; ff = ff->next) {
+        if (track_id == ff->file_track_id) {
+            ff->flags |= FILE_HAS_GAPS;
+            ff->flags |= (FILE_NOMD5|FILE_NOSHA1|FILE_NOSHA256);
+            ff->flags &= ~(FILE_MD5|FILE_SHA1|FILE_SHA256);
+            SCLogDebug("FILE_HAS_GAPS set");
+
+            int r = FileAppendDataDo(ff, data, data_len);
+            SCReturnInt(r);
+        }
+    }
+    SCReturnInt(-1);
+}
+
 /**
  * \brief Open a new File
@@ -837,7 +872,7 @@ static int FileCloseFilePtr(File *ff, const uint8_t *data,
         }
     }

-    if (flags & FILE_TRUNCATED) {
+    if ((flags & FILE_TRUNCATED) || (ff->flags & FILE_HAS_GAPS)) {
         ff->state = FILE_STATE_TRUNCATED;
         SCLogDebug("flowfile state transitioned to FILE_STATE_TRUNCATED");

@@ -48,6 +48,7 @@
 #define FILE_NOTRACK            BIT_U16(12) /**< track size of file */
 #define FILE_USE_DETECT         BIT_U16(13) /**< use content_inspected tracker */
 #define FILE_USE_TRACKID        BIT_U16(14) /**< File::file_track_id field is in use */
+#define FILE_HAS_GAPS           BIT_U16(15)

 typedef enum FileState_ {
     FILE_STATE_NONE = 0,    /**< no state */
@@ -159,6 +160,8 @@ int FileCloseFileById(FileContainer *, uint32_t track_id,
 int FileAppendData(FileContainer *, const uint8_t *data, uint32_t data_len);
 int FileAppendDataById(FileContainer *, uint32_t track_id,
         const uint8_t *data, uint32_t data_len);
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+        const uint8_t *data, uint32_t data_len);

 /**
  * \brief Tag a file for storing
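
Taken together, FileAppendGAPById's flag handling pins down the checksum semantics: a gapped file keeps accepting data, but its digests are abandoned. A standalone model of the bit arithmetic (only FILE_HAS_GAPS = BIT_U16(15) is defined by the patch; the other bit positions here are placeholders for illustration):

    // Standalone model of the flag updates performed by FileAppendGAPById.
    const FILE_MD5:      u16 = 1 << 1; // placeholder bit positions
    const FILE_SHA1:     u16 = 1 << 2;
    const FILE_SHA256:   u16 = 1 << 3;
    const FILE_NOMD5:    u16 = 1 << 4;
    const FILE_NOSHA1:   u16 = 1 << 5;
    const FILE_NOSHA256: u16 = 1 << 6;
    const FILE_HAS_GAPS: u16 = 1 << 15; // BIT_U16(15), as in util-file.h

    fn apply_gap_flags(mut flags: u16) -> u16 {
        flags |= FILE_HAS_GAPS;                           // mark the file gapped
        flags |= FILE_NOMD5 | FILE_NOSHA1 | FILE_NOSHA256; // forbid checksums
        flags &= !(FILE_MD5 | FILE_SHA1 | FILE_SHA256);   // drop partial digests
        flags
    }

    fn main() {
        let flags = apply_gap_flags(FILE_MD5); // file had md5 tracking enabled
        assert!(flags & FILE_HAS_GAPS != 0);
        assert!(flags & FILE_MD5 == 0);
    }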
