|
|
|
/* Copyright (C) 2007-2012 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
|
|
|
|
|
|
|
|
/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 */
|
|
|
|
|
|
|
|
#ifndef __FLOW_UTIL_H__
|
|
|
|
#define __FLOW_UTIL_H__
|
|
|
|
|
|
|
|
#include "flow.h"
|
|
|
|
#include "stream-tcp-private.h"
|
|
|
|
|
|
|
|
/* Copy a timeval-style timestamp from src to dst (both pointers).
 * Written as a comma expression so it stays usable in expression context. */
#define COPY_TIMESTAMP(src, dst) ((dst)->tv_sec = (src)->tv_sec, (dst)->tv_usec = (src)->tv_usec)
|
|
|
|
|
|
|
|
/** \brief Zero the per-direction packet and byte counters of a flow.
 *
 *  \param f pointer to the Flow whose counters are reset
 */
#define RESET_COUNTERS(f) \
    do { \
        (f)->todstpktcnt = 0; \
        (f)->tosrcpktcnt = 0; \
        (f)->todstbytecnt = 0; \
        (f)->tosrcbytecnt = 0; \
    } while (0)
|
|
|
|
|
|
|
|
/** \brief Initialize a freshly allocated Flow.
 *
 *  Clears addressing, state, app-layer and detection fields, initializes
 *  the flow lock (FLOWLOCK_INIT) and the last-seen timestamp, and resets
 *  the per-direction counters. Used on first setup of a Flow; compare
 *  FLOW_RECYCLE which is used when reusing a flow from the spare pool.
 *
 *  \param f pointer to the Flow to initialize
 */
#define FLOW_INITIALIZE(f) \
    do { \
        (f)->sp = 0; \
        (f)->dp = 0; \
        (f)->proto = 0; \
        (f)->livedev = NULL; \
        (f)->timeout_at = 0; \
        (f)->timeout_policy = 0; \
        (f)->vlan_idx = 0; \
        (f)->next = NULL; \
        (f)->flow_state = 0; \
        (f)->use_cnt = 0; \
        (f)->tenant_id = 0; \
        (f)->parent_id = 0; \
        (f)->probing_parser_toserver_alproto_masks = 0; \
        (f)->probing_parser_toclient_alproto_masks = 0; \
        (f)->flags = 0; \
        (f)->file_flags = 0; \
        (f)->protodetect_dp = 0; \
        SCTIME_INIT((f)->lastts); \
        FLOWLOCK_INIT((f)); \
        (f)->protoctx = NULL; \
        (f)->flow_end_flags = 0; \
        (f)->alproto = 0; \
        (f)->alproto_ts = 0; \
        (f)->alproto_tc = 0; \
        (f)->alproto_orig = 0; \
        (f)->alproto_expect = 0; \
        (f)->de_ctx_version = 0; \
        (f)->thread_id[0] = 0; \
        (f)->thread_id[1] = 0; \
        (f)->alparser = NULL; \
        (f)->alstate = NULL; \
        (f)->sgh_toserver = NULL; \
        (f)->sgh_toclient = NULL; \
        (f)->flowvar = NULL; \
        RESET_COUNTERS((f)); \
    } while (0)
|
|
|
|
|
|
|
|
/** \brief macro to recycle a flow before it goes into the spare queue for reuse.
 *
 *  Note that the lnext, lprev, hnext fields are untouched, those are
 *  managed by the queueing code. Same goes for fb (FlowBucket ptr) field.
 */
|
|
|
|
/** \brief Recycle a flow for reuse from the spare pool.
 *
 *  Cleans up app-layer state (FlowCleanupAppLayer), frees flow variables,
 *  resets a MacSet stored in flow storage if that feature is enabled, and
 *  clears the same fields as FLOW_INITIALIZE. Unlike FLOW_INITIALIZE it
 *  does not (re)initialize the flow lock, which stays valid across reuse.
 *
 *  \param f pointer to the Flow to recycle
 */
#define FLOW_RECYCLE(f) \
    do { \
        FlowCleanupAppLayer((f)); \
        (f)->sp = 0; \
        (f)->dp = 0; \
        (f)->proto = 0; \
        (f)->livedev = NULL; \
        (f)->vlan_idx = 0; \
        (f)->ffr = 0; \
        (f)->next = NULL; \
        (f)->timeout_at = 0; \
        (f)->timeout_policy = 0; \
        (f)->flow_state = 0; \
        (f)->use_cnt = 0; \
        (f)->tenant_id = 0; \
        (f)->parent_id = 0; \
        (f)->probing_parser_toserver_alproto_masks = 0; \
        (f)->probing_parser_toclient_alproto_masks = 0; \
        (f)->flags = 0; \
        (f)->file_flags = 0; \
        (f)->protodetect_dp = 0; \
        SCTIME_INIT((f)->lastts); \
        (f)->protoctx = NULL; \
        (f)->flow_end_flags = 0; \
        (f)->alparser = NULL; \
        (f)->alstate = NULL; \
        (f)->alproto = 0; \
        (f)->alproto_ts = 0; \
        (f)->alproto_tc = 0; \
        (f)->alproto_orig = 0; \
        (f)->alproto_expect = 0; \
        (f)->de_ctx_version = 0; \
        (f)->thread_id[0] = 0; \
        (f)->thread_id[1] = 0; \
        (f)->sgh_toserver = NULL; \
        (f)->sgh_toclient = NULL; \
        GenericVarFree((f)->flowvar); \
        (f)->flowvar = NULL; \
        if (MacSetFlowStorageEnabled()) { \
            MacSet *ms = FlowGetStorageById((f), MacSetGetFlowStorageID()); \
            if (ms != NULL) { \
                MacSetReset(ms); \
            } \
        } \
        RESET_COUNTERS((f)); \
    } while (0)
|
|
|
|
|
|
|
|
/** \brief Tear down a flow that is being freed for good.
 *
 *  Cleans app-layer state, destroys the flow lock and frees any
 *  flow variables. After this the Flow memory may be released.
 *
 *  \param f pointer to the Flow to destroy
 */
#define FLOW_DESTROY(f) \
    do { \
        FlowCleanupAppLayer((f)); \
        \
        FLOWLOCK_DESTROY((f)); \
        GenericVarFree((f)->flowvar); \
    } while (0)
|
|
|
|
|
|
|
|
/** \brief check if a memory alloc would fit in the memcap
 *
 *  \param size memory allocation size to check
 *
 *  \retval 1 it fits
 *  \retval 0 no fit
 */
|
|
|
|
/* True when current flow memuse plus the requested size stays within the
 * configured memcap. Both operands are widened to uint64_t before the add
 * to avoid overflow on 32-bit size types. */
#define FLOW_CHECK_MEMCAP(size) \
    ((((uint64_t)SC_ATOMIC_GET(flow_memuse) + (uint64_t)(size)) <= \
      SC_ATOMIC_GET(flow_config.memcap)))
|
|
|
|
|
|
|
|
Flow *FlowAlloc(void);
|
|
|
|
Flow *FlowAllocDirect(void);
|
|
|
|
void FlowFree(Flow *);
|
|
|
|
uint8_t FlowGetProtoMapping(uint8_t);
|
|
|
|
void FlowInit(Flow *, const Packet *);
|
uint8_t FlowGetReverseProtoMapping(uint8_t rproto);
|
|
|
|
|
|
|
|
/* flow end counter logic */
|
|
|
|
|
|
|
|
typedef struct FlowEndCounters_ {
|
|
|
|
uint16_t flow_state[FLOW_STATE_SIZE];
|
|
|
|
uint16_t flow_tcp_state[TCP_CLOSED + 1];
|
|
|
|
uint16_t flow_tcp_liberal;
|
|
|
|
} FlowEndCounters;
|
|
|
|
|
|
|
|
/** \brief Update flow-end counters for a flow that is ending.
 *
 *  Always increments the counter for the flow's current state. For TCP
 *  flows with a session context it also increments the counter for the
 *  TCP session state, and the liberal counter when the session was
 *  switched to lossy/liberal handling.
 *
 *  \param tv  thread vars used for stats updates
 *  \param fec per-thread counter ids (see FlowEndCounters)
 *  \param f   the flow being ended
 */
static inline void FlowEndCountersUpdate(ThreadVars *tv, FlowEndCounters *fec, Flow *f)
{
    if (f->proto == IPPROTO_TCP && f->protoctx != NULL) {
        TcpSession *ssn = f->protoctx;
        StatsIncr(tv, fec->flow_tcp_state[ssn->state]);
        if (ssn->lossy_be_liberal) {
            StatsIncr(tv, fec->flow_tcp_liberal);
        }
    }
    StatsIncr(tv, fec->flow_state[f->flow_state]);
}
|
|
|
|
|
|
|
|
void FlowEndCountersRegister(ThreadVars *t, FlowEndCounters *fec);
|
|
|
|
|
|
|
|
#endif /* __FLOW_UTIL_H__ */
|