|
|
|
/* Copyright (C) 2007-2014 Open Information Security Foundation
|
|
|
|
*
|
|
|
|
* You can copy, redistribute or modify this Program under the terms of
|
|
|
|
* the GNU General Public License version 2 as published by the Free
|
|
|
|
* Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* version 2 along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
|
|
* 02110-1301, USA.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* \file
|
|
|
|
*
|
|
|
|
* \author Victor Julien <victor@inliniac.net>
|
|
|
|
*
|
|
|
|
* Packetpool queue handlers. Packet pool is implemented as a stack.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "suricata.h"

#include <string.h>

#include "packet-queue.h"
#include "decode.h"
#include "detect.h"
#include "detect-uricontent.h"
#include "threads.h"
#include "threadvars.h"
#include "flow.h"
#include "flow-util.h"
#include "host.h"

#include "stream.h"
#include "stream-tcp-reassemble.h"

#include "tm-queuehandlers.h"

#include "pkt-var.h"

#include "tmqh-packetpool.h"

#include "util-debug.h"
#include "util-error.h"
#include "util-profiling.h"
#include "util-device.h"
|
|
|
|
|
|
|
|
/* Number of freed packet to save for one pool before freeing them. */
|
|
|
|
#define MAX_PENDING_RETURN_PACKETS 32
|
|
|
|
|
|
|
|
#ifdef TLS
/* Per-thread packet pool. With compiler TLS support each thread gets its
 * own PktPool instance for free; the local stack needs no locking. */
__thread PktPool thread_pkt_pool;

/** \brief Get the calling thread's packet pool (TLS variant). */
static inline PktPool *GetThreadPacketPool(void)
{
    return &thread_pkt_pool;
}
|
|
|
|
#else
|
|
|
|
/* __thread not supported. */
|
|
|
|
static pthread_key_t pkt_pool_thread_key;
|
|
|
|
static SCMutex pkt_pool_thread_key_mutex = SCMUTEX_INITIALIZER;
|
|
|
|
static int pkt_pool_thread_key_initialized = 0;
|
|
|
|
|
|
|
|
static void PktPoolThreadDestroy(void * buf)
|
|
|
|
{
|
|
|
|
free(buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void TmqhPacketPoolInit(void)
|
|
|
|
{
|
|
|
|
SCMutexLock(&pkt_pool_thread_key_mutex);
|
|
|
|
if (pkt_pool_thread_key_initialized) {
|
|
|
|
/* Key has already been created. */
|
|
|
|
SCMutexUnlock(&pkt_pool_thread_key_mutex);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create the pthread Key that is used to look up thread specific
|
|
|
|
* data buffer. Needs to be created only once.
|
|
|
|
*/
|
|
|
|
int r = pthread_key_create(&pkt_pool_thread_key, PktPoolThreadDestroy);
|
|
|
|
if (r != 0) {
|
|
|
|
SCLogError(SC_ERR_MEM_ALLOC, "pthread_key_create failed with %d", r);
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
|
|
|
|
pkt_pool_thread_key_initialized = 1;
|
|
|
|
SCMutexUnlock(&pkt_pool_thread_key_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
static PktPool *ThreadPacketPoolCreate(void)
|
|
|
|
{
|
|
|
|
TmqhPacketPoolInit();
|
|
|
|
|
|
|
|
/* Create a new pool for this thread. */
|
|
|
|
PktPool* pool = (PktPool*)SCMallocAligned(sizeof(PktPool), CLS);
|
|
|
|
if (pool == NULL) {
|
|
|
|
SCLogError(SC_ERR_MEM_ALLOC, "malloc failed");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
int r = pthread_setspecific(pkt_pool_thread_key, pool);
|
|
|
|
if (r != 0) {
|
|
|
|
SCLogError(SC_ERR_MEM_ALLOC, "pthread_setspecific failed with %d", r);
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return pool;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline PktPool *GetThreadPacketPool(void)
|
|
|
|
{
|
|
|
|
PktPool* pool = (PktPool*)pthread_getspecific(pkt_pool_thread_key);
|
|
|
|
if (pool == NULL)
|
|
|
|
pool = ThreadPacketPoolCreate();
|
|
|
|
|
|
|
|
return pool;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
 * \brief Register the "packetpool" queue handler: its input handler hands
 *        out fresh packets, its output handler returns them to the pool.
 * \initonly
 */
void TmqhPacketpoolRegister (void)
{
    tmqh_table[TMQH_PACKETPOOL].name = "packetpool";
    tmqh_table[TMQH_PACKETPOOL].InHandler = TmqhInputPacketpool;
    tmqh_table[TMQH_PACKETPOOL].OutHandler = TmqhOutputPacketpool;
}
|
|
|
|
|
|
|
|
/** \brief Check whether a pool has no packets available.
 *
 *  A pool is empty only when both the lock-free local stack and the
 *  shared return stack hold nothing.
 *
 *  \retval 1 empty, 0 packets available
 */
static int PacketPoolIsEmpty(PktPool *pool)
{
    if (pool->head == NULL && pool->return_stack.head == NULL)
        return 1;

    return 0;
}
|
|
|
|
|
|
|
|
/** \brief Busy-wait until this thread's pool has at least one packet.
 *
 *  Spins on PacketPoolIsEmpty() with only a compiler barrier between
 *  checks — no sleeping or condition variable, so this burns CPU while
 *  waiting. cc_barrier() prevents the compiler from hoisting the loads
 *  out of the loop.
 */
void PacketPoolWait(void)
{
    PktPool *my_pool = GetThreadPacketPool();

    while(PacketPoolIsEmpty(my_pool))
        cc_barrier();
}
|
|
|
|
|
|
|
|
/** \brief Store an initialized packet in the calling thread's pool.
 *
 *  Takes ownership of \a p: binds it to this thread's pool, installs
 *  PacketPoolReturnPacket() as its release callback and pushes it onto
 *  the pool via that callback.
 *
 *  \warning Use *only* at init, not at packet runtime
 */
static void PacketPoolStorePacket(Packet *p)
{
    /* Clear the PKT_ALLOC flag, since that indicates to push back
     * onto the ring buffer. */
    p->flags &= ~PKT_ALLOC;
    p->pool = GetThreadPacketPool();
    p->ReleasePacket = PacketPoolReturnPacket;
    PacketPoolReturnPacket(p);
}
|
|
|
|
|
|
|
|
/** \brief Drain the pool's locked return stack into its local stack.
 *
 *  Other threads push freed packets onto return_stack under its mutex;
 *  this moves the whole chain over in O(1) by swinging the head pointer,
 *  after which the owning thread can pop without locking.
 *
 *  \note caller must own \a pool (i.e. it is this thread's pool), since
 *        pool->head is written without locking.
 */
static void PacketPoolGetReturnedPackets(PktPool *pool)
{
    SCMutexLock(&pool->return_stack.mutex);
    /* Move all the packets from the locked return stack to the local stack. */
    pool->head = pool->return_stack.head;
    pool->return_stack.head = NULL;
    SCMutexUnlock(&pool->return_stack.mutex);
}
|
|
|
|
|
|
|
|
/** \brief Get a new packet from the packet pool
|
|
|
|
*
|
|
|
|
* Only allocates from the thread's local stack, or mallocs new packets.
|
|
|
|
* If the local stack is empty, first move all the return stack packets to
|
|
|
|
* the local stack.
|
|
|
|
* \retval Packet pointer, or NULL on failure.
|
|
|
|
*/
|
|
|
|
Packet *PacketPoolGetPacket(void)
|
|
|
|
{
|
|
|
|
PktPool *pool = GetThreadPacketPool();
|
|
|
|
|
|
|
|
if (pool->head) {
|
|
|
|
/* Stack is not empty. */
|
|
|
|
Packet *p = pool->head;
|
|
|
|
pool->head = p->next;
|
|
|
|
p->pool = pool;
|
|
|
|
PACKET_REINIT(p);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Local Stack is empty, so check the return stack, which requires
|
|
|
|
* locking. */
|
|
|
|
PacketPoolGetReturnedPackets(pool);
|
|
|
|
|
|
|
|
/* Try to allocate again. Need to check for not empty again, since the
|
|
|
|
* return stack might have been empty too.
|
|
|
|
*/
|
|
|
|
if (pool->head) {
|
|
|
|
/* Stack is not empty. */
|
|
|
|
Packet *p = pool->head;
|
|
|
|
pool->head = p->next;
|
|
|
|
p->pool = pool;
|
|
|
|
PACKET_REINIT(p);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Failed to allocate a packet, so return NULL. */
|
|
|
|
/* Optionally, could allocate a new packet here. */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** \brief Return packet to Packet pool
 *
 *  Fast path: a packet owned by this thread's pool is pushed back onto
 *  the local stack without locking.
 *
 *  Remote pools use a batching scheme: the first remotely-owned packet
 *  freed picks its owner as this thread's "pending pool"; subsequent
 *  frees for that same pool are chained onto a local pending list. Once
 *  the list exceeds MAX_PENDING_RETURN_PACKETS it is spliced onto the
 *  owner's return stack in one locked operation, amortizing the mutex
 *  cost (important for autofp, where one thread allocates most packets).
 *  Packets for any other remote pool are returned immediately under that
 *  pool's return-stack mutex.
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    if (pool == NULL) {
        /* Packet was malloc'd outside any pool (PKT_ALLOC path). */
        free(p);
        return;
    }

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            if (my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SCMutexUnlock(&pool->return_stack.mutex);
                /* Clear the list of pending packets to return. The next
                 * remote free will choose a (possibly different) pending
                 * pool. */
                my_pool->pending_pool = NULL;
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SCMutexUnlock(&pool->return_stack.mutex);
        }
    }
}
|
|
|
|
|
|
|
|
void PacketPoolInit(void)
|
|
|
|
{
|
|
|
|
extern intmax_t max_pending_packets;
|
|
|
|
|
|
|
|
#ifndef TLS
|
|
|
|
TmqhPacketPoolInit();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
PktPool *my_pool = GetThreadPacketPool();
|
|
|
|
|
|
|
|
SCMutexInit(&my_pool->return_stack.mutex, NULL);
|
|
|
|
|
|
|
|
/* pre allocate packets */
|
For PktPool add local pending freed packets list.
Better handle the autofp case where one thread allocates the majority
of the packets and other threads free those packets.
Add a list of locally pending packets. The first packet freed goes on the
pending list, then subsequent freed packets for the same Packet Pool are
added to this list until it hits a fixed number of packets, then the
entire list of packets is pushed onto the pool's return stack. If a freed
packet is not for the pending pool, it is freed immediately to its pool's
return stack, as before.
For the autofp case, since there is only one Packet Pool doing all the
allocation, every other thread will keep a list of pending packets for
that pool.
For the worker run mode, most packets are allocated and freed locally. For
the case where packets are being returned to a remote pool, a pending list
will be kept for one of those other threads, all others are returned as before.
Which remote pool for which to keep a pending list is changed each time the
pending list is returned. Since the return pending pool is cleared when it is
freed, then next packet to be freed chooses the new pending pool.
11 years ago
|
|
|
SCLogDebug("preallocating packets... packet size %" PRIuMAX "",
|
|
|
|
(uintmax_t)SIZE_OF_PACKET);
|
|
|
|
int i = 0;
|
|
|
|
for (i = 0; i < max_pending_packets; i++) {
|
|
|
|
Packet *p = PacketGetFromAlloc();
|
|
|
|
if (unlikely(p == NULL)) {
|
|
|
|
SCLogError(SC_ERR_FATAL, "Fatal error encountered while allocating a packet. Exiting...");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
PacketPoolStorePacket(p);
|
|
|
|
}
|
|
|
|
SCLogInfo("preallocated %"PRIiMAX" packets. Total memory %"PRIuMAX"",
|
|
|
|
max_pending_packets, (uintmax_t)(max_pending_packets*SIZE_OF_PACKET));
|
|
|
|
}
|
|
|
|
|
|
|
|
void PacketPoolDestroy(void)
|
|
|
|
{
|
|
|
|
Packet *p = NULL;
|
|
|
|
PktPool *my_pool = GetThreadPacketPool();
|
|
|
|
if (my_pool && my_pool->pending_head != NULL) {
|
|
|
|
p = my_pool->pending_head;
|
|
|
|
while (p) {
|
|
|
|
Packet *next_p = p->next;
|
|
|
|
PacketFree(p);
|
|
|
|
p = next_p;
|
|
|
|
my_pool->pending_count--;
|
|
|
|
}
|
|
|
|
BUG_ON(my_pool->pending_count);
|
|
|
|
my_pool->pending_pool = NULL;
|
|
|
|
my_pool->pending_head = NULL;
|
|
|
|
my_pool->pending_tail = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
while ((p = PacketPoolGetPacket()) != NULL) {
|
|
|
|
PacketFree(p);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** \brief Queue handler 'in': hand a thread a packet from its pool.
 *
 *  \param tv thread vars (unused here)
 *  \retval Packet pointer, or NULL if the pool is currently empty.
 */
Packet *TmqhInputPacketpool(ThreadVars *tv)
{
    return PacketPoolGetPacket();
}
|
|
|
|
|
|
|
|
/** \brief Queue handler 'out': return a finished packet to its pool.
 *
 *  Prunes the packet's TCP session, then handles tunnel bookkeeping: a
 *  tunnel root with live child packets is only marked verdicted and kept;
 *  the last child packet of a verdicted root releases the root as well
 *  (via proot). Finally releases the packet through its ReleasePacket
 *  callback.
 *
 *  \param t thread vars (unused, may be NULL)
 *  \param p packet to return; must not be NULL
 */
void TmqhOutputPacketpool(ThreadVars *t, Packet *p)
{
    /* set to 1 when the tunnel root must be released together with p */
    int proot = 0;

    SCEnter();
    SCLogDebug("Packet %p, p->root %p, alloced %s", p, p->root, p->flags & PKT_ALLOC ? "true" : "false");

    /** \todo make this a callback
     *  Release tcp segments. Done here after alerting can use them. */
    if (p->flow != NULL && p->proto == IPPROTO_TCP) {
        SCMutexLock(&p->flow->m);
        StreamTcpPruneSession(p->flow, p->flowflags & FLOW_PKT_TOSERVER ?
                STREAM_TOSERVER : STREAM_TOCLIENT);
        SCMutexUnlock(&p->flow->m);
    }

    if (IS_TUNNEL_PKT(p)) {
        SCLogDebug("Packet %p is a tunnel packet: %s",
            p,p->root ? "upper layer" : "tunnel root");

        /* get a lock to access root packet fields */
        SCMutex *m = p->root ? &p->root->tunnel_mutex : &p->tunnel_mutex;
        SCMutexLock(m);

        if (IS_TUNNEL_ROOT_PKT(p)) {
            SCLogDebug("IS_TUNNEL_ROOT_PKT == TRUE");
            if (TUNNEL_PKT_TPR(p) == 0) {
                SCLogDebug("TUNNEL_PKT_TPR(p) == 0, no more tunnel packet "
                        "depending on this root");
                /* if this packet is the root and there are no
                 * more tunnel packets, return it to the pool */

                /* fall through */
            } else {
                SCLogDebug("tunnel root Packet %p: TUNNEL_PKT_TPR(p) > 0, so "
                        "packets are still depending on this root, setting "
                        "p->tunnel_verdicted == 1", p);
                /* if this is the root and there are more tunnel
                 * packets, return this to the pool. It's still referenced
                 * by the tunnel packets, and we will return it
                 * when we handle them */
                SET_TUNNEL_PKT_VERDICTED(p);

                PACKET_PROFILING_END(p);
                SCMutexUnlock(m);
                SCReturn;
            }
        } else {
            SCLogDebug("NOT IS_TUNNEL_ROOT_PKT, so tunnel pkt");

            /* the p->root != NULL here seems unnecessary: IS_TUNNEL_PKT checks
             * that p->tunnel_pkt == 1, IS_TUNNEL_ROOT_PKT checks that +
             * p->root == NULL. So when we are here p->root can only be
             * non-NULL, right? CLANG thinks differently. May be a FP, but
             * better safe than sorry. VJ */
            if (p->root != NULL && IS_TUNNEL_PKT_VERDICTED(p->root) &&
                    TUNNEL_PKT_TPR(p) == 1)
            {
                SCLogDebug("p->root->tunnel_verdicted == 1 && TUNNEL_PKT_TPR(p) == 1");
                /* the root is ready and we are the last tunnel packet,
                 * lets enqueue them both. */
                TUNNEL_DECR_PKT_TPR_NOLOCK(p);

                /* handle the root */
                SCLogDebug("setting proot = 1 for root pkt, p->root %p "
                        "(tunnel packet %p)", p->root, p);
                proot = 1;

                /* fall through */
            } else {
                /* root not ready yet, so get rid of the tunnel pkt only */

                SCLogDebug("NOT p->root->tunnel_verdicted == 1 && "
                        "TUNNEL_PKT_TPR(p) == 1 (%" PRIu32 ")", TUNNEL_PKT_TPR(p));

                TUNNEL_DECR_PKT_TPR_NOLOCK(p);

                /* fall through */
            }
        }
        SCMutexUnlock(m);

        SCLogDebug("tunnel stuff done, move on (proot %d)", proot);
    }

    FlowDeReference(&p->flow);

    /* we're done with the tunnel root now as well */
    if (proot == 1) {
        SCLogDebug("getting rid of root pkt... alloc'd %s", p->root->flags & PKT_ALLOC ? "true" : "false");

        FlowDeReference(&p->root->flow);

        p->root->ReleasePacket(p->root);
        p->root = NULL;
    }

    PACKET_PROFILING_END(p);

    p->ReleasePacket(p);

    SCReturn;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* \brief Release all the packets in the queue back to the packetpool. Mainly
|
|
|
|
* used by threads that have failed, and wants to return the packets back
|
|
|
|
* to the packetpool.
|
|
|
|
*
|
|
|
|
* \param pq Pointer to the packetqueue from which the packets have to be
|
|
|
|
* returned back to the packetpool
|
|
|
|
*
|
|
|
|
* \warning this function assumes that the pq does not use locking
|
|
|
|
*/
|
|
|
|
void TmqhReleasePacketsToPacketPool(PacketQueue *pq)
|
|
|
|
{
|
|
|
|
Packet *p = NULL;
|
|
|
|
|
|
|
|
if (pq == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
while ( (p = PacketDequeue(pq)) != NULL)
|
|
|
|
TmqhOutputPacketpool(NULL, p);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|