flow: declare and use constants where possible

pull/11353/head
Shivani Bhardwaj authored 1 year ago, committed by Victor Julien
parent 00a644c5c2
commit 903283d76e
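The whole change applies one pattern: bare numeric literals and a writable global are replaced with named preprocessor constants. A minimal, self-contained sketch of that pattern follows; the names in it are illustrative stand-ins, not the Suricata code itself.

#include <stdint.h>
#include <stdio.h>

/* Before: a bare literal repeated at each use site, plus a writable global
 *     uint32_t queue_flush_limit = 25;
 * After: one named compile-time constant. */
#define QUEUE_FLUSH_LIMIT 25 /* hypothetical stand-in for RECYCLE_MAX_QUEUE_ITEMS */

static void maybe_flush(uint32_t len)
{
    /* the comparison now names its threshold instead of repeating "25" */
    if (len >= QUEUE_FLUSH_LIMIT)
        printf("flush: %u queued items\n", len);
}

int main(void)
{
    maybe_flush(10); /* below the limit: nothing happens */
    maybe_flush(30); /* at or above the limit: flushed */
    return 0;
}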

@@ -573,6 +573,7 @@ static uint32_t FlowManagerHashRowCleanup(Flow *f, FlowQueuePrivate *recycle_q,
     return cnt;
 }
 
+#define RECYCLE_MAX_QUEUE_ITEMS 25
 /**
  * \brief remove all flows from the hash
  *
@@ -598,12 +599,12 @@ static uint32_t FlowCleanupHash(void)
             }
             FBLOCK_UNLOCK(fb);
 
-            if (local_queue.len >= 25) {
+            if (local_queue.len >= RECYCLE_MAX_QUEUE_ITEMS) {
                 FlowQueueAppendPrivate(&flow_recycle_q, &local_queue);
                 FlowWakeupFlowRecyclerThread();
             }
         }
     }
-    DEBUG_VALIDATE_BUG_ON(local_queue.len >= 25);
+    DEBUG_VALIDATE_BUG_ON(local_queue.len >= RECYCLE_MAX_QUEUE_ITEMS);
     FlowQueueAppendPrivate(&flow_recycle_q, &local_queue);
     FlowWakeupFlowRecyclerThread();
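Note on the hunk above: RECYCLE_MAX_QUEUE_ITEMS caps how many cleaned-up flows sit in the thread-local queue before they are handed to the shared flow_recycle_q and the recycler thread is woken, presumably to bound lock traffic and wakeups. A rough single-file sketch of that batch-and-flush idea, using simplified counters instead of the real flow queues (all names illustrative):

#include <stdint.h>
#include <stdio.h>

#define RECYCLE_MAX_QUEUE_ITEMS 25 /* same threshold the patch names */

/* stand-ins for the thread-local and shared recycle queues */
static uint32_t local_len = 0;
static uint32_t shared_len = 0;

static void flush_and_wake(void)
{
    shared_len += local_len;
    local_len = 0;
    printf("recycler woken, shared queue now %u\n", shared_len);
}

/* each cleaned-up flow is staged locally; the shared queue is only touched
 * once a batch of RECYCLE_MAX_QUEUE_ITEMS has built up */
static void stage_flow(void)
{
    local_len++;
    if (local_len >= RECYCLE_MAX_QUEUE_ITEMS)
        flush_and_wake();
}

int main(void)
{
    for (int i = 0; i < 60; i++)
        stage_flow();
    if (local_len > 0) /* trailing partial batch, as at the end of FlowCleanupHash */
        flush_and_wake();
    return 0;
}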
@@ -1066,8 +1067,6 @@ static void Recycler(ThreadVars *tv, FlowRecyclerThreadData *ftd, Flow *f)
     FLOWLOCK_UNLOCK(f);
 }
 
-extern uint32_t flow_spare_pool_block_size;
-
 /** \brief Thread that manages timed out flows.
  *
  * \param td ThreadVars cast to void ptr
@@ -1108,7 +1107,7 @@ static TmEcode FlowRecycler(ThreadVars *th_v, void *thread_data)
             /* for every full sized block, add it to the spare pool */
             FlowQueuePrivateAppendFlow(&ret_queue, f);
-            if (ret_queue.len == flow_spare_pool_block_size) {
+            if (ret_queue.len == FLOW_SPARE_POOL_BLOCK_SIZE) {
                 FlowSparePoolReturnFlows(&ret_queue);
             }
         }

@@ -40,7 +40,6 @@ typedef struct FlowSparePool {
 } FlowSparePool;
 
 static uint32_t flow_spare_pool_flow_cnt = 0;
-uint32_t flow_spare_pool_block_size = 100;
 static FlowSparePool *flow_spare_pool = NULL;
 static SCMutex flow_spare_pool_m = SCMUTEX_INITIALIZER;
@@ -65,8 +64,7 @@ static bool FlowSparePoolUpdateBlock(FlowSparePool *p)
 {
     DEBUG_VALIDATE_BUG_ON(p == NULL);
 
-    for (uint32_t i = p->queue.len; i < flow_spare_pool_block_size; i++)
-    {
+    for (uint32_t i = p->queue.len; i < FLOW_SPARE_POOL_BLOCK_SIZE; i++) {
         Flow *f = FlowAlloc();
         if (f == NULL)
             return false;
@@ -84,8 +82,8 @@ static void Validate(FlowSparePool *top, const uint32_t target)
     }
 
     assert(top->queue.len >= 1);
-    //if (top->next != NULL)
-    //    assert(top->next->queue.len == flow_spare_pool_block_size);
+    // if (top->next != NULL)
+    //    assert(top->next->queue.len == FLOW_SPARE_POOL_BLOCK_SIZE);
 
     uint32_t cnt = 0;
     for (FlowSparePool *p = top; p != NULL; p = p->next)
@@ -106,7 +104,7 @@ void FlowSparePoolReturnFlow(Flow *f)
     DEBUG_VALIDATE_BUG_ON(flow_spare_pool == NULL);
 
     /* if the top is full, get a new block */
-    if (flow_spare_pool->queue.len >= flow_spare_pool_block_size) {
+    if (flow_spare_pool->queue.len >= FLOW_SPARE_POOL_BLOCK_SIZE) {
         FlowSparePool *p = FlowSpareGetPool();
         DEBUG_VALIDATE_BUG_ON(p == NULL);
         p->next = flow_spare_pool;
@@ -128,10 +126,10 @@ void FlowSparePoolReturnFlows(FlowQueuePrivate *fqp)
     SCMutexLock(&flow_spare_pool_m);
     flow_spare_pool_flow_cnt += fqp->len;
     if (flow_spare_pool != NULL) {
-        if (p->queue.len == flow_spare_pool_block_size) {
+        if (p->queue.len == FLOW_SPARE_POOL_BLOCK_SIZE) {
             /* full block insert */
-            if (flow_spare_pool->queue.len < flow_spare_pool_block_size) {
+            if (flow_spare_pool->queue.len < FLOW_SPARE_POOL_BLOCK_SIZE) {
                 p->next = flow_spare_pool->next;
                 flow_spare_pool->next = p;
                 p = NULL;
@@ -143,7 +141,7 @@ void FlowSparePoolReturnFlows(FlowQueuePrivate *fqp)
         } else {
             /* incomplete block insert */
-            if (p->queue.len + flow_spare_pool->queue.len <= flow_spare_pool_block_size) {
+            if (p->queue.len + flow_spare_pool->queue.len <= FLOW_SPARE_POOL_BLOCK_SIZE) {
                 FlowQueuePrivateAppendPrivate(&flow_spare_pool->queue, &p->queue);
                 /* free 'p' outside of lock below */
             } else {
@@ -182,7 +180,7 @@ FlowQueuePrivate FlowSpareGetFromPool(void)
     }
 
     /* top if full or its the only block we have */
-    if (flow_spare_pool->queue.len >= flow_spare_pool_block_size || flow_spare_pool->next == NULL) {
+    if (flow_spare_pool->queue.len >= FLOW_SPARE_POOL_BLOCK_SIZE || flow_spare_pool->next == NULL) {
         FlowSparePool *p = flow_spare_pool;
         flow_spare_pool = p->next;
         DEBUG_VALIDATE_BUG_ON(flow_spare_pool_flow_cnt < p->queue.len);
@@ -222,7 +220,7 @@ void FlowSparePoolUpdate(uint32_t size)
     if (todo < 0) {
         uint32_t to_remove = (uint32_t)(todo * -1) / 10;
         while (to_remove) {
-            if (to_remove < flow_spare_pool_block_size)
+            if (to_remove < FLOW_SPARE_POOL_BLOCK_SIZE)
                 return;
 
             FlowSparePool *p = NULL;
@@ -246,7 +244,7 @@ void FlowSparePoolUpdate(uint32_t size)
     } else if (todo > 0) {
         FlowSparePool *head = NULL, *tail = NULL;
 
-        uint32_t blocks = ((uint32_t)todo / flow_spare_pool_block_size) + 1;
+        uint32_t blocks = ((uint32_t)todo / FLOW_SPARE_POOL_BLOCK_SIZE) + 1;
 
         uint32_t flow_cnt = 0;
         for (uint32_t cnt = 0; cnt < blocks; cnt++) {
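Note on the hunk above: the pool is grown in block-sized chunks, and the unconditional + 1 gives any partial remainder its own block. A quick, illustrative check of the arithmetic with the 100-flow block size:

#include <stdint.h>
#include <stdio.h>

#define FLOW_SPARE_POOL_BLOCK_SIZE 100

int main(void)
{
    /* same expression as the patch: the shortfall divided into blocks, with
     * the trailing +1 covering the partial remainder */
    int32_t todo = 250; /* example shortfall of spare flows */
    uint32_t blocks = ((uint32_t)todo / FLOW_SPARE_POOL_BLOCK_SIZE) + 1;
    printf("todo=%d -> %u blocks of %d flows\n", todo, blocks,
            FLOW_SPARE_POOL_BLOCK_SIZE);
    /* prints: todo=250 -> 3 blocks of 100 flows */
    return 0;
}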

@@ -27,6 +27,8 @@
 #include "suricata-common.h"
 #include "flow.h"
 
+#define FLOW_SPARE_POOL_BLOCK_SIZE 100
+
 void FlowSparePoolInit(void);
 void FlowSparePoolDestroy(void);
 void FlowSparePoolUpdate(uint32_t size);
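With the definition added above, the block size stops being a writable global (uint32_t flow_spare_pool_block_size = 100;, removed in an earlier hunk) that other translation units had to redeclare extern; it becomes a compile-time constant visible to every includer of the header, which is why the extern declarations are dropped in the hunks above and below. A small sketch of what that buys, with illustrative names rather than the real header:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for FLOW_SPARE_POOL_BLOCK_SIZE from the header */
#define SPARE_POOL_BLOCK_SIZE 100

int main(void)
{
    /* as a preprocessor constant the value can appear in constant
     * expressions, e.g. the "two full blocks" threshold used later in
     * CheckWorkQueue; with the old extern variable this was a runtime load */
    enum { RETURN_THRESHOLD = SPARE_POOL_BLOCK_SIZE * 2 };

    uint32_t spare_len = 230; /* example length of a worker's spare queue */
    if (spare_len >= RETURN_THRESHOLD)
        printf("%u spares >= %d, return a block to the shared pool\n",
                spare_len, RETURN_THRESHOLD);
    return 0;
}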

@@ -151,8 +151,6 @@ static int FlowFinish(ThreadVars *tv, Flow *f, FlowWorkerThreadData *fw, void *d
     return cnt;
 }
 
-extern uint32_t flow_spare_pool_block_size;
-
 /** \param[in] max_work Max flows to process. 0 if unlimited. */
 static void CheckWorkQueue(ThreadVars *tv, FlowWorkerThreadData *fw, FlowTimeoutCounters *counters,
         FlowQueuePrivate *fq, const uint32_t max_work)
@@ -190,9 +188,9 @@ static void CheckWorkQueue(ThreadVars *tv, FlowWorkerThreadData *fw, FlowTimeout
             FlowClearMemory (f, f->protomap);
             FLOWLOCK_UNLOCK(f);
 
-            if (fw->fls.spare_queue.len >= (flow_spare_pool_block_size * 2)) {
+            if (fw->fls.spare_queue.len >= (FLOW_SPARE_POOL_BLOCK_SIZE * 2)) {
                 FlowQueuePrivatePrependFlow(&ret_queue, f);
-                if (ret_queue.len == flow_spare_pool_block_size) {
+                if (ret_queue.len == FLOW_SPARE_POOL_BLOCK_SIZE) {
                     FlowSparePoolReturnFlows(&ret_queue);
                 }
             } else {
