@ -18,14 +18,13 @@
/**
* \ file
*
- * \ author nPulse Technologies , LLC .
- * \ author Matt Keeler < mk @ npulsetech . com >
- * \ author nPulse Technologies , LLC .
- * \ author Matt Keeler < mk @ npulsetech . com >
* *
* Support for NAPATECH adapter with the 3 GD Driver / API .
* Requires libntapi from Napatech A / S .
*
*/
# include "suricata-common.h"
# include "suricata.h"
# include "threadvars.h"
@ -40,7 +39,7 @@
# ifndef HAVE_NAPATECH
TmEcode NoNapatechSupportExit ( ThreadVars * , const void * , void * * ) ;
TmEcode NoNapatechSupportExit ( ThreadVars * , const void * , void * * ) ;
void TmModuleNapatechStreamRegister ( void )
{
@ -69,20 +68,21 @@ TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
{
SCLogError ( SC_ERR_NAPATECH_NOSUPPORT ,
" Error creating thread %s: you do not have support for Napatech adapter "
" enabled please recompile with --enable-napatech " , tv - > name ) ;
" enabled please recompile with --enable-napatech " ,
tv - > name ) ;
exit ( EXIT_FAILURE ) ;
}
# else /* Implied we do have NAPATECH support */
# include <numa.h>
# include <nt.h>
# define MAX_STREAMS 256
extern int max_pending_packets ;
typedef struct NapatechThreadVars_ {
typedef struct NapatechThreadVars_
{
ThreadVars * tv ;
NtNetStreamRx_t rx_stream ;
uint16_t stream_id ;
@ -90,10 +90,13 @@ typedef struct NapatechThreadVars_ {
TmSlot * slot ;
} NapatechThreadVars ;
# ifdef NAPATECH_ENABLE_BYPASS
static int NapatechBypassCallback ( Packet * p ) ;
# endif
TmEcode NapatechStreamThreadInit ( ThreadVars * , const void * , void * * ) ;
void NapatechStreamThreadExitStats ( ThreadVars * , void * ) ;
TmEcode NapatechPacketLoop ZC ( ThreadVars * tv , void * data , void * slot ) ;
TmEcode NapatechPacketLoop ( ThreadVars * tv , void * data , void * slot ) ;
TmEcode NapatechDecodeThreadInit ( ThreadVars * , const void * , void * * ) ;
TmEcode NapatechDecodeThreadDeinit ( ThreadVars * tv , void * data ) ;
@ -115,6 +118,12 @@ SC_ATOMIC_DECLARE(uint16_t, numa1_count);
SC_ATOMIC_DECLARE ( uint16_t , numa2_count ) ;
SC_ATOMIC_DECLARE ( uint16_t , numa3_count ) ;
SC_ATOMIC_DECLARE ( uint64_t , flow_callback_cnt ) ;
SC_ATOMIC_DECLARE ( uint64_t , flow_callback_handled_pkts ) ;
SC_ATOMIC_DECLARE ( uint64_t , flow_callback_udp_pkts ) ;
SC_ATOMIC_DECLARE ( uint64_t , flow_callback_tcp_pkts ) ;
SC_ATOMIC_DECLARE ( uint64_t , flow_callback_unhandled_pkts ) ;
/**
* \ brief Register the Napatech receiver ( reader ) module .
*/
@ -123,7 +132,7 @@ void TmModuleNapatechStreamRegister(void)
tmm_modules [ TMM_RECEIVENAPATECH ] . name = " NapatechStream " ;
tmm_modules [ TMM_RECEIVENAPATECH ] . ThreadInit = NapatechStreamThreadInit ;
tmm_modules [ TMM_RECEIVENAPATECH ] . Func = NULL ;
tmm_modules [ TMM_RECEIVENAPATECH ] . PktAcqLoop = NapatechPacketLoop ZC ;
tmm_modules [ TMM_RECEIVENAPATECH ] . PktAcqLoop = NapatechPacketLoop ;
tmm_modules [ TMM_RECEIVENAPATECH ] . PktAcqBreakLoop = NULL ;
tmm_modules [ TMM_RECEIVENAPATECH ] . ThreadExitPrintStats = NapatechStreamThreadExitStats ;
tmm_modules [ TMM_RECEIVENAPATECH ] . ThreadDeinit = NapatechStreamThreadDeinit ;
@ -140,6 +149,12 @@ void TmModuleNapatechStreamRegister(void)
SC_ATOMIC_INIT ( numa1_count ) ;
SC_ATOMIC_INIT ( numa2_count ) ;
SC_ATOMIC_INIT ( numa3_count ) ;
SC_ATOMIC_INIT ( flow_callback_cnt ) ;
SC_ATOMIC_INIT ( flow_callback_handled_pkts ) ;
SC_ATOMIC_INIT ( flow_callback_udp_pkts ) ;
SC_ATOMIC_INIT ( flow_callback_tcp_pkts ) ;
SC_ATOMIC_INIT ( flow_callback_unhandled_pkts ) ;
}
/**
@ -157,6 +172,437 @@ void TmModuleNapatechDecodeRegister(void)
tmm_modules [ TMM_DECODENAPATECH ] . flags = TM_FLAG_DECODE_TM ;
}
# ifdef NAPATECH_ENABLE_BYPASS
/**
 * \brief template of IPv4 header
 *
 * Packed so it can be overlaid directly on raw packet bytes (see the
 * cast in ProgramFlow); field values are therefore exactly as received
 * from the wire -- multi-byte fields presumably network byte order,
 * TODO confirm against the hardware key matching.
 */
struct ipv4_hdr {
    uint8_t version_ihl;      /**< version and header length */
    uint8_t type_of_service;  /**< type of service */
    uint16_t total_length;    /**< length of packet */
    uint16_t packet_id;       /**< packet ID */
    uint16_t fragment_offset; /**< fragmentation offset */
    uint8_t time_to_live;     /**< time to live */
    uint8_t next_proto_id;    /**< protocol ID */
    uint16_t hdr_checksum;    /**< header checksum */
    uint32_t src_addr;        /**< source address */
    uint32_t dst_addr;        /**< destination address */
} __attribute__((__packed__));
/**
 * \brief template of IPv6 header
 *
 * Packed overlay for raw packet bytes; values are as they appear on
 * the wire (presumably network byte order -- TODO confirm).
 */
struct ipv6_hdr {
    uint32_t vtc_flow;    /**< IP version, traffic class & flow label. */
    uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
    uint8_t proto;        /**< Protocol, next header. */
    uint8_t hop_limits;   /**< Hop limits. */
    uint8_t src_addr[16]; /**< IP address of source host. */
    uint8_t dst_addr[16]; /**< IP address of destination host(s). */
} __attribute__((__packed__));
/**
 * \brief template of UDP header
 *
 * Packed overlay for raw packet bytes; port values are as received
 * from the wire (presumably network byte order -- TODO confirm).
 */
struct udp_hdr {
    uint16_t src_port;    /**< UDP source port. */
    uint16_t dst_port;    /**< UDP destination port. */
    uint16_t dgram_len;   /**< UDP datagram length */
    uint16_t dgram_cksum; /**< UDP datagram checksum */
} __attribute__((__packed__));
/**
 * \brief template of TCP header
 *
 * Packed overlay for raw packet bytes; values are as received from
 * the wire (presumably network byte order -- TODO confirm).
 */
struct tcp_hdr {
    uint16_t src_port;  /**< TCP source port. */
    uint16_t dst_port;  /**< TCP destination port. */
    uint32_t sent_seq;  /**< TX data sequence number. */
    uint32_t recv_ack;  /**< RX data acknowledgement sequence number. */
    uint8_t data_off;   /**< Data offset. */
    uint8_t tcp_flags;  /**< TCP flags */
    uint16_t rx_win;    /**< RX flow control window. */
    uint16_t cksum;     /**< TCP checksum. */
    uint16_t tcp_urp;   /**< TCP urgent pointer, if any. */
} __attribute__((__packed__));
/* The hardware will assign a "color" value indicating what filters are matched
* by a given packet . These constants indicate what bits are set in the color
* field for different protocols
*
*/
# define RTE_PTYPE_L2_ETHER 0x10000000
# define RTE_PTYPE_L3_IPV4 0x01000000
# define RTE_PTYPE_L3_IPV6 0x04000000
# define RTE_PTYPE_L4_TCP 0x00100000
# define RTE_PTYPE_L4_UDP 0x00200000
/* These masks are used to extract layer 3 and layer 4 protocol
* values from the color field in the packet descriptor .
*/
# define RTE_PTYPE_L3_MASK 0x0f000000
# define RTE_PTYPE_L4_MASK 0x00f00000
# define COLOR_IS_SPAN 0x00001000
static int inline_port_map [ MAX_PORTS ] = { - 1 } ;
/**
* \ brief Binds two ports together for inline operation .
*
* Get the ID of an adapter on which a given port resides .
*
* \ param port one of the ports in a pairing .
* \ param peer the other port in a pairing .
* \ return ID of the adapter .
*
*/
int NapatechSetPortmap ( int port , int peer )
{
if ( ( inline_port_map [ port ] = = - 1 ) & & ( inline_port_map [ peer ] = = - 1 ) ) {
inline_port_map [ port ] = peer ;
inline_port_map [ peer ] = port ;
} else {
SCLogError ( SC_ERR_NAPATECH_PARSE_CONFIG ,
" Port pairing is already configured. " ) ;
return 0 ;
}
return 1 ;
}
/**
 * \brief Returns the ID of the adapter on which a given port resides.
 *
 * The result is cached per port so the Napatech info stream is only
 * queried on the first lookup for each port.
 *
 * NOTE(review): the cache init is not locked; assumes first calls are
 * not concurrent (same as the original) -- confirm caller threading.
 *
 * \param port for which adapter ID is requested.
 * \return ID of the adapter, or -1 on error.
 */
int NapatechGetAdapter(uint8_t port)
{
    /* An initializer of { -1 } only sets element 0; the rest would be
     * zero and every port > 0 would wrongly report adapter 0 without
     * ever querying the hardware.  Fill the whole cache on first use. */
    static int port_adapter_map[MAX_PORTS];
    static int map_initialized = 0;
    int status;
    NtInfo_t h_info;              /* Info handle */
    NtInfoStream_t h_info_stream; /* Info stream handle */

    if (unlikely(!map_initialized)) {
        for (int i = 0; i < MAX_PORTS; ++i) {
            port_adapter_map[i] = -1;
        }
        map_initialized = 1;
    }

    if (unlikely(port_adapter_map[port] == -1)) {
        if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
            NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
            return -1;
        }
        /* Read the port info to find the adapter number. */
        h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
        h_info.u.port_v9.portNo = (uint8_t)port;
        if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
            /* Get the status code as text */
            NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
            NT_InfoClose(h_info_stream);
            return -1;
        }
        port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
        /* Close the info stream on the success path too: the original
         * leaked the handle here. */
        NT_InfoClose(h_info_stream);
    }

    return port_adapter_map[port];
}
/**
 * \brief IPv4 4-tuple convenience structure
 *
 * NOTE(review): copied verbatim into NtFlow_t.keyData for
 * NAPATECH_KEYTYPE_IPV4 matches, so field order and packing must match
 * the hardware key definition -- confirm against the NTPL key setup.
 */
struct IPv4Tuple4 {
    uint32_t sa; /*!< Source address */
    uint32_t da; /*!< Destination address */
    uint16_t sp; /*!< Source port */
    uint16_t dp; /*!< Destination port */
};
/**
 * \brief IPv6 4-tuple convenience structure
 *
 * NOTE(review): copied verbatim into NtFlow_t.keyData for
 * NAPATECH_KEYTYPE_IPV6 matches, so field order and packing must match
 * the hardware key definition -- confirm against the NTPL key setup.
 */
struct IPv6Tuple4 {
    uint8_t sa[16]; /*!< Source address */
    uint8_t da[16]; /*!< Destination address */
    uint16_t sp;    /*!< Source port */
    uint16_t dp;    /*!< Destination port */
};
/**
 * \brief Compares two IPv6 addresses byte by byte.
 *
 * \param addr_a The first address to compare
 * \param addr_b The second address to compare
 *
 * \return -1 if addr_a < addr_b
 *          1 if addr_a > addr_b
 *          0 if addr_a == addr_b
 */
static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16])
{
    /* Walk the 16 octets most-significant first; the first differing
     * octet decides the ordering. */
    for (int idx = 0; idx < 16; idx++) {
        if (addr_a[idx] != addr_b[idx]) {
            return (addr_a[idx] < addr_b[idx]) ? -1 : 1;
        }
    }
    /* All octets matched: the addresses are equal. */
    return 0;
}
/**
 * \brief Programs a flow match into the Napatech adapter's flow tables.
 *
 * Sets up the flow tables on the Napatech card so that subsequent
 * packets from this flow are handled (passed or dropped) in hardware
 * without reaching Suricata.
 *
 * \param p packet containing information about the flow to be bypassed.
 * \param is_inline indicates if Suricata is being run in inline mode.
 *
 * \return 1 on success, 0 on failure (unsupported protocol or no
 *         adapter could be resolved for the receiving port).
 */
static int ProgramFlow(Packet *p, int is_inline)
{
    int status;
    NtFlow_t flow_match;
    memset(&flow_match, 0, sizeof(flow_match));

    NapatechPacketVars *ntpv = &(p->ntpv);

    int adapter = NapatechGetAdapter(ntpv->dyn3->rxPort);
    if (adapter < 0) {
        /* Could not resolve the adapter for this port; the original
         * passed -1 straight into NapatechGetFlowStreamPtr(). */
        return 0;
    }
    NtFlowStream_t *phFlowStream = NapatechGetFlowStreamPtr(adapter);

    /*
     * The hardware decoder will "color" the packets according to the
     * protocols in the packet and the port the packet arrived on.
     * packet_type gets these bits and we mask out layer3, layer4, and
     * is_span to determine the protocols and if the packet is coming
     * in from a SPAN port.
     */
    uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
    /* Packet payload starts right after the descriptor. */
    uint8_t *packet = (uint8_t *)ntpv->dyn3 + ntpv->dyn3->descrLength;

    uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
    uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
    uint32_t is_span = packet_type & COLOR_IS_SPAN;

    /*
     * When we're programming the flows to arrive on a span port, where
     * upstream and downstream packets arrive on the same port, the
     * hardware is configured to swap the source and dest fields if the
     * src addr > dest addr.  We need to program the flow tables to
     * match.  We'll compare addresses and set do_swap accordingly.
     */
    uint32_t do_swap = 0;

    SC_ATOMIC_ADD(flow_callback_cnt, 1);

    /* Only bypass TCP and UDP; everything else is counted and rejected
     * by the layer-4 switch below. */
    if (PKT_IS_TCP(p)) {
        SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
    } else if (PKT_IS_UDP(p)) {
        SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
    } else {
        SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
    }

    struct IPv4Tuple4 v4Tuple;
    struct IPv6Tuple4 v6Tuple;
    struct ipv4_hdr *pIPv4_hdr = NULL;
    struct ipv6_hdr *pIPv6_hdr = NULL;

    /* Fill in the layer-3 half of the flow key. */
    switch (layer3) {
        case RTE_PTYPE_L3_IPV4: {
            pIPv4_hdr = (struct ipv4_hdr *)(packet + ntpv->dyn3->offset0);
            if (!is_span) {
                v4Tuple.sa = pIPv4_hdr->src_addr;
                v4Tuple.da = pIPv4_hdr->dst_addr;
            } else {
                do_swap = (pIPv4_hdr->src_addr > pIPv4_hdr->dst_addr);
                if (!do_swap) {
                    /* already in order */
                    v4Tuple.sa = pIPv4_hdr->src_addr;
                    v4Tuple.da = pIPv4_hdr->dst_addr;
                } else { /* swap */
                    v4Tuple.sa = pIPv4_hdr->dst_addr;
                    v4Tuple.da = pIPv4_hdr->src_addr;
                }
            }
            break;
        }
        case RTE_PTYPE_L3_IPV6: {
            pIPv6_hdr = (struct ipv6_hdr *)(packet + ntpv->dyn3->offset0);
            do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
            if (!is_span) {
                memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
            } else {
                /* sort src/dest address before programming */
                if (!do_swap) {
                    /* already in order */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
                } else { /* swap the addresses */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
                }
            }
            break;
        }
        default: {
            /* Not IPv4/IPv6: cannot program a flow for it. */
            return 0;
        }
    }

    /* Fill in the layer-4 ports and pick the key type. */
    switch (layer4) {
        case RTE_PTYPE_L4_TCP: {
            struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = tcp_hdr->dst_port;
                    v4Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = tcp_hdr->src_port;
                        v4Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v4Tuple.sp = tcp_hdr->dst_port;
                        v4Tuple.dp = tcp_hdr->src_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else {
                if (!is_span) {
                    v6Tuple.dp = tcp_hdr->dst_port;
                    v6Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = tcp_hdr->src_port;
                        v6Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = tcp_hdr->src_port;
                        v6Tuple.sp = tcp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 6; /* IPPROTO_TCP */
            break;
        }
        case RTE_PTYPE_L4_UDP: {
            struct udp_hdr *udp_hdr = (struct udp_hdr *)(packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = udp_hdr->dst_port;
                    v4Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = udp_hdr->src_port;
                        v4Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v4Tuple.dp = udp_hdr->src_port;
                        v4Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else { /* layer3 is IPV6 */
                if (!is_span) {
                    v6Tuple.dp = udp_hdr->dst_port;
                    v6Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = udp_hdr->src_port;
                        v6Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = udp_hdr->src_port;
                        v6Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 17; /* IPPROTO_UDP */
            break;
        }
        default: {
            /* Neither TCP nor UDP: do not bypass. */
            return 0;
        }
    }

    flow_match.op = 1;  /* program flow */
    flow_match.gfi = 1; /* Generate FlowInfo records */
    flow_match.tau = 1; /* tcp automatic unlearn */

    /* Drop verdicts always program a hardware drop; pass verdicts only
     * program a hardware pass in inline mode (otherwise drop, since in
     * passive mode the duplicate traffic is unwanted). */
    if (PACKET_TEST_ACTION(p, ACTION_DROP)) {
        flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
    } else {
        if (is_inline) {
            flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
        } else {
            flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
        }
    }

    status = NT_FlowWrite(*phFlowStream, &flow_match, -1);
    if (status == NT_STATUS_TIMEOUT) {
        SCLogInfo("NT_FlowWrite returned NT_STATUS_TIMEOUT");
    } else if (status != NT_SUCCESS) {
        SCLogError(SC_ERR_NAPATECH_OPEN_FAILED, "NT_FlowWrite failed!.");
        exit(EXIT_FAILURE);
    }

    return 1;
}
/**
 * \brief Callback from Suricata when a flow that should be bypassed
 *        is identified.
 *
 * \param p packet belonging to the flow that should be bypassed.
 * \return 1 (always succeeds).
 */
static int NapatechBypassCallback(Packet *p)
{
    /*
     * The verdict is not known at this point, so simply flag the
     * packet; the flow is programmed into hardware once Suricata
     * returns it with a pass/drop decision.
     */
    p->ntpv.bypass = 1;

    return 1;
}
# endif
/**
* \ brief Initialize the Napatech receiver thread , generate a single
* NapatechThreadVar structure for each thread , this will
@ -198,12 +644,27 @@ TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **da
static PacketQueue packets_to_release [ MAX_STREAMS ] ;
/**
 * \brief Callback to indicate that the packet buffer can be returned to the hardware.
 *
 * Called when Suricata is done processing the packet.  The packet is placed
 * into a per-stream queue so that it can be retrieved and its Napatech
 * buffer released by the packet processing thread.
 *
 * \param p Packet to return to the system.
 */
static void NapatechReleasePacket(struct Packet_ *p)
{
    PacketFreeOrRelease(p);
    /* NOTE(review): p is enqueued *after* PacketFreeOrRelease(); if that
     * call can free the packet memory (PKT_ALLOC case) this reads and
     * queues freed memory -- confirm the ownership semantics of
     * PacketFreeOrRelease before relying on this ordering. */
    PacketEnqueue(&packets_to_release[p->ntpv.stream_id], p);
}
/**
* \ brief Returns the NUMA node associated with the currently running thread .
*
* \ return ID of the NUMA node .
*
*/
static int GetNumaNode ( void )
{
int cpu = 0 ;
@ -220,6 +681,12 @@ static int GetNumaNode(void)
return node ;
}
/**
* \ brief Outputs hints on the optimal host - buffer configuration to aid tuning .
*
* \ param log_level of the currently running instance .
*
*/
static void RecommendNUMAConfig ( SCLogLevel log_level )
{
char string0 [ 16 ] ;
@ -236,17 +703,20 @@ static void RecommendNUMAConfig(SCLogLevel log_level)
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" Minimum host buffers that should be defined in ntservice.ini: " ) ;
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 0: %d " , ( SC_ATOMIC_GET ( numa0_count ) ) ) ;
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ , " NUMA Node 0: %d " ,
( SC_ATOMIC_GET ( numa0_count ) ) ) ;
if ( numa_max_node ( ) > = 1 ) SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 1: %d " , ( SC_ATOMIC_GET ( numa1_count ) ) ) ;
if ( numa_max_node ( ) > = 1 )
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 1: %d " , ( SC_ATOMIC_GET ( numa1_count ) ) ) ;
if ( numa_max_node ( ) > = 2 ) SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 2: %d " , ( SC_ATOMIC_GET ( numa2_count ) ) ) ;
if ( numa_max_node ( ) > = 2 )
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 2: %d " , ( SC_ATOMIC_GET ( numa2_count ) ) ) ;
if ( numa_max_node ( ) > = 3 ) SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 3: %d " , ( SC_ATOMIC_GET ( numa3_count ) ) ) ;
if ( numa_max_node ( ) > = 3 )
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" NUMA Node 3: %d " , ( SC_ATOMIC_GET ( numa3_count ) ) ) ;
snprintf ( string0 , 16 , " [%d, 16, 0] " , SC_ATOMIC_GET ( numa0_count ) ) ;
snprintf ( string1 , 16 , ( numa_max_node ( ) > = 1 ? " ,[%d, 16, 1] " : " " ) ,
@ -257,14 +727,23 @@ static void RecommendNUMAConfig(SCLogLevel log_level)
SC_ATOMIC_GET ( numa3_count ) ) ;
SCLog ( log_level , __FILE__ , __FUNCTION__ , __LINE__ ,
" E.g.: HostBuffersRx=%s%s%s%s " , string0 , string1 , string2 , string3 ) ;
" E.g.: HostBuffersRx=%s%s%s%s " , string0 , string1 , string2 ,
string3 ) ;
} else if ( log_level = = SC_LOG_ERROR ) {
SCLogError ( SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED ,
" Or, try running /opt/napatech3/bin/ntpl -e \" delete=all \" to clean-up stream NUMA config. " ) ;
" Or, try running /opt/napatech3/bin/ntpl -e \" delete=all \" to clean-up stream NUMA config. " ) ;
}
}
TmEcode NapatechPacketLoopZC ( ThreadVars * tv , void * data , void * slot )
/**
* \ brief Main Napatechpacket processing loop
*
* \ param tv Thread variable to ThreadVars
* \ param data Pointer to NapatechThreadVars with data specific to Napatech
* \ param slot TMSlot where this instance is running .
*
*/
TmEcode NapatechPacketLoop ( ThreadVars * tv , void * data , void * slot )
{
int32_t status ;
char error_buffer [ 100 ] ;
@ -274,27 +753,39 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
uint64_t hba_pkt_drops = 0 ;
uint64_t hba_byte_drops = 0 ;
uint16_t hba_pkt = 0 ;
uint32_t filter_id = 0 ;
uint32_t hash_id = 0 ;
uint32_t numa_node = 0 ;
int numa_node = - 1 ;
int set_cpu_affinity = 0 ;
int closer = 0 ;
int is_inline = 0 ;
int is_autoconfig = 0 ;
/* This just keeps the startup output more orderly. */
usleep ( 200000 * ntv - > stream_id ) ;
if ( NapatechIsAutoConfigEnabled ( ) ) {
if ( ConfGetBool ( " napatech.inline " , & is_inline ) = = 0 ) {
is_inline = 0 ;
}
if ( ConfGetBool ( " napatech.auto-config " , & is_autoconfig ) = = 0 ) {
is_autoconfig = 0 ;
}
if ( is_autoconfig ) {
numa_node = GetNumaNode ( ) ;
switch ( numa_node ) {
case 0 : SC_ATOMIC_ADD ( numa0_count , 1 ) ;
break ;
case 1 : SC_ATOMIC_ADD ( numa1_count , 1 ) ;
break ;
case 2 : SC_ATOMIC_ADD ( numa2_count , 1 ) ;
break ;
case 3 : SC_ATOMIC_ADD ( numa3_count , 1 ) ;
break ;
default : break ;
case 0 :
SC_ATOMIC_ADD ( numa0_count , 1 ) ;
break ;
case 1 :
SC_ATOMIC_ADD ( numa1_count , 1 ) ;
break ;
case 2 :
SC_ATOMIC_ADD ( numa2_count , 1 ) ;
break ;
case 3 :
SC_ATOMIC_ADD ( numa3_count , 1 ) ;
break ;
default :
break ;
}
if ( ConfGetBool ( " threading.set-cpu-affinity " , & set_cpu_affinity ) ! = 1 ) {
@ -304,37 +795,41 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
if ( set_cpu_affinity ) {
NapatechSetupNuma ( ntv - > stream_id , numa_node ) ;
}
}
if ( NapatechIsAutoConfigEnabled ( ) ) {
numa_node = GetNumaNode ( ) ;
SC_ATOMIC_ADD ( stream_count , 1 ) ;
if ( SC_ATOMIC_GET ( stream_count ) = = NapatechGetNumConfiguredStreams ( ) ) {
/* The last thread to run sets up the streams */
status = NapatechSetupTraffic ( NapatechGetNumFirstStream ( ) ,
NapatechGetNumLastStream ( ) ,
& filter_id , & hash_id ) ;
if ( filter_id = = 0 ) {
# ifdef NAPATECH_ENABLE_BYPASS
/* Initialize the port map before we setup traffic filters */
for ( int i = 0 ; i < MAX_PORTS ; + + i ) {
inline_port_map [ i ] = - 1 ;
}
# endif
/* The last thread to run sets up and deletes the streams */
status = NapatechSetupTraffic ( NapatechGetNumFirstStream ( ) ,
NapatechGetNumLastStream ( ) ) ;
if ( status = = 0x20002061 ) {
SCLogError ( SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED ,
" Check host buffer configuration in ntservice.ini. " ) ;
RecommendNUMAConfig ( SC_LOG_ERROR ) ;
closer = 1 ;
} else if ( filter_id = = 0x20000008 ) {
SCLogError ( SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED ,
" Check napatech.ports in the suricata config file. " ) ;
}
if ( status = = 0x20002061 ) {
SCLogError ( SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED ,
" Check host buffer configuration in ntservice.ini. " ) ;
RecommendNUMAConfig ( SC_LOG_ERROR ) ;
exit ( EXIT_FAILURE ) ;
} else if ( status = = 0x20000008 ) {
SCLogError ( SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED ,
" Check napatech.ports in the suricata config file. " ) ;
exit ( EXIT_FAILURE ) ;
}
RecommendNUMAConfig( SC_LOG_INFO ) ;
RecommendNUMAConfig ( SC_LOG_PERF ) ;
SCLogNotice( " Napatech packet input engine started. " ) ;
}
}
} // is_autoconfig
SCLogInfo ( " Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u " ,
SCLogInfo (
" Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u " ,
sched_getcpu ( ) , numa_node , ntv - > stream_id ) ;
if ( ntv - > hba > 0 ) {
@ -352,13 +847,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
SCLogDebug ( " Opening NAPATECH Stream: %lu for processing " , ntv - > stream_id ) ;
if ( ( status = NT_NetRxOpen ( & ( ntv - > rx_stream ) , " SuricataStream " ,
NT_NET_INTERFACE_PACKET , ntv - > stream_id , ntv - > hba ) ) ! = NT_SUCCESS ) {
NT_NET_INTERFACE_PACKET , ntv - > stream_id , ntv - > hba ) ) ! = NT_SUCCESS ) {
NAPATECH_ERROR ( SC_ERR_NAPATECH_OPEN_FAILED , status ) ;
SCFree ( ntv ) ;
SCReturnInt ( TM_ECODE_FAILED ) ;
}
TmSlot * s = ( TmSlot * ) slot ;
ntv - > slot = s - > slot_next ;
@ -369,16 +863,21 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Napatech returns packets 1 at a time */
status = NT_NetRxGet ( ntv - > rx_stream , & packet_buffer , 1000 ) ;
if ( unlikely ( status = = NT_STATUS_TIMEOUT | | status = = NT_STATUS_TRYAGAIN ) ) {
if ( unlikely (
status = = NT_STATUS_TIMEOUT | | status = = NT_STATUS_TRYAGAIN ) ) {
continue ;
} else if ( unlikely ( status ! = NT_SUCCESS ) ) {
NAPATECH_ERROR ( SC_ERR_NAPATECH_OPEN_FAILED , status ) ;
SCLogInfo ( " Failed to read from Napatech Stream %d: %s" ,
SCLogInfo ( " Failed to read from Napatech Stream %d: %s" ,
ntv - > stream_id , error_buffer ) ;
SCReturnInt ( TM_ECODE_FAILED ) ;
break ;
}
Packet * p = PacketGetFromQueueOrAlloc ( ) ;
# ifdef NAPATECH_ENABLE_BYPASS
p - > ntpv . bypass = 0 ;
# endif
if ( unlikely ( p = = NULL ) ) {
NT_NetRxRelease ( ntv - > rx_stream , packet_buffer ) ;
SCReturnInt ( TM_ECODE_FAILED ) ;
@ -402,14 +901,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
break ;
case NT_TIMESTAMP_TYPE_PCAP_NANOTIME :
p - > ts . tv_sec = pkt_ts > > 32 ;
p - > ts . tv_usec = ( ( pkt_ts & 0xFFFFFFFF ) / 1000 )
+ ( pkt_ts % 1000 ) > 500 ? 1 : 0 ;
p - > ts . tv_usec = ( ( pkt_ts & 0xFFFFFFFF ) / 1000 ) + ( pkt_ts % 1000 ) > 500 ? 1 : 0 ;
break ;
case NT_TIMESTAMP_TYPE_NATIVE_NDIS :
/* number of seconds between 1/1/1601 and 1/1/1970 */
p - > ts . tv_sec = ( pkt_ts / 100000000 ) - 11644473600 ;
p - > ts . tv_usec = ( ( pkt_ts % 100000000 ) / 100 )
+ ( pkt_ts % 100 ) > 50 ? 1 : 0 ;
p - > ts . tv_usec = ( ( pkt_ts % 100000000 ) / 100 ) + ( pkt_ts % 100 ) > 50 ? 1 : 0 ;
break ;
default :
SCLogError ( SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED ,
@ -425,8 +922,8 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Update drop counter */
if ( unlikely ( ( status = NT_NetRxRead ( ntv - > rx_stream , & stat_cmd ) ) ! = NT_SUCCESS ) ) {
NAPATECH_ERROR ( SC_ERR_NAPATECH_OPEN_FAILED , status ) ;
SCLogInfo ( " Couldn't retrieve drop statistics from the RX stream: %u - %s " ,
ntv - > stream_id , error_buffer );
SCLogInfo ( " Couldn't retrieve drop statistics from the RX stream: %u " ,
ntv - > stream_id );
} else {
hba_pkt_drops = stat_cmd . u . streamDrop . pktsDropped ;
@ -435,15 +932,17 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
StatsSyncCountersIfSignalled ( tv ) ;
}
# ifdef NAPATECH_ENABLE_BYPASS
p - > ntpv . dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3 ( packet_buffer ) ;
p - > BypassPacketsFlow = ( NapatechIsBypassSupported ( ) ? NapatechBypassCallback : NULL ) ;
NT_NET_SET_PKT_TXPORT ( packet_buffer , inline_port_map [ p - > ntpv . dyn3 - > rxPort ] ) ;
# endif
p - > ReleasePacket = NapatechReleasePacket ;
p - > ntpv . nt_packet_buf = packet_buffer ;
p - > ntpv . stream_id = ntv - > stream_id ;
p - > datalink = LINKTYPE_ETHERNET ;
if ( unlikely ( PacketSetData ( p ,
( uint8_t * ) NT_NET_GET_PKT_L2_PTR ( packet_buffer ) ,
NT_NET_GET_PKT_WIRE_LENGTH ( packet_buffer ) ) ) ) {
if ( unlikely ( PacketSetData ( p , ( uint8_t * ) NT_NET_GET_PKT_L2_PTR ( packet_buffer ) , NT_NET_GET_PKT_WIRE_LENGTH ( packet_buffer ) ) ) ) {
TmqhOutputPacketpool ( ntv - > tv , p ) ;
NT_NetRxRelease ( ntv - > rx_stream , packet_buffer ) ;
SCReturnInt ( TM_ECODE_FAILED ) ;
@ -457,23 +956,31 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Release any packets that were returned by the callback function */
Packet * rel_pkt = PacketDequeue ( & packets_to_release [ ntv - > stream_id ] ) ;
while ( rel_pkt ! = NULL ) {
# ifdef NAPATECH_ENABLE_BYPASS
if ( rel_pkt - > ntpv . bypass = = 1 ) {
if ( PACKET_TEST_ACTION ( p , ACTION_DROP ) ) {
if ( is_inline ) {
rel_pkt - > ntpv . dyn3 - > wireLength = 0 ;
}
}
ProgramFlow ( rel_pkt , is_inline ) ;
}
# endif
NT_NetRxRelease ( ntv - > rx_stream , rel_pkt - > ntpv . nt_packet_buf ) ;
rel_pkt = PacketDequeue ( & packets_to_release [ ntv - > stream_id ] ) ;
}
StatsSyncCountersIfSignalled ( tv ) ;
}
if ( filter_id ) {
NapatechDeleteFilter ( filter_id ) ;
}
} // while
if ( hash_id ) {
NapatechDeleteFilter ( hash_id ) ;
if ( closer ) {
# ifdef NAPATECH_ENABLE_BYPASS
NapatechCloseFlowStreams ( ) ;
# endif
NapatechDeleteFilters ( ) ;
}
if ( unlikely ( ntv - > hba > 0 ) ) {
SCLogInfo ( " Host Buffer Allowance Drops - pkts: %ld, bytes: %ld " ,
hba_pkt_drops , hba_byte_drops ) ;
SCLogInfo ( " Host Buffer Allowance Drops - pkts: %ld, bytes: %ld " , hba_pkt_drops , hba_byte_drops ) ;
}
SCReturnInt ( TM_ECODE_OK ) ;
@ -491,26 +998,34 @@ void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
NapatechCurrentStats stat = NapatechGetCurrentStats ( ntv - > stream_id ) ;
double percent = 0 ;
if ( stat . current_drop s > 0 )
percent = ( ( ( double ) stat . current_drop s)
/ ( stat . current_packets + stat . current_drop s) ) * 100 ;
if ( stat . current_drop _packet s > 0 )
percent = ( ( ( double ) stat . current_drop _packet s)
/ ( stat . current_packets + stat . current_drop _packet s) ) * 100 ;
SCLogInfo ( " nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu " ,
( uint64_t ) ntv - > stream_id , stat . current_packets ,
stat . current_drop s, percent , stat . current_bytes ) ;
( uint64_t ) ntv - > stream_id , stat . current_packets ,
stat . current_drop _packet s, percent , stat . current_bytes ) ;
SC_ATOMIC_ADD ( total_packets , stat . current_packets ) ;
SC_ATOMIC_ADD ( total_drops , stat . current_drop s) ;
SC_ATOMIC_ADD ( total_drops , stat . current_drop _packet s) ;
SC_ATOMIC_ADD ( total_tallied , 1 ) ;
if ( SC_ATOMIC_GET ( total_tallied ) = = NapatechGetNumConfiguredStreams ( ) ) {
if ( SC_ATOMIC_GET ( total_drops ) > 0 )
percent = ( ( ( double ) SC_ATOMIC_GET ( total_drops ) ) / ( SC_ATOMIC_GET ( total_packets )
+ SC_ATOMIC_GET ( total_drops ) ) ) * 100 ;
+ SC_ATOMIC_GET ( total_drops ) ) ) * 100 ;
SCLogInfo ( " " ) ;
SCLogInfo ( " --- Total Packets: %ld Total Dropped: %ld (%5.2f%%) " ,
SC_ATOMIC_GET ( total_packets ) , SC_ATOMIC_GET ( total_drops ) , percent ) ;
# ifdef NAPATECH_ENABLE_BYPASS
SCLogInfo ( " --- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld " ,
SC_ATOMIC_GET ( flow_callback_cnt ) ,
SC_ATOMIC_GET ( flow_callback_udp_pkts ) ,
SC_ATOMIC_GET ( flow_callback_tcp_pkts ) ,
SC_ATOMIC_GET ( flow_callback_unhandled_pkts ) ) ;
# endif
}
}
@ -523,13 +1038,13 @@ TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
{
SCEnter ( ) ;
NapatechThreadVars * ntv = ( NapatechThreadVars * ) data ;
SCLogDebug ( " Closing Napatech Stream: %d " , ntv - > stream_id ) ;
NT_NetRxClose ( ntv - > rx_stream ) ;
SCReturnInt ( TM_ECODE_OK ) ;
}
/** Decode Napatech */
/**
* \ brief This function passes off to link type decoders .
*
@ -557,7 +1072,7 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
break ;
default :
SCLogError ( SC_ERR_DATALINK_UNIMPLEMENTED ,
" Error: d atalink type %" PRId32 " not yet supported in module NapatechDecode " ,
" D atalink type %" PRId32 " not yet supported in module NapatechDecode " ,
p - > datalink ) ;
break ;
}
@ -566,23 +1081,38 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
SCReturnInt ( TM_ECODE_OK ) ;
}
/**
 * \brief Initialization of the Napatech decode thread.
 *
 * Allocates the per-thread decoder state and registers its perf
 * counters.
 *
 * \param tv pointer to ThreadVars
 * \param initdata unused
 * \param data out-pointer that receives the allocated DecodeThreadVars
 * \return TM_ECODE_OK on success, TM_ECODE_FAILED on allocation failure.
 */
TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
{
    SCEnter();

    DecodeThreadVars *decode_vars = DecodeThreadVarsAlloc(tv);
    if (decode_vars == NULL) {
        SCReturnInt(TM_ECODE_FAILED);
    }

    DecodeRegisterPerfCounters(decode_vars, tv);
    *data = (void *)decode_vars;
    SCReturnInt(TM_ECODE_OK);
}
/**
 * \brief Deinitialization of the Napatech decode thread.
 *
 * \param tv pointer to ThreadVars
 * \param data the DecodeThreadVars allocated by NapatechDecodeThreadInit
 * \return TM_ECODE_OK always.
 */
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
{
    /* Nothing to release if init never allocated decode state. */
    if (data == NULL) {
        SCReturnInt(TM_ECODE_OK);
    }

    DecodeThreadVarsFree(tv, data);
    SCReturnInt(TM_ECODE_OK);
}