napatech: add hardware based bypass support

Napatech hardware bypass support enables Suricata to use the flow-matching
capabilities of Napatech SmartNICs to selectively bypass traffic on a
per-flow basis.
pull/4566/head
Phil Young 6 years ago committed by Victor Julien
parent 3fbcacf9a8
commit 1c99536945

@ -1932,11 +1932,15 @@
fi
fi
# Napatech - Using the 3GD API
AC_ARG_ENABLE(napatech,
AS_HELP_STRING([--enable-napatech],[Enabled Napatech Devices]),
[ enable_napatech=$enableval ],
[ enable_napatech=no])
AC_ARG_ENABLE(napatech_bypass,
AS_HELP_STRING([--disable-napatech-bypass],[Disable Bypass feature on Napatech cards]),
[ napatech_bypass=$enableval ],
[ napatech_bypass=yes])
AC_ARG_WITH(napatech_includes,
[ --with-napatech-includes=DIR napatech include directory],
[with_napatech_includes="$withval"],[with_napatech_includes="/opt/napatech3/include"])
@ -1974,6 +1978,17 @@
fi
AC_DEFINE([HAVE_NAPATECH],[1],(Napatech capture card support))
if test "$napatech_bypass" = "yes"; then
AC_CHECK_LIB(ntapi, NT_FlowOpenAttrInit,NTFLOW="yes",NTFLOW="no")
if test "$NTFLOW" = "yes"; then
echo " Napatech Flow Processing is Enabled (--disable-napatech-bypass if not needed)"
AC_DEFINE([NAPATECH_ENABLE_BYPASS],[1],(Napatech flowdirector support))
else
echo "Napatech Flow Processing is not available"
fi
else
echo "Napatech Flow Processing is Disabled."
fi
fi
# liblua
@ -2535,7 +2550,7 @@ fi
if test "${enable_ebpf}" = "yes" || test "${enable_unittests}" = "yes"; then
AC_DEFINE([CAPTURE_OFFLOAD_MANAGER], [1],[Building flow bypass manager code])
fi
if test "${enable_ebpf}" = "yes" || test "${enable_nfqueue}" = "yes" || test "${enable_pfring}" = "yes" || test "${enable_unittests}" = "yes"; then
if test "${enable_ebpf}" = "yes" || test "${enable_nfqueue}" = "yes" || test "${enable_pfring}" = "yes" || test "${enable_napatech}" = "yes" || test "${enable_unittests}" = "yes"; then
AC_DEFINE([CAPTURE_OFFLOAD], [1],[Building flow capture bypass code])
fi

@ -40,6 +40,8 @@
#define NT_RUNMODE_AUTOFP 1
#define NT_RUNMODE_WORKERS 2
static const char *default_mode = "workers";
#ifdef HAVE_NAPATECH
#define MAX_STREAMS 256
@ -47,6 +49,7 @@ static uint16_t num_configured_streams = 0;
static uint16_t first_stream = 0xffff;
static uint16_t last_stream = 0xffff;
static int auto_config = 0;
static int use_hw_bypass = 0;
uint16_t NapatechGetNumConfiguredStreams(void)
{
@ -68,11 +71,16 @@ bool NapatechIsAutoConfigEnabled(void)
return (auto_config != 0);
}
bool NapatechUseHWBypass(void)
{
return (use_hw_bypass != 0);
}
#endif
const char *RunModeNapatechGetDefaultMode(void)
{
return "workers";
return default_mode;
}
void RunModeNapatechRegister(void)
@ -103,8 +111,25 @@ static int NapatechRegisterDeviceStreams(void)
SCLogInfo("napatech.auto-config not found in config file. Defaulting to disabled.");
}
if (ConfGetBool("napatech.hardware-bypass", &use_hw_bypass) == 0) {
SCLogInfo("napatech.hardware-bypass not found in config file. Defaulting to disabled.");
}
/* use_all_streams uses existing streams created prior to starting Suricata. auto_config
* automatically creates streams. Therefore, these two options are mutually exclusive.
*/
if (use_all_streams && auto_config) {
SCLogError(SC_ERR_RUNMODE, "auto-config cannot be used with use-all-streams.");
SCLogError(SC_ERR_RUNMODE, "napatech.auto-config cannot be used in configuration file at the same time as napatech.use-all-streams.");
exit(EXIT_FAILURE);
}
/* To use hardware bypass, the streams must be configured by Suricata so that
 * they are consistent with the rest of the configuration. Therefore, running
 * with auto-config disabled is not a valid option.
 */
if (use_hw_bypass && auto_config == 0) {
SCLogError(SC_ERR_RUNMODE, "napatech auto-config must be enabled when using napatech.use_hw_bypass.");
exit(EXIT_FAILURE);
}
/* Get the stream ID's either from the conf or by querying Napatech */
@ -129,7 +154,9 @@ static int NapatechRegisterDeviceStreams(void)
"Registering Napatech device: %s - active stream found.",
plive_dev_buf);
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Delete the stream or disable auto-config before running.");
"run /opt/napatech3/bin/ntpl -e \"delete=all\" to delete existing stream");
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"or disable auto-config in the conf file before running.");
exit(EXIT_FAILURE);
}
} else {
@ -218,6 +245,24 @@ static int NapatechInit(int runmode)
SCLogInfo("Host Buffer Allowance: %d", (int) conf->hba);
}
if (use_hw_bypass) {
#ifdef NAPATECH_ENABLE_BYPASS
if (NapatechInitFlowStreams()) {
SCLogInfo("Napatech Hardware Bypass is supported and enabled.");
} else {
SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
"Napatech Hardware Bypass requested in conf but is not supported by the hardware.");
exit(EXIT_FAILURE);
}
#else
SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
"Napatech Hardware Bypass requested in conf but is not enabled by the software.");
exit(EXIT_FAILURE);
#endif
} else {
SCLogInfo("Hardware Bypass is disabled in the conf file.");
}
/* Start a thread to process the statistics */
NapatechStartStats();

@ -29,9 +29,7 @@
#ifdef HAVE_NAPATECH
#include "util-napatech.h"
#include <nt.h>
#endif
#endif /* HAVE_NAPATECH */
int RunModeNapatechAutoFp(void);
int RunModeNapatechWorkers(void);
@ -41,9 +39,8 @@ const char *RunModeNapatechGetDefaultMode(void);
uint16_t NapatechGetNumConfiguredStreams(void);
uint16_t NapatechGetNumFirstStream(void);
uint16_t NapatechGetNumLastStream(void);
bool NapatechIsAutoConfigEnabled(void);
bool NapatechUseHWBypass(void);
#endif /* __RUNMODE_NAPATECH_H__ */

@ -18,14 +18,13 @@
/**
* \file
*
- * \author nPulse Technologies, LLC.
- * \author Matt Keeler <mk@npulsetech.com>
* *
* Support for NAPATECH adapter with the 3GD Driver/API.
* Requires libntapi from Napatech A/S.
*
*/
#include "suricata-common.h"
#include "suricata.h"
#include "threadvars.h"
@ -40,7 +39,7 @@
#ifndef HAVE_NAPATECH
TmEcode NoNapatechSupportExit(ThreadVars *, const void *, void **);
TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**);
void TmModuleNapatechStreamRegister(void)
{
@ -69,20 +68,21 @@ TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
{
SCLogError(SC_ERR_NAPATECH_NOSUPPORT,
"Error creating thread %s: you do not have support for Napatech adapter "
"enabled please recompile with --enable-napatech", tv->name);
"enabled please recompile with --enable-napatech",
tv->name);
exit(EXIT_FAILURE);
}
#else /* Implied we do have NAPATECH support */
#include <numa.h>
#include <nt.h>
#define MAX_STREAMS 256
extern int max_pending_packets;
typedef struct NapatechThreadVars_ {
typedef struct NapatechThreadVars_
{
ThreadVars *tv;
NtNetStreamRx_t rx_stream;
uint16_t stream_id;
@ -90,10 +90,13 @@ typedef struct NapatechThreadVars_ {
TmSlot *slot;
} NapatechThreadVars;
#ifdef NAPATECH_ENABLE_BYPASS
static int NapatechBypassCallback(Packet *p);
#endif
TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
void NapatechStreamThreadExitStats(ThreadVars *, void *);
TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot);
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);
TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data);
@ -115,6 +118,12 @@ SC_ATOMIC_DECLARE(uint16_t, numa1_count);
SC_ATOMIC_DECLARE(uint16_t, numa2_count);
SC_ATOMIC_DECLARE(uint16_t, numa3_count);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);
/**
* \brief Register the Napatech receiver (reader) module.
*/
@ -123,7 +132,7 @@ void TmModuleNapatechStreamRegister(void)
tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NapatechStreamThreadInit;
tmm_modules[TMM_RECEIVENAPATECH].Func = NULL;
tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoopZC;
tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoop;
tmm_modules[TMM_RECEIVENAPATECH].PktAcqBreakLoop = NULL;
tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NapatechStreamThreadExitStats;
tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NapatechStreamThreadDeinit;
@ -140,6 +149,12 @@ void TmModuleNapatechStreamRegister(void)
SC_ATOMIC_INIT(numa1_count);
SC_ATOMIC_INIT(numa2_count);
SC_ATOMIC_INIT(numa3_count);
SC_ATOMIC_INIT(flow_callback_cnt);
SC_ATOMIC_INIT(flow_callback_handled_pkts);
SC_ATOMIC_INIT(flow_callback_udp_pkts);
SC_ATOMIC_INIT(flow_callback_tcp_pkts);
SC_ATOMIC_INIT(flow_callback_unhandled_pkts);
}
/**
@ -157,6 +172,437 @@ void TmModuleNapatechDecodeRegister(void)
tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM;
}
#ifdef NAPATECH_ENABLE_BYPASS
/**
* \brief template of IPv4 header
*/
struct ipv4_hdr
{
uint8_t version_ihl; /**< version and header length */
uint8_t type_of_service; /**< type of service */
uint16_t total_length; /**< length of packet */
uint16_t packet_id; /**< packet ID */
uint16_t fragment_offset; /**< fragmentation offset */
uint8_t time_to_live; /**< time to live */
uint8_t next_proto_id; /**< protocol ID */
uint16_t hdr_checksum; /**< header checksum */
uint32_t src_addr; /**< source address */
uint32_t dst_addr; /**< destination address */
} __attribute__ ((__packed__));
/**
* \brief template of IPv6 header
*/
struct ipv6_hdr
{
uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
uint8_t proto; /**< Protocol, next header. */
uint8_t hop_limits; /**< Hop limits. */
uint8_t src_addr[16]; /**< IP address of source host. */
uint8_t dst_addr[16]; /**< IP address of destination host(s). */
} __attribute__ ((__packed__));
/**
* \brief template of UDP header
*/
struct udp_hdr
{
uint16_t src_port; /**< UDP source port. */
uint16_t dst_port; /**< UDP destination port. */
uint16_t dgram_len; /**< UDP datagram length */
uint16_t dgram_cksum; /**< UDP datagram checksum */
} __attribute__ ((__packed__));
/**
* \brief template of TCP header
*/
struct tcp_hdr
{
uint16_t src_port; /**< TCP source port. */
uint16_t dst_port; /**< TCP destination port. */
uint32_t sent_seq; /**< TX data sequence number. */
uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
uint8_t data_off; /**< Data offset. */
uint8_t tcp_flags; /**< TCP flags */
uint16_t rx_win; /**< RX flow control window. */
uint16_t cksum; /**< TCP checksum. */
uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
} __attribute__ ((__packed__));
/* The hardware will assign a "color" value indicating what filters are matched
* by a given packet. These constants indicate what bits are set in the color
* field for different protocols
*
*/
#define RTE_PTYPE_L2_ETHER 0x10000000
#define RTE_PTYPE_L3_IPV4 0x01000000
#define RTE_PTYPE_L3_IPV6 0x04000000
#define RTE_PTYPE_L4_TCP 0x00100000
#define RTE_PTYPE_L4_UDP 0x00200000
/* These masks are used to extract layer 3 and layer 4 protocol
* values from the color field in the packet descriptor.
*/
#define RTE_PTYPE_L3_MASK 0x0f000000
#define RTE_PTYPE_L4_MASK 0x00f00000
#define COLOR_IS_SPAN 0x00001000
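/* Editorial sketch (not part of the commit): assuming the RTE_PTYPE_* and
 * COLOR_IS_SPAN defines above are in scope, decoding the color value is a
 * matter of masking and comparing. ProgramFlow() below does exactly this
 * inline; the helper here only illustrates the bit layout.
 */
static inline int ColorIsBypassCandidate(uint32_t packet_type)
{
    uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
    uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;

    /* Only IPv4/IPv6 TCP and UDP flows are candidates for hardware bypass. */
    if (layer3 != RTE_PTYPE_L3_IPV4 && layer3 != RTE_PTYPE_L3_IPV6)
        return 0;
    if (layer4 != RTE_PTYPE_L4_TCP && layer4 != RTE_PTYPE_L4_UDP)
        return 0;
    return 1;
}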
static int inline_port_map[MAX_PORTS] = { -1 };
/**
* \brief Binds two ports together for inline operation.
*
* Records the two ports as peers so that, in inline mode, traffic received
* on one port of the pair is transmitted out the other.
*
* \param port one of the ports in a pairing.
* \param peer the other port in a pairing.
* \return 1 on success, 0 if the pairing has already been configured.
*
*/
int NapatechSetPortmap(int port, int peer)
{
if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
inline_port_map[port] = peer;
inline_port_map[peer] = port;
} else {
SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
"Port pairing is already configured.");
return 0;
}
return 1;
}
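/* Editorial sketch (not part of the commit): the port pairs come from the
 * new "ports: [0-1,2-3]" segment syntax in suricata.yaml further below; the
 * actual parsing lives in util-napatech.c, whose diff is suppressed on this
 * page. A hypothetical caller applying such a configuration might look like:
 */
static int SketchApplyPortSegments(void)
{
    if (NapatechSetPortmap(0, 1) == 0 || NapatechSetPortmap(2, 3) == 0) {
        return 0;   /* a port was already paired: configuration error */
    }
    /* Later, in the packet loop, the peer port becomes the TX port for
     * inline forwarding:
     *     NT_NET_SET_PKT_TXPORT(packet_buffer,
     *             inline_port_map[p->ntpv.dyn3->rxPort]);
     */
    return 1;
}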
/**
* \brief Returns the ID of the adapter
*
* Get the ID of an adapter on which a given port resides.
*
* \param port for which adapter ID is requested.
* \return ID of the adapter.
*
*/
int NapatechGetAdapter(uint8_t port)
{
static int port_adapter_map[MAX_PORTS] = { -1 };
int status;
NtInfo_t h_info; /* Info handle */
NtInfoStream_t h_info_stream; /* Info stream handle */
if (unlikely(port_adapter_map[port] == -1)) {
if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
return -1;
}
/* Read the system info */
h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
h_info.u.port_v9.portNo = (uint8_t) port;
if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
/* Get the status code as text */
NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
NT_InfoClose(h_info_stream);
return -1;
}
port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
}
return port_adapter_map[port];
}
/**
* \brief IPv4 4-tuple convenience structure
*/
struct IPv4Tuple4
{
uint32_t sa; /*!< Source address */
uint32_t da; /*!< Destination address */
uint16_t sp; /*!< Source port */
uint16_t dp; /*!< Destination port */
};
/**
* \brief IPv6 4-tuple convenience structure
*/
struct IPv6Tuple4
{
uint8_t sa[16]; /*!< Source address */
uint8_t da[16]; /*!< Destination address */
uint16_t sp; /*!< Source port */
uint16_t dp; /*!< Destination port */
};
/**
* \brief Compares the byte order value of two IPv6 addresses.
*
*
* \param addr_a The first address to compare
* \param addr_b The second address to compare
*
* \return -1 if addr_a < addr_b
* 1 if addr_a > addr_b
* 0 if addr_a == addr_b
*/
static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
uint16_t pos;
for (pos = 0; pos < 16; ++pos) {
if (addr_a[pos] < addr_b[pos]) {
return -1;
} else if (addr_a[pos] > addr_b[pos]) {
return 1;
} /* else they are equal - check next position*/
}
/* if we get here the addresses are equal */
return 0;
}
/**
* \brief Programs a flow match into the Napatech adapter.
*
* Sets up an entry in the flow table on the Napatech card so that
* subsequent packets from this flow are bypassed (or dropped) in hardware.
*
* \param p packet containing information about the flow to be bypassed
* \param is_inline indicates if Suricata is being run in inline mode.
*
* \return 1 on success, 0 on failure.
*
*/
static int ProgramFlow(Packet *p, int is_inline)
{
int status;
NtFlow_t flow_match;
memset(&flow_match, 0, sizeof(flow_match));
NapatechPacketVars *ntpv = &(p->ntpv);
int adapter = NapatechGetAdapter(ntpv->dyn3->rxPort);
NtFlowStream_t *phFlowStream = NapatechGetFlowStreamPtr(adapter);
/*
* The hardware decoder will "color" the packets according to the protocols
* in the packet and the port the packet arrived on. packet_type gets
* these bits and we mask out layer3, layer4, and is_span to determine
* the protocols and if the packet is coming in from a SPAN port.
*/
uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength;
uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
uint32_t is_span = packet_type & COLOR_IS_SPAN;
/*
* When we're programming the flows to arrive on a span port,
* where upstream and downstream packets arrive on the same port,
* the hardware is configured to swap the source and dest
* fields if the src addr > dest addr. We need to program the
* flow tables to match. We'll compare addresses and set
* do_swap accordingly.
*/
uint32_t do_swap = 0;
SC_ATOMIC_ADD(flow_callback_cnt, 1);
/* Only bypass TCP and UDP */
if (PKT_IS_TCP(p)) {
SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
} else if (PKT_IS_UDP(p)) {
SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
} else {
SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
}
struct IPv4Tuple4 v4Tuple;
struct IPv6Tuple4 v6Tuple;
struct ipv4_hdr *pIPv4_hdr = NULL;
struct ipv6_hdr *pIPv6_hdr = NULL;
switch (layer3) {
case RTE_PTYPE_L3_IPV4:
{
pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0);
if (!is_span) {
v4Tuple.sa = pIPv4_hdr->src_addr;
v4Tuple.da = pIPv4_hdr->dst_addr;
} else {
do_swap = (pIPv4_hdr->src_addr > pIPv4_hdr->dst_addr);
if (!do_swap) {
/* already in order */
v4Tuple.sa = pIPv4_hdr->src_addr;
v4Tuple.da = pIPv4_hdr->dst_addr;
} else { /* swap */
v4Tuple.sa = pIPv4_hdr->dst_addr;
v4Tuple.da = pIPv4_hdr->src_addr;
}
}
break;
}
case RTE_PTYPE_L3_IPV6:
{
pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0);
do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
if (!is_span) {
memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
} else {
/* sort src/dest address before programming */
if (!do_swap) {
/* already in order */
memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
} else { /* swap the addresses */
memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
}
}
break;
}
default:
{
return 0;
}
}
switch (layer4) {
case RTE_PTYPE_L4_TCP:
{
struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1);
if (layer3 == RTE_PTYPE_L3_IPV4) {
if (!is_span) {
v4Tuple.dp = tcp_hdr->dst_port;
v4Tuple.sp = tcp_hdr->src_port;
flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
} else {
if (!do_swap) {
v4Tuple.sp = tcp_hdr->src_port;
v4Tuple.dp = tcp_hdr->dst_port;
} else {
v4Tuple.sp = tcp_hdr->dst_port;
v4Tuple.dp = tcp_hdr->src_port;
}
flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
}
memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
} else {
if (!is_span) {
v6Tuple.dp = tcp_hdr->dst_port;
v6Tuple.sp = tcp_hdr->src_port;
flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
} else {
if (!do_swap) {
v6Tuple.sp = tcp_hdr->src_port;
v6Tuple.dp = tcp_hdr->dst_port;
} else {
v6Tuple.dp = tcp_hdr->src_port;
v6Tuple.sp = tcp_hdr->dst_port;
}
flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
}
memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
}
flow_match.ipProtocolField = 6;
break;
}
case RTE_PTYPE_L4_UDP:
{
struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1);
if (layer3 == RTE_PTYPE_L3_IPV4) {
if (!is_span) {
v4Tuple.dp = udp_hdr->dst_port;
v4Tuple.sp = udp_hdr->src_port;
flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
} else {
if (!do_swap) {
v4Tuple.sp = udp_hdr->src_port;
v4Tuple.dp = udp_hdr->dst_port;
} else {
v4Tuple.dp = udp_hdr->src_port;
v4Tuple.sp = udp_hdr->dst_port;
}
flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
}
memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
} else { /* layer3 is IPV6 */
if (!is_span) {
v6Tuple.dp = udp_hdr->dst_port;
v6Tuple.sp = udp_hdr->src_port;
flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
} else {
if (!do_swap) {
v6Tuple.sp = udp_hdr->src_port;
v6Tuple.dp = udp_hdr->dst_port;
} else {
v6Tuple.dp = udp_hdr->src_port;
v6Tuple.sp = udp_hdr->dst_port;
}
flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
}
memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
}
flow_match.ipProtocolField = 17;
break;
}
default:
{
return 0;
}
}
flow_match.op = 1; /* program flow */
flow_match.gfi = 1; /* Generate FlowInfo records */
flow_match.tau = 1; /* tcp automatic unlearn */
if (PACKET_TEST_ACTION(p, ACTION_DROP)) {
flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
} else {
if (is_inline) {
flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
} else {
flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
}
}
status = NT_FlowWrite(*phFlowStream, &flow_match, -1);
if (status == NT_STATUS_TIMEOUT) {
SCLogInfo("NT_FlowWrite returned NT_STATUS_TIMEOUT");
} else if (status != NT_SUCCESS) {
SCLogError(SC_ERR_NAPATECH_OPEN_FAILED, "NT_FlowWrite failed.");
exit(EXIT_FAILURE);
}
return 1;
}
/**
* \brief Callback from Suricata when a flow that should be bypassed
* is identified.
*/
static int NapatechBypassCallback(Packet *p)
{
NapatechPacketVars *ntpv = &(p->ntpv);
/*
* Since, at this point, we don't yet know what action will be taken,
* simply mark this packet as one whose flow should be bypassed once the
* packet is returned by Suricata with a pass/drop verdict.
*/
ntpv->bypass = 1;
return 1;
}
#endif
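/* Editorial sketch (not part of the commit): the bypass is deliberately
 * two-stage. NapatechBypassCallback() above only marks the packet because
 * the pass/drop verdict is not yet known when Suricata asks for a bypass;
 * ProgramFlow() is called later, from the release path in the packet loop,
 * once the verdict is available. A standalone simulation of that pattern,
 * using purely hypothetical names:
 */
struct sketch_pkt {
    int bypass;         /* set by the bypass callback                */
    int drop;           /* final verdict, known only at release time */
};

static int SketchBypassCb(struct sketch_pkt *p)
{
    p->bypass = 1;      /* defer: verdict not known yet */
    return 1;
}

static void SketchRelease(struct sketch_pkt *p)
{
    if (p->bypass) {
        /* here the real code calls ProgramFlow(), writing an NtFlow_t with
         * keySetId set to DROP or PASS depending on the verdict and on
         * whether Suricata runs inline */
    }
}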
/**
* \brief Initialize the Napatech receiver thread, generate a single
* NapatechThreadVar structure for each thread, this will
@ -198,12 +644,27 @@ TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **da
static PacketQueue packets_to_release[MAX_STREAMS];
/**
* \brief Callback to indicate that the packet buffer can be returned to the hardware.
*
* Called when Suricata is done processing the packet. The packet is placed into
* a queue so that it can be retrieved and released by the packet processing thread.
*
* \param p Packet to return to the system.
*
*/
static void NapatechReleasePacket(struct Packet_ *p)
{
PacketFreeOrRelease(p);
PacketEnqueue(&packets_to_release[p->ntpv.stream_id], p);
}
/**
* \brief Returns the NUMA node associated with the currently running thread.
*
* \return ID of the NUMA node.
*
*/
static int GetNumaNode(void)
{
int cpu = 0;
@ -220,6 +681,12 @@ static int GetNumaNode(void)
return node;
}
/**
* \brief Outputs hints on the optimal host-buffer configuration to aid tuning.
*
* \param log_level log level at which the recommendations are emitted.
*
*/
static void RecommendNUMAConfig(SCLogLevel log_level)
{
char string0[16];
@ -236,17 +703,20 @@ static void RecommendNUMAConfig(SCLogLevel log_level)
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
"Minimum host buffers that should be defined in ntservice.ini:");
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 0: %d", (SC_ATOMIC_GET(numa0_count)));
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, " NUMA Node 0: %d",
(SC_ATOMIC_GET(numa0_count)));
if (numa_max_node() >= 1) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));
if (numa_max_node() >= 1)
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));
if (numa_max_node() >= 2) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));
if (numa_max_node() >= 2)
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));
if (numa_max_node() >= 3) SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));
if (numa_max_node() >= 3)
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
" NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));
snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count));
snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""),
@ -257,14 +727,23 @@ static void RecommendNUMAConfig(SCLogLevel log_level)
SC_ATOMIC_GET(numa3_count));
SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
"E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2, string3);
"E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2,
string3);
} else if (log_level == SC_LOG_ERROR) {
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config.");
"Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config.");
}
}
TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/**
* \brief Main Napatech packet processing loop
*
* \param tv pointer to ThreadVars for this thread
* \param data pointer to the NapatechThreadVars for this thread
* \param slot pointer to the TmSlot where this instance is running.
*
*/
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
{
int32_t status;
char error_buffer[100];
@ -274,27 +753,39 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
uint64_t hba_pkt_drops = 0;
uint64_t hba_byte_drops = 0;
uint16_t hba_pkt = 0;
uint32_t filter_id = 0;
uint32_t hash_id = 0;
uint32_t numa_node = 0;
int numa_node = -1;
int set_cpu_affinity = 0;
int closer = 0;
int is_inline = 0;
int is_autoconfig = 0;
/* This just keeps the startup output more orderly. */
usleep(200000 * ntv->stream_id);
if (NapatechIsAutoConfigEnabled()) {
if (ConfGetBool("napatech.inline", &is_inline) == 0) {
is_inline = 0;
}
if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
is_autoconfig = 0;
}
if (is_autoconfig) {
numa_node = GetNumaNode();
switch (numa_node) {
case 0: SC_ATOMIC_ADD(numa0_count, 1);
break;
case 1: SC_ATOMIC_ADD(numa1_count, 1);
break;
case 2: SC_ATOMIC_ADD(numa2_count, 1);
break;
case 3: SC_ATOMIC_ADD(numa3_count, 1);
break;
default: break;
case 0:
SC_ATOMIC_ADD(numa0_count, 1);
break;
case 1:
SC_ATOMIC_ADD(numa1_count, 1);
break;
case 2:
SC_ATOMIC_ADD(numa2_count, 1);
break;
case 3:
SC_ATOMIC_ADD(numa3_count, 1);
break;
default:
break;
}
if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
@ -304,37 +795,41 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
if (set_cpu_affinity) {
NapatechSetupNuma(ntv->stream_id, numa_node);
}
}
if (NapatechIsAutoConfigEnabled()) {
numa_node = GetNumaNode();
SC_ATOMIC_ADD(stream_count, 1);
if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) {
/* The last thread to run sets up the streams */
status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
NapatechGetNumLastStream(),
&filter_id, &hash_id);
if (filter_id == 0) {
#ifdef NAPATECH_ENABLE_BYPASS
/* Initialize the port map before we setup traffic filters */
for (int i = 0; i < MAX_PORTS; ++i) {
inline_port_map[i] = -1;
}
#endif
/* The last thread to run sets up and deletes the streams */
status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
NapatechGetNumLastStream());
if (status == 0x20002061) {
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Check host buffer configuration in ntservice.ini.");
RecommendNUMAConfig(SC_LOG_ERROR);
closer = 1;
} else if (filter_id == 0x20000008) {
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Check napatech.ports in the suricata config file.");
}
if (status == 0x20002061) {
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Check host buffer configuration in ntservice.ini.");
RecommendNUMAConfig(SC_LOG_ERROR);
exit(EXIT_FAILURE);
} else if (status == 0x20000008) {
SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
"Check napatech.ports in the suricata config file.");
exit(EXIT_FAILURE);
}
RecommendNUMAConfig(SC_LOG_INFO);
RecommendNUMAConfig(SC_LOG_PERF);
SCLogNotice("Napatech packet input engine started.");
}
}
} // is_autoconfig
SCLogInfo("Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ",
SCLogInfo(
"Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ",
sched_getcpu(), numa_node, ntv->stream_id);
if (ntv->hba > 0) {
@ -352,13 +847,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
SCLogDebug("Opening NAPATECH Stream: %lu for processing", ntv->stream_id);
if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
SCFree(ntv);
SCReturnInt(TM_ECODE_FAILED);
}
TmSlot *s = (TmSlot *) slot;
ntv->slot = s->slot_next;
@ -369,16 +863,21 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Napatech returns packets 1 at a time */
status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
if (unlikely(status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
if (unlikely(
status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
continue;
} else if (unlikely(status != NT_SUCCESS)) {
NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
SCLogInfo("Failed to read from Napatech Stream%d: %s",
SCLogInfo("Failed to read from Napatech Stream %d: %s",
ntv->stream_id, error_buffer);
SCReturnInt(TM_ECODE_FAILED);
break;
}
Packet *p = PacketGetFromQueueOrAlloc();
#ifdef NAPATECH_ENABLE_BYPASS
p->ntpv.bypass = 0;
#endif
if (unlikely(p == NULL)) {
NT_NetRxRelease(ntv->rx_stream, packet_buffer);
SCReturnInt(TM_ECODE_FAILED);
@ -402,14 +901,12 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
break;
case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
p->ts.tv_sec = pkt_ts >> 32;
p->ts.tv_usec = ( (pkt_ts & 0xFFFFFFFF) / 1000)
+ (pkt_ts % 1000) > 500 ? 1 : 0;
p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) + (pkt_ts % 1000) > 500 ? 1 : 0;
break;
case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
/* number of seconds between 1/1/1601 and 1/1/1970 */
p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600;
p->ts.tv_usec = ( (pkt_ts % 100000000) / 100)
+ (pkt_ts % 100) > 50 ? 1 : 0;
p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + (pkt_ts % 100) > 50 ? 1 : 0;
break;
default:
SCLogError(SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED,
@ -425,8 +922,8 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Update drop counter */
if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u - %s",
ntv->stream_id, error_buffer);
SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
ntv->stream_id);
} else {
hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;
@ -435,15 +932,17 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
StatsSyncCountersIfSignalled(tv);
}
#ifdef NAPATECH_ENABLE_BYPASS
p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
#endif
p->ReleasePacket = NapatechReleasePacket;
p->ntpv.nt_packet_buf = packet_buffer;
p->ntpv.stream_id = ntv->stream_id;
p->datalink = LINKTYPE_ETHERNET;
if (unlikely(PacketSetData(p,
(uint8_t *) NT_NET_GET_PKT_L2_PTR(packet_buffer),
NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
TmqhOutputPacketpool(ntv->tv, p);
NT_NetRxRelease(ntv->rx_stream, packet_buffer);
SCReturnInt(TM_ECODE_FAILED);
@ -457,23 +956,31 @@ TmEcode NapatechPacketLoopZC(ThreadVars *tv, void *data, void *slot)
/* Release any packets that were returned by the callback function */
Packet *rel_pkt = PacketDequeue(&packets_to_release[ntv->stream_id]);
while (rel_pkt != NULL) {
#ifdef NAPATECH_ENABLE_BYPASS
if (rel_pkt->ntpv.bypass == 1) {
if (PACKET_TEST_ACTION(rel_pkt, ACTION_DROP)) {
if (is_inline) {
rel_pkt->ntpv.dyn3->wireLength = 0;
}
}
ProgramFlow(rel_pkt, is_inline);
}
#endif
NT_NetRxRelease(ntv->rx_stream, rel_pkt->ntpv.nt_packet_buf);
rel_pkt = PacketDequeue(&packets_to_release[ntv->stream_id]);
}
StatsSyncCountersIfSignalled(tv);
}
if (filter_id) {
NapatechDeleteFilter(filter_id);
}
} // while
if (hash_id) {
NapatechDeleteFilter(hash_id);
if (closer) {
#ifdef NAPATECH_ENABLE_BYPASS
NapatechCloseFlowStreams();
#endif
NapatechDeleteFilters();
}
if (unlikely(ntv->hba > 0)) {
SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld",
hba_pkt_drops, hba_byte_drops);
SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops);
}
SCReturnInt(TM_ECODE_OK);
@ -491,26 +998,34 @@ void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id);
double percent = 0;
if (stat.current_drops > 0)
percent = (((double) stat.current_drops)
/ (stat.current_packets + stat.current_drops)) * 100;
if (stat.current_drop_packets > 0)
percent = (((double) stat.current_drop_packets)
/ (stat.current_packets + stat.current_drop_packets)) * 100;
SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
(uint64_t) ntv->stream_id, stat.current_packets,
stat.current_drops, percent, stat.current_bytes);
(uint64_t) ntv->stream_id, stat.current_packets,
stat.current_drop_packets, percent, stat.current_bytes);
SC_ATOMIC_ADD(total_packets, stat.current_packets);
SC_ATOMIC_ADD(total_drops, stat.current_drops);
SC_ATOMIC_ADD(total_drops, stat.current_drop_packets);
SC_ATOMIC_ADD(total_tallied, 1);
if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) {
if (SC_ATOMIC_GET(total_drops) > 0)
percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets)
+ SC_ATOMIC_GET(total_drops))) * 100;
+ SC_ATOMIC_GET(total_drops))) * 100;
SCLogInfo(" ");
SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)",
SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent);
#ifdef NAPATECH_ENABLE_BYPASS
SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld",
SC_ATOMIC_GET(flow_callback_cnt),
SC_ATOMIC_GET(flow_callback_udp_pkts),
SC_ATOMIC_GET(flow_callback_tcp_pkts),
SC_ATOMIC_GET(flow_callback_unhandled_pkts));
#endif
}
}
@ -523,13 +1038,13 @@ TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
{
SCEnter();
NapatechThreadVars *ntv = (NapatechThreadVars *) data;
SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id);
NT_NetRxClose(ntv->rx_stream);
SCReturnInt(TM_ECODE_OK);
}
/** Decode Napatech */
/**
* \brief This function passes off to link type decoders.
*
@ -557,7 +1072,7 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
break;
default:
SCLogError(SC_ERR_DATALINK_UNIMPLEMENTED,
"Error: datalink type %" PRId32 " not yet supported in module NapatechDecode",
"Datalink type %" PRId32 " not yet supported in module NapatechDecode",
p->datalink);
break;
}
@ -566,23 +1081,38 @@ TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
SCReturnInt(TM_ECODE_OK);
}
/**
* \brief Initialization of the Napatech decoder thread.
*
* \param tv pointer to ThreadVars
* \param initdata unused.
* \param data pointer that gets cast into DecodeThreadVars
*/
TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
{
SCEnter();
DecodeThreadVars *dtv = NULL;
dtv = DecodeThreadVarsAlloc(tv);
if (dtv == NULL)
if (dtv == NULL) {
SCReturnInt(TM_ECODE_FAILED);
}
DecodeRegisterPerfCounters(dtv, tv);
*data = (void *) dtv;
SCReturnInt(TM_ECODE_OK);
}
/**
* \brief Deinitialization of the Napatech decoder thread.
*
* \param tv pointer to ThreadVars
* \param data pointer that gets cast into DecodeThreadVars
*/
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
{
if (data != NULL)
if (data != NULL) {
DecodeThreadVarsFree(tv, data);
}
SCReturnInt(TM_ECODE_OK);
}

@ -31,10 +31,14 @@ void TmModuleNapatechDecodeRegister(void);
#ifdef HAVE_NAPATECH
#include <nt.h>
struct NapatechStreamDevConf {
struct NapatechStreamDevConf
{
uint16_t stream_id;
intmax_t hba;
};
int NapatechSetPortmap(int port, int peer);
int NapatechGetAdapter(uint8_t port);
#endif /* HAVE_NAPATECH */
#endif /* __SOURCE_NAPATECH_H__ */

(File diff suppressed because it is too large.)

@ -14,43 +14,52 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/**
* \file
*
* \author Phil Young <py@napatech.com>
*
*/
#ifndef __UTIL_NAPATECH_H__
#define __UTIL_NAPATECH_H__
#ifdef HAVE_NAPATECH
#include <nt.h>
typedef struct NapatechPacketVars_ {
typedef struct NapatechPacketVars_
{
uint64_t stream_id;
NtNetBuf_t nt_packet_buf;
ThreadVars *tv;
#ifdef NAPATECH_ENABLE_BYPASS
NtDyn3Descr_t *dyn3;
int bypass;
#endif
} NapatechPacketVars;
typedef struct NapatechStreamConfig_ {
typedef struct NapatechStreamConfig_
{
uint16_t stream_id;
bool is_active;
bool initialized;
} NapatechStreamConfig;
typedef struct NapatechCurrentStats_ {
typedef struct NapatechCurrentStats_
{
uint64_t current_packets;
uint64_t current_drops;
uint64_t current_bytes;
uint64_t current_drop_packets;
uint64_t current_drop_bytes;
} NapatechCurrentStats;
#define MAX_HOSTBUFFER 4
#define MAX_STREAMS 256
#define MAX_PORTS 80
#define MAX_ADAPTERS 8
#define HB_HIGHWATER 2048 //1982
extern void NapatechStartStats(void);
#define NAPATECH_ERROR(err_type, status) { \
char errorBuffer[1024]; \
NT_ExplainError((status), errorBuffer, sizeof (errorBuffer) - 1); \
@ -75,11 +84,39 @@ extern void NapatechStartStats(void);
" %s", ntpl_info.u.errorData.errBuffer[2]); \
}
// #define ENABLE_NT_DEBUG
#ifdef ENABLE_NT_DEBUG
void NapatechPrintIP(uint32_t address);
#define NAPATECH_DEBUG(...) printf(__VA_ARGS__)
#define NAPATECH_PRINTIP(a) NapatechPrintIP(a)
#else
#define NAPATECH_DEBUG(...)
#define NAPATECH_PRINTIP(a)
#endif
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id);
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]);
bool NapatechSetupNuma(uint32_t stream, uint32_t numa);
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream, uint32_t *filter_id, uint32_t *hash_id);
bool NapatechDeleteFilter(uint32_t filter_id);
#endif //HAVE_NAPATECH
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream);
uint32_t NapatechDeleteFilters(void);
#ifdef NAPATECH_ENABLE_BYPASS
/* */
#define NAPATECH_KEYTYPE_IPV4 3
#define NAPATECH_KEYTYPE_IPV4_SPAN 4
#define NAPATECH_KEYTYPE_IPV6 5
#define NAPATECH_KEYTYPE_IPV6_SPAN 6
#define NAPATECH_FLOWTYPE_DROP 7
#define NAPATECH_FLOWTYPE_PASS 8
int NapatechInitFlowStreams(void);
NtFlowStream_t *NapatechGetFlowStreamPtr(int device);
int NapatechCloseFlowStreams(void);
int NapatechIsBypassSupported(void);
#endif /* NAPATECH_ENABLE_BYPASS */
#endif /* HAVE_NAPATECH */
#endif /* __UTIL_NAPATECH_H__ */

@ -1765,21 +1765,55 @@ napatech:
#
streams: ["0-3"]
# Stream stats can be enabled to provide fine grain packet and byte counters
# for each thread/stream that is configured.
#
enable-stream-stats: no
# When auto-config is enabled the streams will be created and assigned
# automatically to the NUMA node where the thread resides. If cpu-affinity
# is enabled in the threading section, then the streams will be created
# according to the number of worker threads specified in the worker cpu set.
# Otherwise, the streams array is used to define the streams.
#
# This option cannot be used simultaneous with "use-all-streams".
# This option is intended primarily to support legacy configurations.
#
# This option cannot be used simultaneously with either "use-all-streams"
# or hardware-bypass.
#
auto-config: yes
# Enable hardware level flow bypass.
#
hardware-bypass: yes
# Enable inline operation. When enabled, traffic arriving on a given port is
# automatically forwarded out its peer port after analysis by Suricata.
#
inline: no
# Ports indicates which napatech ports are to be used in auto-config mode.
# These are the port IDs of the ports that will be merged prior to the
# traffic being distributed to the streams.
#
# This can be specified in any of the following ways:
# When hardware-bypass is enabled the ports must be configured as segments that
# specify the port(s) on which upstream and downstream traffic will arrive.
# This information is necessary for the hardware to properly process flows.
#
# When using a tap configuration one of the ports will receive inbound traffic
# for the network and the other will receive outbound traffic. The two ports on a
# given segment must reside on the same network adapter.
#
# When using a SPAN-port configuration the upstream and downstream traffic
# arrives on a single port. This is configured by setting the two sides of the
# segment to reference the same port. (e.g. 0-0 to configure a SPAN port on
# port 0).
#
# port segments are specified in the form:
# ports: [0-1,2-3,4-5,6-6,7-7]
#
# For legacy systems, when hardware-bypass is disabled, this can be specified in any
# of the following ways:
#
# a list of individual ports (e.g. ports: [0,1,2,3])
#
@ -1788,9 +1822,9 @@ napatech:
# "all" to indicate that all ports are to be merged together
# (e.g. ports: [all])
#
# This has no effect if auto-config is disabled.
# This parameter has no effect if auto-config is disabled.
#
ports: [all]
ports: [0-1,2-3]
# When auto-config is enabled the hashmode specifies the algorithm for
# determining to which stream a given packet is to be delivered.
@ -1801,7 +1835,7 @@ napatech:
#
# See the Napatech NTPL documentation for other hashmodes and details on their use.
#
# This has no effect if auto-config is disabled.
# This parameter has no effect if auto-config is disabled.
#
hashmode: hash5tuplesorted
