diff --git a/configure.ac b/configure.ac
index 3ec7ebd57f..f3233f474d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1901,6 +1901,12 @@ fi
 fi
+  if test "x$enable_napatech" = "xyes"; then
+    AM_CONDITIONAL([BUILD_NAPATECH], [true])
+  else
+    AM_CONDITIONAL([BUILD_NAPATECH], [false])
+  fi
+
 # libmaxminddb
 AC_ARG_ENABLE(geoip,
         AS_HELP_STRING([--enable-geoip],[Enable GeoIP2 support]),
@@ -2513,6 +2519,7 @@ AC_CONFIG_FILES(examples/plugins/ci-capture/Makefile)
 AC_CONFIG_FILES(examples/lib/simple/Makefile examples/lib/simple/Makefile.example)
 AC_CONFIG_FILES(plugins/Makefile)
 AC_CONFIG_FILES(plugins/pfring/Makefile)
+AC_CONFIG_FILES(plugins/napatech/Makefile)
 AC_OUTPUT
diff --git a/plugins/Makefile.am b/plugins/Makefile.am
index c2200d1f71..cb70413260 100644
--- a/plugins/Makefile.am
+++ b/plugins/Makefile.am
@@ -3,3 +3,7 @@ SUBDIRS =
 if BUILD_PFRING
 SUBDIRS += pfring
 endif
+
+if BUILD_NAPATECH
+SUBDIRS += napatech
+endif
diff --git a/plugins/napatech/Makefile.am b/plugins/napatech/Makefile.am
new file mode 100644
index 0000000000..8fe122757f
--- /dev/null
+++ b/plugins/napatech/Makefile.am
@@ -0,0 +1,13 @@
+pkglib_LTLIBRARIES = napatech.la
+
+napatech_la_SOURCES = runmode-napatech.c source-napatech.c util-napatech.c plugin.c
+napatech_la_LDFLAGS = -module -avoid-version -shared
+napatech_la_LIBADD = -lntapi
+
+noinst_HEADERS = \
+	runmode-napatech.h \
+	source-napatech.h \
+	util-napatech.h
+
+install-exec-hook:
+	cd $(DESTDIR)$(pkglibdir) && $(RM) $(pkglib_LTLIBRARIES)
diff --git a/plugins/napatech/README.md b/plugins/napatech/README.md
new file mode 100644
index 0000000000..bfc783d0b7
--- /dev/null
+++ b/plugins/napatech/README.md
@@ -0,0 +1,34 @@
+# Napatech Capture Plugin
+
+## Building
+
+To build this plugin, configure Suricata with the `--enable-napatech` and
+optionally the `--with-napatech-includes` and
+`--with-napatech-libraries` command line options.
+
+## Running
+```
+/usr/local/suricata/bin/suricata \
+    --set plugins.0=/usr/local/lib/suricata/napatech.so \
+    --capture-plugin=napatech
+```
+
+### --set plugins.0=/usr/local/lib/suricata/napatech.so
+
+This command line option tells Suricata about this plugin. This could also
+be done in `suricata.yaml` with the following section:
+```
+plugins:
+  - /usr/local/lib/suricata/napatech.so
+```
+
+### --capture-plugin=napatech
+
+This is the option that tells Suricata to use a plugin for capture, much like
+`--pcap` tells Suricata to use libpcap or `--af-packet` tells Suricata to use
+AF_PACKET. Here we are telling it to look for a loaded plugin named
+`napatech` to provide the capture method.
+
+There is another command line option `--capture-plugin-args` to pass arbitrary
+data on the command line to a capture plugin, but this plugin does not yet handle
+data provided through this command line parameter.
diff --git a/plugins/napatech/plugin.c b/plugins/napatech/plugin.c
new file mode 100644
index 0000000000..71631bf7f9
--- /dev/null
+++ b/plugins/napatech/plugin.c
@@ -0,0 +1,56 @@
+/* Copyright (C) 2020-2024 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
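For reference, a complete configure invocation combining the options named in the README above might look like the following; the SDK paths are placeholders for wherever the Napatech 3GD SDK is installed, not values taken from this patch:
```
./configure --enable-napatech \
    --with-napatech-includes=/opt/napatech3/include \
    --with-napatech-libraries=/opt/napatech3/lib
```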
+ * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include "suricata-common.h" +#include "suricata-plugin.h" + +#include "decode.h" +#include "source-napatech.h" +#include "runmode-napatech.h" +#include "util-device.h" + +void InitCapturePlugin(const char *args, int plugin_slot, int receive_slot, int decode_slot) +{ + LiveBuildDeviceList("plugin"); + RunModeNapatechRegister(plugin_slot); + TmModuleReceiveNapatechRegister(receive_slot); + TmModuleDecodeNapatechRegister(decode_slot); +} + +void SCPluginInit(void) +{ + SCCapturePlugin *plugin = SCCalloc(1, sizeof(SCCapturePlugin)); + if (plugin == NULL) { + FatalError("Failed to allocate memory for capture plugin"); + } + plugin->name = "napatech"; + plugin->Init = InitCapturePlugin; + plugin->GetDefaultMode = RunModeNapatechGetDefaultMode; + SCPluginRegisterCapture(plugin); +} + +const SCPlugin PluginRegistration = { + .name = "napatech", + .author = "Open Information Security Foundation", + .license = "GPLv2", + .Init = SCPluginInit, +}; + +const SCPlugin *SCPluginRegister() +{ + return &PluginRegistration; +} diff --git a/plugins/napatech/runmode-napatech.c b/plugins/napatech/runmode-napatech.c new file mode 100644 index 0000000000..1deaee328d --- /dev/null +++ b/plugins/napatech/runmode-napatech.c @@ -0,0 +1,259 @@ +/* Copyright (C) 2012-2017 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author nPulse Technologies, LLC. 
+ * \author Matt Keeler + */ + +#include "suricata-common.h" +#include "suricata-plugin.h" + +#include "tm-threads.h" + +#include "runmode-napatech.h" +#include "source-napatech.h" // need NapatechStreamDevConf structure +#include "util-napatech.h" + +#include "conf.h" +#include "runmodes.h" +#include "util-debug.h" +#include "util-time.h" +#include "util-cpu.h" +#include "util-byte.h" +#include "util-affinity.h" +#include "util-runmodes.h" +#include "util-device.h" + +static const char *default_mode = "workers"; + +#define NT_RUNMODE_WORKERS 1 + +#define MAX_STREAMS 256 +static uint16_t num_configured_streams = 0; +static uint16_t first_stream = 0xffff; +static uint16_t last_stream = 0xffff; +static int auto_config = 0; +static int use_hw_bypass = 0; + +uint16_t NapatechGetNumConfiguredStreams(void) +{ + return num_configured_streams; +} + +uint16_t NapatechGetNumFirstStream(void) +{ + return first_stream; +} + +uint16_t NapatechGetNumLastStream(void) +{ + return last_stream; +} + +bool NapatechIsAutoConfigEnabled(void) +{ + return (auto_config != 0); +} + +bool NapatechUseHWBypass(void) +{ + return (use_hw_bypass != 0); +} + +const char *RunModeNapatechGetDefaultMode(void) +{ + return default_mode; +} + +void RunModeNapatechRegister(int slot) +{ + RunModeRegisterNewRunMode(slot, "workers", + "Workers Napatech mode, each thread does all" + " tasks from acquisition to logging", + RunModeNapatechWorkers, NULL); + return; +} + +static int NapatechRegisterDeviceStreams(void) +{ + /* Display the configuration mode */ + int use_all_streams; + + if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) { + SCLogInfo("Could not find napatech.use-all-streams in config file. Defaulting to \"no\"."); + use_all_streams = 0; + } + + if (ConfGetBool("napatech.auto-config", &auto_config) == 0) { + SCLogInfo("napatech.auto-config not found in config file. Defaulting to disabled."); + } + + if (ConfGetBool("napatech.hardware-bypass", &use_hw_bypass) == 0) { + SCLogInfo("napatech.hardware-bypass not found in config file. Defaulting to disabled."); + } + + /* use_all_streams uses existing streams created prior to starting Suricata. auto_config + * automatically creates streams. Therefore, these two options are mutually exclusive. + */ + if (use_all_streams && auto_config) { + FatalError("napatech.auto-config cannot be used in configuration file at the same time as " + "napatech.use-all-streams."); + } + + /* to use hardware_bypass we need to configure the streams to be consistent. + * with the rest of the configuration. Therefore auto_config is not a valid + * option. 
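The keys read above are `napatech.use-all-streams`, `napatech.auto-config` and `napatech.hardware-bypass` (`napatech.inline` and `napatech.ports` appear later in the patch). A minimal `suricata.yaml` sketch using just the keys shown here could look like this; the values are illustrative, not defaults defined by the plugin:
```
napatech:
  use-all-streams: no
  auto-config: yes
  hardware-bypass: no
```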
+     */
+    if (use_hw_bypass && auto_config == 0) {
+        FatalError("napatech.auto-config must be enabled when using napatech.hardware-bypass.");
+    }
+
+    /* Get the stream IDs either from the conf or by querying Napatech */
+    NapatechStreamConfig stream_config[MAX_STREAMS];
+
+    uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);
+    num_configured_streams = stream_cnt;
+    SCLogDebug("Configuring %d Napatech Streams...", stream_cnt);
+
+    for (uint16_t inst = 0; inst < stream_cnt; ++inst) {
+        char *plive_dev_buf = SCCalloc(1, 9);
+        if (unlikely(plive_dev_buf == NULL)) {
+            FatalError("Failed to allocate memory for NAPATECH stream counter.");
+        }
+        snprintf(plive_dev_buf, 9, "nt%d", stream_config[inst].stream_id);
+
+        if (auto_config) {
+            if (stream_config[inst].is_active) {
+                SCLogError("Registering Napatech device: %s - active stream found.", plive_dev_buf);
+                SCLogError(
+                        "run /opt/napatech3/bin/ntpl -e \"delete=all\" to delete existing streams");
+                FatalError("or disable auto-config in the conf file before running.");
+            }
+        } else {
+            SCLogInfo("Registering Napatech device: %s - active stream%sfound.", plive_dev_buf,
+                    stream_config[inst].is_active ? " " : " NOT ");
+        }
+        LiveRegisterDevice(plive_dev_buf);
+
+        if (first_stream == 0xffff) {
+            first_stream = stream_config[inst].stream_id;
+        }
+        last_stream = stream_config[inst].stream_id;
+    }
+
+    /* Napatech stats come from a separate thread. This will suppress
+     * the counters when Suricata exits.
+     */
+    LiveDeviceHasNoStats();
+    return 0;
+}
+
+static void *NapatechConfigParser(const char *device)
+{
+    /* Expect device to be of the form nt%d where %d is the stream id to use */
+    int dev_len = strlen(device);
+    if (dev_len < 3 || dev_len > 5) {
+        SCLogError("Could not parse config for device: %s - invalid length", device);
+        return NULL;
+    }
+
+    struct NapatechStreamDevConf *conf = SCCalloc(1, sizeof(struct NapatechStreamDevConf));
+    if (unlikely(conf == NULL)) {
+        SCLogError("Failed to allocate memory for NAPATECH device name.");
+        return NULL;
+    }
+
+    /* device+2 is a pointer to the beginning of the stream id after the constant nt portion */
+    if (StringParseUint16(&conf->stream_id, 10, 0, device + 2) < 0) {
+        SCLogError("Invalid value for stream_id: %s", device + 2);
+        SCFree(conf);
+        return NULL;
+    }
+
+    return (void *)conf;
+}
+
+static int NapatechGetThreadsCount(void *conf __attribute__((unused)))
+{
+    /* No matter which live device it is, there is no reason to ever use more than 1 thread;
+       2 or more threads would cause packet duplication. */
+    return 1;
+}
+
+static int NapatechInit(int runmode)
+{
+    int status;
+
+    TimeModeSetLive();
+
+    /* Initialize the API and check version compatibility */
+    if ((status = NT_Init(NTAPI_VERSION)) != NT_SUCCESS) {
+        NAPATECH_ERROR(status);
+        exit(EXIT_FAILURE);
+    }
+
+    status = NapatechRegisterDeviceStreams();
+    if (status < 0 || num_configured_streams <= 0) {
+        FatalError("Unable to find existing Napatech Streams");
+    }
+
+    struct NapatechStreamDevConf *conf = SCCalloc(1, sizeof(struct NapatechStreamDevConf));
+    if (unlikely(conf == NULL)) {
+        FatalError("Failed to allocate memory for NAPATECH device.");
+    }
+
+    if (use_hw_bypass) {
+#ifdef NAPATECH_ENABLE_BYPASS
+        if (NapatechVerifyBypassSupport()) {
+            SCLogInfo("Napatech Hardware Bypass is supported and enabled.");
+        } else {
+            FatalError("Napatech Hardware Bypass requested in conf but is not supported by the "
+                       "hardware.");
+        }
+#else
+        FatalError(
+                "Napatech Hardware Bypass requested in conf but is not enabled by the software.");
+#endif
+    } else {
+        SCLogInfo("Hardware Bypass is disabled in the conf file.");
+    }
+
+    /* Start a thread to process the statistics */
+    NapatechStartStats();
+
+    switch (runmode) {
+        case NT_RUNMODE_WORKERS:
+            status = RunModeSetLiveCaptureWorkers(NapatechConfigParser, NapatechGetThreadsCount,
+                    "NapatechStream", "NapatechDecode", thread_name_workers, NULL);
+            break;
+        default:
+            status = -1;
+    }
+
+    if (status != 0) {
+        FatalError("Runmode start failed");
+    }
+    return 0;
+}
+
+int RunModeNapatechWorkers(void)
+{
+    return NapatechInit(NT_RUNMODE_WORKERS);
+}
diff --git a/plugins/napatech/runmode-napatech.h b/plugins/napatech/runmode-napatech.h
new file mode 100644
index 0000000000..f620669096
--- /dev/null
+++ b/plugins/napatech/runmode-napatech.h
@@ -0,0 +1,40 @@
+/* Copyright (C) 2012-2017 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/**
+ * \file
+ *
+ * \author nPulse Technologies, LLC.
+ * \author Matt Keeler
+ */
+
+#ifndef SURICATA_RUNMODE_NAPATECH_H
+#define SURICATA_RUNMODE_NAPATECH_H
+
+#include
+
+int RunModeNapatechWorkers(void);
+void RunModeNapatechRegister(int slot);
+const char *RunModeNapatechGetDefaultMode(void);
+
+uint16_t NapatechGetNumConfiguredStreams(void);
+uint16_t NapatechGetNumFirstStream(void);
+uint16_t NapatechGetNumLastStream(void);
+bool NapatechIsAutoConfigEnabled(void);
+bool NapatechUseHWBypass(void);
+
+#endif /* SURICATA_RUNMODE_NAPATECH_H */
diff --git a/plugins/napatech/source-napatech.c b/plugins/napatech/source-napatech.c
new file mode 100644
index 0000000000..5a5b40ed74
--- /dev/null
+++ b/plugins/napatech/source-napatech.c
@@ -0,0 +1,1074 @@
+/* Copyright (C) 2012-2020 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/**
+ * \file
+ *
+ * \author nPulse Technologies, LLC.
+ * \author Matt Keeler
+ *
+ * Support for NAPATECH adapter with the 3GD Driver/API.
+ * Requires libntapi from Napatech A/S.
+ * + */ +#include "suricata-common.h" +#include "suricata-plugin.h" + +#include "action-globals.h" +#include "decode.h" +#include "packet.h" +#include "suricata.h" +#include "threadvars.h" +#include "util-datalink.h" +#include "util-optimize.h" +#include "tm-threads.h" +#include "tm-queuehandlers.h" +#include "tm-modules.h" +#include "util-privs.h" +#include "util-conf.h" +#include "tmqh-packetpool.h" +#include "util-napatech.h" +#include "source-napatech.h" +#include "runmode-napatech.h" + +#include +#include + +extern uint16_t max_pending_packets; + +typedef struct NapatechThreadVars_ { + ThreadVars *tv; + NtNetStreamRx_t rx_stream; + uint16_t stream_id; + TmSlot *slot; +} NapatechThreadVars; + +#ifdef NAPATECH_ENABLE_BYPASS +static int NapatechBypassCallback(Packet *p); +#endif + +TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **); +void NapatechStreamThreadExitStats(ThreadVars *, void *); +TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot); + +TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **); +TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data); +TmEcode NapatechDecode(ThreadVars *, Packet *, void *); + +/* These are used as the threads are exiting to get a comprehensive count of + * all the packets received and dropped. + */ +SC_ATOMIC_DECLARE(uint64_t, total_packets); +SC_ATOMIC_DECLARE(uint64_t, total_drops); +SC_ATOMIC_DECLARE(uint16_t, total_tallied); + +/* Streams are counted as they are instantiated in order to know when all threads + * are running*/ +SC_ATOMIC_DECLARE(uint16_t, stream_count); + +typedef struct NapatechNumaDetect_ { + SC_ATOMIC_DECLARE(uint16_t, count); +} NapatechNumaDetect; + +NapatechNumaDetect *numa_detect = NULL; + +SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts); +SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts); + +/** + * \brief Initialize the Napatech receiver (reader) module for globals. + */ +static TmEcode NapatechStreamInit(void) +{ + int i; + + SC_ATOMIC_INIT(total_packets); + SC_ATOMIC_INIT(total_drops); + SC_ATOMIC_INIT(total_tallied); + SC_ATOMIC_INIT(stream_count); + + numa_detect = SCMalloc(sizeof(*numa_detect) * (numa_max_node() + 1)); + if (numa_detect == NULL) { + FatalError("Failed to allocate memory for numa detection array: %s", strerror(errno)); + } + + for (i = 0; i <= numa_max_node(); ++i) { + SC_ATOMIC_INIT(numa_detect[i].count); + } + + SC_ATOMIC_INIT(flow_callback_cnt); + SC_ATOMIC_INIT(flow_callback_handled_pkts); + SC_ATOMIC_INIT(flow_callback_udp_pkts); + SC_ATOMIC_INIT(flow_callback_tcp_pkts); + SC_ATOMIC_INIT(flow_callback_unhandled_pkts); + + return TM_ECODE_OK; +} + +/** + * \brief Deinitialize the Napatech receiver (reader) module for globals. + */ +static TmEcode NapatechStreamDeInit(void) +{ + if (numa_detect != NULL) { + SCFree(numa_detect); + } + + return TM_ECODE_OK; +} + +/** + * \brief Register the Napatech receiver (reader) module. 
+ */ +void TmModuleReceiveNapatechRegister(int slot) +{ + tmm_modules[slot].name = "NapatechStream"; + tmm_modules[slot].ThreadInit = NapatechStreamThreadInit; + tmm_modules[slot].Func = NULL; + tmm_modules[slot].PktAcqLoop = NapatechPacketLoop; + tmm_modules[slot].PktAcqBreakLoop = NULL; + tmm_modules[slot].ThreadExitPrintStats = NapatechStreamThreadExitStats; + tmm_modules[slot].ThreadDeinit = NapatechStreamThreadDeinit; + tmm_modules[slot].cap_flags = SC_CAP_NET_RAW; + tmm_modules[slot].flags = TM_FLAG_RECEIVE_TM; + tmm_modules[slot].Init = NapatechStreamInit; + tmm_modules[slot].DeInit = NapatechStreamDeInit; +} + +/** + * \brief Register the Napatech decoder module. + */ +void TmModuleDecodeNapatechRegister(int slot) +{ + tmm_modules[slot].name = "NapatechDecode"; + tmm_modules[slot].ThreadInit = NapatechDecodeThreadInit; + tmm_modules[slot].Func = NapatechDecode; + tmm_modules[slot].ThreadExitPrintStats = NULL; + tmm_modules[slot].ThreadDeinit = NapatechDecodeThreadDeinit; + tmm_modules[slot].cap_flags = 0; + tmm_modules[slot].flags = TM_FLAG_DECODE_TM; +} + +#ifdef NAPATECH_ENABLE_BYPASS +/** + * \brief template of IPv4 header + */ +struct ipv4_hdr { + uint8_t version_ihl; /**< version and header length */ + uint8_t type_of_service; /**< type of service */ + uint16_t total_length; /**< length of packet */ + uint16_t packet_id; /**< packet ID */ + uint16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + uint16_t hdr_checksum; /**< header checksum */ + uint32_t src_addr; /**< source address */ + uint32_t dst_addr; /**< destination address */ +} __attribute__((__packed__)); + +/** + * \brief template of IPv6 header + */ +struct ipv6_hdr { + uint32_t vtc_flow; /**< IP version, traffic class & flow label. */ + uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */ + uint8_t proto; /**< Protocol, next header. */ + uint8_t hop_limits; /**< Hop limits. */ + uint8_t src_addr[16]; /**< IP address of source host. */ + uint8_t dst_addr[16]; /**< IP address of destination host(s). */ +} __attribute__((__packed__)); + +/** + * \brief template of UDP header + */ +struct udp_hdr { + uint16_t src_port; /**< UDP source port. */ + uint16_t dst_port; /**< UDP destination port. */ + uint16_t dgram_len; /**< UDP datagram length */ + uint16_t dgram_cksum; /**< UDP datagram checksum */ +} __attribute__((__packed__)); + +/** + * \brief template of TCP header + */ +struct tcp_hdr { + uint16_t src_port; /**< TCP source port. */ + uint16_t dst_port; /**< TCP destination port. */ + uint32_t sent_seq; /**< TX data sequence number. */ + uint32_t recv_ack; /**< RX data acknowledgement sequence number. */ + uint8_t data_off; /**< Data offset. */ + uint8_t tcp_flags; /**< TCP flags */ + uint16_t rx_win; /**< RX flow control window. */ + uint16_t cksum; /**< TCP checksum. */ + uint16_t tcp_urp; /**< TCP urgent pointer, if any. */ +} __attribute__((__packed__)); + +/* The hardware will assign a "color" value indicating what filters are matched + * by a given packet. These constants indicate what bits are set in the color + * field for different protocols + * + */ +// unused #define RTE_PTYPE_L2_ETHER 0x10000000 +#define RTE_PTYPE_L3_IPV4 0x01000000 +#define RTE_PTYPE_L3_IPV6 0x04000000 +#define RTE_PTYPE_L4_TCP 0x00100000 +#define RTE_PTYPE_L4_UDP 0x00200000 + +/* These masks are used to extract layer 3 and layer 4 protocol + * values from the color field in the packet descriptor. 
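Since the packed templates above are meant to mirror the canonical wire layouts, their sizes can be sanity-checked at compile time. A small C11 sketch, not part of the patch, assuming the struct definitions above are in scope:
```c
#include <assert.h>

/* Each template should match the canonical header size on the wire. */
static_assert(sizeof(struct ipv4_hdr) == 20, "IPv4 header template is 20 bytes");
static_assert(sizeof(struct ipv6_hdr) == 40, "IPv6 header template is 40 bytes");
static_assert(sizeof(struct udp_hdr) == 8, "UDP header template is 8 bytes");
static_assert(sizeof(struct tcp_hdr) == 20, "TCP header template is 20 bytes");
```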
+ */ +#define RTE_PTYPE_L3_MASK 0x0f000000 +#define RTE_PTYPE_L4_MASK 0x00f00000 + +#define COLOR_IS_SPAN 0x00001000 + +static int is_inline = 0; +static int inline_port_map[MAX_PORTS] = { -1 }; + +/** + * \brief Binds two ports together for inline operation. + * + * Get the ID of an adapter on which a given port resides. + * + * \param port one of the ports in a pairing. + * \param peer the other port in a pairing. + * \return ID of the adapter. + * + */ +int NapatechSetPortmap(int port, int peer) +{ + if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) { + inline_port_map[port] = peer; + inline_port_map[peer] = port; + } else { + SCLogError("Port pairing is already configured."); + return 0; + } + return 1; +} + +/** + * \brief Returns the ID of the adapter + * + * Get the ID of an adapter on which a given port resides. + * + * \param port for which adapter ID is requested. + * \return ID of the adapter. + * + */ +int NapatechGetAdapter(uint8_t port) +{ + static int port_adapter_map[MAX_PORTS] = { -1 }; + int status; + NtInfo_t h_info; /* Info handle */ + NtInfoStream_t h_info_stream; /* Info stream handle */ + + if (unlikely(port_adapter_map[port] == -1)) { + if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + /* Read the system info */ + h_info.cmd = NT_INFO_CMD_READ_PORT_V9; + h_info.u.port_v9.portNo = (uint8_t)port; + if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) { + /* Get the status code as text */ + NAPATECH_ERROR(status); + NT_InfoClose(h_info_stream); + return -1; + } + port_adapter_map[port] = h_info.u.port_v9.data.adapterNo; + } + return port_adapter_map[port]; +} + +/** + * \brief IPv4 4-tuple convenience structure + */ +struct IPv4Tuple4 { + uint32_t sa; /*!< Source address */ + uint32_t da; /*!< Destination address */ + uint16_t sp; /*!< Source port */ + uint16_t dp; /*!< Destination port */ +}; + +/** + * \brief IPv6 4-tuple convenience structure + */ +struct IPv6Tuple4 { + uint8_t sa[16]; /*!< Source address */ + uint8_t da[16]; /*!< Destination address */ + uint16_t sp; /*!< Source port */ + uint16_t dp; /*!< Destination port */ +}; + +/** + * \brief Compares the byte order value of two IPv6 addresses. + * + * + * \param addr_a The first address to compare + * \param addr_b The second address to compare + * + * \return -1 if addr_a < addr_b + * 1 if addr_a > addr_b + * 0 if addr_a == addr_b + */ +static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) +{ + uint16_t pos; + for (pos = 0; pos < 16; ++pos) { + if (addr_a[pos] < addr_b[pos]) { + return -1; + } else if (addr_a[pos] > addr_b[pos]) { + return 1; + } /* else they are equal - check next position*/ + } + + /* if we get here the addresses are equal */ + return 0; +} + +/** + * \brief Initializes the FlowStreams used to program flow data. + * + * Opens a FlowStream on the adapter associated with the rx port. This + * FlowStream is subsequently used to program the adapter with + * flows to bypass. + * + * \return the flow stream handle, NULL if failure. 
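To make the color constants and masks above concrete, here is a standalone sketch of how a color value is classified; the sample value is invented, and the constants are repeated so the snippet compiles on its own:
```c
#include <stdint.h>
#include <stdio.h>

#define RTE_PTYPE_L3_IPV4 0x01000000
#define RTE_PTYPE_L4_TCP  0x00100000
#define RTE_PTYPE_L3_MASK 0x0f000000
#define RTE_PTYPE_L4_MASK 0x00f00000
#define COLOR_IS_SPAN     0x00001000

int main(void)
{
    /* Invented color value: IPv4 + TCP, not received on a SPAN port. */
    uint32_t packet_type = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
    uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
    uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
    printf("ipv4=%d tcp=%d span=%d\n", layer3 == RTE_PTYPE_L3_IPV4,
            layer4 == RTE_PTYPE_L4_TCP, (packet_type & COLOR_IS_SPAN) != 0);
    return 0;
}
```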
+ */ +static NtFlowStream_t InitFlowStream(int adapter, int stream_id) +{ + int status; + NtFlowStream_t hFlowStream; + + NtFlowAttr_t attr; + char flow_name[80]; + + NT_FlowOpenAttrInit(&attr); + NT_FlowOpenAttrSetAdapterNo(&attr, adapter); + + snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id); + SCLogDebug("Opening flow programming stream: %s", flow_name); + if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) { + SCLogWarning("Napatech bypass functionality not supported by the FPGA version on adapter " + "%d - disabling support.", + adapter); + return NULL; + } + return hFlowStream; +} + +/** + * \brief Callback function to process Bypass events on Napatech Adapter. + * + * Callback function that sets up the Flow tables on the Napatech card + * so that subsequent packets from this flow are bypassed on the hardware. + * + * \param p packet containing information about the flow to be bypassed + * \param is_inline indicates if Suricata is being run in inline mode. + * + * \return Error code indicating success (1) or failure (0). + * + */ +static int ProgramFlow(Packet *p, int inline_mode) +{ + NtFlow_t flow_match; + memset(&flow_match, 0, sizeof(flow_match)); + + NapatechPacketVars *ntpv = (NapatechPacketVars *)&p->plugin_v; + + /* + * The hardware decoder will "color" the packets according to the protocols + * in the packet and the port the packet arrived on. packet_type gets + * these bits and we mask out layer3, layer4, and is_span to determine + * the protocols and if the packet is coming in from a SPAN port. + */ + uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo; + uint8_t *packet = (uint8_t *)ntpv->dyn3 + ntpv->dyn3->descrLength; + + uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK; + uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK; + uint32_t is_span = packet_type & COLOR_IS_SPAN; + + /* + * When we're programming the flows to arrive on a span port, + * where upstream and downstream packets arrive on the same port, + * the hardware is configured to swap the source and dest + * fields if the src addr > dest addr. We need to program the + * flow tables to match. We'll compare addresses and set + * do_swap accordingly. 
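A concrete instance of the normalization described above, as a fragment that assumes `<arpa/inet.h>` and uses invented addresses: for a SPAN-port packet sent from 10.0.0.2 to 10.0.0.1, the comparison selects the swapped order so that both directions of the flow program the same key.
```c
uint32_t src_addr = inet_addr("10.0.0.2"); /* network byte order, as read from the packet */
uint32_t dst_addr = inet_addr("10.0.0.1");
/* htonl() gives 0x0a000002 vs 0x0a000001, so do_swap is 1 and the key is
 * programmed with 10.0.0.1 as the source and 10.0.0.2 as the destination. */
int do_swap = (htonl(src_addr) > htonl(dst_addr));
```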
+ */ + + uint32_t do_swap = 0; + + SC_ATOMIC_ADD(flow_callback_cnt, 1); + + /* Only bypass TCP and UDP */ + if (PacketIsTCP(p)) { + SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1); + } else if (PacketIsUDP(p)) { + SC_ATOMIC_ADD(flow_callback_udp_pkts, 1); + } else { + SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1); + } + + struct IPv4Tuple4 v4Tuple; + struct IPv6Tuple4 v6Tuple; + struct ipv4_hdr *pIPv4_hdr = NULL; + struct ipv6_hdr *pIPv6_hdr = NULL; + + switch (layer3) { + case RTE_PTYPE_L3_IPV4: { + pIPv4_hdr = (struct ipv4_hdr *)(packet + ntpv->dyn3->offset0); + if (!is_span) { + v4Tuple.sa = pIPv4_hdr->src_addr; + v4Tuple.da = pIPv4_hdr->dst_addr; + } else { + do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr)); + if (!do_swap) { + /* already in order */ + v4Tuple.sa = pIPv4_hdr->src_addr; + v4Tuple.da = pIPv4_hdr->dst_addr; + } else { /* swap */ + v4Tuple.sa = pIPv4_hdr->dst_addr; + v4Tuple.da = pIPv4_hdr->src_addr; + } + } + break; + } + case RTE_PTYPE_L3_IPV6: { + pIPv6_hdr = (struct ipv6_hdr *)(packet + ntpv->dyn3->offset0); + do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0); + + if (!is_span) { + memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); + } else { + /* sort src/dest address before programming */ + if (!do_swap) { + /* already in order */ + memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16); + } else { /* swap the addresses */ + memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16); + memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16); + } + } + break; + } + default: { + return 0; + } + } + + switch (layer4) { + case RTE_PTYPE_L4_TCP: { + struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(packet + ntpv->dyn3->offset1); + if (layer3 == RTE_PTYPE_L3_IPV4) { + if (!is_span) { + v4Tuple.dp = tcp_hdr->dst_port; + v4Tuple.sp = tcp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV4; + } else { + if (!do_swap) { + v4Tuple.sp = tcp_hdr->src_port; + v4Tuple.dp = tcp_hdr->dst_port; + } else { + v4Tuple.sp = tcp_hdr->dst_port; + v4Tuple.dp = tcp_hdr->src_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; + } + memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); + } else { + if (!is_span) { + v6Tuple.dp = tcp_hdr->dst_port; + v6Tuple.sp = tcp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV6; + } else { + if (!do_swap) { + v6Tuple.sp = tcp_hdr->src_port; + v6Tuple.dp = tcp_hdr->dst_port; + } else { + v6Tuple.dp = tcp_hdr->src_port; + v6Tuple.sp = tcp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; + } + memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); + } + flow_match.ipProtocolField = 6; + break; + } + case RTE_PTYPE_L4_UDP: { + struct udp_hdr *udp_hdr = (struct udp_hdr *)(packet + ntpv->dyn3->offset1); + if (layer3 == RTE_PTYPE_L3_IPV4) { + if (!is_span) { + v4Tuple.dp = udp_hdr->dst_port; + v4Tuple.sp = udp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV4; + } else { + if (!do_swap) { + v4Tuple.sp = udp_hdr->src_port; + v4Tuple.dp = udp_hdr->dst_port; + } else { + v4Tuple.dp = udp_hdr->src_port; + v4Tuple.sp = udp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN; + } + memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple)); + } else { /* layer3 is IPV6 */ + if (!is_span) { + v6Tuple.dp = udp_hdr->dst_port; + v6Tuple.sp = udp_hdr->src_port; + flow_match.keyId = NAPATECH_KEYTYPE_IPV6; + } else { + if (!do_swap) { + v6Tuple.sp = udp_hdr->src_port; + v6Tuple.dp = udp_hdr->dst_port; + } else { 
+ v6Tuple.dp = udp_hdr->src_port; + v6Tuple.sp = udp_hdr->dst_port; + } + flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN; + } + memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple)); + } + flow_match.ipProtocolField = 17; + break; + } + default: { + return 0; + } + } + + flow_match.op = 1; /* program flow */ + flow_match.gfi = 1; /* Generate FlowInfo records */ + flow_match.tau = 1; /* tcp automatic unlearn */ + + if (PacketCheckAction(p, ACTION_DROP)) { + flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; + } else { + if (inline_mode) { + flow_match.keySetId = NAPATECH_FLOWTYPE_PASS; + } else { + flow_match.keySetId = NAPATECH_FLOWTYPE_DROP; + } + } + + if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) { + if (!(suricata_ctl_flags & SURICATA_STOP)) { + SCLogError("NT_FlowWrite failed!."); + exit(EXIT_FAILURE); + } + } + + return 1; +} + +/** + * \brief Callback from Suricata when a flow that should be bypassed + * is identified. + */ + +static int NapatechBypassCallback(Packet *p) +{ + NapatechPacketVars *ntpv = (NapatechPacketVars *)&p->plugin_v; + + /* + * Since, at this point, we don't know what action to take, + * simply mark this packet as one that should be + * bypassed when the packet is returned by suricata with a + * pass/drop verdict. + */ + ntpv->bypass = 1; + + return 1; +} + +#endif + +/** + * \brief Initialize the Napatech receiver thread, generate a single + * NapatechThreadVar structure for each thread, this will + * contain a NtNetStreamRx_t stream handle which is used when the + * thread executes to acquire the packets. + * + * \param tv Thread variable to ThreadVars + * \param initdata Initial data to the adapter passed from the user, + * this is processed by the user. + * + * For now, we assume that we have only a single name for the NAPATECH + * adapter. + * + * \param data data pointer gets populated with + * + */ +TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data) +{ + SCEnter(); + struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *)initdata; + uint16_t stream_id = conf->stream_id; + *data = NULL; + + NapatechThreadVars *ntv = SCCalloc(1, sizeof(NapatechThreadVars)); + if (unlikely(ntv == NULL)) { + FatalError("Failed to allocate memory for NAPATECH thread vars."); + } + + memset(ntv, 0, sizeof(NapatechThreadVars)); + ntv->stream_id = stream_id; + ntv->tv = tv; + + DatalinkSetGlobalType(LINKTYPE_ETHERNET); + + SCLogDebug("Started processing packets from NAPATECH Stream: %u", ntv->stream_id); + + *data = (void *)ntv; + SCReturnInt(TM_ECODE_OK); +} + +/** + * \brief Callback to indicate that the packet buffer can be returned to the hardware. + * + * Called when Suricata is done processing the packet. Before the packet is released + * this also checks the action to see if the packet should be dropped and programs the + * flow hardware if the flow is to be bypassed and the Napatech packet buffer is released. + * + * + * \param p Packet to return to the system. + * + */ +static void NapatechReleasePacket(struct Packet_ *p) +{ + /* + * If the packet is to be dropped we need to set the wirelength + * before releasing the Napatech buffer back to NTService. + */ + NapatechPacketVars *ntpv = (NapatechPacketVars *)&p->plugin_v; +#ifdef NAPATECH_ENABLE_BYPASS + if (is_inline && PacketCheckAction(p, ACTION_DROP)) { + ntpv->dyn3->wireLength = 0; + } + + /* + * If this flow is to be programmed for hardware bypass we do it now. 
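The keySetId selection above reduces to a small decision table; a hypothetical helper expressing the same logic (the NAPATECH_FLOWTYPE_* constants come from the code above, the function itself is not part of the patch):
```c
/* Mirrors the keySetId choice in ProgramFlow(): a DROP verdict always cuts the
 * flow off from the host; a PASS verdict is forwarded in hardware only when
 * running inline, otherwise (IDS on a tap/SPAN) it is also programmed as DROP. */
static int SelectKeySetId(int drop_verdict, int inline_mode)
{
    if (drop_verdict)
        return NAPATECH_FLOWTYPE_DROP;
    return inline_mode ? NAPATECH_FLOWTYPE_PASS : NAPATECH_FLOWTYPE_DROP;
}
```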
This is done + * here because the action is not available in the packet structure at the time of the + * bypass callback and it needs to be done before we release the packet structure. + */ + if (ntpv->bypass == 1) { + ProgramFlow(p, is_inline); + } +#endif + + NT_NetRxRelease(ntpv->rx_stream, ntpv->nt_packet_buf); + PacketFreeOrRelease(p); +} + +/** + * \brief Returns the NUMA node associated with the currently running thread. + * + * \return ID of the NUMA node. + * + */ +static int GetNumaNode(void) +{ + int cpu = 0; + int node = 0; + +#if defined(__linux__) + cpu = sched_getcpu(); + node = numa_node_of_cpu(cpu); +#else + SCLogWarning("Auto configuration of NUMA node is not supported on this OS."); +#endif + + return node; +} + +/** + * \brief Outputs hints on the optimal host-buffer configuration to aid tuning. + * + * \param log_level of the currently running instance. + * + */ +static void RecommendNUMAConfig(void) +{ + char *buffer, *p; + int set_cpu_affinity = 0; + + p = buffer = SCCalloc(sizeof(char), (32 * (numa_max_node() + 1) + 1)); + if (buffer == NULL) { + FatalError("Failed to allocate memory for temporary buffer: %s", strerror(errno)); + } + + if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) { + set_cpu_affinity = 0; + } + + if (set_cpu_affinity) { + SCLogPerf("Minimum host buffers that should be defined in ntservice.ini:"); + for (int i = 0; i <= numa_max_node(); ++i) { + SCLogPerf(" NUMA Node %d: %d", i, SC_ATOMIC_GET(numa_detect[i].count)); + p += snprintf(p, 32, "%s[%d, 16, %d]", (i == 0 ? "" : ","), + SC_ATOMIC_GET(numa_detect[i].count), i); + } + SCLogPerf("E.g.: HostBuffersRx=%s", buffer); + } + + SCFree(buffer); +} + +/** + * \brief Main Napatechpacket processing loop + * + * \param tv Thread variable to ThreadVars + * \param data Pointer to NapatechThreadVars with data specific to Napatech + * \param slot TMSlot where this instance is running. + * + */ +TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot) +{ + int32_t status; + char error_buffer[100]; + uint64_t pkt_ts; + NtNetBuf_t packet_buffer; + NapatechThreadVars *ntv = (NapatechThreadVars *)data; + int numa_node = -1; + int set_cpu_affinity = 0; + int closer = 0; + int is_autoconfig = 0; + + /* This just keeps the startup output more orderly. */ + usleep(200000 * ntv->stream_id); + +#ifdef NAPATECH_ENABLE_BYPASS + NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 }; + if (NapatechUseHWBypass()) { + /* Get a FlowStream handle for each adapter so we can efficiently find the + * correct handle corresponding to the port on which a packet is received. 
+ */ + int adapter = 0; + for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) { + flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id); + } + } +#endif + + if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) { + is_autoconfig = 0; + } + + if (is_autoconfig) { + numa_node = GetNumaNode(); + + if (numa_node <= numa_max_node()) { + SC_ATOMIC_ADD(numa_detect[numa_node].count, 1); + } + + if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) { + set_cpu_affinity = 0; + } + + if (set_cpu_affinity) { + NapatechSetupNuma(ntv->stream_id, numa_node); + } + + SC_ATOMIC_ADD(stream_count, 1); + if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) { + /* Print the recommended NUMA configuration early because it + * can fail with "No available hostbuffers" in NapatechSetupTraffic */ + RecommendNUMAConfig(); + +#ifdef NAPATECH_ENABLE_BYPASS + if (ConfGetBool("napatech.inline", &is_inline) == 0) { + is_inline = 0; + } + + /* Initialize the port map before we setup traffic filters */ + for (int i = 0; i < MAX_PORTS; ++i) { + inline_port_map[i] = -1; + } +#endif + /* The last thread to run sets up and deletes the streams */ + status = NapatechSetupTraffic(NapatechGetNumFirstStream(), NapatechGetNumLastStream()); + + closer = 1; + + if (status == 0x20002061) { + FatalError("Check host buffer configuration in ntservice.ini" + " or try running /opt/napatech3/bin/ntpl -e " + "\"delete=all\" to clean-up stream NUMA config."); + } else if (status == 0x20000008) { + FatalError("Check napatech.ports in the suricata config file."); + } + SCLogNotice("Napatech packet input engine started."); + } + } // is_autoconfig + + SCLogInfo("Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ", + sched_getcpu(), numa_node, ntv->stream_id); + + SCLogDebug("Opening NAPATECH Stream: %u for processing", ntv->stream_id); + + if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream", NT_NET_INTERFACE_PACKET, + ntv->stream_id, -1)) != NT_SUCCESS) { + + NAPATECH_ERROR(status); + SCFree(ntv); + SCReturnInt(TM_ECODE_FAILED); + } + TmSlot *s = (TmSlot *)slot; + ntv->slot = s->slot_next; + + // Indicate that the thread is actually running its application level code (i.e., it can poll + // packets) + TmThreadsSetFlag(tv, THV_RUNNING); + + while (!(suricata_ctl_flags & SURICATA_STOP)) { + /* make sure we have at least one packet in the packet pool, to prevent + * us from alloc'ing packets at line rate */ + PacketPoolWait(); + + /* Napatech returns packets 1 at a time */ + status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000); + if (unlikely(status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) { + if (status == NT_STATUS_TIMEOUT) { + TmThreadsCaptureHandleTimeout(tv, NULL); + } + continue; + } else if (unlikely(status != NT_SUCCESS)) { + NAPATECH_ERROR(status); + SCLogInfo("Failed to read from Napatech Stream %d: %s", ntv->stream_id, error_buffer); + NapatechStreamThreadDeinit(tv, ntv); + break; + } + + Packet *p = PacketGetFromQueueOrAlloc(); + if (unlikely(p == NULL)) { + NT_NetRxRelease(ntv->rx_stream, packet_buffer); + SCReturnInt(TM_ECODE_FAILED); + } + + NapatechPacketVars *ntpv = (NapatechPacketVars *)&p->plugin_v; +#ifdef NAPATECH_ENABLE_BYPASS + ntpv->bypass = 0; +#endif + ntpv->rx_stream = ntv->rx_stream; + + pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer); + + /* + * Handle the different timestamp forms that the napatech cards could use + * - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having an base + * of 0 as opposed to 
NATIVE_UNIX which has a base of 1/1/1970 + */ + switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) { + case NT_TIMESTAMP_TYPE_NATIVE_UNIX: + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts / 100000000), + ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0)); + break; + case NT_TIMESTAMP_TYPE_PCAP: + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), pkt_ts & 0xFFFFFFFF); + break; + case NT_TIMESTAMP_TYPE_PCAP_NANOTIME: + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), + ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0)); + break; + case NT_TIMESTAMP_TYPE_NATIVE_NDIS: + /* number of seconds between 1/1/1601 and 1/1/1970 */ + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS((pkt_ts / 100000000) - 11644473600), + ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0)); + break; + default: + SCLogError("Packet from Napatech Stream: %u does not have a supported timestamp " + "format", + ntv->stream_id); + NT_NetRxRelease(ntv->rx_stream, packet_buffer); + SCReturnInt(TM_ECODE_FAILED); + } + +#ifdef NAPATECH_ENABLE_BYPASS + ntpv->dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer); + p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL); + NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[ntpv->dyn3->rxPort]); + ntpv->flow_stream = flow_stream[NapatechGetAdapter(ntpv->dyn3->rxPort)]; + +#endif + + p->ReleasePacket = NapatechReleasePacket; + ntpv->nt_packet_buf = packet_buffer; + ntpv->stream_id = ntv->stream_id; + p->datalink = LINKTYPE_ETHERNET; + + if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), + NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) { + TmqhOutputPacketpool(ntv->tv, p); + SCReturnInt(TM_ECODE_FAILED); + } + + if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) { + SCReturnInt(TM_ECODE_FAILED); + } + + /* + * At this point the packet and the Napatech Packet Buffer have been returned + * to the system in the NapatechReleasePacket() Callback. + */ + + StatsSyncCountersIfSignalled(tv); + } // while + + if (closer) { + NapatechDeleteFilters(); + } + + SCReturnInt(TM_ECODE_OK); +} + +/** + * \brief Print some stats to the log at program exit. + * + * \param tv Pointer to ThreadVars. + * \param data Pointer to data, ErfFileThreadVars. 
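A worked example of the NT_TIMESTAMP_TYPE_PCAP branch above, using an arbitrary sample value rather than anything captured from hardware:
```c
/* PCAP-style descriptor timestamp: seconds in the upper 32 bits,
 * microseconds in the lower 32 bits. */
uint64_t pkt_ts = 0x5F5E10000007A120ULL;
uint32_t secs = pkt_ts >> 32;           /* 0x5F5E1000 = 1600000000 */
uint32_t usecs = pkt_ts & 0xFFFFFFFF;   /* 0x0007A120 = 500000 */
```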
+ */ +void NapatechStreamThreadExitStats(ThreadVars *tv, void *data) +{ + NapatechThreadVars *ntv = (NapatechThreadVars *)data; + NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id); + + double percent = 0; + if (stat.current_drop_packets > 0) + percent = (((double)stat.current_drop_packets) / + (stat.current_packets + stat.current_drop_packets)) * + 100; + + SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu", (uint64_t)ntv->stream_id, + stat.current_packets, stat.current_drop_packets, percent, stat.current_bytes); + + SC_ATOMIC_ADD(total_packets, stat.current_packets); + SC_ATOMIC_ADD(total_drops, stat.current_drop_packets); + SC_ATOMIC_ADD(total_tallied, 1); + + if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) { + if (SC_ATOMIC_GET(total_drops) > 0) + percent = (((double)SC_ATOMIC_GET(total_drops)) / + (SC_ATOMIC_GET(total_packets) + SC_ATOMIC_GET(total_drops))) * + 100; + + SCLogInfo(" "); + SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)", + SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent); + +#ifdef NAPATECH_ENABLE_BYPASS + SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld", + SC_ATOMIC_GET(flow_callback_cnt), SC_ATOMIC_GET(flow_callback_udp_pkts), + SC_ATOMIC_GET(flow_callback_tcp_pkts), SC_ATOMIC_GET(flow_callback_unhandled_pkts)); +#endif + } +} + +/** + * \brief Deinitializes the NAPATECH card. + * \param tv pointer to ThreadVars + * \param data pointer that gets cast into PcapThreadVars for ptv + */ +TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data) +{ + SCEnter(); + NapatechThreadVars *ntv = (NapatechThreadVars *)data; + + SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id); + NT_NetRxClose(ntv->rx_stream); + + SCReturnInt(TM_ECODE_OK); +} + +/** + * \brief This function passes off to link type decoders. + * + * NapatechDecode decodes packets from Napatech and passes + * them off to the proper link type decoder. + * + * \param t pointer to ThreadVars + * \param p pointer to the current packet + * \param data pointer that gets cast into PcapThreadVars for ptv + */ +TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data) +{ + SCEnter(); + + DecodeThreadVars *dtv = (DecodeThreadVars *)data; + + BUG_ON(PKT_IS_PSEUDOPKT(p)); + + // update counters + DecodeUpdatePacketCounters(tv, dtv, p); + + switch (p->datalink) { + case LINKTYPE_ETHERNET: + DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p)); + break; + default: + SCLogError("Datalink type %" PRId32 " not yet supported in module NapatechDecode", + p->datalink); + break; + } + + PacketDecodeFinalize(tv, dtv, p); + SCReturnInt(TM_ECODE_OK); +} + +/** + * \brief Initialization of Napatech Thread. + * + * \param t pointer to ThreadVars + * \param initdata - unused. + * \param data pointer that gets cast into DecoderThreadVars + */ +TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data) +{ + SCEnter(); + DecodeThreadVars *dtv = NULL; + dtv = DecodeThreadVarsAlloc(tv); + if (dtv == NULL) { + SCReturnInt(TM_ECODE_FAILED); + } + + DecodeRegisterPerfCounters(dtv, tv); + *data = (void *)dtv; + SCReturnInt(TM_ECODE_OK); +} + +/** + * \brief Deinitialization of Napatech Thread. 
+ * + * \param tv pointer to ThreadVars + * \param data pointer that gets cast into DecoderThreadVars + */ +TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data) +{ + if (data != NULL) { + DecodeThreadVarsFree(tv, data); + } + SCReturnInt(TM_ECODE_OK); +} diff --git a/plugins/napatech/source-napatech.h b/plugins/napatech/source-napatech.h new file mode 100644 index 0000000000..e9228fce41 --- /dev/null +++ b/plugins/napatech/source-napatech.h @@ -0,0 +1,41 @@ +/* Copyright (C) 2012-2017 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author nPulse Technologies, LLC. + * \author Matt Keeler + */ +#ifndef SURICATA_SOURCE_NAPATECH_H +#define SURICATA_SOURCE_NAPATECH_H + +void TmModuleReceiveNapatechRegister(int slot); +void TmModuleDecodeNapatechRegister(int slot); + +TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data); + +#include + +struct NapatechStreamDevConf { + uint16_t stream_id; +}; + +int NapatechSetPortmap(int port, int peer); +int NapatechGetAdapter(uint8_t port); + +#endif /* SURICATA_SOURCE_NAPATECH_H */ diff --git a/plugins/napatech/util-napatech.c b/plugins/napatech/util-napatech.c new file mode 100644 index 0000000000..da63bdd698 --- /dev/null +++ b/plugins/napatech/util-napatech.c @@ -0,0 +1,1843 @@ +/* Copyright (C) 2017-2021 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ +/** + * \file + * + * \author Napatech Inc. + * \author Phil Young + * + * + */ +#include "suricata-common.h" +#include "suricata-plugin.h" + +#include "suricata.h" +#include "util-device.h" +#include "util-cpu.h" +#include "util-byte.h" +#include "util-conf.h" +#include "threadvars.h" +#include "tm-threads.h" +#include "util-napatech.h" +#include "source-napatech.h" +#include "runmode-napatech.h" + +#ifdef NAPATECH_ENABLE_BYPASS + +/* + * counters to track the number of flows programmed on + * the adapter. + */ +typedef struct FlowStatsCounters_ { + uint16_t active_bypass_flows; + uint16_t total_bypass_flows; +} FlowStatsCounters; + +static int bypass_supported; +int NapatechIsBypassSupported(void) +{ + return bypass_supported; +} + +/** + * \brief Returns the number of Napatech Adapters in the system. + * + * \return count of the Napatech adapters present in the system. 
+ */ +int NapatechGetNumAdapters(void) +{ + NtInfoStream_t hInfo; + NtInfo_t hInfoSys; + int status; + static int num_adapters = -1; + + if (num_adapters == -1) { + if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM; + if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + num_adapters = hInfoSys.u.system.data.numAdapters; + + NT_InfoClose(hInfo); + } + + return num_adapters; +} + +/** + * \brief Verifies that the Napatech adapters support bypass. + * + * Attempts to opens a FlowStream on each adapter present in the system. + * If successful then bypass is supported + * + * \return 1 if Bypass functionality is supported; zero otherwise. + */ +int NapatechVerifyBypassSupport(void) +{ + int status; + int adapter = 0; + int num_adapters = NapatechGetNumAdapters(); + SCLogInfo("Found %d Napatech adapters.", num_adapters); + NtFlowStream_t hFlowStream; + + if (!NapatechUseHWBypass()) { + /* HW Bypass is disabled in the conf file */ + return 0; + } + + for (adapter = 0; adapter < num_adapters; ++adapter) { + NtFlowAttr_t attr; + char flow_name[80]; + + NT_FlowOpenAttrInit(&attr); + NT_FlowOpenAttrSetAdapterNo(&attr, adapter); + + snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter); + SCLogInfo("Opening flow programming stream: %s\n", flow_name); + if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) { + SCLogWarning("Napatech bypass functionality not supported by the FPGA version on " + "adapter %d - disabling support.", + adapter); + bypass_supported = 0; + return 0; + } + NT_FlowClose(hFlowStream); + } + + bypass_supported = 1; + return bypass_supported; +} + +/** + * \brief Updates statistic counters for Napatech FlowStats + * + * \param tv Thread variable to ThreadVars + * \param hInfo Handle to the Napatech InfoStream. + * \param hstat_stream Handle to the Napatech Statistics Stream. + * \param flow_counters The flow counters statistics to update. + * \param clear_stats Indicates if statistics on the card should be reset to zero. 
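The flow counters documented above boil down to simple arithmetic on the learn/unlearn counters read from the adapter; with invented numbers:
```c
/* Invented sample values for one adapter, illustrating UpdateFlowStats():
 * flows ever programmed versus flows since removed by any unlearn path. */
uint64_t programed = 1000;          /* learnDone */
uint64_t removed = 250 + 100 + 50;  /* unlearnDone + automaticUnlearnDone + timeoutUnlearnDone */
uint64_t active_bypass_flows = programed - removed; /* 600 */
uint64_t total_bypass_flows = programed;            /* 1000 */
```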
+ * + */ +static void UpdateFlowStats(ThreadVars *tv, NtInfoStream_t hInfo, NtStatStream_t hstat_stream, + FlowStatsCounters flow_counters, int clear_stats) +{ + NtStatistics_t hStat; + int status; + + uint64_t programed = 0; + uint64_t removed = 0; + int adapter = 0; + + for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) { + hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0; + hStat.u.flowData_v0.clear = clear_stats; + hStat.u.flowData_v0.adapterNo = adapter; + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(1); + } + programed = hStat.u.flowData_v0.learnDone; + removed = hStat.u.flowData_v0.unlearnDone + hStat.u.flowData_v0.automaticUnlearnDone + + hStat.u.flowData_v0.timeoutUnlearnDone; + } + + StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed); + StatsSetUI64(tv, flow_counters.total_bypass_flows, programed); +} + +#endif /* NAPATECH_ENABLE_BYPASS */ + +/*----------------------------------------------------------------------------- + *----------------------------------------------------------------------------- + * Statistics code + *----------------------------------------------------------------------------- + */ +typedef struct PacketCounters_ { + uint16_t pkts; + uint16_t byte; + uint16_t drop_pkts; + uint16_t drop_byte; +} PacketCounters; + +NapatechCurrentStats total_stats; +NapatechCurrentStats current_stats[MAX_STREAMS]; + +NapatechCurrentStats NapatechGetCurrentStats(uint16_t id) +{ + + return current_stats[id]; +} + +enum CONFIG_SPECIFIER { + CONFIG_SPECIFIER_UNDEFINED = 0, + CONFIG_SPECIFIER_RANGE, + CONFIG_SPECIFIER_INDIVIDUAL +}; + +//#define MAX_HOSTBUFFERS 8 + +/** + * \brief Test to see if any of the configured streams are active + * + * \param hInfo Handle to Napatech Info Stream. + * \param hStatsStream Handle to Napatech Statistics stream + * \param stream_config array of stream configuration structures + * \param num_inst + * + */ +static uint16_t TestStreamConfig(NtInfoStream_t hInfo, NtStatStream_t hstat_stream, + NapatechStreamConfig stream_config[], uint16_t num_inst) +{ + uint16_t num_active = 0; + + for (uint16_t inst = 0; inst < num_inst; ++inst) { + int status; + NtStatistics_t stat; // Stat handle. + + /* Check to see if it is an active stream */ + memset(&stat, 0, sizeof(NtStatistics_t)); + + /* Read usage data for the chosen stream ID */ + stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + stat.u.usageData_v0.streamid = (uint8_t)stream_config[inst].stream_id; + + if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return 0; + } + + if (stat.u.usageData_v0.data.numHostBufferUsed > 0) { + stream_config[inst].is_active = true; + num_active++; + } else { + stream_config[inst].is_active = false; + } + } + + return num_active; +} + +/** + * \brief Updates Napatech packet counters + * + * \param tv Pointer to ThreadVars structure + * \param hInfo Handle to Napatech Info Stream. + * \param hstat_stream Handle to Napatech Statistics stream + * \param num_streams the number of streams that are currently active + * \param stream_config array of stream configuration structures + * \param total_counters - cumulative count of all packets received. 
+ * \param dispatch_host, - Count of packets that were delivered to the host buffer + * \param dispatch_drop - count of packets that were dropped as a result of a rule + * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule + * \param is_inline - are we running in inline mode? + * \param enable_stream_stats - are per thread/stream statistics enabled. + * \param stream_counters - counters for each thread/stream configured. + * + * \return The number of active streams that were updated. + * + */ +static uint32_t UpdateStreamStats(ThreadVars *tv, NtInfoStream_t hInfo, NtStatStream_t hstat_stream, + uint16_t num_streams, NapatechStreamConfig stream_config[], PacketCounters total_counters, + PacketCounters dispatch_host, PacketCounters dispatch_drop, PacketCounters dispatch_fwd, + int is_inline, int enable_stream_stats, PacketCounters stream_counters[]) +{ + static uint64_t rxPktsStart[MAX_STREAMS] = { 0 }; + static uint64_t rxByteStart[MAX_STREAMS] = { 0 }; + static uint64_t dropPktStart[MAX_STREAMS] = { 0 }; + static uint64_t dropByteStart[MAX_STREAMS] = { 0 }; + + int status; + NtInfo_t hStreamInfo; + NtStatistics_t hStat; // Stat handle. + + /* Query the system to get the number of streams currently instantiated */ + hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; + if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + uint16_t num_active; + if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) { + /* None of the configured streams are active */ + return 0; + } + + /* At least one stream is active so proceed with the stats. */ + uint16_t inst_id = 0; + uint32_t stream_cnt = 0; + for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) { + while (inst_id < num_streams) { + if (stream_config[inst_id].is_active) { + break; + } else { + ++inst_id; + } + } + if (inst_id == num_streams) + break; + + /* Read usage data for the chosen stream ID */ + memset(&hStat, 0, sizeof(NtStatistics_t)); + hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + hStat.u.usageData_v0.streamid = (uint8_t)stream_config[inst_id].stream_id; + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return 0; + } + + uint16_t stream_id = stream_config[inst_id].stream_id; + if (stream_config[inst_id].is_active) { + uint64_t rx_pkts_total = 0; + uint64_t rx_byte_total = 0; + uint64_t drop_pkts_total = 0; + uint64_t drop_byte_total = 0; + + for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; + hbCount++) { + if (unlikely(stream_config[inst_id].initialized == false)) { + rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames; + rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes; + dropPktStart[stream_id] += + hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + dropByteStart[stream_id] += + hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes; + stream_config[inst_id].initialized = true; + } else { + rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames; + rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes; + drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames; + drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes; + } + } + + current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id]; + current_stats[stream_id].current_bytes = rx_byte_total - 
rxByteStart[stream_id]; + current_stats[stream_id].current_drop_packets = + drop_pkts_total - dropPktStart[stream_id]; + current_stats[stream_id].current_drop_bytes = + drop_byte_total - dropByteStart[stream_id]; + } + + if (enable_stream_stats) { + StatsSetUI64( + tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets); + StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes); + StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, + current_stats[stream_id].current_drop_packets); + StatsSetUI64(tv, stream_counters[inst_id].drop_byte, + current_stats[stream_id].current_drop_bytes); + } + + ++inst_id; + } + + uint32_t stream_id; + for (stream_id = 0; stream_id < num_streams; ++stream_id) { + +#ifndef NAPATECH_ENABLE_BYPASS + total_stats.current_packets += current_stats[stream_id].current_packets; + total_stats.current_bytes += current_stats[stream_id].current_bytes; +#endif /* NAPATECH_ENABLE_BYPASS */ + total_stats.current_drop_packets += current_stats[stream_id].current_drop_packets; + total_stats.current_drop_bytes += current_stats[stream_id].current_drop_bytes; + } + +#ifndef NAPATECH_ENABLE_BYPASS + StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets); + StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes); +#endif /* NAPATECH_ENABLE_BYPASS */ + + StatsSetUI64(tv, total_counters.drop_pkts, total_stats.current_drop_packets); + StatsSetUI64(tv, total_counters.drop_byte, total_stats.current_drop_bytes); + + total_stats.current_packets = 0; + total_stats.current_bytes = 0; + total_stats.current_drop_packets = 0; + total_stats.current_drop_bytes = 0; + + /* Read usage data for the chosen stream ID */ + memset(&hStat, 0, sizeof(NtStatistics_t)); + +#ifdef NAPATECH_ENABLE_BYPASS + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3; + hStat.u.query_v3.clear = 0; +#else /* NAPATECH_ENABLE_BYPASS */ + /* Older versions of the API have a different structure. 
*/ + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2; + hStat.u.query_v2.clear = 0; +#endif /* !NAPATECH_ENABLE_BYPASS */ + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + if (status == NT_STATUS_TIMEOUT) { + SCLogInfo("Statistics timed out - will retry next time."); + return 0; + } else { + NAPATECH_ERROR(status); + return 0; + } + } + +#ifdef NAPATECH_ENABLE_BYPASS + + int adapter = 0; + uint64_t total_dispatch_host_pkts = 0; + uint64_t total_dispatch_host_byte = 0; + uint64_t total_dispatch_drop_pkts = 0; + uint64_t total_dispatch_drop_byte = 0; + uint64_t total_dispatch_fwd_pkts = 0; + uint64_t total_dispatch_fwd_byte = 0; + + for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) { + total_dispatch_host_pkts += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts; + total_dispatch_host_byte += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets; + + total_dispatch_drop_pkts += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts; + total_dispatch_drop_byte += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets; + + total_dispatch_fwd_pkts += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts; + total_dispatch_fwd_byte += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets; + + total_stats.current_packets += + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts; + + total_stats.current_bytes = + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets + + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets; + } + + StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts); + StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte); + + StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts); + StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte); + + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts); + StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte); + } + + StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets); + StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes); + +#endif /* NAPATECH_ENABLE_BYPASS */ + + return num_active; +} + +/** + * \brief Statistics processing loop + * + * Instantiated on the stats thread. 
Periodically retrieves + * statistics from the Napatech card and updates the packet counters + * + * \param arg Pointer that is cast into a ThreadVars structure + */ +static void *NapatechStatsLoop(void *arg) +{ + ThreadVars *tv = (ThreadVars *)arg; + + int status; + NtInfoStream_t hInfo; + NtStatStream_t hstat_stream; + int is_inline = 0; + int enable_stream_stats = 0; + PacketCounters stream_counters[MAX_STREAMS]; + + if (ConfGetBool("napatech.inline", &is_inline) == 0) { + is_inline = 0; + } + + if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) { + /* default is "no" */ + enable_stream_stats = 0; + } + + NapatechStreamConfig stream_config[MAX_STREAMS]; + uint16_t stream_cnt = NapatechGetStreamConfig(stream_config); + + /* Open the info and Statistics */ + if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return NULL; + } + + if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return NULL; + } + + NtStatistics_t hStat; + memset(&hStat, 0, sizeof(NtStatistics_t)); + +#ifdef NAPATECH_ENABLE_BYPASS + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3; + hStat.u.query_v3.clear = 1; +#else /* NAPATECH_ENABLE_BYPASS */ + hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2; + hStat.u.query_v2.clear = 1; +#endif /* !NAPATECH_ENABLE_BYPASS */ + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return 0; + } + + PacketCounters total_counters; + memset(&total_counters, 0, sizeof(total_counters)); + + PacketCounters dispatch_host; + memset(&dispatch_host, 0, sizeof(dispatch_host)); + + PacketCounters dispatch_drop; + memset(&dispatch_drop, 0, sizeof(dispatch_drop)); + + PacketCounters dispatch_fwd; + memset(&dispatch_fwd, 0, sizeof(dispatch_fwd)); + + total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv); + dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv); + dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv); + if (is_inline) { + dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv); + } + + total_counters.byte = StatsRegisterCounter("napa_total.byte", tv); + dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv); + dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv); + if (is_inline) { + dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv); + } + + total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv); + total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv); + + if (enable_stream_stats) { + for (int i = 0; i < stream_cnt; ++i) { + char *pkts_buf = SCCalloc(1, 32); + if (unlikely(pkts_buf == NULL)) { + FatalError("Failed to allocate memory for NAPATECH stream counter."); + } + + snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id); + stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv); + + char *byte_buf = SCCalloc(1, 32); + if (unlikely(byte_buf == NULL)) { + FatalError("Failed to allocate memory for NAPATECH stream counter."); + } + snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id); + stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv); + + char *drop_pkts_buf = SCCalloc(1, 32); + if (unlikely(drop_pkts_buf == NULL)) { + FatalError("Failed to allocate memory for NAPATECH stream counter."); + } + snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id); + 
stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv); + + char *drop_byte_buf = SCCalloc(1, 32); + if (unlikely(drop_byte_buf == NULL)) { + FatalError("Failed to allocate memory for NAPATECH stream counter."); + } + snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id); + stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv); + } + } + +#ifdef NAPATECH_ENABLE_BYPASS + FlowStatsCounters flow_counters; + if (bypass_supported) { + flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv); + flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + + StatsSetupPrivate(tv); + + StatsSetUI64(tv, total_counters.pkts, 0); + StatsSetUI64(tv, total_counters.byte, 0); + StatsSetUI64(tv, total_counters.drop_pkts, 0); + StatsSetUI64(tv, total_counters.drop_byte, 0); + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + StatsSetUI64(tv, dispatch_host.pkts, 0); + StatsSetUI64(tv, dispatch_drop.pkts, 0); + + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.pkts, 0); + } + + StatsSetUI64(tv, dispatch_host.byte, 0); + StatsSetUI64(tv, dispatch_drop.byte, 0); + if (is_inline) { + StatsSetUI64(tv, dispatch_fwd.byte, 0); + } + + if (enable_stream_stats) { + for (int i = 0; i < stream_cnt; ++i) { + StatsSetUI64(tv, stream_counters[i].pkts, 0); + StatsSetUI64(tv, stream_counters[i].byte, 0); + StatsSetUI64(tv, stream_counters[i].drop_pkts, 0); + StatsSetUI64(tv, stream_counters[i].drop_byte, 0); + } + } + + StatsSetUI64(tv, flow_counters.active_bypass_flows, 0); + StatsSetUI64(tv, flow_counters.total_bypass_flows, 0); + UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + + uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream, stream_cnt, stream_config, + total_counters, dispatch_host, dispatch_drop, dispatch_fwd, is_inline, + enable_stream_stats, stream_counters); + + if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) { + SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt); + SCLogWarning("Some or all of the configured streams are not created. 
Proceeding with " + "active streams."); + } + + TmThreadsSetFlag(tv, THV_INIT_DONE | THV_RUNNING); + while (1) { + if (TmThreadsCheckFlag(tv, THV_KILL)) { + SCLogDebug("NapatechStatsLoop THV_KILL detected"); + break; + } + + UpdateStreamStats(tv, hInfo, hstat_stream, stream_cnt, stream_config, total_counters, + dispatch_host, dispatch_drop, dispatch_fwd, is_inline, enable_stream_stats, + stream_counters); + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + + StatsSyncCountersIfSignalled(tv); + usleep(1000000); + } + + /* CLEAN UP NT Resources and Close the info stream */ + if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return NULL; + } + + /* Close the statistics stream */ + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return NULL; + } + + SCLogDebug("Exiting NapatechStatsLoop"); + TmThreadsSetFlag(tv, THV_RUNNING_DONE); + TmThreadWaitForFlag(tv, THV_DEINIT); + TmThreadsSetFlag(tv, THV_CLOSED); + + return NULL; +} + +//#define MAX_STREAMS 256 +//#define HB_HIGHWATER 2048 //1982 + +/** + * \brief Tests whether a particular stream_id is actively registered + * + * \param stream_id - ID of the stream to look up + * \param num_registered - The total number of registered streams + * \param registered_streams - An array containing actively registered streams. + * + * \return Bool indicating is the specified stream is registered. + * + */ +static bool RegisteredStream( + uint16_t stream_id, uint16_t num_registered, NapatechStreamConfig registered_streams[]) +{ + for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) { + if (stream_id == registered_streams[reg_id].stream_id) { + return true; + } + } + return false; +} + +/** + * \brief Count the number of worker threads defined in the conf file. 
+ *
+ * \return - The number of worker threads defined by the configuration
+ */
+static uint32_t CountWorkerThreads(void)
+{
+    int worker_count = 0;
+
+    ConfNode *affinity;
+    ConfNode *root = ConfGetNode("threading.cpu-affinity");
+
+    if (root != NULL) {
+
+        TAILQ_FOREACH (affinity, &root->head, next) {
+            if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
+                    strcmp(affinity->val, "stream-cpu-set") == 0 ||
+                    strcmp(affinity->val, "reject-cpu-set") == 0 ||
+                    strcmp(affinity->val, "output-cpu-set") == 0) {
+                continue;
+            }
+
+            if (strcmp(affinity->val, "worker-cpu-set") == 0) {
+                ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
+                ConfNode *lnode;
+
+                enum CONFIG_SPECIFIER cpu_spec = CONFIG_SPECIFIER_UNDEFINED;
+
+                TAILQ_FOREACH (lnode, &node->head, next) {
+                    uint8_t start, end;
+                    char *end_str;
+                    if (strncmp(lnode->val, "all", 4) == 0) {
+                        /* check that the string in the config file is correctly specified */
+                        if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
+                            FatalError("Only one Napatech port specifier type allowed.");
+                        }
+                        cpu_spec = CONFIG_SPECIFIER_RANGE;
+                        worker_count = UtilCpuGetNumProcessorsConfigured();
+                    } else if ((end_str = strchr(lnode->val, '-'))) {
+                        /* check that the string in the config file is correctly specified */
+                        if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
+                            FatalError("Only one Napatech port specifier type allowed.");
+                        }
+                        cpu_spec = CONFIG_SPECIFIER_RANGE;
+
+                        if (StringParseUint8(&start, 10, end_str - lnode->val,
+                                    (const char *)lnode->val) < 0) {
+                            FatalError("Napatech invalid"
+                                       " worker range start: '%s'",
+                                    lnode->val);
+                        }
+                        if (StringParseUint8(&end, 10, 0, (const char *)(end_str + 1)) < 0) {
+                            FatalError("Napatech invalid"
+                                       " worker range end: '%s'",
+                                    (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
+                        }
+                        if (end < start) {
+                            FatalError("Napatech invalid"
+                                       " worker range start: '%d' is greater than end: '%d'",
+                                    start, end);
+                        }
+                        worker_count = end - start + 1;
+
+                    } else {
+                        /* check that the string in the config file is correctly specified */
+                        if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
+                            FatalError("Napatech port range specifiers cannot be combined with "
+                                       "individual stream specifiers.");
+                        }
+                        cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
+                        ++worker_count;
+                    }
+                }
+                break;
+            }
+        }
+    }
+    return worker_count;
+}
+
+/**
+ * \brief Reads and parses the stream configuration defined in the config file.
+ *
+ * \param stream_config - array to be filled in with active stream info.
+ *
+ * \return the number of streams configured or -1 if an error occurred
+ *
+ */
+int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
+{
+    int status;
+    char error_buffer[80]; // Error buffer
+    NtStatStream_t hstat_stream;
+    NtStatistics_t hStat; // Stat handle.
+ NtInfoStream_t info_stream; + NtInfo_t info; + uint16_t instance_cnt = 0; + int use_all_streams = 0; + int set_cpu_affinity = 0; + ConfNode *ntstreams; + uint16_t stream_id = 0; + uint8_t start = 0; + uint8_t end = 0; + + for (uint16_t i = 0; i < MAX_STREAMS; ++i) { + stream_config[i].stream_id = 0; + stream_config[i].is_active = false; + stream_config[i].initialized = false; + } + + if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) { + /* default is "no" */ + use_all_streams = 0; + } + + if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + if (use_all_streams) { + info.cmd = NT_INFO_CMD_READ_STREAM; + if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + while (instance_cnt < info.u.stream.data.count) { + + /* + * For each stream ID query the number of host-buffers used by + * the stream. If zero, then that streamID is not used; skip + * over it and continue until we get a streamID with a non-zero + * count of the host-buffers. + */ + memset(&hStat, 0, sizeof(NtStatistics_t)); + + /* Read usage data for the chosen stream ID */ + hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + hStat.u.usageData_v0.streamid = (uint8_t)stream_id; + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + /* Get the status code as text */ + NT_ExplainError(status, error_buffer, sizeof(error_buffer)); + SCLogError("NT_StatRead() failed: %s\n", error_buffer); + return -1; + } + + if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) { + ++stream_id; + continue; + } + + /* if we get here it is an active stream */ + stream_config[instance_cnt].stream_id = stream_id++; + stream_config[instance_cnt].is_active = true; + instance_cnt++; + } + + } else { + (void)ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity); + if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) { + start = 0; + end = CountWorkerThreads() - 1; + } else { + /* When not using the default streams we need to + * parse the array of streams from the conf */ + if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) { + SCLogError("Failed retrieving napatech.streams from Config"); + if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) { + SCLogError("if set-cpu-affinity: no in conf then napatech.streams must be " + "defined"); + } + exit(EXIT_FAILURE); + } + + /* Loop through all stream numbers in the array and register the devices */ + ConfNode *stream; + enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED; + instance_cnt = 0; + + TAILQ_FOREACH (stream, &ntstreams->head, next) { + + if (stream == NULL) { + SCLogError("Couldn't Parse Stream Configuration"); + return -1; + } + + char *end_str = strchr(stream->val, '-'); + if (end_str) { + if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) { + SCLogError("Only one Napatech stream range specifier allowed."); + return -1; + } + stream_spec = CONFIG_SPECIFIER_RANGE; + + if (StringParseUint8( + &start, 10, end_str - stream->val, (const char *)stream->val) < 0) { + FatalError("Napatech invalid " + "stream id start: '%s'", + stream->val); + } + if (StringParseUint8(&end, 10, 0, (const char *)(end_str + 1)) < 0) { + FatalError("Napatech invalid " + "stream id end: '%s'", + (end_str != NULL) ? 
(const char *)(end_str + 1) : "Null"); + } + } else { + if (stream_spec == CONFIG_SPECIFIER_RANGE) { + FatalError("Napatech range and individual specifiers cannot be combined."); + } + stream_spec = CONFIG_SPECIFIER_INDIVIDUAL; + if (StringParseUint8(&stream_config[instance_cnt].stream_id, 10, 0, + (const char *)stream->val) < 0) { + FatalError("Napatech invalid " + "stream id: '%s'", + stream->val); + } + start = stream_config[instance_cnt].stream_id; + end = stream_config[instance_cnt].stream_id; + } + } + } + + for (stream_id = start; stream_id <= end; ++stream_id) { + /* if we get here it is configured in the .yaml file */ + stream_config[instance_cnt].stream_id = stream_id; + + /* Check to see if it is an active stream */ + memset(&hStat, 0, sizeof(NtStatistics_t)); + + /* Read usage data for the chosen stream ID */ + hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + hStat.u.usageData_v0.streamid = (uint8_t)stream_config[instance_cnt].stream_id; + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) { + stream_config[instance_cnt].is_active = true; + } + instance_cnt++; + } + } + + /* Close the statistics stream */ + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return -1; + } + + return instance_cnt; +} + +static void *NapatechBufMonitorLoop(void *arg) +{ + ThreadVars *tv = (ThreadVars *)arg; + + NtInfo_t hStreamInfo; + NtStatistics_t hStat; // Stat handle. + NtInfoStream_t hInfo; + NtStatStream_t hstat_stream; + int status; // Status variable + + const uint32_t alertInterval = 25; + +#ifndef NAPATECH_ENABLE_BYPASS + uint32_t OB_fill_level[MAX_STREAMS] = { 0 }; + uint32_t OB_alert_level[MAX_STREAMS] = { 0 }; + uint32_t ave_OB_fill_level[MAX_STREAMS] = { 0 }; +#endif /* NAPATECH_ENABLE_BYPASS */ + + uint32_t HB_fill_level[MAX_STREAMS] = { 0 }; + uint32_t HB_alert_level[MAX_STREAMS] = { 0 }; + uint32_t ave_HB_fill_level[MAX_STREAMS] = { 0 }; + + /* Open the info and Statistics */ + if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + /* Read the info on all streams instantiated in the system */ + hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; + if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + NapatechStreamConfig registered_streams[MAX_STREAMS]; + int num_registered = NapatechGetStreamConfig(registered_streams); + if (num_registered == -1) { + exit(EXIT_FAILURE); + } + + TmThreadsSetFlag(tv, THV_INIT_DONE | THV_RUNNING); + while (1) { + if (TmThreadsCheckFlag(tv, THV_KILL)) { + SCLogDebug("NapatechBufMonitorLoop THV_KILL detected"); + break; + } + + usleep(200000); + + /* Read the info on all streams instantiated in the system */ + hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM; + if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + char pktCntStr[4096]; + memset(pktCntStr, 0, sizeof(pktCntStr)); + + uint32_t stream_id = 0; + uint32_t stream_cnt = 0; + uint32_t num_streams = hStreamInfo.u.stream.data.count; + + for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) { + + do { + + /* Read usage data 
for the chosen stream ID */ + hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + hStat.u.usageData_v0.streamid = (uint8_t)stream_id; + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) { + ++stream_id; + continue; + } + } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0); + + if (RegisteredStream(stream_id, num_registered, registered_streams)) { + +#ifndef NAPATECH_ENABLE_BYPASS + ave_OB_fill_level[stream_id] = 0; +#endif /* NAPATECH_ENABLE_BYPASS */ + + ave_HB_fill_level[stream_id] = 0; + + for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; + hb_count++) { + +#ifndef NAPATECH_ENABLE_BYPASS + OB_fill_level[hb_count] = + ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) / + hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size); + + if (OB_fill_level[hb_count] > 100) { + OB_fill_level[hb_count] = 100; + } +#endif /* NAPATECH_ENABLE_BYPASS */ + uint32_t bufSize = + hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024 + + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024 + + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024 - HB_HIGHWATER; + + HB_fill_level[hb_count] = + (uint32_t)((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / + 1024) / + bufSize); + +#ifndef NAPATECH_ENABLE_BYPASS + ave_OB_fill_level[stream_id] += OB_fill_level[hb_count]; +#endif /* NAPATECH_ENABLE_BYPASS */ + + ave_HB_fill_level[stream_id] += HB_fill_level[hb_count]; + } + +#ifndef NAPATECH_ENABLE_BYPASS + ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed; +#endif /* NAPATECH_ENABLE_BYPASS */ + + ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed; + + /* Host Buffer Fill Level warnings... */ + if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) { + + while (ave_HB_fill_level[stream_id] >= + HB_alert_level[stream_id] + alertInterval) { + HB_alert_level[stream_id] += alertInterval; + } + SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%", stream_id, + ave_HB_fill_level[stream_id] - 1); + } + + if (HB_alert_level[stream_id] > 0) { + if ((ave_HB_fill_level[stream_id] <= + (HB_alert_level[stream_id] - alertInterval))) { + SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%", stream_id, + ave_HB_fill_level[stream_id]); + + while (ave_HB_fill_level[stream_id] <= + (HB_alert_level[stream_id] - alertInterval)) { + if ((HB_alert_level[stream_id]) > 0) { + HB_alert_level[stream_id] -= alertInterval; + } else + break; + } + } + } + +#ifndef NAPATECH_ENABLE_BYPASS + /* On Board SDRAM Fill Level warnings... 
*/ + if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) { + while (ave_OB_fill_level[stream_id] >= + OB_alert_level[stream_id] + alertInterval) { + OB_alert_level[stream_id] += alertInterval; + } + SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%", stream_id, + ave_OB_fill_level[stream_id]); + } + + if (OB_alert_level[stream_id] > 0) { + if ((ave_OB_fill_level[stream_id] <= + (OB_alert_level[stream_id] - alertInterval))) { + SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%", stream_id, + ave_OB_fill_level[stream_id]); + + while (ave_OB_fill_level[stream_id] <= + (OB_alert_level[stream_id] - alertInterval)) { + if ((OB_alert_level[stream_id]) > 0) { + OB_alert_level[stream_id] -= alertInterval; + } else + break; + } + } + } +#endif /* NAPATECH_ENABLE_BYPASS */ + } + ++stream_id; + } + } + + if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + /* Close the statistics stream */ + if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + SCLogDebug("Exiting NapatechStatsLoop"); + TmThreadsSetFlag(tv, THV_RUNNING_DONE); + TmThreadWaitForFlag(tv, THV_DEINIT); + TmThreadsSetFlag(tv, THV_CLOSED); + + return NULL; +} + +void NapatechStartStats(void) +{ + /* Creates the Statistic threads */ + ThreadVars *stats_tv = + TmThreadCreate("NapatechStats", NULL, NULL, NULL, NULL, "custom", NapatechStatsLoop, 0); + + if (stats_tv == NULL) { + FatalError("Error creating a thread for NapatechStats - Killing engine."); + } + + if (TmThreadSpawn(stats_tv) != 0) { + FatalError("Failed to spawn thread for NapatechStats - Killing engine."); + } + +#ifdef NAPATECH_ENABLE_BYPASS + if (bypass_supported) { + SCLogInfo("Napatech bypass functionality enabled."); + } +#endif /* NAPATECH_ENABLE_BYPASS */ + + ThreadVars *buf_monitor_tv = TmThreadCreate( + "NapatechBufMonitor", NULL, NULL, NULL, NULL, "custom", NapatechBufMonitorLoop, 0); + + if (buf_monitor_tv == NULL) { + FatalError("Error creating a thread for NapatechBufMonitor - Killing engine."); + } + + if (TmThreadSpawn(buf_monitor_tv) != 0) { + FatalError("Failed to spawn thread for NapatechBufMonitor - Killing engine."); + } +} + +bool NapatechSetupNuma(uint32_t stream, uint32_t numa) +{ + uint32_t status = 0; + static NtConfigStream_t hconfig; + + char ntpl_cmd[64]; + snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream); + + NtNtplInfo_t ntpl_info; + + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return false; + } + + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == + NT_SUCCESS) { + status = ntpl_info.ntplId; + + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + return false; + } + + return status; +} + +static uint32_t NapatechSetHashmode(void) +{ + uint32_t status = 0; + const char *hash_mode; + static NtConfigStream_t hconfig; + char ntpl_cmd[64]; + NtNtplInfo_t ntpl_info; + + uint32_t filter_id = 0; + + /* Get the hashmode from the conf file. 
*/ + ConfGet("napatech.hashmode", &hash_mode); + + snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode); + + /* Issue the NTPL command */ + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + return false; + } + + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == + NT_SUCCESS) { + filter_id = ntpl_info.ntplId; + SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status); + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + status = 0; + } + + return filter_id; +} + +static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[]) +{ + NtStatistics_t hStat; // Stat handle. + NtStatStream_t hstat_stream; + int status; // Status variable + + for (int i = 0; i < MAX_HOSTBUFFERS; ++i) + stream_numas[i] = -1; + + if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + char pktCntStr[4096]; + memset(pktCntStr, 0, sizeof(pktCntStr)); + + /* Read usage data for the chosen stream ID */ + hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0; + hStat.u.usageData_v0.streamid = (uint8_t)stream_id; + + if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) { + stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode; + } + + return hStat.u.usageData_v0.data.numHostBufferUsed; +} + +static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd) +{ + int status = 0; + int local_filter_id = 0; + + NtNtplInfo_t ntpl_info; + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == + NT_SUCCESS) { + SCLogConfig( + "NTPL filter assignment \"%s\" returned filter id %4d", ntpl_cmd, local_filter_id); + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + exit(EXIT_FAILURE); + } + + return local_filter_id; +} + +uint32_t NapatechDeleteFilters(void) +{ + uint32_t status = 0; + static NtConfigStream_t hconfig; + char ntpl_cmd[64]; + NtNtplInfo_t ntpl_info; + + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + snprintf(ntpl_cmd, 64, "delete = all"); + if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == + NT_SUCCESS) { + status = ntpl_info.ntplId; + } else { + NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status); + status = 0; + } + + NT_ConfigClose(hconfig); + + return status; +} + +uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream) +{ +#define PORTS_SPEC_SIZE 64 + + struct ports_spec_s { + uint8_t first[MAX_PORTS]; + uint8_t second[MAX_PORTS]; + bool all; + char str[PORTS_SPEC_SIZE]; + } ports_spec; + + ports_spec.all = false; + + ConfNode *ntports; + int iteration = 0; + int status = 0; + NtConfigStream_t hconfig; + char ntpl_cmd[512]; + int is_inline = 0; +#ifdef NAPATECH_ENABLE_BYPASS + int is_span_port[MAX_PORTS] = { 0 }; +#endif + + char span_ports[128]; + memset(span_ports, 0, sizeof(span_ports)); + + if (ConfGetBool("napatech.inline", &is_inline) == 0) { + is_inline = 0; + } + + NapatechSetHashmode(); + + if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) { + NAPATECH_ERROR(status); + exit(EXIT_FAILURE); + } + + if (first_stream == last_stream) { + snprintf( + ntpl_cmd, sizeof(ntpl_cmd), "Setup[state=inactive] = StreamId == %d", first_stream); + } else { + snprintf(ntpl_cmd, 
sizeof(ntpl_cmd), "Setup[state=inactive] = StreamId == (%d..%d)",
+                first_stream, last_stream);
+    }
+    NapatechSetFilter(hconfig, ntpl_cmd);
+
+#ifdef NAPATECH_ENABLE_BYPASS
+    if (NapatechUseHWBypass()) {
+        SCLogInfo("Napatech Hardware Bypass enabled.");
+    }
+#else
+    if (NapatechUseHWBypass()) {
+        SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
+        exit(EXIT_FAILURE);
+    } else {
+        SCLogInfo("Napatech Hardware Bypass disabled.");
+    }
+#endif
+
+    if (is_inline) {
+        SCLogInfo("Napatech configured for inline mode.");
+    } else {
+
+        SCLogInfo("Napatech configured for passive (non-inline) mode.");
+    }
+
+    /* When not using the default streams we need to parse
+     * the array of streams from the conf
+     */
+    if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
+        FatalError("Failed retrieving napatech.ports from Conf");
+    }
+
+    /* Loop through all ports in the array */
+    ConfNode *port;
+    enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
+
+    if (NapatechUseHWBypass()) {
+        SCLogInfo("Listening on the following Napatech ports:");
+    }
+    /* Build the NTPL command using values in the config file. */
+    TAILQ_FOREACH (port, &ntports->head, next) {
+        if (port == NULL) {
+            FatalError("Couldn't Parse Port Configuration");
+        }
+
+        if (NapatechUseHWBypass()) {
+#ifdef NAPATECH_ENABLE_BYPASS
+            if (strchr(port->val, '-')) {
+                stream_spec = CONFIG_SPECIFIER_RANGE;
+
+                if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
+                    FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
+                            port->val);
+                }
+
+                if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
+                            strchr(port->val, '-') + 1) == -1) {
+                    FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
+                            port->val);
+                }
+
+                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
+                    if (is_inline) {
+                        FatalError(
+                                "Error with napatech.ports in conf file. When running in inline "
+                                "mode the two ports specifying a segment must be different.");
+                    } else {
+                        /* SPAN port configuration */
+                        is_span_port[ports_spec.first[iteration]] = 1;
+
+                        if (strlen(span_ports) == 0) {
+                            snprintf(span_ports, sizeof(span_ports), "%d",
+                                    ports_spec.first[iteration]);
+                        } else {
+                            char temp[16];
+                            snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
+                            strlcat(span_ports, temp, sizeof(span_ports));
+                        }
+                    }
+                }
+
+                if (NapatechGetAdapter(ports_spec.first[iteration]) !=
+                        NapatechGetAdapter(ports_spec.second[iteration])) {
+                    SCLogError("Invalid napatech.ports specification in conf file.");
+                    SCLogError("Two ports on a segment must reside on the same adapter. port %d "
+                               "is on adapter %d, port %d is on adapter %d.",
+                            ports_spec.first[iteration],
+                            NapatechGetAdapter(ports_spec.first[iteration]),
+                            ports_spec.second[iteration],
+                            NapatechGetAdapter(ports_spec.second[iteration]));
+                    exit(EXIT_FAILURE);
+                }
+
+                NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
+                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
+                    SCLogInfo("    span_port: %d", ports_spec.first[iteration]);
+                } else {
+                    SCLogInfo("    %s: %d - %d", is_inline ? "inline_ports" : "tap_ports",
+                            ports_spec.first[iteration], ports_spec.second[iteration]);
+                }
+
+                if (iteration == 0) {
+                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
+                        snprintf(ports_spec.str, sizeof(ports_spec.str), "%d",
+                                ports_spec.first[iteration]);
+                    } else {
+                        snprintf(ports_spec.str, sizeof(ports_spec.str), "%d,%d",
+                                ports_spec.first[iteration], ports_spec.second[iteration]);
+                    }
+                } else {
+                    char temp[16];
+                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
+                        snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
+                    } else {
+                        snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration],
+                                ports_spec.second[iteration]);
+                    }
+                    strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
+                }
+            } else {
+                FatalError("When using hardware flow bypass ports must be specified as segments. "
+                           "E.g. ports: [0-1, 0-2]");
+            }
+#endif
+        } else { // !NapatechUseHWBypass()
+            if (strncmp(port->val, "all", 3) == 0) {
+                /* check that the string in the config file is correctly specified */
+                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
+                    FatalError("Only one Napatech port specifier type is allowed.");
+                }
+                stream_spec = CONFIG_SPECIFIER_RANGE;
+
+                ports_spec.all = true;
+                snprintf(ports_spec.str, sizeof(ports_spec.str), "all");
+            } else if (strchr(port->val, '-')) {
+                /* check that the string in the config file is correctly specified */
+                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
+                    FatalError("Only one Napatech port specifier is allowed when hardware bypass "
+                               "is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
+                }
+                stream_spec = CONFIG_SPECIFIER_RANGE;
+
+                if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
+                    FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
+                            port->val);
+                }
+
+                if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
+                            strchr(port->val, '-') + 1) == -1) {
+                    FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
+                            port->val);
+                }
+
+                snprintf(ports_spec.str, sizeof(ports_spec.str), "(%d..%d)",
+                        ports_spec.first[iteration], ports_spec.second[iteration]);
+            } else {
+                /* check that the string in the config file is correctly specified */
+                if (stream_spec == CONFIG_SPECIFIER_RANGE) {
+                    FatalError("Napatech port range specifiers cannot be combined with individual "
+                               "stream specifiers.");
+                }
+                stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
+
+                if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
+                    FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
+                            port->val);
+                }
+
+                /* Determine the ports to use on the NTPL assign statement*/
+                if (iteration == 0) {
+                    snprintf(ports_spec.str, sizeof(ports_spec.str), "%s", port->val);
+                } else {
+                    strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
+                    strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
+                }
+            }
+        } // if !NapatechUseHWBypass()
+        ++iteration;
+    } /* TAILQ_FOREACH */
+
+#ifdef NAPATECH_ENABLE_BYPASS
+    if (bypass_supported) {
+        if (is_inline) {
+            char inline_setup_cmd[512];
+            if (first_stream == last_stream) {
+                snprintf(inline_setup_cmd, sizeof(ntpl_cmd),
+                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = "
+                        "StreamId == %d",
+                        ports_spec.str, first_stream);
+            } else {
+                snprintf(inline_setup_cmd, sizeof(ntpl_cmd),
+                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = "
+                        "StreamId == (%d..%d)",
+                        ports_spec.str, first_stream, last_stream);
+            }
+
NapatechSetFilter(hconfig, inline_setup_cmd); + } + /* Build the NTPL command */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]]= %s%s", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]" + "]= %s%s and (Layer3Protocol==IPV4)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]" + "]= %s%s and (Layer4Protocol==UDP)", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + + if (strlen(span_ports) > 0) { + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;" + "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=" + "Layer4Header[0]" + "]= port==%s", + first_stream, last_stream, span_ports); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "KeyType[name=KT%u]={sw_32_32,sw_16_16}", + NAPATECH_KEYTYPE_IPV4); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/" + "32,Layer4Header[0]/16/16)", + NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "KeyType[name=KT%u]={32,32,16,16}", + NAPATECH_KEYTYPE_IPV4_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(" + "Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)", + NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + /* IPv6 5tuple for inline and tap ports */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "KeyType[name=KT%u]={sw_128_128,sw_16_16}", + NAPATECH_KEYTYPE_IPV6); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/" + "128,Layer4Header[0]/16/16)", + NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6); + NapatechSetFilter(hconfig, ntpl_cmd); + + /* IPv6 5tuple for SPAN Ports */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "KeyType[name=KT%u]={128,128,16,16}", + 
NAPATECH_KEYTYPE_IPV6_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(" + "Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)", + NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN); + NapatechSetFilter(hconfig, ntpl_cmd); + + int pair; + char ports_ntpl_a[64]; + char ports_ntpl_b[64]; + memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a)); + memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b)); + + for (pair = 0; pair < iteration; ++pair) { + char port_str[8]; + + if (!is_span_port[ports_spec.first[pair]]) { + snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", + ports_spec.first[pair]); + strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a)); + + snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", + ports_spec.second[pair]); + strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b)); + } + } + + if (strlen(ports_ntpl_a) > 0) { + /* This is the assign for dropping upstream traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_ntpl_a, NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(ports_ntpl_b) > 0) { + /* This is the assign for dropping downstream traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_ntpl_b, // ports_spec.str, + NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4, NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(span_ports) > 0) { + /* This is the assign for dropping SPAN Port traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u)==%u)", + span_ports, NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (is_inline) { + for (pair = 0; pair < iteration; ++pair) { + /* This is the assignment for forwarding traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(" + "Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_spec.second[pair], ports_spec.first[pair], NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(" + "Layer3Protocol==IPV4)and(port == " + "%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_spec.first[pair], ports_spec.second[pair], NAPATECH_KEYTYPE_IPV4, + NAPATECH_KEYTYPE_IPV4, NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + } + } + + if (strlen(ports_ntpl_a) > 0) { + /* This is the assign for dropping upstream traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_ntpl_a, NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(ports_ntpl_b) > 0) { + /* This is the assign for dropping downstream traffic */ + 
snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)", + ports_ntpl_b, // ports_spec.str, + NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6, NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (strlen(span_ports) > 0) { + /* This is the assign for dropping SPAN Port traffic */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port " + "== %s)and(Key(KDEF%u,KeyID=%u)==%u)", + span_ports, NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN, + NAPATECH_FLOWTYPE_DROP); + NapatechSetFilter(hconfig, ntpl_cmd); + } + + if (is_inline) { + for (pair = 0; pair < iteration; ++pair) { + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(" + "Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)", + ports_spec.second[pair], ports_spec.first[pair], NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + + snprintf(ntpl_cmd, sizeof(ntpl_cmd), + "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(" + "Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=" + "swap)==%u)", + ports_spec.first[pair], ports_spec.second[pair], NAPATECH_KEYTYPE_IPV6, + NAPATECH_KEYTYPE_IPV6, NAPATECH_FLOWTYPE_PASS); + NapatechSetFilter(hconfig, ntpl_cmd); + } + } + } else { + if (is_inline) { + FatalError("Napatech Inline operation not supported by this FPGA version."); + } + + if (NapatechIsAutoConfigEnabled()) { + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s", + first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + } + } + +#else /* NAPATECH_ENABLE_BYPASS */ + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s", first_stream, + last_stream, ports_spec.all ? "" : "port==", ports_spec.str); + NapatechSetFilter(hconfig, ntpl_cmd); + +#endif /* !NAPATECH_ENABLE_BYPASS */ + + SCLogConfig("Host-buffer NUMA assignments: "); + int numa_nodes[MAX_HOSTBUFFERS]; + uint32_t stream_id; + for (stream_id = first_stream; stream_id < last_stream; ++stream_id) { + char temp1[256]; + char temp2[256]; + + uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes); + + snprintf(temp1, 256, " stream %d: ", stream_id); + + for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) { + snprintf(temp2, 256, "%d ", numa_nodes[hb_id]); + strlcat(temp1, temp2, sizeof(temp1)); + } + + SCLogConfig("%s", temp1); + } + + if (first_stream == last_stream) { + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "Setup[state=active] = StreamId == %d", first_stream); + } else { + snprintf(ntpl_cmd, sizeof(ntpl_cmd), "Setup[state=active] = StreamId == (%d..%d)", + first_stream, last_stream); + } + NapatechSetFilter(hconfig, ntpl_cmd); + + NT_ConfigClose(hconfig); + + return status; +} diff --git a/plugins/napatech/util-napatech.h b/plugins/napatech/util-napatech.h new file mode 100644 index 0000000000..e30deb29a2 --- /dev/null +++ b/plugins/napatech/util-napatech.h @@ -0,0 +1,115 @@ +/* Copyright (C) 2017 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+/**
+ * \file
+ *
+ * \author Phil Young
+ *
+ */
+#ifndef SURICATA_UTIL_NAPATECH_H
+#define SURICATA_UTIL_NAPATECH_H
+
+#include <nt.h>
+
+typedef struct NapatechPacketVars_ {
+    uint64_t stream_id;
+    NtNetBuf_t nt_packet_buf;
+    NtNetStreamRx_t rx_stream;
+    NtFlowStream_t flow_stream;
+    ThreadVars *tv;
+#ifdef NAPATECH_ENABLE_BYPASS
+    NtDyn3Descr_t *dyn3;
+    int bypass;
+#endif
+} NapatechPacketVars;
+
+typedef struct NapatechStreamConfig_ {
+    uint8_t stream_id;
+    bool is_active;
+    bool initialized;
+} NapatechStreamConfig;
+
+typedef struct NapatechCurrentStats_ {
+    uint64_t current_packets;
+    uint64_t current_bytes;
+    uint64_t current_drop_packets;
+    uint64_t current_drop_bytes;
+} NapatechCurrentStats;
+
+#define MAX_HOSTBUFFERS 8
+#define MAX_STREAMS     256
+#define MAX_PORTS       80
+#define MAX_ADAPTERS    8
+#define HB_HIGHWATER    2048 // 1982
+
+extern void NapatechStartStats(void);
+
+#define NAPATECH_ERROR(status)                                             \
+    {                                                                      \
+        char errorBuffer[1024];                                            \
+        NT_ExplainError((status), errorBuffer, sizeof(errorBuffer) - 1);   \
+        SCLogError("Napatech Error: %s", errorBuffer);                     \
+    }
+
+#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)                   \
+    {                                                                      \
+        char errorBuffer[1024];                                            \
+        NT_ExplainError(status, errorBuffer, sizeof(errorBuffer) - 1);     \
+        SCLogError(" NTPL failed: %s", errorBuffer);                       \
+        SCLogError(" cmd: %s", ntpl_cmd);                                  \
+        if (strncmp(ntpl_info.u.errorData.errBuffer[0], "", 256) != 0)     \
+            SCLogError(" %s", ntpl_info.u.errorData.errBuffer[0]);         \
+        if (strncmp(ntpl_info.u.errorData.errBuffer[1], "", 256) != 0)     \
+            SCLogError(" %s", ntpl_info.u.errorData.errBuffer[1]);         \
+        if (strncmp(ntpl_info.u.errorData.errBuffer[2], "", 256) != 0)     \
+            SCLogError(" %s", ntpl_info.u.errorData.errBuffer[2]);         \
+    }
+
+// #define ENABLE_NT_DEBUG
+#ifdef ENABLE_NT_DEBUG
+void NapatechPrintIP(uint32_t address);
+
+#define NAPATECH_DEBUG(...) printf(__VA_ARGS__)
+#define NAPATECH_PRINTIP(a) NapatechPrintIP(a)
+#else
+#define NAPATECH_DEBUG(...)
+#define NAPATECH_PRINTIP(a)
+#endif
+
+NapatechCurrentStats NapatechGetCurrentStats(uint16_t id);
+int NapatechGetStreamConfig(NapatechStreamConfig stream_config[]);
+bool NapatechSetupNuma(uint32_t stream, uint32_t numa);
+uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream);
+uint32_t NapatechDeleteFilters(void);
+
+#ifdef NAPATECH_ENABLE_BYPASS
+
+/* */
+#define NAPATECH_KEYTYPE_IPV4      3
+#define NAPATECH_KEYTYPE_IPV4_SPAN 4
+#define NAPATECH_KEYTYPE_IPV6      5
+#define NAPATECH_KEYTYPE_IPV6_SPAN 6
+#define NAPATECH_FLOWTYPE_DROP     7
+#define NAPATECH_FLOWTYPE_PASS     8
+
+int NapatechVerifyBypassSupport(void);
+int NapatechGetNumAdapters(void);
+
+int NapatechIsBypassSupported(void);
+
+#endif /* NAPATECH_ENABLE_BYPASS */
+#endif /* SURICATA_UTIL_NAPATECH_H */
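
For reviewers new to this plugin, the utility functions declared in util-napatech.h are the surface that the runmode code drives: NapatechGetStreamConfig() discovers which host-buffer streams are configured and active, NapatechSetupTraffic() issues the NTPL assignments that steer traffic from the ports listed under napatech.ports into a stream range, NapatechDeleteFilters() removes those assignments again, and NapatechStartStats() spawns the statistics and host-buffer monitor threads shown earlier. The sketch below is illustrative only: the real initialization lives in runmode-napatech.c (not part of this hunk), the helper name NapatechExampleSetup is invented for the example, and the assumption that the configured stream ids form one contiguous range is mine, not the patch's.

```c
/* Illustrative sketch only -- not part of the patch. Shows a plausible call
 * order for the helpers declared in util-napatech.h. */
#include "suricata-common.h"
#include "util-napatech.h"

static void NapatechExampleSetup(void)
{
    NapatechStreamConfig configs[MAX_STREAMS];

    /* Discover the streams named in napatech.streams (or all active streams
     * when napatech.use-all-streams is set). */
    int stream_cnt = NapatechGetStreamConfig(configs);
    if (stream_cnt <= 0)
        return;

    /* Assumed here: the configured stream ids form one contiguous range. */
    uint32_t first_stream = configs[0].stream_id;
    uint32_t last_stream = configs[stream_cnt - 1].stream_id;

    /* Remove any existing NTPL assignments ("delete = all"), then steer the
     * ports listed under napatech.ports into the stream range. */
    NapatechDeleteFilters();
    NapatechSetupTraffic(first_stream, last_stream);

    /* Spawn the NapatechStats and NapatechBufMonitor threads. */
    NapatechStartStats();
}
```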