diff --git a/oisf.yaml b/oisf.yaml
index a8fd13eb55..88e3d6cf65 100644
--- a/oisf.yaml
+++ b/oisf.yaml
@@ -3,6 +3,11 @@
 # overridden with the -l command line parameter.
 default-log-dir: /var/log/eidps
 
+defrag:
+  max-frags: 65535
+  prealloc: yes
+  timeout: 60
+
 # Logging configuration. This is not about logging IDS alerts, but
 # IDS output about what its doing, errors, etc.
 logging:
diff --git a/src/Makefile.am b/src/Makefile.am
index d2610ccfe9..4e4a76dba1 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -125,7 +125,9 @@ app-layer-http.c app-layer-http.h \
 app-layer-tls.c app-layer-tls.h \
 app-layer-protos.h \
 conf.c conf.h \
-conf-yaml-loader.c conf-yaml-loader.h
+conf-yaml-loader.c conf-yaml-loader.h \
+util-fix_checksum.c util-fix_checksum.h \
+defrag.c defrag.h
 
 # set the include path found by configure
 INCLUDES= $(all_includes)
diff --git a/src/decode-ipv4.c b/src/decode-ipv4.c
index d9f3494c4f..b14d7f2b05 100644
--- a/src/decode-ipv4.c
+++ b/src/decode-ipv4.c
@@ -9,6 +9,7 @@
 #include "decode.h"
 #include "decode-ipv4.h"
 #include "decode-events.h"
+#include "defrag.h"
 #include "util-unittest.h"
 #include "util-debug.h"
@@ -565,23 +566,23 @@ void DecodeIPV4(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt,
         case IPPROTO_IP:
             /* check PPP VJ uncompressed packets and decode tcp dummy */
             if(p->ppph != NULL && ntohs(p->ppph->protocol) == PPP_VJ_UCOMP) {
-                return(DecodeTCP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
-                                 IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq));
+                DecodeTCP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
+                          IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq);
             }
             break;
         case IPPROTO_TCP:
-            return(DecodeTCP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
-                             IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq));
+            DecodeTCP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
+                      IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq);
             break;
         case IPPROTO_UDP:
             //printf("DecodeIPV4: next layer is UDP\n");
-            return(DecodeUDP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
-                             IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq));
+            DecodeUDP(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
+                      IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq);
             break;
         case IPPROTO_ICMP:
             //printf("DecodeIPV4: next layer is ICMP\n");
-            return(DecodeICMPV4(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
-                                IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq));
+            DecodeICMPV4(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
+                         IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq);
             break;
         case IPPROTO_IPV6:
             {
@@ -604,11 +605,25 @@ void DecodeIPV4(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt,
                 break;
             }
         case IPPROTO_GRE:
-            return(DecodeGRE(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
-                             IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq));
+            DecodeGRE(tv, dtv, p, pkt + IPV4_GET_HLEN(p),
+                      IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p), pq);
            break;
     }
 
+    /* If a fragment, pass off for re-assembly. */
+    if (IPV4_GET_IPOFFSET(p) > 0 || IPV4_GET_MF(p) == 1) {
+        Packet *rp = Defrag4(tv, NULL, p);
+        if (rp != NULL) {
+            /* Got re-assembled packet, re-run through decoder. */
+            DecodeIPV4(tv, dtv, rp, rp->pkt, rp->pktlen, pq);
+            PacketEnqueue(pq, rp);
+
+            /* Not really a tunnel packet, but we're piggybacking that
+             * functionality for now. */
+            SET_TUNNEL_PKT(p);
+        }
+    }
+
     return;
 }
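The decode-ipv4.c hunk above is what drives the new module: after the next-layer switch, any packet that is part of a fragmented datagram is handed to Defrag4(), and a successfully re-assembled packet is re-injected into the decoder. The fragment test itself follows the IPv4 rules: a packet is a fragment when its fragment offset is non-zero or the More Fragments (MF) flag is set. As a minimal, self-contained sketch of that check (plain C with a hypothetical helper over the raw ip_off field, rather than this tree's IPV4_GET_* macros):

#include <stdint.h>
#include <arpa/inet.h>

/* IPv4 ip_off word: flags in the top 3 bits, 13-bit offset in
 * 8-byte units below them. */
#define IP_OFFMASK 0x1fff
#define IP_MF      0x2000

/* Returns 1 if this header belongs to a fragment that needs
 * re-assembly, 0 for a whole datagram. */
static int IsFragment(uint16_t ip_off_net)
{
    uint16_t ip_off = ntohs(ip_off_net);
    return (ip_off & IP_OFFMASK) > 0 || (ip_off & IP_MF) != 0;
}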
diff --git a/src/defrag.c b/src/defrag.c
new file mode 100644
index 0000000000..986ff3e465
--- /dev/null
+++ b/src/defrag.c
@@ -0,0 +1,1429 @@
+/* Copyright (c) 2009 Open Information Security Foundation */
+
+/**
+ * \file
+ *
+ * Defragmentation module.
+ *
+ * \author Endace Technology Limited, Jason Ish
+ *
+ * References:
+ *   - RFC 815
+ *   - OpenBSD PF's IP normalization (pf_norm.c)
+ *
+ * \todo pool for frag packet storage
+ * \todo policy bsd-right
+ * \todo profile hash function
+ */
+
+#include 
+
+#include "queue.h"
+
+#include "eidps.h"
+#include "threads.h"
+#include "conf.h"
+#include "util-hashlist.h"
+#include "util-pool.h"
+#include "util-print.h"
+#include "util-debug.h"
+#include "util-fix_checksum.h"
+
+#ifdef UNITTESTS
+#include "util-unittest.h"
+#endif
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DEFAULT_DEFRAG_HASH_SIZE 0xffff
+
+/**
+ * Default timeout (in seconds) before a defragmentation tracker will
+ * be released.
+ */
+#define TIMEOUT_DEFAULT 60
+
+/**
+ * Maximum allowed timeout, 24 hours.
+ */
+#define TIMEOUT_MAX (60 * 60 * 24)
+
+/**
+ * Minimum allowed timeout, 1 second.
+ */
+#define TIMEOUT_MIN 1
+
+/** Fragment reassembly policies. */
+enum defrag_policies {
+    POLICY_FIRST = 0,
+    POLICY_LAST,
+    POLICY_BSD,
+    POLICY_BSD_RIGHT,
+    POLICY_LINUX,
+    POLICY_WINDOWS,
+    POLICY_SOLARIS,
+
+    POLICY_DEFAULT = POLICY_BSD,
+};
+
+/**
+ * A context for an instance of a fragmentation re-assembler, in case
+ * we ever need more than one.
+ */
+typedef struct _DefragContext {
+    uint64_t ip4_frags; /**< Number of IPv4 fragments seen. */
+    uint64_t ip6_frags; /**< Number of IPv6 fragments seen. */
+
+    HashListTable *frag_table; /**< Hash (list) table of fragment trackers. */
+    pthread_mutex_t frag_table_lock;
+
+    Pool *tracker_pool; /**< Pool of trackers. */
+    pthread_mutex_t tracker_pool_lock;
+
+    Pool *frag_pool; /**< Pool of fragments. */
+    pthread_mutex_t frag_pool_lock;
+
+    time_t timeout; /**< Default timeout. */
+
+    uint8_t default_policy; /**< Default policy. */
+
+} DefragContext;
+
+/**
+ * Storage for an individual fragment.
+ */
+typedef struct _frag {
+    DefragContext *dc; /**< The defragmentation context this frag was
+                        * allocated under. */
+
+    uint16_t offset; /**< The offset of this fragment, already
+                      * multiplied by 8. */
+
+    uint16_t len; /**< The length of this fragment. */
+
+    uint8_t hlen; /**< The length of this fragment's IP header. */
+
+    uint8_t more_frags; /**< More frags? */
+
+    uint8_t *pkt; /**< The actual packet. */
+
+    TAILQ_ENTRY(_frag) next; /**< Pointer to next fragment for tailq. */
+} Frag;
+
+/**
+ * A defragmentation tracker. Used to track fragments that make up a
+ * single packet.
+ */
+typedef struct _DefragTracker {
+    DefragContext *dc; /**< The defragmentation context this tracker
+                        * was allocated under. */
+
+    uint8_t policy; /**< Reassembly policy this tracker will use. */
+
+    struct timeval timeout; /**< When this tracker will timeout. */
+
+    uint8_t family; /**< Address family for this tracker, AF_INET or
+                     * AF_INET6. */
+
+    uint32_t id; /**< IP ID for this tracker. 32 bits for IPv6, 16
+                  * for IPv4. */
+
+    Address src_addr; /**< Source address for this tracker. */
+    Address dst_addr; /**< Destination address for this tracker. */
+
+    uint8_t seen_last; /**< Has this tracker seen the last fragment? */
+
+    pthread_mutex_t lock; /**< Mutex for locking list operations on
+                           * this tracker. */
+
+    TAILQ_HEAD(frag_tailq, _frag) frags; /**< Head of list of fragments. */
+} DefragTracker;
+
+/** A random value used for hash key generation. */
+static int defrag_hash_rand;
+
+/** Hash table size, and also the maximum number of trackers that will
+ * be allocated. */
+static int defrag_hash_size;
+
+/** The global DefragContext so all threads operate from the same
+ * context. */
+static DefragContext *defrag_context;
+
+/**
+ * Utility/debugging function to dump the frags associated with a
+ * tracker. Only enable when unit tests are enabled.
+ */
+#ifdef UNITTESTS
+static void
+DumpFrags(DefragTracker *tracker)
+{
+    Frag *frag;
+
+    printf("Dumping frags for packet: ID=%d\n", tracker->id);
+    TAILQ_FOREACH(frag, &tracker->frags, next) {
+        printf("-> Frag: Offset=%d, Len=%d\n", frag->offset, frag->len);
+        PrintRawDataFp(stdout, frag->pkt, frag->len);
+    }
+}
+#endif /* UNITTESTS */
+
+/**
+ * Generate a key for looking up a fragtracker in a hash
+ * table. Adapted from the hash function in flow-hash.c.
+ *
+ * \todo Test performance and distribution.
+ */
+static uint32_t
+DefragHashFunc(HashListTable *ht, void *data, uint16_t datalen)
+{
+    DefragTracker *p = (DefragTracker *)data;
+    uint32_t key;
+
+    if (p->family == AF_INET) {
+        key = (defrag_hash_rand + p->family +
+            p->src_addr.addr_data32[0] + p->dst_addr.addr_data32[0]) %
+            defrag_hash_size;
+    }
+    else if (p->family == AF_INET6) {
+        key = (defrag_hash_rand + p->family +
+            p->src_addr.addr_data32[0] + p->src_addr.addr_data32[1] +
+            p->src_addr.addr_data32[2] + p->src_addr.addr_data32[3] +
+            p->dst_addr.addr_data32[0] + p->dst_addr.addr_data32[1] +
+            p->dst_addr.addr_data32[2] + p->dst_addr.addr_data32[3]) %
+            defrag_hash_size;
+    }
+    else
+        key = 0;
+
+    return key;
+}
+
+/**
+ * \brief Compare 2 DefragTracker nodes in case of hash conflict.
+ *
+ * \retval 1 if a and b match, otherwise 0.
+ */
+static char
+DefragHashCompare(void *a, uint16_t a_len, void *b, uint16_t b_len)
+{
+    DefragTracker *dta = (DefragTracker *)a;
+    DefragTracker *dtb = (DefragTracker *)b;
+
+    if (dta->family != dtb->family)
+        return 0;
+    else if (dta->id != dtb->id)
+        return 0;
+    else if (!CMP_ADDR(&dta->src_addr, &dtb->src_addr))
+        return 0;
+    else if (!CMP_ADDR(&dta->dst_addr, &dtb->dst_addr))
+        return 0;
+
+    /* Match. */
+    return 1;
+}
+
+/**
+ * \brief Called by the hash table when a tracker is removed from the
+ *     hash table.
+ *
+ * We don't actually do anything here. The tracker will be reset and
+ * put back into a memory pool.
+ */
+static void
+DefragHashFree(void *data)
+{
+}
+
+/**
+ * \brief Reset a frag for reuse in a pool.
+ */
+static void
+DefragFragReset(Frag *frag)
+{
+    DefragContext *dc = frag->dc;
+
+    if (frag->pkt != NULL)
+        free(frag->pkt);
+    memset(frag, 0, sizeof(*frag));
+    frag->dc = dc;
+}
+
+/**
+ * \brief Allocate a new frag for use in a pool.
+ */
+static void *
+DefragFragNew(void *arg)
+{
+    DefragContext *dc = arg;
+    Frag *frag;
+
+    frag = calloc(1, sizeof(*frag));
+    frag->dc = dc;
+
+    return (void *)frag;
+}
+
+/**
+ * \brief Free a frag when released from a pool.
+ */
+static void
+DefragFragFree(void *arg)
+{
+    Frag *frag = arg;
+    free(frag);
+}
+
+/**
+ * \brief Free all frags associated with a tracker.
+ */
+static void
+DefragTrackerFreeFrags(DefragTracker *tracker)
+{
+    Frag *frag;
+
+    /* Lock the frag pool as we'll be returning items to it. */
+    mutex_lock(&tracker->dc->frag_pool_lock);
+
+    while ((frag = TAILQ_FIRST(&tracker->frags)) != NULL) {
+        TAILQ_REMOVE(&tracker->frags, frag, next);
+
+        /* Don't free the frag, just give it back to its pool. */
+        DefragFragReset(frag);
+        PoolReturn(frag->dc->frag_pool, frag);
+    }
+
+    mutex_unlock(&tracker->dc->frag_pool_lock);
+}
+
+/**
+ * \brief Reset a tracker for reuse.
+ */
+ */ +static void +DefragTrackerReset(DefragTracker *tracker) +{ + DefragContext *saved_dc = tracker->dc; + pthread_mutex_t saved_lock = tracker->lock; + + DefragTrackerFreeFrags(tracker); + memset(tracker, 0, sizeof(*tracker)); + tracker->dc = saved_dc; + tracker->lock = saved_lock; + TAILQ_INIT(&tracker->frags); +} + +/** + * \brief Allocates a new defragmentation tracker for use in the pool + * for trackers. + * + * \arg Pointer to DefragContext this new tracker will be associated + * with. + * + * \retval A new DefragTracker if successfull, NULL on failure. + */ +static void * +DefragTrackerNew(void *arg) +{ + DefragContext *dc = arg; + DefragTracker *tracker; + + tracker = calloc(1, sizeof(*tracker)); + if (tracker == NULL) + return NULL; + if (pthread_mutex_init(&tracker->lock, NULL) != 0) + return NULL; + tracker->dc = dc; + TAILQ_INIT(&tracker->frags); + + return (void *)tracker; +} + +/** + * \brief Free a defragmentation tracker that is being removed from + * the pool. + */ +static void +DefragTrackerFree(void *arg) +{ + DefragTracker *tracker = arg; + + pthread_mutex_destroy(&tracker->lock); + DefragTrackerFreeFrags(tracker); + free(tracker); +} + +/** + * \brief Create a new DefragContext. + * + * \retval On success a return an initialized DefragContext, otherwise + * NULL will be returned. + */ +static DefragContext * +DefragContextNew(void) +{ + DefragContext *dc; + + dc = calloc(1, sizeof(*dc)); + if (dc == NULL) + return NULL; + + /* Initialize the hash table. */ + dc->frag_table = HashListTableInit(DEFAULT_DEFRAG_HASH_SIZE, DefragHashFunc, + DefragHashCompare, DefragHashFree); + if (dc == NULL) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize hash table."); + exit(EXIT_FAILURE); + } + if (pthread_mutex_init(&dc->frag_table_lock, NULL) != 0) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize hash table mutex."); + exit(EXIT_FAILURE); + } + + /* Initialize the pool of trackers. */ + intmax_t tracker_pool_size; + if (!ConfGetInt("defrag.trackers", &tracker_pool_size)) { + tracker_pool_size = DEFAULT_DEFRAG_HASH_SIZE; + } + dc->tracker_pool = PoolInit(tracker_pool_size, tracker_pool_size, + DefragTrackerNew, dc, DefragTrackerFree); + if (dc->tracker_pool == NULL) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize tracker pool."); + exit(EXIT_FAILURE); + } + if (pthread_mutex_init(&dc->tracker_pool_lock, NULL) != 0) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize tracker pool mutex."); + exit(EXIT_FAILURE); + } + + /* Initialize the pool of frags. */ + int frag_pool_size = 0xffff; + int frag_pool_prealloc = frag_pool_size / 4; + dc->frag_pool = PoolInit(frag_pool_size, frag_pool_prealloc, + DefragFragNew, dc, DefragFragFree); + if (dc->frag_pool == NULL) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize fragment pool."); + exit(EXIT_FAILURE); + } + if (pthread_mutex_init(&dc->frag_pool_lock, NULL) != 0) { + SCLogError(SC_ERR_MEM_ALLOC, + "Defrag: Failed to initialize frag pool mutex."); + exit(EXIT_FAILURE); + } + + /* Set the default timeout. 
+    intmax_t timeout;
+    if (!ConfGetInt("defrag.timeout", &timeout)) {
+        dc->timeout = TIMEOUT_DEFAULT;
+    }
+    else {
+        if (timeout < TIMEOUT_MIN) {
+            SCLogError(SC_INVALID_ARGUMENT,
+                "defrag: Timeout less than minimum allowed value.");
+            exit(EXIT_FAILURE);
+        }
+        else if (timeout > TIMEOUT_MAX) {
+            SCLogError(SC_INVALID_ARGUMENT,
+                "defrag: Timeout greater than maximum allowed value.");
+            exit(EXIT_FAILURE);
+        }
+        dc->timeout = timeout;
+    }
+
+    SCLogDebug("Defrag Initialized:");
+    SCLogDebug("\tTimeout: %"PRIuMAX, (uintmax_t)dc->timeout);
+    SCLogDebug("\tMaximum defrag trackers: %"PRIuMAX, (uintmax_t)tracker_pool_size);
+    SCLogDebug("\tPreallocated defrag trackers: %"PRIuMAX, (uintmax_t)tracker_pool_size);
+    SCLogDebug("\tMaximum fragments: %d", frag_pool_size);
+    SCLogDebug("\tPreallocated fragments: %d", frag_pool_prealloc);
+
+    return dc;
+}
+
+/**
+ * Insert a new IPv4 fragment into a tracker.
+ *
+ * \todo Allocate packet buffers from a pool.
+ */
+static void
+Defrag4InsertFrag(DefragContext *dc, DefragTracker *tracker, Packet *p)
+{
+    Frag *frag, *prev, *new;
+    uint16_t offset = IPV4_GET_IPOFFSET(p) << 3;
+    uint16_t len = IPV4_GET_IPLEN(p);
+    uint8_t hlen = IPV4_GET_HLEN(p);
+    uint8_t more_frags = IPV4_GET_MF(p);
+    int end = offset + len - hlen;
+
+    int ltrim = 0; /* Number of bytes to trim from front of packet. */
+
+    int remove = 0; /* Will be set if we need to remove a fragment. */
+
+    int before = 0; /* Set if fragment should be inserted before
+                     * instead of after. */
+
+    /* Lock this tracker as we'll be doing list operations on it. */
+    mutex_lock(&tracker->lock);
+
+    /* Update timeout. */
+    tracker->timeout = p->ts;
+    tracker->timeout.tv_sec += dc->timeout;
+
+    prev = NULL;
+    if (!TAILQ_EMPTY(&tracker->frags)) {
+
+        /* First compare against the last frag. In the normal case
+         * this new fragment should fall after the last frag. */
+        frag = TAILQ_LAST(&tracker->frags, frag_tailq);
+        if (offset >= frag->offset + frag->len - frag->hlen) {
+            prev = frag;
+            goto insert;
+        }
+
+        /* Find where in the list to add this fragment. */
+        TAILQ_FOREACH(frag, &tracker->frags, next) {
+            int prev_end = frag->offset + frag->len - frag->hlen;
+            prev = frag;
+            ltrim = 0;
+
+            switch (tracker->policy) {
+            case POLICY_LAST:
+                if (offset <= frag->offset) {
+                    goto insert;
+                }
+                break;
+            case POLICY_FIRST:
+                if ((offset >= frag->offset) && (end <= prev_end)) {
+                    /* Packet is wholly contained within a previous
+                     * packet. Drop. */
+                    goto done;
+                }
+                else if (offset < frag->offset) {
+                    before = 1;
+                    goto insert;
+                }
+                else if (offset < prev_end) {
+                    ltrim = prev_end - offset;
+                    goto insert;
+                }
+            case POLICY_SOLARIS:
+                if ((offset < frag->offset) && (end >= prev_end)) {
+                    remove = 1;
+                    goto insert;
+                }
+                /* Fall-through. */
+            case POLICY_WINDOWS:
+                if (offset < frag->offset) {
+                    if (end > prev_end) {
+                        /* Starts before the previous frag and ends
+                         * after it. Drop the previous fragment. */
+                        remove = 1;
+                    }
+                    else {
+                        /* Fill hole before previous fragment, trim
+                         * this frag's length. */
+                        len = hlen + (frag->offset - offset);
+                    }
+                    goto insert;
+                }
+                else if ((offset >= frag->offset) && (end <= prev_end)) {
+                    /* New frag is completely contained within a
+                     * previous frag, drop. */
+                    goto done;
+                }
+                else if ((offset == frag->offset) && (end > prev_end)) {
+                    /* This fragment is filling a hole after the
+                     * previous frag. Trim the front. */
+                    ltrim = end - prev_end;
+                    goto insert;
+                }
+                /* Fall-through. */
+            case POLICY_LINUX: {
+                if (offset == frag->offset) {
+                    if (end >= prev_end) {
+                        /* Fragment starts at same offset as previous
+                         * fragment and extends past the end of the
+                         * previous fragment. Replace it
+                         * completely. */
+                        remove = 1;
+                        goto insert;
+                    }
+                    else if (end < prev_end) {
+                        /* Fragment starts at the same offset as
+                         * previous fragment but doesn't overlap it
+                         * completely, insert it after the previous
+                         * fragment and it will take precedence on
+                         * re-assembly. */
+                        goto insert;
+                    }
+                }
+                /* Fall-through. */
+            }
+            case POLICY_BSD:
+            default:
+                if (offset < prev_end) {
+                    /* Fragment overlaps with previous fragment,
+                     * process. */
+                    if (offset >= frag->offset) {
+                        if (end <= prev_end) {
+                            /* New fragment falls completely within a
+                             * previous fragment, new fragment will be
+                             * dropped. */
+                            goto done;
+                        }
+                        else {
+                            /* New fragment extends past the end of
+                             * the previous fragment. Trim off the
+                             * front of the new fragment that overlaps
+                             * with the previous fragment. */
+                            ltrim = prev_end - offset;
+                        }
+                    }
+                    else {
+                        /* New fragment starts before the previous
+                         * fragment and extends past the end of the
+                         * previous fragment. Remove the previous
+                         * fragment. */
+                        remove = 1;
+                    }
+                    goto insert;
+                }
+                break;
+            }
+        }
+    }
+
+insert:
+
+    if (len - hlen - ltrim == 0) {
+        /* No data left. */
+        goto done;
+    }
+
+    /* Allocate frag and insert. */
+    mutex_lock(&dc->frag_pool_lock);
+    new = PoolGet(dc->frag_pool);
+    mutex_unlock(&dc->frag_pool_lock);
+    if (new == NULL)
+        goto done;
+    new->pkt = malloc(len);
+    if (new->pkt == NULL) {
+        mutex_lock(&dc->frag_pool_lock);
+        PoolReturn(dc->frag_pool, new);
+        mutex_unlock(&dc->frag_pool_lock);
+        goto done;
+    }
+    memcpy(new->pkt, (uint8_t *)p->ip4h + ltrim, len - ltrim);
+    new->offset = offset + ltrim;
+    new->len = len - ltrim;
+    new->hlen = hlen;
+    new->more_frags = more_frags;
+
+    if (prev) {
+        if (before) {
+            TAILQ_INSERT_BEFORE(prev, new, next);
+        }
+        else {
+            TAILQ_INSERT_AFTER(&tracker->frags, prev, new, next);
+        }
+    }
+    else
+        TAILQ_INSERT_HEAD(&tracker->frags, new, next);
+
+    if (remove) {
+        TAILQ_REMOVE(&tracker->frags, prev, next);
+        DefragFragReset(prev);
+        mutex_lock(&dc->frag_pool_lock);
+        PoolReturn(dc->frag_pool, prev);
+        mutex_unlock(&dc->frag_pool_lock);
+    }
+
+done:
+    mutex_unlock(&tracker->lock);
+}
+
+/**
+ * Attempt to re-assemble a packet.
+ *
+ * \param tracker The defragmentation tracker to reassemble from.
+ */
+static Packet *
+Defrag4Reassemble(ThreadVars *tv, DefragContext *dc, DefragTracker *tracker,
+    Packet *p)
+{
+    Frag *frag, *prev = NULL;
+    Packet *rp = NULL;
+    int offset = 0;
+    int hlen;
+    int len;
+
+    /* Lock the tracker. */
+    mutex_lock(&tracker->lock);
+
+    /* Should not be here unless we have seen the last fragment. */
+    if (!tracker->seen_last)
+        goto done;
+
+    /* Check that we have all the data. */
+    len = 0;
+    TAILQ_FOREACH(frag, &tracker->frags, next) {
+        if (frag == TAILQ_FIRST(&tracker->frags)) {
+            /* First frag should have an offset of 0. */
+            if (frag->offset != 0) {
+                goto done;
+            }
+            len = frag->len - frag->hlen;
+            hlen = frag->hlen;
+        }
+        else {
+            if ((frag->offset - frag->hlen) <= len) {
+                len = MAX(len, frag->offset + frag->len - frag->hlen);
+            }
+            else {
+                goto done;
+            }
+        }
+    }
+
+    /* Length (ip_len) of the re-assembled packet. The length of the
+     * IP header was added when we hit the first fragment above. */
+    len += hlen;
+
+    if (tv == NULL) {
+        /* Unit test. */
+        rp = SetupPkt();
+    }
+    else {
+        /* Not really a tunnel packet, but more of a pseudo packet.
+         * But for the most part we should get the same result. */
+        rp = TunnelPktSetup(tv, NULL, p, (uint8_t *)p->ip4h, IPV4_GET_IPLEN(p),
+            IPV4_GET_IPPROTO(p));
+    }
+
+    if (rp == NULL) {
+        SCLogError(SC_ERR_MEM_ALLOC, "Failed to allocate packet for fragmentation re-assembly, dumping fragments.");
+        mutex_lock(&dc->frag_table_lock);
+        HashListTableRemove(dc->frag_table, tracker, sizeof(tracker));
+        mutex_unlock(&dc->frag_table_lock);
+        DefragTrackerReset(tracker);
+        mutex_lock(&dc->tracker_pool_lock);
+        PoolReturn(dc->tracker_pool, tracker);
+        mutex_unlock(&dc->tracker_pool_lock);
+        goto done;
+    }
+
+    offset = 0;
+    prev = NULL;
+
+    TAILQ_FOREACH(frag, &tracker->frags, next) {
+        if (frag->offset == 0) {
+            /* This is the first packet. We use this packet's IP
+             * header. */
+            memcpy(rp->pkt, frag->pkt, frag->len);
+            hlen = frag->hlen;
+            offset = frag->len - frag->hlen;
+        }
+        else {
+            /* Subsequent packets, copy them in minus their IP header. */
+
+            int diff = 0;
+            switch (tracker->policy) {
+            case POLICY_LAST:
+            case POLICY_FIRST:
+            case POLICY_WINDOWS:
+            case POLICY_SOLARIS:
+                memcpy(rp->pkt + hlen + frag->offset,
+                    frag->pkt + frag->hlen,
+                    frag->len - frag->hlen);
+                break;
+            case POLICY_LINUX:
+                if (frag->offset == prev->offset) {
+                    memcpy(rp->pkt + hlen + frag->offset,
+                        frag->pkt + frag->hlen,
+                        frag->len - frag->hlen);
+                    break;
+                }
+                /* Fall-through. */
+            case POLICY_BSD:
+            default:
+                if (frag->offset < offset)
+                    diff = offset - frag->offset;
+                memcpy(rp->pkt + hlen + frag->offset + diff,
+                    frag->pkt + frag->hlen + diff,
+                    frag->len - frag->hlen - diff);
+                offset = frag->offset + frag->len - frag->hlen;
+                break;
+            }
+        }
+        prev = frag;
+    }
+    rp->pktlen = hlen + offset;
+    rp->ip4h = (IPV4Hdr *)rp->pkt;
+
+    /* Checksum fixup. */
+    int old = rp->ip4h->ip_len + rp->ip4h->ip_off;
+    rp->ip4h->ip_len = htons(offset + hlen);
+    rp->ip4h->ip_off = 0;
+    rp->ip4h->ip_csum = FixChecksum(rp->ip4h->ip_csum,
+        old, rp->ip4h->ip_len + rp->ip4h->ip_off);
+
+    /* Remove the frag tracker. */
+    HashListTableRemove(dc->frag_table, tracker, sizeof(tracker));
+    DefragTrackerReset(tracker);
+    mutex_lock(&dc->tracker_pool_lock);
+    PoolReturn(dc->tracker_pool, tracker);
+    mutex_unlock(&dc->tracker_pool_lock);
+
+done:
+    mutex_unlock(&tracker->lock);
+    return rp;
+}
+
+/**
+ * \brief Timeout a tracker.
+ *
+ * Called when we fail to get a tracker from the pool. The first
+ * tracker that has expired will be released back to the pool then the
+ * function will exit.
+ *
+ * Intended to be called with the tracker pool already locked.
+ *
+ * \param dc Current DefragContext.
+ * \param p Packet that triggered this timeout run, used for timestamp.
+ */
+static void
+DefragTimeoutTracker(DefragContext *dc, Packet *p)
+{
+    struct timeval tv = p->ts;
+
+    HashListTableBucket *next = HashListTableGetListHead(dc->frag_table);
+    DefragTracker *tracker;
+    while (next != NULL) {
+        tracker = HashListTableGetListData(next);
+
+        if (timercmp(&tracker->timeout, &tv, <)) {
+            /* Tracker has timed out. */
+            HashListTableRemove(dc->frag_table, tracker, sizeof(tracker));
+            DefragTrackerReset(tracker);
+            PoolReturn(dc->tracker_pool, tracker);
+            return;
+        }
+
+        next = HashListTableGetListNext(next);
+    }
+}
+
+/**
+ * \brief Entry point for IPv4 fragments.
+ *
+ * \param tv ThreadVars for the calling decoder.
+ * \param dc A DefragContext to use, may be NULL for the default.
+ * \param p The packet fragment.
+ *
+ * \retval A new Packet resembling the re-assembled packet if the most
+ *     recent fragment allowed the packet to be re-assembled, otherwise
+ *     NULL is returned.
+ */
+Packet *
+Defrag4(ThreadVars *tv, DefragContext *dc, Packet *p)
+{
+    uint16_t frag_offset;
+    int more_frags;
+    DefragTracker *tracker, lookup;
+
+    /* If no DefragContext was passed in, use the global one. Passing
+     * one in is primarily useful for unit tests. */
+    if (dc == NULL)
+        dc = defrag_context;
+
+    more_frags = IPV4_GET_MF(p);
+    frag_offset = IPV4_GET_IPOFFSET(p);
+
+    if (frag_offset == 0 && more_frags == 0) {
+        return NULL;
+    }
+
+    /* Create a lookup key. */
+    lookup.family = AF_INET;
+    lookup.id = IPV4_GET_IPID(p);
+    lookup.src_addr = p->src;
+    lookup.dst_addr = p->dst;
+    mutex_lock(&dc->frag_table_lock);
+    tracker = HashListTableLookup(dc->frag_table, &lookup, sizeof(lookup));
+    mutex_unlock(&dc->frag_table_lock);
+    if (tracker == NULL) {
+        mutex_lock(&dc->tracker_pool_lock);
+        tracker = PoolGet(dc->tracker_pool);
+        if (tracker == NULL) {
+            /* Timeout trackers and try again. */
+            DefragTimeoutTracker(dc, p);
+            tracker = PoolGet(dc->tracker_pool);
+        }
+        mutex_unlock(&dc->tracker_pool_lock);
+        if (tracker == NULL) {
+            /* Report memory error - actually a pool allocation error. */
+            SCLogError(SC_ERR_MEM_ALLOC, "Defrag: Failed to allocate tracker.");
+            return NULL;
+        }
+        DefragTrackerReset(tracker);
+        tracker->family = lookup.family;
+        tracker->id = lookup.id;
+        tracker->src_addr = lookup.src_addr;
+        tracker->dst_addr = lookup.dst_addr;
+
+        /* XXX Do policy lookup. */
+        tracker->policy = dc->default_policy;
+
+        mutex_lock(&dc->frag_table_lock);
+        if (HashListTableAdd(dc->frag_table, tracker, sizeof(*tracker)) != 0) {
+            /* Failed to add new tracker. */
+            mutex_unlock(&dc->frag_table_lock);
+            SCLogError(SC_ERR_MEM_ALLOC,
+                "Defrag: Failed to add new tracker to hash table.");
+            return NULL;
+        }
+        mutex_unlock(&dc->frag_table_lock);
+    }
+
+    if (!more_frags) {
+        tracker->seen_last = 1;
+    }
+    Defrag4InsertFrag(dc, tracker, p);
+    if (tracker->seen_last) {
+        Packet *rp = Defrag4Reassemble(tv, dc, tracker, p);
+        return rp;
+    }
+
+    return NULL;
+}
+
+/**
+ * \brief Entry point for IPv6 fragments.
+ *
+ * \param dc A DefragContext to use, may be NULL for the default.
+ * \param p The packet fragment.
+ *
+ * \retval A new Packet resembling the re-assembled packet if the most
+ *     recent fragment allowed the packet to be re-assembled, otherwise
+ *     NULL is returned.
+ */
+Packet *
+Defrag6(DefragContext *dc, Packet *p)
+{
+    /* If no DefragContext was passed in, use the global one. Passing
+     * one in is primarily useful for unit tests. */
+    if (dc == NULL)
+        dc = defrag_context;
+
+    return NULL;
+}
+
+#ifdef UNITTESTS
+#define IP_MF 0x2000
+
+/**
+ * Allocate a test packet. Nothing too fancy, just a simple IP packet
+ * with some payload of no particular protocol.
+ */
+ */ +static Packet * +BuildTestPacket(uint16_t id, uint16_t off, int mf, const char content, + int content_len) +{ + Packet *p; + int hlen = 20; + int ttl = 64; + + p = calloc(1, sizeof(*p)); + if (p == NULL) + return NULL; + gettimeofday(&p->ts, NULL); + p->ip4h = (IPV4Hdr *)p->pkt; + p->ip4h->ip_verhl = 4 << 4; + p->ip4h->ip_verhl |= hlen >> 2; + p->ip4h->ip_len = htons(hlen + content_len); + p->ip4h->ip_id = htons(id); + p->ip4h->ip_off = htons(off); + if (mf) + p->ip4h->ip_off = htons(IP_MF | off); + else + p->ip4h->ip_off = htons(off); + p->ip4h->ip_ttl = ttl; + p->ip4h->ip_proto = IPPROTO_ICMP; + + p->ip4h->ip_src.s_addr = 0x01010101; /* 1.1.1.1 */ + p->ip4h->ip_dst.s_addr = 0x02020202; /* 2.2.2.2 */ + SET_IPV4_SRC_ADDR(p, &p->src); + SET_IPV4_DST_ADDR(p, &p->dst); + memset(p->pkt + hlen, content, content_len); + p->pktlen = hlen + content_len; + + p->ip4h->ip_csum = IPV4CalculateChecksum((uint16_t *)p->pkt, hlen); + + /* Self test. */ + IPV4_CACHE_INIT(p); + if (IPV4_GET_VER(p) != 4) + return NULL; + if (IPV4_GET_HLEN(p) != hlen) + return NULL; + if (IPV4_GET_IPLEN(p) != hlen + content_len) + return NULL; + if (IPV4_GET_IPID(p) != id) + return NULL; + if (IPV4_GET_IPOFFSET(p) != off) + return NULL; + if (IPV4_GET_MF(p) != mf) + return NULL; + if (IPV4_GET_IPTTL(p) != ttl) + return NULL; + if (IPV4_GET_IPPROTO(p) != IPPROTO_ICMP) + return NULL; + + return p; +} + +/** + * Test the simplest possible re-assembly scenario. All packet in + * order and no overlaps. + */ +static int +DefragInOrderSimpleTest(void) +{ + DefragContext *dc; + Packet *p1, *p2, *p3; + Packet *reassembled; + int id = 12; + int i; + + dc = DefragContextNew(); + if (dc == NULL) + return 0; + + p1 = BuildTestPacket(id, 0, 1, 'A', 8); + if (p1 == NULL) + return 0; + p2 = BuildTestPacket(id, 1, 1, 'B', 8); + if (p2 == NULL) + return 0; + p3 = BuildTestPacket(id, 2, 0, 'C', 3); + if (p3 == NULL) + return 0; + + if (Defrag4(NULL, dc, p1) != NULL) + return 0; + if (Defrag4(NULL, dc, p2) != NULL) + return 0; + reassembled = Defrag4(NULL, dc, p3); + if (reassembled == NULL) + return 0; + + /* 20 bytes in we should find 8 bytes of A. */ + for (i = 20; i < 20 + 8; i++) { + if (reassembled->pkt[i] != 'A') + return 0; + } + + /* 28 bytes in we should find 8 bytes of B. */ + for (i = 28; i < 28 + 8; i++) { + if (reassembled->pkt[i] != 'B') + return 0; + } + + /* And 36 bytes in we should find 3 bytes of C. */ + for (i = 36; i < 36 + 3; i++) { + if (reassembled->pkt[i] != 'C') + return 0; + } + + return 1; +} + +static int +DefragDoSturgesNovakTest(int policy, u_char *expected, size_t expected_len) +{ + int i; + + /* + * Build the packets. + */ + + int id = 1; + Packet *packets[17]; + + /* + * Original fragments. + */ + + /* A*24 at 0. */ + packets[0] = BuildTestPacket(id, 0, 1, 'A', 24); + + /* B*15 at 32. */ + packets[1] = BuildTestPacket(id, 32 >> 3, 1, 'B', 16); + + /* C*24 at 48. */ + packets[2] = BuildTestPacket(id, 48 >> 3, 1, 'C', 24); + + /* D*8 at 80. */ + packets[3] = BuildTestPacket(id, 80 >> 3, 1, 'D', 8); + + /* E*16 at 104. */ + packets[4] = BuildTestPacket(id, 104 >> 3, 1, 'E', 16); + + /* F*24 at 120. */ + packets[5] = BuildTestPacket(id, 120 >> 3, 1, 'F', 24); + + /* G*16 at 144. */ + packets[6] = BuildTestPacket(id, 144 >> 3, 1, 'G', 16); + + /* H*16 at 160. */ + packets[7] = BuildTestPacket(id, 160 >> 3, 1, 'H', 16); + + /* I*8 at 176. */ + packets[8] = BuildTestPacket(id, 176 >> 3, 1, 'I', 8); + + /* + * Overlapping subsequent fragments. + */ + + /* J*32 at 8. 
+    packets[9] = BuildTestPacket(id, 8 >> 3, 1, 'J', 32);
+
+    /* K*24 at 48. */
+    packets[10] = BuildTestPacket(id, 48 >> 3, 1, 'K', 24);
+
+    /* L*24 at 72. */
+    packets[11] = BuildTestPacket(id, 72 >> 3, 1, 'L', 24);
+
+    /* M*24 at 96. */
+    packets[12] = BuildTestPacket(id, 96 >> 3, 1, 'M', 24);
+
+    /* N*8 at 128. */
+    packets[13] = BuildTestPacket(id, 128 >> 3, 1, 'N', 8);
+
+    /* O*8 at 152. */
+    packets[14] = BuildTestPacket(id, 152 >> 3, 1, 'O', 8);
+
+    /* P*8 at 160. */
+    packets[15] = BuildTestPacket(id, 160 >> 3, 1, 'P', 8);
+
+    /* Q*16 at 176. */
+    packets[16] = BuildTestPacket(id, 176 >> 3, 0, 'Q', 16);
+
+    DefragContext *dc = DefragContextNew();
+    if (dc == NULL)
+        return 0;
+    dc->default_policy = policy;
+
+    /* Send all but the last. */
+    for (i = 0; i < 16; i++) {
+        if (Defrag4(NULL, dc, packets[i]) != NULL)
+            return 0;
+    }
+
+    /* And now the last one. */
+    Packet *reassembled = Defrag4(NULL, dc, packets[16]);
+    if (reassembled == NULL)
+        return 0;
+
+    if (memcmp(reassembled->pkt + 20, expected, expected_len) != 0)
+        return 0;
+
+    return 1;
+}
+
+static int
+DefragSturgesNovakBsdTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "GGGGGGGG"
+        "HHHHHHHH"
+        "HHHHHHHH"
+        "IIIIIIII"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_BSD, expected, sizeof(expected));
+}
+
+static int
+DefragSturgesNovakLinuxTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "KKKKKKKK"
+        "KKKKKKKK"
+        "KKKKKKKK"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "GGGGGGGG"
+        "PPPPPPPP"
+        "HHHHHHHH"
+        "QQQQQQQQ"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_LINUX, expected, sizeof(expected));
+}
+
+static int
+DefragSturgesNovakWindowsTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "BBBBBBBB"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "EEEEEEEE"
+        "EEEEEEEE"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "GGGGGGGG"
+        "HHHHHHHH"
+        "HHHHHHHH"
+        "IIIIIIII"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_WINDOWS, expected, sizeof(expected));
+}
+
+static int
+DefragSturgesNovakSolarisTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "BBBBBBBB"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "GGGGGGGG"
+        "HHHHHHHH"
+        "HHHHHHHH"
+        "IIIIIIII"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_SOLARIS, expected, sizeof(expected));
+}
+
+static int
+DefragSturgesNovakFirstTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "BBBBBBBB"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "CCCCCCCC"
+        "LLLLLLLL"
+        "DDDDDDDD"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "EEEEEEEE"
+        "EEEEEEEE"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "GGGGGGGG"
+        "HHHHHHHH"
+        "HHHHHHHH"
+        "IIIIIIII"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_FIRST, expected, sizeof(expected));
+}
+
+static int
+DefragSturgesNovakLastTest(void)
+{
+    /* Expected data. */
+    u_char expected[] = {
+        "AAAAAAAA"
+        "JJJJJJJJ"
+        "JJJJJJJJ"
+        "JJJJJJJJ"
+        "JJJJJJJJ"
+        "BBBBBBBB"
+        "KKKKKKKK"
+        "KKKKKKKK"
+        "KKKKKKKK"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "LLLLLLLL"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "MMMMMMMM"
+        "FFFFFFFF"
+        "NNNNNNNN"
+        "FFFFFFFF"
+        "GGGGGGGG"
+        "OOOOOOOO"
+        "PPPPPPPP"
+        "HHHHHHHH"
+        "QQQQQQQQ"
+        "QQQQQQQQ"
+    };
+
+    return DefragDoSturgesNovakTest(POLICY_LAST, expected, sizeof(expected));
+}
+
+static int
+DefragTimeoutTest(void)
+{
+    int i;
+
+    /* Setup a small number of trackers. */
+    ConfSet("defrag.trackers", "16", 1);
+
+    DefragContext *dc = DefragContextNew();
+    if (dc == NULL)
+        return 0;
+
+    /* Load in 16 packets. */
+    for (i = 0; i < 16; i++) {
+        Packet *p = BuildTestPacket(i, 0, 1, 'A' + i, 16);
+        if (Defrag4(NULL, dc, p) != NULL)
+            return 0;
+    }
+
+    /* Build a new packet but push the timestamp out by our timeout.
+     * This should force our previous fragments to be timed out. */
+    Packet *p = BuildTestPacket(99, 0, 1, 'A' + i, 16);
+    p->ts.tv_sec += dc->timeout;
+    if (Defrag4(NULL, dc, p) != NULL)
+        return 0;
+
+    /* Iterate our HashList and look for the tracker with id 99. */
+    int found = 0;
+    HashListTableBucket *next = HashListTableGetListHead(dc->frag_table);
+    if (next == NULL)
+        return 0;
+    for (;;) {
+        if (next == NULL)
+            break;
+        DefragTracker *tracker = HashListTableGetListData(next);
+        if (tracker->id == 99) {
+            found = 1;
+            break;
+        }
+
+        next = HashListTableGetListNext(next);
+    }
+    if (found == 0)
+        return 0;
+
+    return 1;
+}
+
+#endif /* UNITTESTS */
+
+void
+DefragRegisterTests(void)
+{
+#ifdef UNITTESTS
+    UtRegisterTest("DefragInOrderSimpleTest",
+        DefragInOrderSimpleTest, 1);
+    UtRegisterTest("DefragSturgesNovakBsdTest",
+        DefragSturgesNovakBsdTest, 1);
+    UtRegisterTest("DefragSturgesNovakLinuxTest",
+        DefragSturgesNovakLinuxTest, 1);
+    UtRegisterTest("DefragSturgesNovakWindowsTest",
+        DefragSturgesNovakWindowsTest, 1);
+    UtRegisterTest("DefragSturgesNovakSolarisTest",
+        DefragSturgesNovakSolarisTest, 1);
+    UtRegisterTest("DefragSturgesNovakFirstTest",
+        DefragSturgesNovakFirstTest, 1);
+    UtRegisterTest("DefragSturgesNovakLastTest",
+        DefragSturgesNovakLastTest, 1);
+    UtRegisterTest("DefragTimeoutTest",
+        DefragTimeoutTest, 1);
+#endif /* UNITTESTS */
+}
+
+void
+DefragInit(void)
+{
+    /* Initialize random value for hashing and hash table size. */
+    defrag_hash_rand = rand();
+    defrag_hash_size = DEFAULT_DEFRAG_HASH_SIZE;
+
+    /* Allocate the DefragContext. */
+    defrag_context = DefragContextNew();
+    if (defrag_context == NULL) {
+        SCLogError(SC_ERR_MEM_ALLOC,
+            "Failed to allocate memory for the Defrag module.");
+        exit(1);
+    }
+}
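One detail worth calling out from the tests above: the IPv4 fragment offset field counts 8-byte units, which is why Defrag4InsertFrag() converts with "offset << 3" and the tests pass byte offsets as "byte_off >> 3". A hedged sketch of building the on-wire ip_off word (the helper name is hypothetical; IP_MF matches the define in the unit-test block):

#include <stdint.h>

/* Build the host-order ip_off field for a fragment starting at
 * byte_off (which must be a multiple of 8).  Pass the result
 * through htons() before writing it into the header. */
static uint16_t BuildIpOff(uint16_t byte_off, int more_frags)
{
    uint16_t field = (byte_off >> 3) & 0x1fff; /* 13-bit offset */
    if (more_frags)
        field |= 0x2000;                       /* IP_MF flag */
    return field;
}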
diff --git a/src/defrag.h b/src/defrag.h
new file mode 100644
index 0000000000..af0c4d5b29
--- /dev/null
+++ b/src/defrag.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2009 Open Information Security Foundation */
+
+/**
+ * \file
+ *
+ * Defragmentation module.
+ *
+ * \author Endace Technology Limited, Jason Ish
+ */
+
+#ifndef __DEFRAG_H__
+#define __DEFRAG_H__
+
+typedef struct _DefragContext DefragContext;
+
+void DefragInit(void);
+Packet *Defrag4(ThreadVars *, DefragContext *, Packet *);
+Packet *Defrag6(DefragContext *, Packet *);
+void DefragRegisterTests(void);
+
+#endif /* __DEFRAG_H__ */
diff --git a/src/eidps.c b/src/eidps.c
index 063c1981a9..17b4ff4478 100644
--- a/src/eidps.c
+++ b/src/eidps.c
@@ -69,6 +69,8 @@
 #include "conf.h"
 #include "conf-yaml-loader.h"
 
+#include "defrag.h"
+
 #include "runmodes.h"
 
 #include "util-debug.h"
@@ -168,6 +170,7 @@ Packet *SetupPkt (void)
     return p;
 }
 
+/* \todo dtv not used. */
 Packet *TunnelPktSetup(ThreadVars *t, DecodeThreadVars *dtv, Packet *parent,
                        uint8_t *pkt, uint16_t len, uint8_t proto)
 {
     //printf("TunnelPktSetup: pkt %p, len %" PRIu32 ", proto %" PRIu32 "\n", pkt, len, proto);
@@ -388,6 +391,8 @@ int main(int argc, char **argv)
     PatternMatchPrepare(mpm_ctx, MPM_B2G);
     PerfInitCounterApi();
 
+    DefragInit();
+
     /** \todo we need an api for these */
     AppLayerDetectProtoThreadInit();
     RegisterAppLayerParsers();
@@ -452,6 +457,7 @@ int main(int argc, char **argv)
         SCSigRegisterSignatureOrderingTests();
         SCLogRegisterTests();
         SCRadixRegisterTests();
+        DefragRegisterTests();
         if (list_unittests) {
            UtListTests(regex_arg);
         }
diff --git a/src/queue.h b/src/queue.h
new file mode 100644
index 0000000000..f741a4cbb4
--- /dev/null
+++ b/src/queue.h
@@ -0,0 +1,527 @@
+/*	$OpenBSD: queue.h,v 1.32 2007/04/30 18:42:34 pedro Exp $	*/
+/*	$NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $	*/
+
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h	8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+    struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+    { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+    struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head)       ((head)->slh_first)
+#define SLIST_END(head)         NULL
+#define SLIST_EMPTY(head)       (SLIST_FIRST(head) == SLIST_END(head))
+#define SLIST_NEXT(elm, field)  ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+    for((var) = SLIST_FIRST(head); \
+        (var) != SLIST_END(head); \
+        (var) = SLIST_NEXT(var, field))
+
+#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+    for ((varp) = &SLIST_FIRST((head)); \
+        ((var) = *(varp)) != SLIST_END(head); \
+        (varp) = &SLIST_NEXT((var), field))
+
+/*
+ * Singly-linked List functions.
+ */
+ */ +#define SLIST_INIT(head) { \ + SLIST_FIRST(head) = SLIST_END(head); \ +} + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (0) + +#define SLIST_REMOVE_NEXT(head, elm, field) do { \ + (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \ +} while (0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->slh_first; \ + \ + while (curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + _Q_INVALIDATE((elm)->field.sle_next); \ + } \ +} while (0) + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List access methods + */ +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_END(head) NULL +#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head)) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_FOREACH(var, head, field) \ + for((var) = LIST_FIRST(head); \ + (var)!= LIST_END(head); \ + (var) = LIST_NEXT(var, field)) + +/* + * List functions. + */ +#define LIST_INIT(head) do { \ + LIST_FIRST(head) = LIST_END(head); \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (0) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ + _Q_INVALIDATE((elm)->field.le_prev); \ + _Q_INVALIDATE((elm)->field.le_next); \ +} while (0) + +#define LIST_REPLACE(elm, elm2, field) do { \ + if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ + (elm2)->field.le_next->field.le_prev = \ + &(elm2)->field.le_next; \ + (elm2)->field.le_prev = (elm)->field.le_prev; \ + *(elm2)->field.le_prev = (elm2); \ + _Q_INVALIDATE((elm)->field.le_prev); \ + _Q_INVALIDATE((elm)->field.le_next); \ +} while (0) + +/* + * Simple queue definitions. 
+ */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue access methods. + */ +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for((var) = SIMPLEQ_FIRST(head); \ + (var) != SIMPLEQ_END(head); \ + (var) = SIMPLEQ_NEXT(var, field)) + +/* + * Simple queue functions. + */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (0) + +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +/* + * Tail queue definitions. + */ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ +} + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} + +/* + * tail queue access methods + */ +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_END(head) NULL +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +/* XXX */ +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define TAILQ_EMPTY(head) \ + (TAILQ_FIRST(head) == TAILQ_END(head)) + +#define TAILQ_FOREACH(var, head, field) \ + for((var) = TAILQ_FIRST(head); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_NEXT(var, field)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for((var) = TAILQ_LAST(head, headname); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_PREV(var, headname, field)) + +/* + * Tail queue functions. 
+ */ +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ + _Q_INVALIDATE((elm)->field.tqe_prev); \ + _Q_INVALIDATE((elm)->field.tqe_next); \ +} while (0) + +#define TAILQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \ + (elm2)->field.tqe_next->field.tqe_prev = \ + &(elm2)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm2)->field.tqe_next; \ + (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ + *(elm2)->field.tqe_prev = (elm2); \ + _Q_INVALIDATE((elm)->field.tqe_prev); \ + _Q_INVALIDATE((elm)->field.tqe_next); \ +} while (0) + +/* + * Circular queue definitions. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { CIRCLEQ_END(&head), CIRCLEQ_END(&head) } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue access methods + */ +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) +#define CIRCLEQ_LAST(head) ((head)->cqh_last) +#define CIRCLEQ_END(head) ((void *)(head)) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) +#define CIRCLEQ_EMPTY(head) \ + (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head)) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for((var) = CIRCLEQ_FIRST(head); \ + (var) != CIRCLEQ_END(head); \ + (var) = CIRCLEQ_NEXT(var, field)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for((var) = CIRCLEQ_LAST(head); \ + (var) != CIRCLEQ_END(head); \ + (var) = CIRCLEQ_PREV(var, field)) + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = CIRCLEQ_END(head); \ + (head)->cqh_last = CIRCLEQ_END(head); \ +} while (0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = CIRCLEQ_END(head); \ + if ((head)->cqh_last == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = CIRCLEQ_END(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (0) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ + _Q_INVALIDATE((elm)->field.cqe_prev); \ + _Q_INVALIDATE((elm)->field.cqe_next); \ +} while (0) + +#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \ + CIRCLEQ_END(head)) \ + (head).cqh_last = (elm2); \ + else \ + (elm2)->field.cqe_next->field.cqe_prev = (elm2); \ + if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \ + CIRCLEQ_END(head)) \ + (head).cqh_first = (elm2); \ + else \ + (elm2)->field.cqe_prev->field.cqe_next = (elm2); \ + _Q_INVALIDATE((elm)->field.cqe_prev); \ + _Q_INVALIDATE((elm)->field.cqe_next); \ +} while (0) + +#endif /* !_SYS_QUEUE_H_ */ diff --git a/src/util-fix_checksum.c b/src/util-fix_checksum.c new file mode 100644 index 0000000000..f2bf881e70 --- /dev/null +++ b/src/util-fix_checksum.c @@ -0,0 +1,58 @@ +/* + * Reference: OpenBSD's pf.c. + * + * Copyright (c) 2001 Daniel Hartmeier + * Copyright (c) 2002 - 2008 Henning Brauer + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
diff --git a/src/util-fix_checksum.c b/src/util-fix_checksum.c
new file mode 100644
index 0000000000..f2bf881e70
--- /dev/null
+++ b/src/util-fix_checksum.c
@@ -0,0 +1,58 @@
+/*
+ * Reference: OpenBSD's pf.c.
+ *
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2002 - 2008 Henning Brauer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *    - Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    - Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following
+ *      disclaimer in the documentation and/or other materials provided
+ *      with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ */
+
+#include 
+
+/**
+ * \brief Fix-up an IP checksum.
+ *
+ * \param sum The current checksum.
+ * \param old Value of old header parameter.
+ * \param new Value of new header parameter.
+ *
+ * \retval New checksum.
+ */
+uint16_t
+FixChecksum(uint16_t sum, uint16_t old, uint16_t new)
+{
+    uint32_t l;
+
+    l = sum + old - new;
+    l = (l >> 16) + (l & 65535);
+    l = l & 65535;
+
+    return l;
+}
diff --git a/src/util-fix_checksum.h b/src/util-fix_checksum.h
new file mode 100644
index 0000000000..d5e53dd625
--- /dev/null
+++ b/src/util-fix_checksum.h
@@ -0,0 +1,37 @@
+/*
+ * Reference: OpenBSD's pf.c.
+ *
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2002 - 2008 Henning Brauer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *    - Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    - Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following
+ *      disclaimer in the documentation and/or other materials provided
+ *      with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ */
+
+uint16_t FixChecksum(uint16_t sum, uint16_t old, uint16_t new);
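FixChecksum() implements the standard incremental checksum update: since the IP checksum is the one's complement of a one's-complement sum, replacing a 16-bit header word can be compensated by adding the old value and subtracting the new one, then folding the carry, rather than recomputing over the whole header. Defrag4Reassemble() uses it after rewriting ip_len and clearing ip_off. A minimal sketch under stated assumptions: the field values below are invented for illustration, and in defrag.c the caller passes the fields as raw (network-order) 16-bit words.

#include <stdint.h>
#include <stdio.h>

uint16_t FixChecksum(uint16_t sum, uint16_t old, uint16_t new);

int main(void)
{
    /* Pretend ip_len changed from 0x0054 to 0x0030 and ip_off was
     * cleared from 0x2000 to 0.  Both deltas can be folded into a
     * single old/new pair, as the caller in defrag.c does by
     * summing ip_len + ip_off. */
    uint16_t csum = 0xb1e6;          /* arbitrary starting checksum */
    uint16_t old = 0x0054 + 0x2000;  /* sum of old field values */
    uint16_t new = 0x0030 + 0x0000;  /* sum of new field values */

    csum = FixChecksum(csum, old, new);
    printf("fixed checksum: 0x%04x\n", csum);
    return 0;
}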