af-packet: add support for XDP cpu redirect map

This patch adds an "xdp-cpu-redirect" option to the af-packet
interface configuration, taking a set of CPUs. If set, the XDP
filter will load balance the skb creation over the specified
CPUs instead of doing the creation on the CPU handling the
packet. In the case of a card with asymmetric hashing, this
avoids saturating the single CPU handling the traffic.

The XDP filter must contain the set of maps used for load
balancing; this is the case for xdp_filter.bpf.

Fixed-by: Jesper Dangaard Brouer <netoptimizer@brouer.com>
commit 4f57008a23 (parent 33072a49fe, pull/3221/head)
Author: Eric Leblond
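
For reference, the af-packet setup this option plugs into could look like the following suricata.yaml sketch (interface name, filter path, CPU range, and the companion xdp-filter-file/xdp-mode keys are illustrative assumptions, not part of this commit):

af-packet:
  - interface: eth3
    xdp-filter-file: /usr/libexec/suricata/ebpf/xdp_filter.bpf
    xdp-mode: driver
    # hypothetical CPU set: load balance skb creation on CPUs 1 and 2
    xdp-cpu-redirect: ["1-2"]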

@@ -33,6 +33,8 @@
#define LINUX_VERSION_CODE 263682
#define CPUMAP_MAX_CPUS 64
struct vlan_hdr {
__u16 h_vlan_TCI;
__u16 h_vlan_encapsulated_proto;
@@ -78,6 +80,28 @@ struct bpf_map_def SEC("maps") flow_table_v6 = {
.max_entries = 32768,
};
/* Special map type that can XDP_REDIRECT frames to another CPU */
struct bpf_map_def SEC("maps") cpu_map = {
.type = BPF_MAP_TYPE_CPUMAP,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CPUMAP_MAX_CPUS,
};
struct bpf_map_def SEC("maps") cpus_available = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CPUMAP_MAX_CPUS,
};
struct bpf_map_def SEC("maps") cpus_count = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
static __always_inline int get_sport(void *trans_data, void *data_end,
uint8_t protocol)
{
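
To make the indirection between these three maps concrete, here is a plain-C sketch of their contents for a hypothetical "xdp-cpu-redirect" set of CPUs 2 and 3 (values are illustrative only, not part of the patch):

/* Illustration: plain-C view of the maps after userspace setup.
 * cpus_count holds the size of the CPU set, cpus_available maps a
 * dense index to an actual CPU id, and cpu_map is keyed by CPU id
 * with the kernel-side queue size as value. */
__u32 cpus_count_val = 2;                /* two CPUs in the set          */
__u32 cpus_available_vals[2] = { 2, 3 }; /* index 0 -> CPU 2, 1 -> CPU 3 */
/* cpu_map[2] = 4096; cpu_map[3] = 4096;   queue size set by userspace   */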
@@ -129,6 +153,10 @@ static int __always_inline filter_ipv4(void *data, __u64 nh_off, void *data_end)
int sport;
struct flowv4_keys tuple;
struct pair *value;
uint32_t cpu_dest;
uint32_t key0 = 0;
uint32_t *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
uint32_t *cpu_selected;
if ((void *)(iph + 1) > data_end)
return XDP_PASS;
@@ -169,8 +197,18 @@ static int __always_inline filter_ipv4(void *data, __u64 nh_off, void *data_end)
return XDP_DROP;
}
if (cpu_max && *cpu_max) {
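/* src + dst is commutative, so both directions of a flow select the same CPU */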
cpu_dest = (tuple.src + tuple.dst) % *cpu_max;
cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
if (!cpu_selected)
return XDP_ABORTED;
cpu_dest = *cpu_selected;
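/* on success bpf_redirect_map() returns XDP_REDIRECT: the frame is queued
 * to the chosen CPU, which allocates the skb and runs the normal stack */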
return bpf_redirect_map(&cpu_map, cpu_dest, 0);
} else {
return XDP_PASS;
}
}
static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
{
@@ -179,6 +217,10 @@ static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
int sport;
struct flowv6_keys tuple;
struct pair *value;
uint32_t cpu_dest;
uint32_t key0 = 0;
uint32_t *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
uint32_t *cpu_selected;
if ((void *)(ip6h + 1) > data_end)
return 0;
@@ -210,8 +252,17 @@ static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
value->time = bpf_ktime_get_ns();
return XDP_DROP;
}
if (cpu_max && *cpu_max) {
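/* symmetric sum over src and dst words keeps both flow directions on one CPU */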
cpu_dest = (tuple.src[0] + tuple.dst[0] + tuple.src[3] + tuple.dst[3]) % *cpu_max;
cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
if (!cpu_selected)
return XDP_ABORTED;
cpu_dest = *cpu_selected;
return bpf_redirect_map(&cpu_map, cpu_dest, 0);
} else {
return XDP_PASS;
}
}
int SEC("xdp") xdp_hashfilter(struct xdp_md *ctx)
{

@@ -479,6 +479,22 @@ static void *ParseAFPConfig(const char *iface)
if (ret != 0) {
SCLogWarning(SC_ERR_INVALID_VALUE,
"Error when setting up XDP");
} else {
/* Try to get the xdp-cpu-redirect key */
const char *cpuset;
if (ConfGetChildValueWithDefault(if_root, if_default,
"xdp-cpu-redirect", &cpuset) == 1) {
SCLogConfig("Setting up XDP CPU redirect map");
ConfNode *node = ConfGetChildWithDefault(if_root, if_default, "xdp-cpu-redirect");
if (node == NULL) {
SCLogError(SC_ERR_INVALID_VALUE, "Should not happen: xdp-cpu-redirect has a value but no node");
} else {
EBPFBuildCPUSet(node, aconf->iface);
}
} else {
/* Passing a NULL node just sets the CPU count to 0 */
EBPFBuildCPUSet(NULL, aconf->iface);
}
}
}
#else

@@ -446,4 +446,72 @@ void EBPFRegisterExtension(void)
g_livedev_storage_id = LiveDevStorageRegister("bpfmap", sizeof(void *), NULL, BpfMapsInfoFree);
}
#ifdef HAVE_PACKET_XDP
static uint32_t g_redirect_iface_cpu_counter = 0;
static int EBPFAddCPUToMap(const char *iface, uint32_t i)
{
int cpumap = EBPFGetMapFDByName(iface, "cpu_map");
uint32_t queue_size = 4096;
int ret;
if (cpumap < 0) {
SCLogError(SC_ERR_AFP_CREATE, "Can't find cpu_map");
return -1;
}
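/* cpu_map entry: key is the CPU id, value sizes the kernel-side queue
 * used to hand frames to that CPU */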
ret = bpf_map_update_elem(cpumap, &i, &queue_size, 0);
if (ret) {
SCLogError(SC_ERR_AFP_CREATE, "Create CPU entry failed (err:%d)", ret);
return -1;
}
int cpus_available = EBPFGetMapFDByName(iface, "cpus_available");
if (cpus_available < 0) {
SCLogError(SC_ERR_AFP_CREATE, "Can't find cpus_available map");
return -1;
}
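/* record the actual CPU id at the next dense index; the caller
 * increments the counter only on success */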
ret = bpf_map_update_elem(cpus_available, &g_redirect_iface_cpu_counter, &i, 0);
if (ret) {
SCLogError(SC_ERR_AFP_CREATE, "Add CPU to cpus_available failed (err:%d)", ret);
return -1;
}
return 0;
}
static void EBPFRedirectMapAddCPU(int i, void *data)
{
if (EBPFAddCPUToMap(data, i) < 0) {
SCLogError(SC_ERR_INVALID_VALUE,
"Unable to add CPU %d to set", i);
} else {
g_redirect_iface_cpu_counter++;
}
}
void EBPFBuildCPUSet(ConfNode *node, char *iface)
{
uint32_t key0 = 0;
int mapfd = EBPFGetMapFDByName(iface, "cpus_count");
if (mapfd < 0) {
SCLogError(SC_ERR_INVALID_VALUE,
"Unable to find 'cpus_count' map");
return;
}
g_redirect_iface_cpu_counter = 0;
if (node == NULL) {
bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
BPF_ANY);
return;
}
BuildCpusetWithCallback("xdp-cpu-redirect", node,
EBPFRedirectMapAddCPU,
iface);
bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
BPF_ANY);
}
#endif /* HAVE_PACKET_XDP */
#endif
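
For illustration, the contract BuildCpusetWithCallback provides above is one callback invocation per CPU id in the parsed set. A hypothetical standalone callback (PrintCPU is not part of this patch):

/* Hypothetical example of the callback contract: called once per CPU id
 * parsed from the "xdp-cpu-redirect" node. */
static void PrintCPU(int i, void *data)
{
    SCLogInfo("iface %s: would redirect to CPU %d", (const char *)data, i);
}
/* BuildCpusetWithCallback("xdp-cpu-redirect", node, PrintCPU, iface); */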

@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 Open Information Security Foundation
+/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
@@ -72,6 +72,8 @@ int EBPFCheckBypassedFlowTimeout(struct flows_stats *bypassstats,
void EBPFRegisterExtension(void);
void EBPFBuildCPUSet(ConfNode *node, char *iface);
#endif
#endif
