conntrack: Fix flush not flushing all elements.
On the netdev datapath, when a ct element was cleaned, the cmap
could be shrunk, potentially causing some elements to be skipped
in the flush iteration.

Fixes: 967bb5c ("conntrack: Add rcu support.")
Signed-off-by: Xavier Simonart <[email protected]>
Acked-by: Mike Pattrick <[email protected]>
Signed-off-by: Simon Horman <[email protected]>
simonartxavier authored and Simon Horman committed Mar 6, 2024
1 parent e0aa15f commit 6c082a8
Showing 3 changed files with 52 additions and 11 deletions.
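
For context on the fix: the old code walked ct->conns with cmap_next_position(), which resumes from a saved struct cmap_position; per the commit message, that saved position can skip elements once the cmap shrinks. The new code keeps a struct cmap_cursor and resumes with CMAP_CURSOR_FOR_EACH_CONTINUE. Below is a minimal sketch of that resumable-iteration pattern against the cmap API in lib/cmap.h; struct example_entry, struct example_dump, and the example_dump_*() functions are hypothetical names for illustration and are not part of the commit.

/* Sketch: resumable cmap iteration with a cursor, mirroring the pattern
 * this commit adopts in conntrack_dump_start()/conntrack_dump_next().
 * Hypothetical types/functions; only the cmap calls are existing OVS API. */
#include "cmap.h"

struct example_entry {
    struct cmap_node cm_node;   /* Link inside the cmap. */
    int value;
};

struct example_dump {
    struct cmap_cursor cursor;  /* Iteration state kept between calls. */
};

static void
example_dump_start(struct example_dump *dump, const struct cmap *map)
{
    /* Position the cursor on the first element, like
     * dump->cursor = cmap_cursor_start(&ct->conns) in the patch. */
    dump->cursor = cmap_cursor_start(map);
}

static struct example_entry *
example_dump_next(struct example_dump *dump)
{
    struct example_entry *entry;

    /* Resume from the cursor and hand back one element per call;
     * NULL once the iteration is complete. */
    CMAP_CURSOR_FOR_EACH_CONTINUE (entry, cm_node, &dump->cursor) {
        return entry;
    }
    return NULL;
}
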
14 changes: 4 additions & 10 deletions lib/conntrack.c
@@ -2651,25 +2651,19 @@ conntrack_dump_start(struct conntrack *ct, struct conntrack_dump *dump,
 
     dump->ct = ct;
     *ptot_bkts = 1; /* Need to clean up the callers. */
+    dump->cursor = cmap_cursor_start(&ct->conns);
     return 0;
 }
 
 int
 conntrack_dump_next(struct conntrack_dump *dump, struct ct_dpif_entry *entry)
 {
     struct conntrack *ct = dump->ct;
     long long now = time_msec();
+    struct conn_key_node *keyn;
+    struct conn *conn;
 
-    for (;;) {
-        struct cmap_node *cm_node = cmap_next_position(&ct->conns,
-                                                       &dump->cm_pos);
-        if (!cm_node) {
-            break;
-        }
-        struct conn_key_node *keyn;
-        struct conn *conn;
-
-        INIT_CONTAINER(keyn, cm_node, cm_node);
+    CMAP_CURSOR_FOR_EACH_CONTINUE (keyn, cm_node, &dump->cursor) {
         if (keyn->dir != CT_DIR_FWD) {
             continue;
         }
2 changes: 1 addition & 1 deletion lib/conntrack.h
@@ -107,8 +107,8 @@ struct conntrack_dump {
     struct conntrack *ct;
     unsigned bucket;
     union {
-        struct cmap_position cm_pos;
         struct hmap_position hmap_pos;
+        struct cmap_cursor cursor;
     };
     bool filter_zone;
     uint16_t zone;
47 changes: 47 additions & 0 deletions tests/system-traffic.at
@@ -8390,6 +8390,53 @@ AT_CHECK([ovs-pcap client.pcap | grep 000000002010000000002000], [0], [dnl
 OVS_TRAFFIC_VSWITCHD_STOP
 AT_CLEANUP
 
+AT_SETUP([conntrack - Flush many conntrack entries by port])
+CHECK_CONNTRACK()
+OVS_TRAFFIC_VSWITCHD_START()
+
+ADD_NAMESPACES(at_ns0, at_ns1)
+
+ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24")
+ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24")
+
+AT_DATA([flows.txt], [dnl
+priority=100,in_port=1,udp,action=ct(zone=1,commit),2
+])
+
+AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt])
+
+dnl 20 packets from port 1 and 1 packet from port 2.
+flow_l3="\
+eth_src=50:54:00:00:00:09,eth_dst=50:54:00:00:00:0a,dl_type=0x0800,\
+nw_src=10.1.1.1,nw_dst=10.1.1.2,nw_proto=17,nw_ttl=64,nw_frag=no"
+
+for i in $(seq 1 20); do
+    frame=$(ovs-ofctl compose-packet --bare "$flow_l3, udp_src=1,udp_dst=$i")
+    AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"])
+done
+frame=$(ovs-ofctl compose-packet --bare "$flow_l3, udp_src=2,udp_dst=1")
+AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=$frame actions=resubmit(,0)"])
+
+: > conntrack
+
+for i in $(seq 1 20); do
+    echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=1,dport=${i}),reply=(src=10.1.1.2,dst=10.1.1.1,sport=${i},dport=1),zone=1" >> conntrack
+done
+echo "udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1" >> conntrack
+
+sort conntrack > expout
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [expout])
+
+dnl Check that flushing conntrack by port 1 flushes all ct entries for port 1 but keeps the ct entry for port 2.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack zone=1 'ct_nw_proto=17,ct_tp_src=1'])
+AT_CHECK([ovs-appctl dpctl/dump-conntrack zone=1 | grep -F "src=10.1.1.1," | sort ], [0], [dnl
+udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=2,dport=1),reply=(src=10.1.1.2,dst=10.1.1.1,sport=1,dport=2),zone=1
+])
+
+OVS_TRAFFIC_VSWITCHD_STOP
+AT_CLEANUP
+
 AT_BANNER([IGMP])
 
 AT_SETUP([IGMP - flood under normal action])
