get rid of endpoint type indicators #2772

Merged 4 commits on Jul 31, 2017
2 changes: 0 additions & 2 deletions integration/301_internet_edge_without_ebpf_test.sh
@@ -25,8 +25,6 @@ scope_on "$HOST1" launch --probe.ebpf.connections=false
#
# has_connection_by_id containers "$HOST1" "in-theinternet" "$(node_id containers "$HOST1" nginx)"
#
# endpoints_have_ebpf "$HOST1"
#
# kill %do_connections

scope_end_suite
1 change: 0 additions & 1 deletion integration/310_container_to_container_edge_test.sh
@@ -20,6 +20,5 @@ list_containers "$HOST1"
list_connections "$HOST1"

has_connection containers "$HOST1" client nginx
endpoints_have_ebpf "$HOST1"

scope_end_suite
@@ -19,6 +19,4 @@ list_connections "$HOST1"

has_connection containers "$HOST1" client nginx

endpoints_have_ebpf "$HOST1"

scope_end_suite
2 changes: 0 additions & 2 deletions integration/314_container_accept_before_kretprobe_test.sh
@@ -34,6 +34,4 @@ list_connections "$HOST1"

has_connection containers "$HOST1" client server

endpoints_have_ebpf "$HOST1"

scope_end_suite
26 changes: 0 additions & 26 deletions integration/config.sh
@@ -122,32 +122,6 @@ has_connection_by_id() {
assert "curl -s http://$host:4040/api/topology/${view}?system=show | jq -r '.nodes[\"$from_id\"].adjacency | contains([\"$to_id\"])'" true
}

# this checks if ebpf is true on all endpoints on a given host
endpoints_have_ebpf() {
local host="$1"
local timeout="${2:-60}"
local number_of_endpoints=-1
local have_ebpf=-1
local report

for i in $(seq "$timeout"); do
report=$(curl -s "http://${host}:4040/api/report")
number_of_endpoints=$(echo "${report}" | jq -r '.Endpoint.nodes | length')
have_ebpf=$(echo "${report}" | jq -r '.Endpoint.nodes[].latest.eBPF | select(.value != null) | contains({"value": "true"})' | wc -l)
if [[ "$number_of_endpoints" -gt 0 && "$have_ebpf" -gt 0 && "$number_of_endpoints" -eq "$have_ebpf" ]]; then
echo "Found ${number_of_endpoints} endpoints with ebpf enabled"
assert "echo '$have_ebpf'" "$number_of_endpoints"
return
fi
sleep 1
done

echo "Only ${have_ebpf} endpoints of ${number_of_endpoints} have ebpf enabled, should be equal"
echo "Example of one endpoint:"
echo "${report}" | jq -r '[.Endpoint.nodes[]][0]'
assert "echo '$have_ebpf" "$number_of_endpoints"
}

has_connection() {
local view="$1"
local host="$2"
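
The helper removed above relied on the per-endpoint eBPF key that this PR deletes from the report, which is why it is dropped rather than adapted. For orientation, here is a minimal Go sketch of the part of its check that still applies, counting Endpoint nodes via the same /api/report route and .Endpoint.nodes structure the helper queried with jq; the localhost address is an assumption and the snippet is illustrative, not part of the PR.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// scopeReport models only the fields this check needs: the report's
// Endpoint topology and its node map.
type scopeReport struct {
	Endpoint struct {
		Nodes map[string]json.RawMessage `json:"nodes"`
	} `json:"Endpoint"`
}

func main() {
	// The address is an assumption; the removed helper targeted
	// http://$host:4040/api/report on the host under test.
	resp, err := http.Get("http://localhost:4040/api/report")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var rpt scopeReport
	if err := json.NewDecoder(resp.Body).Decode(&rpt); err != nil {
		panic(err)
	}
	// Equivalent of: jq -r '.Endpoint.nodes | length'
	fmt.Printf("found %d endpoint nodes\n", len(rpt.Endpoint.Nodes))
}
```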
66 changes: 23 additions & 43 deletions probe/endpoint/connection_tracker.go
@@ -91,27 +91,17 @@ func (t *connectionTracker) ReportConnections(rpt *report.Report) {
t.useProcfs()
}

// seenTuples contains information about connections seen by
// conntrack
seenTuples := t.performFlowWalk(rpt)

if t.conf.WalkProc && t.conf.Scanner != nil {
t.performWalkProc(rpt, hostNodeID, seenTuples)
}
}

// performFlowWalk consults the flowWalker for short-lived connections
func (t *connectionTracker) performFlowWalk(rpt *report.Report) map[string]fourTuple {
// consult the flowWalker for short-lived (conntracked) connections
seenTuples := map[string]fourTuple{}
extraNodeInfo := map[string]string{
Conntracked: "true",
}
t.flowWalker.walkFlows(func(f flow, alive bool) {
tuple := flowToTuple(f)
seenTuples[tuple.key()] = tuple
t.addConnection(rpt, tuple, "", extraNodeInfo, extraNodeInfo)
t.addConnection(rpt, false, tuple, "", nil, nil)
})
return seenTuples

if t.conf.WalkProc && t.conf.Scanner != nil {
t.performWalkProc(rpt, hostNodeID, seenTuples)
}
}

func (t *connectionTracker) existingFlows() map[string]fourTuple {
@@ -138,19 +128,14 @@ func (t *connectionTracker) performWalkProc(rpt *report.Report, hostNodeID strin
}
for conn := conns.Next(); conn != nil; conn = conns.Next() {
tuple, namespaceID, incoming := connectionTuple(conn, seenTuples)
var (
toNodeInfo = map[string]string{Procspied: "true"}
fromNodeInfo = map[string]string{Procspied: "true"}
)
var toNodeInfo, fromNodeInfo map[string]string
if conn.Proc.PID > 0 {
fromNodeInfo[process.PID] = strconv.FormatUint(uint64(conn.Proc.PID), 10)
fromNodeInfo[report.HostNodeID] = hostNodeID
}
if incoming {
tuple.reverse()
toNodeInfo, fromNodeInfo = fromNodeInfo, toNodeInfo
fromNodeInfo = map[string]string{
process.PID: strconv.FormatUint(uint64(conn.Proc.PID), 10),
report.HostNodeID: hostNodeID,
}
}
t.addConnection(rpt, tuple, namespaceID, fromNodeInfo, toNodeInfo)
t.addConnection(rpt, incoming, tuple, namespaceID, fromNodeInfo, toNodeInfo)
}
return nil
}
@@ -186,28 +171,23 @@ func (t *connectionTracker) getInitialState() {

func (t *connectionTracker) performEbpfTrack(rpt *report.Report, hostNodeID string) error {
t.ebpfTracker.walkConnections(func(e ebpfConnection) {
fromNodeInfo := map[string]string{
EBPF: "true",
}
toNodeInfo := map[string]string{
EBPF: "true",
}
var toNodeInfo, fromNodeInfo map[string]string
if e.pid > 0 {
fromNodeInfo[process.PID] = strconv.Itoa(e.pid)
fromNodeInfo[report.HostNodeID] = hostNodeID
fromNodeInfo = map[string]string{
process.PID: strconv.Itoa(e.pid),
report.HostNodeID: hostNodeID,
}
}

if e.incoming {
t.addConnection(rpt, reverse(e.tuple), e.networkNamespace, toNodeInfo, fromNodeInfo)
} else {
t.addConnection(rpt, e.tuple, e.networkNamespace, fromNodeInfo, toNodeInfo)
}

t.addConnection(rpt, e.incoming, e.tuple, e.networkNamespace, fromNodeInfo, toNodeInfo)
})
return nil
}

func (t *connectionTracker) addConnection(rpt *report.Report, ft fourTuple, namespaceID string, extraFromNode, extraToNode map[string]string) {
func (t *connectionTracker) addConnection(rpt *report.Report, incoming bool, ft fourTuple, namespaceID string, extraFromNode, extraToNode map[string]string) {
if incoming {
ft = reverse(ft)
extraFromNode, extraToNode = extraToNode, extraFromNode
}
var (
fromNode = t.makeEndpointNode(namespaceID, ft.fromAddr, ft.fromPort, extraFromNode)
toNode = t.makeEndpointNode(namespaceID, ft.toAddr, ft.toPort, extraToNode)
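
The core of this refactor is that addConnection now takes the connection direction, so the proc and eBPF walkers no longer reverse the tuple and swap the from/to metadata at each call site, and none of the walkers attach the removed conntracked/procspied/eBPF indicator maps. A stripped-down sketch of that normalization, using stand-in types rather than Scope's real ones; it is illustrative only.

```go
package main

import "fmt"

// fourTuple is a stand-in for Scope's connection tuple.
type fourTuple struct {
	fromAddr, toAddr string
	fromPort, toPort uint16
}

// reverse swaps the two ends of a tuple.
func reverse(ft fourTuple) fourTuple {
	return fourTuple{
		fromAddr: ft.toAddr, toAddr: ft.fromAddr,
		fromPort: ft.toPort, toPort: ft.fromPort,
	}
}

// addConnection mirrors the new signature: callers pass the raw direction
// plus any extra metadata for the local end, and the function normalizes
// incoming connections by reversing the tuple and swapping the maps.
func addConnection(incoming bool, ft fourTuple, extraFromNode, extraToNode map[string]string) {
	if incoming {
		ft = reverse(ft)
		extraFromNode, extraToNode = extraToNode, extraFromNode
	}
	fmt.Printf("%s:%d -> %s:%d  from=%v to=%v\n",
		ft.fromAddr, ft.fromPort, ft.toAddr, ft.toPort, extraFromNode, extraToNode)
}

func main() {
	local := map[string]string{"pid": "4242"}

	// Outgoing connection: the local process stays on the "from" end.
	addConnection(false, fourTuple{"10.0.0.1", "10.0.0.2", 54001, 80}, local, nil)

	// Incoming connection: the tuple is reversed and the local metadata
	// ends up on the "to" end, exactly once, inside addConnection.
	addConnection(true, fourTuple{"10.0.0.1", "10.0.0.2", 80, 54001}, local, nil)
}
```

Centralizing the reversal in one place is what lets the old eBPF branch collapse into a single addConnection call and lets the proc walker build its metadata map only when a PID is known.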
8 changes: 2 additions & 6 deletions probe/endpoint/nat_internal_test.go
@@ -70,16 +70,14 @@ func TestNat(t *testing.T) {
have := report.MakeReport()
originalID := report.MakeEndpointNodeID("host1", "", "10.0.47.1", "80")
have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
"foo": "bar",
Procspied: "true",
"foo": "bar",
}))

want := have.Copy()
wantID := report.MakeEndpointNodeID("host1", "", "1.2.3.4", "80")
want.Endpoint.AddNode(report.MakeNodeWith(wantID, map[string]string{
"copy_of": originalID,
"foo": "bar",
Procspied: "true",
}))

makeNATMapper(ct).applyNAT(have, "host1")
@@ -125,15 +123,13 @@ func TestNat(t *testing.T) {
have := report.MakeReport()
originalID := report.MakeEndpointNodeID("host2", "", "10.0.47.2", "22222")
have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
"foo": "baz",
Procspied: "true",
"foo": "baz",
}))

want := have.Copy()
want.Endpoint.AddNode(report.MakeNodeWith(report.MakeEndpointNodeID("host2", "", "2.3.4.5", "22223"), map[string]string{
"copy_of": originalID,
"foo": "baz",
Procspied: "true",
}))

makeNATMapper(ct).applyNAT(have, "host1")
3 changes: 0 additions & 3 deletions probe/endpoint/reporter.go
@@ -11,9 +11,6 @@ import (

// Node metadata keys.
const (
Conntracked = "conntracked"
EBPF = "eBPF"
Procspied = "procspied"
ReverseDNSNames = "reverse_dns_names"
SnoopedDNSNames = "snooped_dns_names"
)
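
With the Conntracked, EBPF, and Procspied constants gone, the endpoint package exports only DNS-related metadata keys, so endpoint nodes no longer record how they were discovered. A small illustrative sketch, not from this PR, of building an endpoint node with the same report helpers used in the tests above; the host, address, and "foo" metadata are placeholders.

```go
package main

import (
	"fmt"

	"github.com/weaveworks/scope/report"
)

func main() {
	// Same helpers as in nat_internal_test.go: an endpoint node is now
	// built from application metadata only; there is no Procspied,
	// Conntracked, or EBPF key left to attach.
	id := report.MakeEndpointNodeID("host1", "", "10.0.47.1", "80")
	node := report.MakeNodeWith(id, map[string]string{
		"foo": "bar",
	})
	fmt.Println(node.ID)
}
```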
21 changes: 6 additions & 15 deletions render/short_lived_connections_test.go
@@ -7,7 +7,6 @@ import (
"github.com/weaveworks/common/mtime"

"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/report"
@@ -51,24 +50,16 @@ var (
rpt = report.Report{
Endpoint: report.Topology{
Nodes: report.Nodes{
randomEndpointNodeID: report.MakeNodeWith(randomEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
WithAdjacent(serverEndpointNodeID).WithTopology(report.Endpoint),
randomEndpointNodeID: report.MakeNode(randomEndpointNodeID).
WithTopology(report.Endpoint).WithAdjacent(serverEndpointNodeID),

serverEndpointNodeID: report.MakeNodeWith(serverEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
serverEndpointNodeID: report.MakeNode(serverEndpointNodeID).
WithTopology(report.Endpoint),

container1EndpointNodeID: report.MakeNodeWith(container1EndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
WithAdjacent(duplicatedEndpointNodeID).WithTopology(report.Endpoint),
container1EndpointNodeID: report.MakeNode(container1EndpointNodeID).
WithTopology(report.Endpoint).WithAdjacent(duplicatedEndpointNodeID),

duplicatedEndpointNodeID: report.MakeNodeWith(duplicatedEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
duplicatedEndpointNodeID: report.MakeNode(duplicatedEndpointNodeID).
WithTopology(report.Endpoint),
},
},
41 changes: 13 additions & 28 deletions test/fixture/report_fixture.go
@@ -4,7 +4,6 @@ import (
"time"

"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/probe/process"
@@ -128,67 +127,53 @@ var (
// care to test into the fixture. Just be sure to include the bits
// that the mapping funcs extract :)
Client54001NodeID: report.MakeNode(Client54001NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: Client1PID,
report.HostNodeID: ClientHostNodeID,
endpoint.Procspied: True,
process.PID: Client1PID,
report.HostNodeID: ClientHostNodeID,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(10),
EgressByteCount: newu64(100),
}),

Client54002NodeID: report.MakeNode(Client54002NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: Client2PID,
report.HostNodeID: ClientHostNodeID,
endpoint.Procspied: True,
process.PID: Client2PID,
report.HostNodeID: ClientHostNodeID,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(20),
EgressByteCount: newu64(200),
}),

Server80NodeID: report.MakeNode(Server80NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: ServerPID,
report.HostNodeID: ServerHostNodeID,
endpoint.Procspied: True,
process.PID: ServerPID,
report.HostNodeID: ServerHostNodeID,
}),

NonContainerNodeID: report.MakeNode(NonContainerNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: NonContainerPID,
report.HostNodeID: ServerHostNodeID,
endpoint.Procspied: True,
process.PID: NonContainerPID,
report.HostNodeID: ServerHostNodeID,
}).WithAdjacent(GoogleEndpointNodeID),

// Probe pseudo nodes
UnknownClient1NodeID: report.MakeNode(UnknownClient1NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient1NodeID: report.MakeNode(UnknownClient1NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(30),
EgressByteCount: newu64(300),
}),

UnknownClient2NodeID: report.MakeNode(UnknownClient2NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient2NodeID: report.MakeNode(UnknownClient2NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(40),
EgressByteCount: newu64(400),
}),

UnknownClient3NodeID: report.MakeNode(UnknownClient3NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient3NodeID: report.MakeNode(UnknownClient3NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(50),
EgressByteCount: newu64(500),
}),

RandomClientNodeID: report.MakeNode(RandomClientNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
RandomClientNodeID: report.MakeNode(RandomClientNodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(60),
EgressByteCount: newu64(600),
}),

GoogleEndpointNodeID: report.MakeNode(GoogleEndpointNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}),
GoogleEndpointNodeID: report.MakeNode(GoogleEndpointNodeID).WithTopology(report.Endpoint),
},
},
Process: report.Topology{