-
Notifications
You must be signed in to change notification settings - Fork 54
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #397 from DataDog/jamie/kafkaadmin-topicstate
Jamie/kafkaadmin topicstate
- Loading branch information
Showing
6 changed files
with
329 additions
and
6 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,56 @@ | ||
package kafkaadmin | ||
|
||
import ( | ||
"fmt" | ||
"regexp" | ||
) | ||
|
||
/* | ||
Enqueue some borrowed helpers from topicmappr that aren't worth exporting (plus, | ||
they're modified a bit for this library). | ||
*/ | ||
|
||
var (
	// topicNormalChar matches a single character accepted verbatim in Kafka
	// topic names. NOTE(review): inside this raw string, `\\` matches a literal
	// backslash, and `.` is absent — so dotted names are treated as regex by
	// containsRegex. Both look intentional (borrowed from topicmappr); confirm
	// before changing the character class.
	topicNormalChar = regexp.MustCompile(`[a-zA-Z0-9_\\-]`)
)

// stringsToRegex takes a []string of topic names and returns a
// []*regexp.Regexp. Values that are plain string literals are anchored as
// ^value$; values containing regex metacharacters are compiled as-is. The
// input slice is left unmodified. An error is returned if any pattern fails
// to compile.
func stringsToRegex(names []string) ([]*regexp.Regexp, error) {
	out := make([]*regexp.Regexp, 0, len(names))

	for _, name := range names {
		pattern := name
		// Anchor string literals so that e.g. "test" doesn't match "test2".
		if !containsRegex(pattern) {
			pattern = fmt.Sprintf(`^%s$`, pattern)
		}

		r, err := regexp.Compile(pattern)
		if err != nil {
			return nil, fmt.Errorf("invalid regex pattern %q: %w", pattern, err)
		}

		out = append(out, r)
	}

	return out, nil
}

// containsRegex takes a topic name string and returns whether or not it
// should be interpreted as regex: any character outside the legal Kafka topic
// name set is assumed to be a regex metacharacter.
func containsRegex(t string) bool {
	// Check each character of the topic name. If it doesn't contain a legal
	// Kafka topic name character, we're going to assume it's regex.
	for _, c := range t {
		if !topicNormalChar.MatchString(string(c)) {
			return true
		}
	}

	return false
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
package kafkaadmin | ||
|
||
import ( | ||
"testing" | ||
|
||
"github.com/confluentinc/confluent-kafka-go/kafka" | ||
"github.com/stretchr/testify/assert" | ||
) | ||
|
||
func TestTopicStatesFromMetadata(t *testing.T) { | ||
// Mock metadata. | ||
md := fakeKafkaMetadata() | ||
// Get a TopicStates. | ||
ts, err := topicStatesFromMetadata(md) | ||
assert.Nil(t, err) | ||
|
||
// Expected results. | ||
expected := NewTopicStates() | ||
|
||
// test1 topic. | ||
test1state := fakeTopicState("test1", 2) | ||
test1state.setPartitionState(0, []int32{1001, 1002}, []int32{1001, 1002}) | ||
test1state.setPartitionState(1, []int32{1002}, []int32{1002}) | ||
expected["test1"] = test1state | ||
|
||
// test2 topic. | ||
test2state := fakeTopicState("test2", 2) | ||
test2state.setPartitionState(0, []int32{1003, 1002}, []int32{1003, 1002}) | ||
test2state.setPartitionState(1, []int32{1002, 1003}, []int32{1003, 1002}) | ||
expected["test2"] = test2state | ||
|
||
assert.Equal(t, expected, ts) | ||
} | ||
|
||
// fakeTopicState takes a topic name and desired number of partitions and returns | ||
// a TopicState. Note that the PartitionStates are left empty; those are to be | ||
// filled as needed in each test. | ||
func fakeTopicState(name string, partitions int32) TopicState { | ||
ts := NewTopicState(name) | ||
ts.Partitions = partitions | ||
ts.ReplicationFactor = 2 | ||
ts.PartitionStates = map[int]PartitionState{} | ||
for i := int32(0); i < partitions; i++ { | ||
ts.PartitionStates[int(i)] = PartitionState{ | ||
ID: i, | ||
} | ||
} | ||
|
||
return ts | ||
} | ||
|
||
// setPartitionState takes a partition ID, the desired assigned replica and ISR | ||
// and sets the partition state accordingly | ||
func (t TopicState) setPartitionState(id int32, replicas []int32, isr []int32) { | ||
ps := t.PartitionStates[int(id)] | ||
ps.Leader = isr[0] | ||
ps.Replicas = replicas | ||
ps.ISR = isr | ||
t.PartitionStates[int(id)] = ps | ||
} | ||
|
||
func fakeKafkaMetadata() *kafka.Metadata { | ||
var noErr = kafka.NewError(kafka.ErrNoError, "Success", false) | ||
|
||
return &kafka.Metadata{ | ||
Brokers: []kafka.BrokerMetadata{ | ||
{ | ||
ID: 1001, | ||
Host: "host-a", | ||
Port: 9092, | ||
}, | ||
{ | ||
ID: 1002, | ||
Host: "host-b", | ||
Port: 9092, | ||
}, | ||
{ | ||
ID: 1003, | ||
Host: "host-c", | ||
Port: 9092, | ||
}, | ||
}, | ||
Topics: map[string]kafka.TopicMetadata{ | ||
"test1": { | ||
Topic: "test1", | ||
Partitions: []kafka.PartitionMetadata{ | ||
{ | ||
ID: 0, | ||
Error: noErr, | ||
Leader: 1001, | ||
Replicas: []int32{1001, 1002}, | ||
Isrs: []int32{1001, 1002}, | ||
}, | ||
{ | ||
ID: 1, | ||
Error: noErr, | ||
Leader: 1002, | ||
Replicas: []int32{1002}, | ||
Isrs: []int32{1002}, | ||
}, | ||
}, | ||
Error: noErr, | ||
}, | ||
"test2": { | ||
Topic: "test2", | ||
Partitions: []kafka.PartitionMetadata{ | ||
{ | ||
ID: 0, | ||
Error: noErr, | ||
Leader: 1003, | ||
Replicas: []int32{1003, 1002}, | ||
Isrs: []int32{1003, 1002}, | ||
}, | ||
{ | ||
ID: 1, | ||
Error: noErr, | ||
Leader: 1003, | ||
Replicas: []int32{1002, 1003}, | ||
Isrs: []int32{1003, 1002}, | ||
}, | ||
}, | ||
Error: noErr, | ||
}, | ||
}, | ||
OriginatingBroker: kafka.BrokerMetadata{ | ||
ID: 1001, | ||
Host: "host-a", | ||
Port: 9092, | ||
}, | ||
} | ||
} |