Update benchmarks
- Update README.md
- Add some test benchmarks
Anton Bogdanovich committed Nov 10, 2015
1 parent 5b4fd08 commit 24b67b4
Showing 9 changed files with 456 additions and 142 deletions.
16 changes: 9 additions & 7 deletions README.md
@@ -21,21 +21,19 @@ Siberite also supports Kestrel's two-phase reliable fetch: if a client disconnects before confirming
a message, the message will be handed to the next client.

Compared to Kestrel and Darner, Siberite is easier to build, maintain and distribute.
It uses an order of magnitude less memory compared to Kestrel, but has fewer configuration options and features.

Siberite is used at [Spyonweb.com](http://spyonweb.com).<br>
We previously used Darner, but after two large production queues got corrupted we decided to rewrite it in Go.
It uses an order of magnitude less memory compared to Kestrel, and can consume
a queue multiple times (using the durable cursors feature).

## Features

1. Multiple consumer groups per queue using `get <queue>:<cursor>` syntax.
1. Multiple durable cursors per queue using `get <queue>:<cursor_name>` syntax.

- When you read an item the usual way, `get <queue>`, the item gets expired and deleted.
- When you read an item using cursor syntax `get <queue>:<cursor>`, a durable
- When you read an item using cursor syntax `get <queue>:<cursor_name>`, a durable
cursor gets initialized. It shifts forward with every read without deleting
any messages in the source queue. The number of cursors per queue is not limited.
- If you continue reading from the source queue directly, siberite will continue
deleting messages from the head of that queue. Any existing cursor that is
to delete messages from the head of the queue. Any existing cursor that
internally points to an already deleted message will catch up during the next read
and will start serving messages from the current source queue head.
- Durable cursors also support two-phase reliable reads. All failed reliable
@@ -124,6 +122,10 @@ END
# get work/open
# get work/close/open
# get work/abort
# get work:cursor_name
# get work:cursor_name/open
# get work:my_cursor/close/open
# set work+fanout_queue
# flush work
# delete work
# flush_all
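As a rough illustration of the cursor and two-phase commands listed above, here is a minimal Go client sketch. It is not part of this commit: the server address localhost:22133 (Kestrel's default port), the memcached-style text responses, and the `printUntilEnd` helper are assumptions made for the example; the queue and cursor names follow the commands above.

```go
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	// Assumes a siberite server on localhost:22133 speaking the
	// memcached-style text protocol (an assumption, not part of this commit).
	conn, err := net.Dial("tcp", "localhost:22133")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	r := bufio.NewReader(conn)

	// Regular read: removes the item from the head of the queue.
	fmt.Fprint(conn, "get work\r\n")
	printUntilEnd(r)

	// Cursor read: leaves the source queue intact and advances the
	// durable cursor "cursor_name" instead.
	fmt.Fprint(conn, "get work:cursor_name\r\n")
	printUntilEnd(r)

	// Two-phase reliable read through the same cursor: /open reserves an
	// item, /close confirms it (the command list above shows the combined
	// /close/open form for reading in a loop).
	fmt.Fprint(conn, "get work:cursor_name/open\r\n")
	printUntilEnd(r)
	fmt.Fprint(conn, "get work:cursor_name/close\r\n")
	printUntilEnd(r)
}

// printUntilEnd echoes the server response up to the terminating END line
// (naively assuming the payload itself never contains a bare END line).
func printUntilEnd(r *bufio.Reader) {
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return
		}
		fmt.Print(line)
		if line == "END\r\n" {
			break
		}
	}
}
```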
192 changes: 192 additions & 0 deletions cgroup/cgqueue_benchmark_test.go
@@ -0,0 +1,192 @@
package cgroup

import (
	"crypto/rand"
	"testing"
)

func Benchmark_CGQueue_Enqueue_1_Byte(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1)
	rand.Read(value)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.Enqueue(value)
	}
}

func Benchmark_CGQueue_Enqueue_128_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 128)
	rand.Read(value)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.Enqueue(value)
	}
}

func Benchmark_CGQueue_Enqueue_1024_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1024)
	rand.Read(value)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.Enqueue(value)
	}
}

func Benchmark_CGQueue_Enqueue_10240_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 10240)
	rand.Read(value)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.Enqueue(value)
	}
}

func Benchmark_CGQueue_GetNext_1_Byte(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1)
	rand.Read(value)
	for i := 0; i < 500000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.GetNext()
	}
}

func Benchmark_CGQueue_GetNext_128_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 128)
	rand.Read(value)
	for i := 0; i < 500000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.GetNext()
	}
}

func Benchmark_CGQueue_GetNext_1024_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1024)
	rand.Read(value)
	for i := 0; i < 200000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.GetNext()
	}
}

func Benchmark_CGQueue_GetNext_10240_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 10240)
	rand.Read(value)
	for i := 0; i < 50000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		q.GetNext()
	}
}

func Benchmark_CGQueue_ConsumerGroup_1_Byte(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1)
	rand.Read(value)
	for i := 0; i < 500000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	cg, _ := q.ConsumerGroup("test")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cg.GetNext()
	}
}

func Benchmark_CGQueue_ConsumerGroup_128_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 128)
	rand.Read(value)
	for i := 0; i < 500000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	cg, _ := q.ConsumerGroup("test")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cg.GetNext()
	}
}

func Benchmark_CGQueue_ConsumerGroup_1024_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 1024)
	rand.Read(value)
	for i := 0; i < 200000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	cg, _ := q.ConsumerGroup("test")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cg.GetNext()
	}
}

func Benchmark_CGQueue_ConsumerGroup_10240_Bytes(b *testing.B) {
	q, _ := CGQueueOpen(cgQueueName, dir)
	defer q.Drop()
	value := make([]byte, 10240)
	rand.Read(value)
	for i := 0; i < 200000; i++ {
		q.Enqueue(value)
	}

	q.Close()
	q, _ = CGQueueOpen(cgQueueName, dir)
	cg, _ := q.ConsumerGroup("test")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cg.GetNext()
	}
}
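Assuming a standard Go toolchain and the package layout shown above (this file living in the `cgroup` package), the benchmarks can be run with the usual `go test` invocation; `-benchmem` additionally reports allocations per operation:

```
go test -bench . -benchmem ./cgroup
```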