Commit

buffer: Use a double-buffering scheme to prevent data races
vmg committed Apr 2, 2019
1 parent f20f794 commit 8eb7523
Showing 2 changed files with 33 additions and 14 deletions.
45 changes: 31 additions & 14 deletions buffer.go
@@ -21,41 +21,58 @@ const defaultBufSize = 4096
 // In other words, we can't write and read simultaneously on the same connection.
 // The buffer is similar to bufio.Reader / Writer but zero-copy-ish
 // Also highly optimized for this particular use case.
+// This buffer is backed by two byte slices in a double-buffering scheme
 type buffer struct {
 	buf     []byte // buf is a byte buffer whose length and capacity are equal.
 	nc      net.Conn
 	idx     int
 	length  int
 	timeout time.Duration
+	dbuf    [2][]byte // dbuf is an array with the two byte slices that back this buffer
+	flipcnt int       // flipcnt is the current buffer counter for double-buffering
 }

 // newBuffer allocates and returns a new buffer.
 func newBuffer(nc net.Conn) buffer {
+	fg := make([]byte, defaultBufSize)
+	bg := make([]byte, defaultBufSize)
 	return buffer{
-		buf: make([]byte, defaultBufSize),
-		nc:  nc,
+		buf:  fg,
+		nc:   nc,
+		dbuf: [2][]byte{fg, bg},
 	}
 }

+// flip replaces the active buffer with the background buffer
+// this is a delayed flip that simply increases the buffer counter;
+// the actual flip will be performed the next time we call `buffer.fill`
+func (b *buffer) flip() {
+	b.flipcnt += 1
+}

 // fill reads into the buffer until at least _need_ bytes are in it
 func (b *buffer) fill(need int) error {
 	n := b.length
 
-	// move existing data to the beginning
-	if n > 0 && b.idx > 0 {
-		copy(b.buf[0:n], b.buf[b.idx:])
-	}
+	// fill data into its double-buffering target: if we've called
+	// flip on this buffer, we'll be copying to the background buffer,
+	// and then filling it with network data; otherwise we'll just move
+	// the contents of the current buffer to the front before filling it
+	dest := b.dbuf[b.flipcnt&1]
 
-	// grow buffer if necessary
-	// TODO: let the buffer shrink again at some point
-	//       Maybe keep the org buf slice and swap back?
-	if need > len(b.buf) {
-		// Round up to the next multiple of the default size
-		newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
-		copy(newBuf, b.buf)
-		b.buf = newBuf
-	}
+	// grow buffer if necessary to fit the whole packet.
+	if need > len(dest) {
+		// Round up to the next multiple of the default size;
+		// this buffer will be discarded the next time we flip buffers
+		dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+	}
 
+	// if we're filling the fg buffer, move the existing data to the start of it.
+	// if we're filling the bg buffer, copy over the data
+	if n > 0 {
+		copy(dest[0:n], b.buf[b.idx:])
+	}
+
+	b.buf = dest
 	b.idx = 0
 
 	for {
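
To make the mechanics above concrete, here is a small standalone sketch (illustrative only, not part of the driver; the helper name grow is made up) of the two expressions the new fill relies on: flipcnt&1 selects which of the two backing slices the next fill writes into, and ((need/defaultBufSize)+1)*defaultBufSize rounds an oversized read up to the next multiple of the default size, e.g. need = 10000 with defaultBufSize = 4096 yields 12288.

package main

import "fmt"

const defaultBufSize = 4096 // same default as buffer.go

// grow mirrors the round-up expression used in fill.
func grow(need int) int {
	return ((need / defaultBufSize) + 1) * defaultBufSize
}

func main() {
	// Two backing slices and a flip counter, as in the buffer struct.
	dbuf := [2][]byte{make([]byte, defaultBufSize), make([]byte, defaultBufSize)}
	flipcnt := 0

	fmt.Println("fill target:", flipcnt&1) // 0: the foreground slice
	flipcnt++                              // flip() just bumps the counter
	fmt.Println("fill target:", flipcnt&1) // 1: the background slice
	flipcnt++
	fmt.Println("fill target:", flipcnt&1) // 0: back to the foreground slice

	// Growth rounding for a packet larger than the selected slice.
	need := 10000
	if need > len(dbuf[flipcnt&1]) {
		fmt.Println("grow to:", grow(need)) // 12288
	}
}
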
2 changes: 2 additions & 0 deletions rows.go
@@ -111,6 +111,8 @@ func (rows *mysqlRows) Close() (err error) {
 		return err
 	}
 
+	mc.buf.flip()
+
 	// Remove unread packets from stream
 	if !rows.rs.done {
 		err = mc.readUntilEOF()
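
The single addition to rows.go is what arms the swap: Rows.Close flips the buffer before draining unread packets with readUntilEOF, so the drain's fills land in the background slice rather than the one that backed the rows just returned. The model below is illustrative only; it mimics the buffer's behaviour with made-up types rather than reusing the driver's code, and the motivating scenario (callers still holding views into the read buffer, e.g. via sql.RawBytes) is my reading of the commit title, not something stated in the diff.

package main

import "fmt"

// A minimal model of the flip-then-drain sequence (not the driver's code).
type dblbuf struct {
	dbuf    [2][]byte
	buf     []byte
	flipcnt int
}

func (b *dblbuf) flip() { b.flipcnt++ }

// fill mimics buffer.fill: it picks the slice selected by flipcnt&1
// before copying freshly "read" data into it.
func (b *dblbuf) fill(data []byte) {
	dest := b.dbuf[b.flipcnt&1]
	copy(dest, data)
	b.buf = dest
}

func main() {
	b := &dblbuf{dbuf: [2][]byte{make([]byte, 8), make([]byte, 8)}}

	b.fill([]byte("row-data"))
	rowBytes := b.buf // bytes handed to the caller, aliasing dbuf[0]

	b.flip()                   // what Rows.Close now does first
	b.fill([]byte("drained!")) // readUntilEOF's reads land in dbuf[1]

	fmt.Println(string(rowBytes)) // still "row-data": not clobbered by the drain
}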
