From 761eb8ab4b2d5a4bd61aadd539afc64ce7eadc7c Mon Sep 17 00:00:00 2001
From: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Date: Mon, 22 Jun 2020 16:52:34 +0900
Subject: [PATCH] vendor: update bbolt to v1.3.5

We previously updated bbolt from v1.3.3 to v1.3.4 in #4134,
but reverted it to v1.3.3 in #4156 due to "fatal error: sweep increased
allocation count" (etcd-io/bbolt#214).

The issue was fixed in bbolt v1.3.5 (etcd-io/bbolt#220).

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
---
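
Notes for reviewers (this commentary sits below the `---` marker, so
`git am` drops it):

The hunks below replace bbolt's old pattern of casting a page pointer to a
huge fixed-size array, e.g. `(*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[:]`,
with the bounded helpers vendored in `vendor/go.etcd.io/bbolt/unsafe.go`.
A minimal, self-contained sketch of the two patterns, using a hypothetical
`header` struct as a stand-in for bbolt's actual `page` type:

```go
// Sketch only: contrasts the old fixed-size-array cast with the bounded
// helpers this update vendors in unsafe.go. "header" is a stand-in type.
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

const maxAllocSize = 0x7FFFFFFF // matches bolt_amd64.go

type header struct {
	id    uint64
	count uint16
}

// unsafeAdd and unsafeSlice mirror the helpers added by this patch.
func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}

func main() {
	buf := make([]byte, 4096) // pretend this is the mmap'd data file
	p := (*header)(unsafe.Pointer(&buf[0]))

	// Old pattern: the slice's len/cap claim maxAllocSize (~2 GiB) bytes
	// even though the backing allocation is only 4 KiB.
	old := (*[maxAllocSize]byte)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))[:]

	// New pattern: the slice is bounded to the bytes actually in range.
	var cur []byte
	unsafeSlice(unsafe.Pointer(&cur), unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)), 32)

	fmt.Println(len(old), cap(old)) // 2147483647 2147483647
	fmt.Println(len(cur), cap(cur)) // 32 32
}
```

The bounded slices are the shape of the fix that landed as etcd-io/bbolt#220.
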
 vendor.conf                              |  2 +-
 vendor/go.etcd.io/bbolt/README.md        | 15 ++--
 vendor/go.etcd.io/bbolt/bolt_386.go      |  3 -
 vendor/go.etcd.io/bbolt/bolt_amd64.go    |  3 -
 vendor/go.etcd.io/bbolt/bolt_arm.go      | 21 ------
 vendor/go.etcd.io/bbolt/bolt_arm64.go    |  3 -
 vendor/go.etcd.io/bbolt/bolt_mips64x.go  |  3 -
 vendor/go.etcd.io/bbolt/bolt_mipsx.go    |  3 -
 vendor/go.etcd.io/bbolt/bolt_ppc.go      |  3 -
 vendor/go.etcd.io/bbolt/bolt_ppc64.go    |  3 -
 vendor/go.etcd.io/bbolt/bolt_ppc64le.go  |  3 -
 vendor/go.etcd.io/bbolt/bolt_riscv64.go  |  3 -
 vendor/go.etcd.io/bbolt/bolt_s390x.go    |  3 -
 vendor/go.etcd.io/bbolt/bolt_unix.go     |  2 +-
 vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 ++++++++++++++++++++++++
 vendor/go.etcd.io/bbolt/bucket.go        | 34 ++++-----
 vendor/go.etcd.io/bbolt/cursor.go        |  2 +-
 vendor/go.etcd.io/bbolt/db.go            |  4 +-
 vendor/go.etcd.io/bbolt/freelist.go      | 38 ++++++----
 vendor/go.etcd.io/bbolt/freelist_hmap.go |  2 +-
 vendor/go.etcd.io/bbolt/go.mod           |  5 ++
 vendor/go.etcd.io/bbolt/node.go          | 56 +++++++--------
 vendor/go.etcd.io/bbolt/page.go          | 41 ++++++-----
 vendor/go.etcd.io/bbolt/tx.go            | 18 +++--
 vendor/go.etcd.io/bbolt/unsafe.go        | 39 ++++++++++
 25 files changed, 251 insertions(+), 148 deletions(-)
 create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go
 create mode 100644 vendor/go.etcd.io/bbolt/go.mod
 create mode 100644 vendor/go.etcd.io/bbolt/unsafe.go

diff --git a/vendor.conf b/vendor.conf
index 350d28d39ec9..81e153eaad51 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -42,7 +42,7 @@ github.com/russross/blackfriday                     v1.5.2
 github.com/sirupsen/logrus                          v1.6.0
 github.com/syndtr/gocapability                      d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/urfave/cli                               v1.22.0
-go.etcd.io/bbolt                                    v1.3.3
+go.etcd.io/bbolt                                    v1.3.5
 go.opencensus.io                                    v0.22.0
 golang.org/x/net                                    f3200d17e092c607f615320ecaad13d87ad9a2b3
 golang.org/x/sync                                   42b317875d0fa942474b76e1b46a6060d720ae6e
diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md
index e9989efc5074..c9e64b1a6150 100644
--- a/vendor/go.etcd.io/bbolt/README.md
+++ b/vendor/go.etcd.io/bbolt/README.md
@@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
 a transaction for each one or use locking to ensure only one goroutine accesses
 a transaction at a time. Creating transaction from the `DB` is thread safe.
 
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
+Transactions should not depend on one another and generally shouldn't be opened
+simultaneously in the same goroutine. This can cause a deadlock as the read-write
+transaction needs to periodically re-map the data file but it cannot do so while
+any read-only transaction is open. Even a nested read-only transaction can cause
+a deadlock, as the child transaction can block the parent transaction from releasing
+its resources.
 
 #### Read-write transactions
 
@@ -275,7 +276,7 @@ should be writable.
 ### Using buckets
 
 Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
 function:
 
 ```go
@@ -923,6 +924,7 @@ Below is a list of public, open source projects that use Bolt:
 * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
 * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
 * [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
 * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
 * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
 * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
@@ -935,6 +937,7 @@ Below is a list of public, open source projects that use Bolt:
 * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
 * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
 * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
 * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
 * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
 * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go
index 4d35ee7cf3d2..aee25960ff97 100644
--- a/vendor/go.etcd.io/bbolt/bolt_386.go
+++ b/vendor/go.etcd.io/bbolt/bolt_386.go
@@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go
index 60a52dad56b2..5dd8f3f2aeb9 100644
--- a/vendor/go.etcd.io/bbolt/bolt_amd64.go
+++ b/vendor/go.etcd.io/bbolt/bolt_amd64.go
@@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go
index 105d27ddb7dd..aee25960ff97 100644
--- a/vendor/go.etcd.io/bbolt/bolt_arm.go
+++ b/vendor/go.etcd.io/bbolt/bolt_arm.go
@@ -1,28 +1,7 @@
 package bbolt
 
-import "unsafe"
-
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned bool
-
-func init() {
-	// Simple check to see whether this arch handles unaligned load/stores
-	// correctly.
-
-	// ARM9 and older devices require load/stores to be from/to aligned
-	// addresses. If not, the lower 2 bits are cleared and that address is
-	// read in a jumbled up order.
-
-	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
-
-	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
-	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
-
-	brokenUnaligned = val != 0x11222211
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go
index f5aa2a5ee248..810dfd55c53b 100644
--- a/vendor/go.etcd.io/bbolt/bolt_arm64.go
+++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go
@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go
index baeb289fd94b..dd8ffe123935 100644
--- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go
+++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go
@@ -7,6 +7,3 @@ const maxMapSize = 0x8000000000 // 512GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go
index 2d9b1a91f364..a669703a4e33 100644
--- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go
+++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go
@@ -7,6 +7,3 @@ const maxMapSize = 0x40000000 // 1GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go
index 69804714aaef..84e545ef3e77 100644
--- a/vendor/go.etcd.io/bbolt/bolt_ppc.go
+++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go
@@ -7,6 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go
index 356590857607..a76120908cb6 100644
--- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go
+++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go
@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
index 422c7c69d667..c830f2fc77ad 100644
--- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
+++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go
index 07b4b47cdb8d..c967613b0063 100644
--- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go
+++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go
@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = true
diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go
index 6d3fcb825d34..ff2a56097079 100644
--- a/vendor/go.etcd.io/bbolt/bolt_s390x.go
+++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go
@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
index 5f2bb5145199..2938fed58457 100644
--- a/vendor/go.etcd.io/bbolt/bolt_unix.go
+++ b/vendor/go.etcd.io/bbolt/bolt_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!plan9,!solaris
+// +build !windows,!plan9,!solaris,!aix
 
 package bbolt
 
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
new file mode 100644
index 000000000000..a64c16f51297
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
@@ -0,0 +1,90 @@
+// +build aix
+
+package bbolt
+
+import (
+	"fmt"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	var lockType int16
+	if exclusive {
+		lockType = syscall.F_WRLCK
+	} else {
+		lockType = syscall.F_RDLCK
+	}
+	for {
+		// Attempt to obtain an exclusive lock.
+		lock := syscall.Flock_t{Type: lockType}
+		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+		if err == nil {
+			return nil
+		} else if err != syscall.EAGAIN {
+			return err
+		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(flockRetryTimeout)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_UNLCK
+	lock.Whence = 0
+	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
+	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.
+	err := unix.Munmap(db.dataref)
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+	return err
+}
diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go
index 84bfd4d6a283..d8750b14871f 100644
--- a/vendor/go.etcd.io/bbolt/bucket.go
+++ b/vendor/go.etcd.io/bbolt/bucket.go
@@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
 	var child = newBucket(b.tx)
 
-	// If unaligned load/stores are broken on this arch and value is
-	// unaligned simply clone to an aligned byte array.
-	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
-
+	// Unaligned access requires a copy to be made.
+	const unalignedMask = unsafe.Alignof(struct {
+		bucket
+		page
+	}{}) - 1
+	unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
 	if unaligned {
 		value = cloneBytes(value)
 	}
@@ -206,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
 }
 
 // DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
 func (b *Bucket) DeleteBucket(key []byte) error {
 	if b.tx.db == nil {
 		return ErrTxClosed
@@ -228,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
 	// Recursively delete all child buckets.
 	child := b.Bucket(key)
 	err := child.ForEach(func(k, v []byte) error {
-		if v == nil {
+		if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
 			if err := child.DeleteBucket(k); err != nil {
 				return fmt.Errorf("delete bucket: %s", err)
 			}
@@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats {
 
 			if p.count != 0 {
 				// If page has any elements, add all element headers.
-				used += leafPageElementSize * int(p.count-1)
+				used += leafPageElementSize * uintptr(p.count-1)
 
 				// Add all element key, value sizes.
 				// The computation takes advantage of the fact that the position
@@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats {
 				// of all previous elements' keys and values.
 				// It also includes the last element's header.
 				lastElement := p.leafPageElement(p.count - 1)
-				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+				used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
 			}
 
 			if b.root == 0 {
 				// For inlined bucket just update the inline stats
-				s.InlineBucketInuse += used
+				s.InlineBucketInuse += int(used)
 			} else {
 				// For non-inlined bucket update all the leaf stats
 				s.LeafPageN++
-				s.LeafInuse += used
+				s.LeafInuse += int(used)
 				s.LeafOverflowN += int(p.overflow)
 
 				// Collect stats from sub-buckets.
@@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats {
 
 			// used totals the used bytes for the page
 			// Add header and all element headers.
-			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+			used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
 
 			// Add size of all keys and values.
 			// Again, use the fact that last element's position equals to
 			// the total of key, value sizes of all previous elements.
-			used += int(lastElement.pos + lastElement.ksize)
-			s.BranchInuse += used
+			used += uintptr(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += int(used)
 			s.BranchOverflowN += int(p.overflow)
 		}
 
@@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool {
 	// our threshold for inline bucket size.
 	var size = pageHeaderSize
 	for _, inode := range n.inodes {
-		size += leafPageElementSize + len(inode.key) + len(inode.value)
+		size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
 
 		if inode.flags&bucketLeafFlag != 0 {
 			return false
@@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool {
 }
 
 // Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() int {
-	return b.tx.db.pageSize / 4
+func (b *Bucket) maxInlineBucketSize() uintptr {
+	return uintptr(b.tx.db.pageSize / 4)
 }
 
 // write allocates and writes a bucket to a byte slice.
diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go
index 3000aced6c4c..98aeb449a4ca 100644
--- a/vendor/go.etcd.io/bbolt/cursor.go
+++ b/vendor/go.etcd.io/bbolt/cursor.go
@@ -366,7 +366,7 @@ func (c *Cursor) node() *node {
 	}
 	for _, ref := range c.stack[:len(c.stack)-1] {
 		_assert(!n.isLeaf, "expected branch node")
-		n = n.childAt(int(ref.index))
+		n = n.childAt(ref.index)
 	}
 	_assert(n.isLeaf, "expected leaf node")
 	return n
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
index 870c8b1cc9b8..80b0095cc348 100644
--- a/vendor/go.etcd.io/bbolt/db.go
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -206,12 +206,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 	}
 
 	// Open data file and separate sync handler for metadata writes.
-	db.path = path
 	var err error
-	if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil {
+	if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
 		_ = db.close()
 		return nil, err
 	}
+	db.path = db.file.Name()
 
 	// Lock file so that other processes using Bolt in read-write mode cannot
 	// use the database  at the same time. This would cause corruption since
diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
index 587b8cc02def..697a46968bac 100644
--- a/vendor/go.etcd.io/bbolt/freelist.go
+++ b/vendor/go.etcd.io/bbolt/freelist.go
@@ -71,7 +71,7 @@ func (f *freelist) size() int {
 		// The first element will be used to store the count. See freelist.write.
 		n++
 	}
-	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+	return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
 }
 
 // count returns count of pages on the freelist
@@ -93,7 +93,7 @@ func (f *freelist) pending_count() int {
 	return count
 }
 
-// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// copyall copies a list of all free ids and all pending ids in one sorted list.
 // f.count returns the minimum length required for dst.
 func (f *freelist) copyall(dst []pgid) {
 	m := make(pgids, 0, f.pending_count())
@@ -267,17 +267,23 @@ func (f *freelist) read(p *page) {
 	}
 	// If the page.count is at the max uint16 value (64k) then it's considered
 	// an overflow and the size of the freelist is stored as the first element.
-	idx, count := 0, int(p.count)
+	var idx, count = 0, int(p.count)
 	if count == 0xFFFF {
 		idx = 1
-		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+		c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+		count = int(c)
+		if count < 0 {
+			panic(fmt.Sprintf("leading element count %d overflows int", c))
+		}
 	}
 
 	// Copy the list of page ids from the freelist.
 	if count == 0 {
 		f.ids = nil
 	} else {
-		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+		var ids []pgid
+		data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
+		unsafeSlice(unsafe.Pointer(&ids), data, count)
 
 		// copy the ids, so we don't modify on the freelist page directly
 		idsCopy := make([]pgid, count)
@@ -310,16 +316,22 @@ func (f *freelist) write(p *page) error {
 
 	// The page.count can only hold up to 64k elements so if we overflow that
 	// number then we handle it by putting the size in the first element.
-	lenids := f.count()
-	if lenids == 0 {
-		p.count = uint16(lenids)
-	} else if lenids < 0xFFFF {
-		p.count = uint16(lenids)
-		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+	l := f.count()
+	if l == 0 {
+		p.count = uint16(l)
+	} else if l < 0xFFFF {
+		p.count = uint16(l)
+		var ids []pgid
+		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		unsafeSlice(unsafe.Pointer(&ids), data, l)
+		f.copyall(ids)
 	} else {
 		p.count = 0xFFFF
-		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
-		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+		var ids []pgid
+		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		unsafeSlice(unsafe.Pointer(&ids), data, l+1)
+		ids[0] = pgid(l)
+		f.copyall(ids[1:])
 	}
 
 	return nil
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
index 6a03a6c3c855..02ef2be04417 100644
--- a/vendor/go.etcd.io/bbolt/freelist_hmap.go
+++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go
@@ -27,7 +27,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
 			f.allocs[pid] = txid
 
 			for i := pgid(0); i < pgid(n); i++ {
-				delete(f.cache, pid+pgid(i))
+				delete(f.cache, pid+i)
 			}
 			return pid
 		}
diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod
new file mode 100644
index 000000000000..c2366daef6b8
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/go.mod
@@ -0,0 +1,5 @@
+module go.etcd.io/bbolt
+
+go 1.12
+
+require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go
index 6c3fa553ea68..73988b5c4c0a 100644
--- a/vendor/go.etcd.io/bbolt/node.go
+++ b/vendor/go.etcd.io/bbolt/node.go
@@ -41,19 +41,19 @@ func (n *node) size() int {
 	sz, elsz := pageHeaderSize, n.pageElementSize()
 	for i := 0; i < len(n.inodes); i++ {
 		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
+		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
 	}
-	return sz
+	return int(sz)
 }
 
 // sizeLessThan returns true if the node is less than a given size.
 // This is an optimization to avoid calculating a large node when we only need
 // to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
+func (n *node) sizeLessThan(v uintptr) bool {
 	sz, elsz := pageHeaderSize, n.pageElementSize()
 	for i := 0; i < len(n.inodes); i++ {
 		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
+		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
 		if sz >= v {
 			return false
 		}
@@ -62,7 +62,7 @@ func (n *node) sizeLessThan(v int) bool {
 }
 
 // pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() int {
+func (n *node) pageElementSize() uintptr {
 	if n.isLeaf {
 		return leafPageElementSize
 	}
@@ -207,10 +207,17 @@ func (n *node) write(p *page) {
 	}
 
 	// Loop over each item and write it to the page.
-	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
 	for i, item := range n.inodes {
 		_assert(len(item.key) > 0, "write: zero-length inode key")
 
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.key) + len(item.value)
+		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
 		// Write the page element.
 		if n.isLeaf {
 			elem := p.leafPageElement(uint16(i))
@@ -226,20 +233,9 @@ func (n *node) write(p *page) {
 			_assert(elem.pgid != p.id, "write: circular dependency occurred")
 		}
 
-		// If the length of key+value is larger than the max allocation size
-		// then we need to reallocate the byte array pointer.
-		//
-		// See: https://github.com/boltdb/bolt/pull/335
-		klen, vlen := len(item.key), len(item.value)
-		if len(b) < klen+vlen {
-			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-		}
-
 		// Write data for the element to the end of the page.
-		copy(b[0:], item.key)
-		b = b[klen:]
-		copy(b[0:], item.value)
-		b = b[vlen:]
+		l := copy(b, item.key)
+		copy(b[l:], item.value)
 	}
 
 	// DEBUG ONLY: n.dump()
@@ -247,7 +243,7 @@ func (n *node) write(p *page) {
 
 // split breaks up a node into multiple smaller nodes, if appropriate.
 // This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
+func (n *node) split(pageSize uintptr) []*node {
 	var nodes []*node
 
 	node := n
@@ -270,7 +266,7 @@ func (n *node) split(pageSize int) []*node {
 
 // splitTwo breaks up a node into two smaller nodes, if appropriate.
 // This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
+func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
 	// Ignore the split if the page doesn't have at least enough nodes for
 	// two pages or if the nodes can fit in a single page.
 	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
@@ -312,18 +308,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
 // splitIndex finds the position where a page will fill a given threshold.
 // It returns the index as well as the size of the first page.
 // This is only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
+func (n *node) splitIndex(threshold int) (index, sz uintptr) {
 	sz = pageHeaderSize
 
 	// Loop until we only have the minimum number of keys required for the second page.
 	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
-		index = i
+		index = uintptr(i)
 		inode := n.inodes[i]
-		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+		elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
 
 		// If we have at least the minimum number of keys and adding another
 		// node would put us over the threshold then exit and return.
-		if i >= minKeysPerPage && sz+elsize > threshold {
+		if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
 			break
 		}
 
@@ -356,7 +352,7 @@ func (n *node) spill() error {
 	n.children = nil
 
 	// Split nodes into appropriate sizes. The first node will always be n.
-	var nodes = n.split(tx.db.pageSize)
+	var nodes = n.split(uintptr(tx.db.pageSize))
 	for _, node := range nodes {
 		// Add node's page to the freelist if it's not new.
 		if node.pgid > 0 {
@@ -587,9 +583,11 @@ func (n *node) dump() {
 
 type nodes []*node
 
-func (s nodes) Len() int           { return len(s) }
-func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+func (s nodes) Len() int      { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool {
+	return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
+}
 
 // inode represents an internal node inside of a node.
 // It can be used to point to elements in a page or point
diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go
index bca9615f0fd6..c9a158fb066c 100644
--- a/vendor/go.etcd.io/bbolt/page.go
+++ b/vendor/go.etcd.io/bbolt/page.go
@@ -7,12 +7,12 @@ import (
 	"unsafe"
 )
 
-const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+const pageHeaderSize = unsafe.Sizeof(page{})
 
 const minKeysPerPage = 2
 
-const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
 
 const (
 	branchPageFlag   = 0x01
@@ -32,7 +32,6 @@ type page struct {
 	flags    uint16
 	count    uint16
 	overflow uint32
-	ptr      uintptr
 }
 
 // typ returns a human readable page type string used for debugging.
@@ -51,13 +50,13 @@ func (p *page) typ() string {
 
 // meta returns a pointer to the metadata section of the page.
 func (p *page) meta() *meta {
-	return (*meta)(unsafe.Pointer(&p.ptr))
+	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
 }
 
 // leafPageElement retrieves the leaf node by index
 func (p *page) leafPageElement(index uint16) *leafPageElement {
-	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
-	return n
+	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		leafPageElementSize, int(index)))
 }
 
 // leafPageElements retrieves a list of leaf nodes.
@@ -65,12 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+	var elems []leafPageElement
+	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+	return elems
 }
 
 // branchPageElement retrieves the branch node by index
 func (p *page) branchPageElement(index uint16) *branchPageElement {
-	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+	return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		unsafe.Sizeof(branchPageElement{}), int(index)))
 }
 
 // branchPageElements retrieves a list of branch nodes.
@@ -78,12 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+	var elems []branchPageElement
+	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+	return elems
 }
 
 // dump writes n bytes of the page to STDERR as hex output.
 func (p *page) hexdump(n int) {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+	buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
 	fmt.Fprintf(os.Stderr, "%x\n", buf)
 }
 
@@ -102,8 +108,7 @@ type branchPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *branchPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+	return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
 }
 
 // leafPageElement represents a node on a leaf page.
@@ -116,14 +121,16 @@ type leafPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *leafPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+	i := int(n.pos)
+	j := i + int(n.ksize)
+	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // value returns a byte slice of the node value.
 func (n *leafPageElement) value() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+	i := int(n.pos) + int(n.ksize)
+	j := i + int(n.vsize)
+	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // PageInfo represents human readable information about a page.
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 2df7688c2f62..4b1a64a8b8aa 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -523,20 +523,18 @@ func (tx *Tx) write() error {
 
 	// Write pages to disk in order.
 	for _, p := range pages {
-		size := (int(p.overflow) + 1) * tx.db.pageSize
+		rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
 		offset := int64(p.id) * int64(tx.db.pageSize)
+		var written uintptr
 
 		// Write out page in "max allocation" sized chunks.
-		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
 		for {
-			// Limit our write to our max allocation size.
-			sz := size
+			sz := rem
 			if sz > maxAllocSize-1 {
 				sz = maxAllocSize - 1
 			}
+			buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
 
-			// Write chunk to disk.
-			buf := ptr[:sz]
 			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
 				return err
 			}
@@ -545,14 +543,14 @@ func (tx *Tx) write() error {
 			tx.stats.Write++
 
 			// Exit inner for loop if we've written all the chunks.
-			size -= sz
-			if size == 0 {
+			rem -= sz
+			if rem == 0 {
 				break
 			}
 
 			// Otherwise move offset forward and move pointer to next chunk.
 			offset += int64(sz)
-			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+			written += uintptr(sz)
 		}
 	}
 
@@ -571,7 +569,7 @@ func (tx *Tx) write() error {
 			continue
 		}
 
-		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+		buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
 
 		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
 		for i := range buf {
diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go
new file mode 100644
index 000000000000..c0e50375007f
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/unsafe.go
@@ -0,0 +1,39 @@
+package bbolt
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+	//
+	// This memory is not allocated from C, but it is unmanaged by Go's
+	// garbage collector and should behave similarly, and the compiler
+	// should produce similar code.  Note that this conversion allows a
+	// subslice to begin after the base address, with an optional offset,
+	// while the URL above does not cover this case and only slices from
+	// index 0.  However, the wiki never says that the address must be to
+	// the beginning of a C allocation (or even that malloc was used at
+	// all), so this is believed to be correct.
+	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
+}
+
+// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
+// the slice parameter.  This helper should be used over other direct
+// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
+// from reflect.SliceHeader to a Go slice type.
+func unsafeSlice(slice, data unsafe.Pointer, len int) {
+	s := (*reflect.SliceHeader)(slice)
+	s.Data = uintptr(data)
+	s.Cap = len
+	s.Len = len
+}
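
A companion usage sketch for the new `unsafeIndex` helper (stand-in struct
definitions shaped like, but not identical to, bbolt's real ones), showing
how it reproduces the element access that page.go previously spelled as
`&((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]`:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Hypothetical stand-ins shaped like bbolt's page and leafPageElement.
type page struct {
	id       uint64
	flags    uint16
	count    uint16
	overflow uint32
}

type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// unsafeIndex, as vendored above: base + offset + n*elemsz.
func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

// leafPageElementAt mirrors page.leafPageElement from the diff: element n
// lives n*sizeof(leafPageElement) bytes past the page header.
func (p *page) leafPageElementAt(index uint16) *leafPageElement {
	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(leafPageElement{}), int(index)))
}

func main() {
	// Back the page with a byte buffer, as the mmap'd file would.
	buf := make([]byte, 4096)
	p := (*page)(unsafe.Pointer(&buf[0]))
	p.count = 2

	// Write element headers just past the page header, then read one back.
	for i := uint16(0); i < p.count; i++ {
		p.leafPageElementAt(i).ksize = uint32(10 * (i + 1))
	}
	fmt.Println(p.leafPageElementAt(1).ksize) // 20
}
```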